LCOV - code coverage report
Current view: top level - /home/runner/zephyrproject/zephyr/kernel/include - kswap.h
Test:      lcov.info
Test Date: 2026-03-12 12:01:18
Coverage:  Lines: 100.0 % (9 of 9) | Functions: 100.0 % (2 of 2) | Branches: 75.0 % (3 of 4)
Legend: Lines: hit not hit | Branches: + taken - not taken # not executed

             Branch data     Line data    Source code
       1                 :             : /*
       2                 :             :  * Copyright (c) 2018 Intel Corporation
       3                 :             :  *
       4                 :             :  * SPDX-License-Identifier: Apache-2.0
       5                 :             :  */
       6                 :             : #ifndef ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
       7                 :             : #define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
       8                 :             : 
       9                 :             : #include <ksched.h>
      10                 :             : #include <zephyr/spinlock.h>
      11                 :             : #include <zephyr/sys/barrier.h>
      12                 :             : #include <kernel_arch_func.h>
      13                 :             : 
      14                 :             : #ifdef CONFIG_STACK_SENTINEL
      15                 :             : extern void z_check_stack_sentinel(void);
      16                 :             : #else
      17                 :             : #define z_check_stack_sentinel() /**/
      18                 :             : #endif /* CONFIG_STACK_SENTINEL */
      19                 :             : 
      20                 :             : extern struct k_spinlock _sched_spinlock;
      21                 :             : 
      22                 :             : /* In SMP, the irq_lock() is a spinlock which is implicitly released
      23                 :             :  * and reacquired on context switch to preserve the existing
      24                 :             :  * semantics.  This means that whenever we are about to return to a
      25                 :             :  * thread (via either z_swap() or interrupt/exception return!) we need
      26                 :             :  * to restore the lock state to whatever the thread's counter
      27                 :             :  * expects.
      28                 :             :  */
      29                 :             : void z_smp_release_global_lock(struct k_thread *thread);
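
For illustration, a minimal sketch of the restore side of this contract, assuming (as in Zephyr's SMP implementation) that the per-thread nesting count lives in base.global_lock_count and that the global irq_lock state is a single atomic flag; restore_global_lock_sketch() and global_lock_flag are hypothetical names:

    static atomic_t global_lock_flag; /* assumed stand-in for the global lock */

    static inline void restore_global_lock_sketch(struct k_thread *thread)
    {
            /* If the thread held irq_lock() when it swapped out, the
             * lock was implicitly dropped; take it back before the
             * thread resumes so its critical section stays intact.
             */
            if (thread->base.global_lock_count > 0) {
                    while (!atomic_cas(&global_lock_flag, 0, 1)) {
                            arch_spin_relax();
                    }
            }
    }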
      30                 :             : 
      31                 :             : /* context switching and scheduling-related routines */
      32                 :             : #ifdef CONFIG_USE_SWITCH
      33                 :             : 
      34                 :             : /* Spin, with the scheduler lock held (!), on a thread that is known
      35                 :             :  * (!!) to have released the lock and be on a path where it will
      36                 :             :  * deterministically (!!!) reach arch_switch() in very small constant
      37                 :             :  * time.
      38                 :             :  *
      39                 :             :  * This exists to handle an unavoidable SMP race when threads swap --
      40                 :             :  * their thread record is in the queue (and visible to other CPUs)
      41                 :             :  * before arch_switch() finishes saving state.  We must spin for the
      42                 :             :  * switch handle before entering a new thread.  See docs on
      43                 :             :  * arch_switch().
      44                 :             :  *
      45                 :             :  * Stated differently: there's a chicken-and-egg problem with the
      46                 :             :  * question "is a thread running or not?".  The thread needs to mark
      47                 :             :  * itself "not running" from its own context, but at that moment it
      48                 :             :  * obviously is still running until it reaches arch_switch()!  Locking
      49                 :             :  * can't solve this because the scheduler lock can't be released by
      50                 :             :  * the switched-to thread, which is going to (obviously) be running
      51                 :             :  * its own code and doesn't know it was switched out.
      52                 :             :  */
      53                 :             : static inline void z_sched_switch_spin(struct k_thread *thread)
      54                 :             : {
      55                 :             : #ifdef CONFIG_SMP
      56                 :             :         volatile void **shp = (void *)&thread->switch_handle;
      57                 :             : 
      58                 :             :         while (*shp == NULL) {
      59                 :             :                 arch_spin_relax();
      60                 :             :         }
      61                 :             :         /* Read barrier: don't allow any subsequent loads in the
      62                 :             :          * calling code to reorder before we saw switch_handle go
      63                 :             :          * non-null.
      64                 :             :          */
      65                 :             :         barrier_dmem_fence_full();
      66                 :             : #endif /* CONFIG_SMP */
      67                 :             : }
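
For illustration, the producer side of this handshake in pseudo-C. A real arch_switch() is per-architecture assembly; save_outgoing_registers() and restore_incoming_context() below are hypothetical stand-ins. The essential point is that the outgoing state must be fully saved, and ordered by a barrier, before the handle is published, because z_sched_switch_spin() on another CPU treats a non-NULL handle as "state fully saved":

    void arch_switch_sketch(void *switch_to, void **switched_from)
    {
            void *outgoing_context = save_outgoing_registers(); /* hypothetical */

            /* Order the state saves before the store that publishes the
             * handle; once *switched_from is non-NULL, another CPU may
             * begin running this thread.
             */
            barrier_dmem_fence_full();
            *switched_from = outgoing_context;

            restore_incoming_context(switch_to); /* hypothetical */
    }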
      68                 :             : 
      69                 :             : /* New-style context switching.  arch_switch() is a lower-level
      70                 :             :  * primitive that doesn't know about the scheduler or return value.
      71                 :             :  * Needed for SMP, where the scheduler requires spinlocking that we
      72                 :             :  * don't want to have to do in per-architecture assembly.
      73                 :             :  *
      74                 :             :  * Note that is_spinlock is a compile-time construct which will be
      75                 :             :  * optimized out when this function is expanded.
      76                 :             :  */
      77                 :             : static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
      78                 :             :                                           struct k_spinlock *lock,
      79                 :             :                                           bool is_spinlock)
      80                 :             : {
      81                 :             :         struct k_thread *new_thread, *old_thread;
      82                 :             : 
      83                 :             : #ifdef CONFIG_SPIN_VALIDATE
      84                 :             :         /* Make sure the key acts to unmask interrupts; if it doesn't,
      85                 :             :          * then we are context switching out of a nested lock
      86                 :             :          * (i.e. breaking the lock of someone up the stack) which is
      87                 :             :          * forbidden!  The sole exceptions are dummy threads used
      88                 :             :          * during initialization (where we start with interrupts
      89                 :             :          * masked and switch away to begin scheduling) and a dead
      90                 :             :          * current thread that was just aborted (where the damage was
      91                 :             :          * already done by the abort anyway).
      92                 :             :          *
      93                 :             :          * (Note that this is disabled on ARM64, where system calls
      94                 :             :          * can sometimes run with interrupts masked in ways that don't
      95                 :             :          * represent lock state.  See #35307)
      96                 :             :          */
      97                 :             : # ifndef CONFIG_ARM64
      98                 :             :         __ASSERT(arch_irq_unlocked(key) ||
      99                 :             :                  _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
     100                 :             :                  "Context switching while holding lock!");
     101                 :             : # endif /* CONFIG_ARM64 */
     102                 :             : #endif /* CONFIG_SPIN_VALIDATE */
     103                 :             : 
     104                 :             :         old_thread = _current;
     105                 :             : 
     106                 :             :         z_check_stack_sentinel();
     107                 :             : 
     108                 :             :         old_thread->swap_retval = -EAGAIN;
     109                 :             : 
     110                 :             :         /* We always take the scheduler spinlock if we don't already
     111                 :             :          * have it.  We "release" other spinlocks here.  But we never
     112                 :             :          * drop the interrupt lock.
     113                 :             :          */
     114                 :             :         if (is_spinlock && lock != NULL && lock != &_sched_spinlock) {
     115                 :             :                 k_spin_release(lock);
     116                 :             :         }
     117                 :             :         if (IS_ENABLED(CONFIG_SMP) || IS_ENABLED(CONFIG_SPIN_VALIDATE)) {
      118                 :             :         /* Taking a nested uniprocessor lock in void context is a no-op */
     119                 :             :                 if (!is_spinlock || lock != &_sched_spinlock) {
     120                 :             :                         (void)k_spin_lock(&_sched_spinlock);
     121                 :             :                 }
     122                 :             :         }
     123                 :             : 
     124                 :             : #ifdef CONFIG_SMP
     125                 :             :         new_thread = z_swap_next_thread();
     126                 :             : #else
     127                 :             :         new_thread = _kernel.ready_q.cache;
     128                 :             : #endif
     129                 :             : 
     130                 :             :         if (new_thread != old_thread) {
     131                 :             :                 z_sched_usage_switch(new_thread);
     132                 :             : 
     133                 :             : #ifdef CONFIG_SMP
     134                 :             :                 new_thread->base.cpu = arch_curr_cpu()->id;
     135                 :             : 
     136                 :             :                 if (!is_spinlock) {
     137                 :             :                         z_smp_release_global_lock(new_thread);
     138                 :             :                 }
     139                 :             : #endif /* CONFIG_SMP */
     140                 :             :                 z_thread_mark_switched_out();
     141                 :             :                 z_sched_switch_spin(new_thread);
     142                 :             :                 z_current_thread_set(new_thread);
     143                 :             : 
     144                 :             : #ifdef CONFIG_TIMESLICING
     145                 :             :                 z_reset_time_slice(new_thread);
     146                 :             : #endif /* CONFIG_TIMESLICING */
     147                 :             : 
     148                 :             : #ifdef CONFIG_SPIN_VALIDATE
     149                 :             :                 z_spin_lock_set_owner(&_sched_spinlock);
     150                 :             : #endif /* CONFIG_SPIN_VALIDATE */
     151                 :             : 
     152                 :             :                 arch_cohere_stacks(old_thread, NULL, new_thread);
     153                 :             : 
     154                 :             : #ifdef CONFIG_SMP
     155                 :             :                 /* Now add _current back to the run queue, once we are
     156                 :             :                  * guaranteed to reach the context switch in finite
     157                 :             :                  * time.  See z_sched_switch_spin().
     158                 :             :                  */
     159                 :             :                 z_requeue_current(old_thread);
     160                 :             : #endif /* CONFIG_SMP */
     161                 :             :                 void *newsh = new_thread->switch_handle;
     162                 :             : 
     163                 :             :                 if (IS_ENABLED(CONFIG_SMP)) {
      164                 :             :                         /* An active thread's switch_handle must be
      165                 :             :                          * NULL here, and the store must be visible
      166                 :             :                          * before the scheduler lock is released!
      167                 :             :                          */
     168                 :             :                         new_thread->switch_handle = NULL;
     169                 :             :                         barrier_dmem_fence_full(); /* write barrier */
     170                 :             :                 }
     171                 :             :                 k_spin_release(&_sched_spinlock);
     172                 :             :                 arch_switch(newsh, &old_thread->switch_handle);
     173                 :             :         } else {
     174                 :             :                 k_spin_release(&_sched_spinlock);
     175                 :             :         }
     176                 :             : 
     177                 :             :         if (is_spinlock) {
     178                 :             :                 arch_irq_unlock(key);
     179                 :             :         } else {
     180                 :             :                 irq_unlock(key);
     181                 :             :         }
     182                 :             : 
     183                 :             :         return _current->swap_retval;
     184                 :             : }
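
The -EAGAIN written into swap_retval above is what the suspended thread sees if nothing overrides it (e.g. a timeout expired). A waker that wants the blocked thread's z_swap() call to report success overwrites the value before readying it. A minimal sketch, assuming the z_thread_return_value_set() and z_ready_thread() helpers from ksched.h:

    static void wake_one_sketch(struct k_thread *pended)
    {
            /* Becomes the return value of the pended thread's z_swap() */
            z_thread_return_value_set(pended, 0);
            z_ready_thread(pended); /* make it runnable again */
    }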
     185                 :             : 
     186                 :             : static inline int z_swap_irqlock(unsigned int key)
     187                 :             : {
     188                 :             :         return do_swap(key, NULL, false);
     189                 :             : }
     190                 :             : 
     191                 :             : static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
     192                 :             : {
     193                 :             :         return do_swap(key.key, lock, true);
     194                 :             : }
     195                 :             : 
     196                 :             : static inline void z_swap_unlocked(void)
     197                 :             : {
     198                 :             :         (void) do_swap(arch_irq_lock(), NULL, true);
     199                 :             : }
     200                 :             : 
     201                 :             : #else /* !CONFIG_USE_SWITCH */
     202                 :             : 
     203                 :             : 
     204                 :             : static inline void z_sched_switch_spin(struct k_thread *thread)
     205                 :             : {
     206                 :             :         ARG_UNUSED(thread);
     207                 :             : }
     208                 :             : 
     209                 :          98 : static inline int z_swap_irqlock(unsigned int key)
     210                 :             : {
     211                 :          98 :         int ret;
     212                 :          98 :         z_check_stack_sentinel();
     213                 :             : 
     214                 :             : #ifdef CONFIG_SPIN_VALIDATE
     215                 :             :         /* Refer to comment in do_swap() above for details */
     216                 :             : # ifndef CONFIG_ARM64
     217   [ +  +  -  + ]:          98 :         __ASSERT(arch_irq_unlocked(key) ||
     218                 :             :                  _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
     219                 :             :                  "Context switching while holding lock!");
     220                 :             : # endif /* CONFIG_ARM64 */
     221                 :             : #endif /* CONFIG_SPIN_VALIDATE */
     222                 :             : 
     223                 :          98 :         ret = arch_swap(key);
     224                 :          52 :         return ret;
     225                 :             : }
     226                 :             : 
      227                 :             : /* With !USE_SWITCH, spinlocks are guaranteed to be degenerate, since
      228                 :             :  * SMP is impossible in that configuration.  The k_spin_release() call
      229                 :             :  * is there only for CONFIG_SPIN_VALIDATE bookkeeping.
     230                 :             :  */
     231                 :          98 : static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
     232                 :             : {
     233                 :          98 :         k_spin_release(lock);
     234                 :          98 :         return z_swap_irqlock(key.key);
     235                 :             : }
     236                 :             : 
     237                 :             : static inline void z_swap_unlocked(void)
     238                 :             : {
     239                 :             :         (void) z_swap_irqlock(arch_irq_lock());
     240                 :             : }
     241                 :             : 
     242                 :             : #endif /* !CONFIG_USE_SWITCH */
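
In both configurations the typical caller is a blocking primitive: take the primitive's spinlock, queue the current thread on a wait queue, then call z_swap() with the lock still held; z_swap() releases the lock and eventually returns the waker's value (or the -EAGAIN default). Zephyr consolidates this pattern in z_pend_curr(); a condensed sketch of the idea, assuming the z_pend_thread() helper from ksched.h and eliding timeout handling:

    static int pend_and_swap_sketch(struct k_spinlock *lock, _wait_q_t *wait_q)
    {
            k_spinlock_key_t key = k_spin_lock(lock);

            z_pend_thread(_current, wait_q, K_FOREVER); /* queue ourselves */
            return z_swap(lock, key); /* runs another thread until woken */
    }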
     243                 :             : 
     244                 :             : /**
     245                 :             :  * Set up a "dummy" thread, used at early initialization to launch the
     246                 :             :  * first thread on a CPU.
     247                 :             :  *
      248                 :             :  * It needs to set just enough fields that the context switching code
      249                 :             :  * can use it to store the outgoing context, which is simply discarded.
     250                 :             :  *
     251                 :             :  * The memory of the dummy thread can be completely uninitialized.
     252                 :             :  */
     253                 :             : void z_dummy_thread_init(struct k_thread *dummy_thread);
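
For illustration, a simplified sketch of the boot-time usage (modeled loosely on Zephyr's startup path): the CPU installs an uninitialized dummy thread as the current thread so that the first swap has somewhere to store the outgoing context, which is never resumed:

    static void cpu_start_sketch(void)
    {
            struct k_thread dummy; /* contents may be garbage */

            z_dummy_thread_init(&dummy); /* becomes the current thread */
            z_swap_unlocked();           /* switch to the first real thread */
    }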
     254                 :             : 
     255                 :             : #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */
        

Generated by: LCOV version 2.0-1