LCOV - code coverage report
Current view: home/runner/zephyrproject/zephyr/kernel/include/kswap.h
Test: lcov.info
Date: 2024-09-16 20:15:30

Coverage summary:   Hit   Total   Coverage
  Lines:             15      15    100.0 %
  Functions:          3       3    100.0 %
  Branches:            0       0         -

Legend:  Lines: hit / not hit  |  Branches: + taken, - not taken, # not executed

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  * Copyright (c) 2018 Intel Corporation
       3                 :            :  *
       4                 :            :  * SPDX-License-Identifier: Apache-2.0
       5                 :            :  */
       6                 :            : #ifndef ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
       7                 :            : #define ZEPHYR_KERNEL_INCLUDE_KSWAP_H_
       8                 :            : 
       9                 :            : #include <ksched.h>
      10                 :            : #include <zephyr/spinlock.h>
      11                 :            : #include <zephyr/sys/barrier.h>
      12                 :            : #include <kernel_arch_func.h>
      13                 :            : 
      14                 :            : #ifdef CONFIG_STACK_SENTINEL
      15                 :            : extern void z_check_stack_sentinel(void);
      16                 :            : #else
      17                 :            : #define z_check_stack_sentinel() /**/
      18                 :            : #endif /* CONFIG_STACK_SENTINEL */
      19                 :            : 
      20                 :            : extern struct k_spinlock _sched_spinlock;
      21                 :            : 
      22                 :            : /* In SMP, the irq_lock() is a spinlock which is implicitly released
      23                 :            :  * and reacquired on context switch to preserve the existing
      24                 :            :  * semantics.  This means that whenever we are about to return to a
      25                 :            :  * thread (via either z_swap() or interrupt/exception return!) we need
      26                 :            :  * to restore the lock state to whatever the thread's counter
      27                 :            :  * expects.
      28                 :            :  */
      29                 :            : void z_smp_release_global_lock(struct k_thread *thread);
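/*
 * Illustrative sketch (hypothetical, not part of kswap.h): the semantics
 * described above mean that a thread which blocks while holding irq_lock()
 * does not wedge the system, because the underlying lock is dropped across
 * the context switch and the thread's own lock state is restored before it
 * runs again.  locked_sleeper() is a made-up name; it assumes the usual
 * <zephyr/kernel.h> APIs.
 */
void locked_sleeper(void)
{
        unsigned int key = irq_lock();

        k_sleep(K_MSEC(10));    /* lock state implicitly released while we sleep */

        irq_unlock(key);        /* and restored by the time we resume */
}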
      30                 :            : 
      31                 :            : /* context switching and scheduling-related routines */
      32                 :            : #ifdef CONFIG_USE_SWITCH
      33                 :            : 
      34                 :            : /* Spin, with the scheduler lock held (!), on a thread that is known
      35                 :            :  * (!!) to have released the lock and be on a path where it will
      36                 :            :  * deterministically (!!!) reach arch_switch() in very small constant
      37                 :            :  * time.
      38                 :            :  *
      39                 :            :  * This exists to treat an unavoidable SMP race when threads swap --
      40                 :            :  * their thread record is in the queue (and visible to other CPUs)
      41                 :            :  * before arch_switch() finishes saving state.  We must spin for the
      42                 :            :  * switch handle before entering a new thread.  See docs on
      43                 :            :  * arch_switch().
      44                 :            :  *
      45                 :            :  * Stated differently: there's a chicken and egg bug with the question
      46                 :            :  * of "is a thread running or not?".  The thread needs to mark itself
      47                 :            :  * "not running" from its own context, but at that moment it obviously
      48                 :            :  * is still running until it reaches arch_switch()!  Locking can't
      49                 :            :  * treat this because the scheduler lock can't be released by the
      50                 :            :  * switched-to thread, which is going to (obviously) be running its
      51                 :            :  * own code and doesn't know it was switched out.
      52                 :            :  */
      53                 :            : static inline void z_sched_switch_spin(struct k_thread *thread)
      54                 :            : {
      55                 :            : #ifdef CONFIG_SMP
      56                 :            :         volatile void **shp = (void *)&thread->switch_handle;
      57                 :            : 
      58                 :            :         while (*shp == NULL) {
      59                 :            :                 arch_spin_relax();
      60                 :            :         }
      61                 :            :         /* Read barrier: don't allow any subsequent loads in the
      62                 :            :          * calling code to reorder before we saw switch_handle go
      63                 :            :          * non-null.
      64                 :            :          */
      65                 :            :         barrier_dmem_fence_full();
      66                 :            : #endif /* CONFIG_SMP */
      67                 :            : }
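/*
 * Minimal standalone model (hypothetical, using C11 atomics rather than
 * Zephyr primitives) of the handshake z_sched_switch_spin() implements:
 * the outgoing CPU publishes the switch handle only after the thread's
 * context is fully saved, and the incoming CPU spins until it observes
 * that publication.  The release/acquire pair stands in for arch_switch()'s
 * final store and the read barrier above.
 */
#include <stdatomic.h>
#include <stddef.h>

static _Atomic(void *) model_switch_handle;  /* plays the role of thread->switch_handle */

void model_outgoing_cpu(void *saved_context)
{
        /* ... all register state has already been written to saved_context ... */
        atomic_store_explicit(&model_switch_handle, saved_context,
                              memory_order_release);    /* publish last */
}

void *model_incoming_cpu(void)
{
        void *h;

        /* spin until the outgoing CPU has finished saving state */
        while ((h = atomic_load_explicit(&model_switch_handle,
                                         memory_order_acquire)) == NULL) {
        }
        return h;       /* now safe to resume from this context */
}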
      68                 :            : 
      69                 :            : /* New style context switching.  arch_switch() is a lower level
      70                 :            :  * primitive that doesn't know about the scheduler or return value.
      71                 :            :  * Needed for SMP, where the scheduler requires spinlocking that we
      72                 :            :  * don't want to have to do in per-architecture assembly.
      73                 :            :  *
      74                 :            :  * Note that is_spinlock is a compile-time construct which will be
      75                 :            :  * optimized out when this function is expanded.
      76                 :            :  */
      77                 :            : static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
      78                 :            :                                           struct k_spinlock *lock,
      79                 :            :                                           bool is_spinlock)
      80                 :            : {
      81                 :            :         ARG_UNUSED(lock);
      82                 :            :         struct k_thread *new_thread, *old_thread;
      83                 :            : 
      84                 :            : #ifdef CONFIG_SPIN_VALIDATE
      85                 :            :         /* Make sure the key acts to unmask interrupts, if it doesn't,
      86                 :            :          * then we are context switching out of a nested lock
      87                 :            :          * (i.e. breaking the lock of someone up the stack) which is
       88                 :            :  * forbidden!  The sole exceptions are dummy threads used
      89                 :            :          * during initialization (where we start with interrupts
      90                 :            :          * masked and switch away to begin scheduling) and the case of
      91                 :            :          * a dead current thread that was just aborted (where the
      92                 :            :          * damage was already done by the abort anyway).
      93                 :            :          *
      94                 :            :          * (Note that this is disabled on ARM64, where system calls
      95                 :            :          * can sometimes run with interrupts masked in ways that don't
      96                 :            :          * represent lock state.  See #35307)
      97                 :            :          */
      98                 :            : # ifndef CONFIG_ARM64
      99                 :            :         __ASSERT(arch_irq_unlocked(key) ||
     100                 :            :                  _current->base.thread_state & (_THREAD_DUMMY | _THREAD_DEAD),
     101                 :            :                  "Context switching while holding lock!");
     102                 :            : # endif /* CONFIG_ARM64 */
     103                 :            : #endif /* CONFIG_SPIN_VALIDATE */
     104                 :            : 
     105                 :            :         old_thread = _current;
     106                 :            : 
     107                 :            :         z_check_stack_sentinel();
     108                 :            : 
     109                 :            :         old_thread->swap_retval = -EAGAIN;
     110                 :            : 
     111                 :            :         /* We always take the scheduler spinlock if we don't already
     112                 :            :          * have it.  We "release" other spinlocks here.  But we never
     113                 :            :          * drop the interrupt lock.
     114                 :            :          */
     115                 :            :         if (is_spinlock && lock != NULL && lock != &_sched_spinlock) {
     116                 :            :                 k_spin_release(lock);
     117                 :            :         }
     118                 :            :         if (!is_spinlock || lock != &_sched_spinlock) {
     119                 :            :                 (void) k_spin_lock(&_sched_spinlock);
     120                 :            :         }
     121                 :            : 
     122                 :            :         new_thread = z_swap_next_thread();
     123                 :            : 
     124                 :            :         if (new_thread != old_thread) {
     125                 :            :                 z_sched_usage_switch(new_thread);
     126                 :            : 
     127                 :            : #ifdef CONFIG_SMP
     128                 :            :                 _current_cpu->swap_ok = 0;
     129                 :            :                 new_thread->base.cpu = arch_curr_cpu()->id;
     130                 :            : 
     131                 :            :                 if (!is_spinlock) {
     132                 :            :                         z_smp_release_global_lock(new_thread);
     133                 :            :                 }
     134                 :            : #endif /* CONFIG_SMP */
     135                 :            :                 z_thread_mark_switched_out();
     136                 :            :                 z_sched_switch_spin(new_thread);
     137                 :            :                 _current_cpu->current = new_thread;
     138                 :            : 
     139                 :            : #ifdef CONFIG_TIMESLICING
     140                 :            :                 z_reset_time_slice(new_thread);
     141                 :            : #endif /* CONFIG_TIMESLICING */
     142                 :            : 
     143                 :            : #ifdef CONFIG_SPIN_VALIDATE
     144                 :            :                 z_spin_lock_set_owner(&_sched_spinlock);
     145                 :            : #endif /* CONFIG_SPIN_VALIDATE */
     146                 :            : 
     147                 :            :                 arch_cohere_stacks(old_thread, NULL, new_thread);
     148                 :            : 
     149                 :            : #ifdef CONFIG_SMP
     150                 :            :                 /* Now add _current back to the run queue, once we are
     151                 :            :                  * guaranteed to reach the context switch in finite
     152                 :            :                  * time.  See z_sched_switch_spin().
     153                 :            :                  */
     154                 :            :                 z_requeue_current(old_thread);
     155                 :            : #endif /* CONFIG_SMP */
     156                 :            :                 void *newsh = new_thread->switch_handle;
     157                 :            : 
     158                 :            :                 if (IS_ENABLED(CONFIG_SMP)) {
     159                 :            :                         /* Active threads must have a null here.  And
     160                 :            :                          * it must be seen before the scheduler lock
     161                 :            :                          * is released!
     162                 :            :                          */
     163                 :            :                         new_thread->switch_handle = NULL;
     164                 :            :                         barrier_dmem_fence_full(); /* write barrier */
     165                 :            :                 }
     166                 :            :                 k_spin_release(&_sched_spinlock);
     167                 :            :                 arch_switch(newsh, &old_thread->switch_handle);
     168                 :            :         } else {
     169                 :            :                 k_spin_release(&_sched_spinlock);
     170                 :            :         }
     171                 :            : 
     172                 :            :         if (is_spinlock) {
     173                 :            :                 arch_irq_unlock(key);
     174                 :            :         } else {
     175                 :            :                 irq_unlock(key);
     176                 :            :         }
     177                 :            : 
     178                 :            :         return _current->swap_retval;
     179                 :            : }
     180                 :            : 
     181                 :            : static inline int z_swap_irqlock(unsigned int key)
     182                 :            : {
     183                 :            :         return do_swap(key, NULL, false);
     184                 :            : }
     185                 :            : 
     186                 :            : static inline int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
     187                 :            : {
     188                 :            :         return do_swap(key.key, lock, true);
     189                 :            : }
     190                 :            : 
     191                 :            : static inline void z_swap_unlocked(void)
     192                 :            : {
     193                 :            :         (void) do_swap(arch_irq_lock(), NULL, true);
     194                 :            : }
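/*
 * Hypothetical usage sketch: how a kernel-internal blocking primitive
 * typically drives do_swap() through z_swap().  my_obj_lock and
 * my_wait_for_signal() are placeholder names, not real Zephyr symbols.
 * z_swap() releases the caller's spinlock, switches away, and returns
 * swap_retval (default -EAGAIN) once another context has made this
 * thread runnable again.
 */
static struct k_spinlock my_obj_lock;

static int my_wait_for_signal(void)
{
        k_spinlock_key_t key = k_spin_lock(&my_obj_lock);

        /* ... add _current to the object's wait queue here ... */

        return z_swap(&my_obj_lock, key);
}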
     195                 :            : 
     196                 :            : #else /* !CONFIG_USE_SWITCH */
     197                 :            : 
     198                 :            : extern int arch_swap(unsigned int key);
     199                 :            : 
     200                 :            : static inline void z_sched_switch_spin(struct k_thread *thread)
     201                 :            : {
     202                 :            :         ARG_UNUSED(thread);
     203                 :            : }
     204                 :            : 
     205                 :         98 : static inline int z_swap_irqlock(unsigned int key)
     206                 :            : {
     207                 :         98 :         int ret;
     208                 :         98 :         z_check_stack_sentinel();
     209                 :         98 :         ret = arch_swap(key);
     210                 :         52 :         return ret;
     211                 :            : }
     212                 :            : 
     213                 :            : /* If !USE_SWITCH, then spinlocks are guaranteed degenerate as we
     214                 :            :  * can't be in SMP.  The k_spin_release() call is just for validation
     215                 :            :  * handling.
     216                 :            :  */
     217                 :         98 : static ALWAYS_INLINE int z_swap(struct k_spinlock *lock, k_spinlock_key_t key)
     218                 :            : {
     219                 :         98 :         k_spin_release(lock);
     220                 :         98 :         return z_swap_irqlock(key.key);
     221                 :            : }
     222                 :            : 
     223                 :            : static inline void z_swap_unlocked(void)
     224                 :            : {
     225                 :            :         (void) z_swap_irqlock(arch_irq_lock());
     226                 :            : }
     227                 :            : 
     228                 :            : #endif /* !CONFIG_USE_SWITCH */
     229                 :            : 
     230                 :            : /**
     231                 :            :  * Set up a "dummy" thread, used at early initialization to launch the
     232                 :            :  * first thread on a CPU.
     233                 :            :  *
     234                 :            :  * Needs to set enough fields such that the context switching code can
     235                 :            :  * use it to properly store state, which will just be discarded.
     236                 :            :  *
     237                 :            :  * The memory of the dummy thread can be completely uninitialized.
     238                 :            :  */
     239                 :          1 : static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
     240                 :            : {
     241                 :          1 :         dummy_thread->base.thread_state = _THREAD_DUMMY;
     242                 :            : #ifdef CONFIG_SCHED_CPU_MASK
     243                 :          1 :         dummy_thread->base.cpu_mask = -1;
     244                 :            : #endif /* CONFIG_SCHED_CPU_MASK */
     245                 :          1 :         dummy_thread->base.user_options = K_ESSENTIAL;
     246                 :            : #ifdef CONFIG_THREAD_STACK_INFO
     247                 :            :         dummy_thread->stack_info.start = 0U;
     248                 :            :         dummy_thread->stack_info.size = 0U;
     249                 :            : #endif /* CONFIG_THREAD_STACK_INFO */
     250                 :            : #ifdef CONFIG_USERSPACE
     251                 :            :         dummy_thread->mem_domain_info.mem_domain = &k_mem_domain_default;
     252                 :            : #endif /* CONFIG_USERSPACE */
     253                 :            : #if (K_HEAP_MEM_POOL_SIZE > 0)
     254                 :            :         k_thread_system_pool_assign(dummy_thread);
     255                 :            : #else
     256                 :          1 :         dummy_thread->resource_pool = NULL;
     257                 :            : #endif /* K_HEAP_MEM_POOL_SIZE */
     258                 :            : 
     259                 :            : #ifdef CONFIG_TIMESLICE_PER_THREAD
     260                 :            :         dummy_thread->base.slice_ticks = 0;
     261                 :            : #endif /* CONFIG_TIMESLICE_PER_THREAD */
     262                 :            : 
     263                 :          1 :         _current_cpu->current = dummy_thread;
     264                 :          1 : }
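/*
 * Hypothetical bring-up sketch (not the actual Zephyr init path): the dummy
 * thread gives a CPU a throwaway "current" thread so that the very first
 * context switch has somewhere to save state that will never be resumed.
 * start_first_thread() is a placeholder name.
 */
static void start_first_thread(void)
{
        struct k_thread dummy;

        z_dummy_thread_init(&dummy);    /* _current now refers to dummy */

        /* Switch to the highest-priority ready thread; whatever gets saved
         * into dummy is simply discarded.
         */
        z_swap_unlocked();
}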
     265                 :            : #endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */

Generated by: LCOV version 1.14