LCOV - code coverage report
Current view: top level - home/runner/zephyrproject/zephyr/kernel/include - kernel_internal.h (source / functions)
Test: lcov.info          Date: 2024-09-16 20:15:30

                   Hit    Total    Coverage
  Lines:             0        4      0.0 %
  Functions:         0        1      0.0 %
  Branches:          0        0          -

Legend: Lines: hit | not hit    Branches: + taken, - not taken, # not executed

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
       3                 :            :  *
       4                 :            :  * SPDX-License-Identifier: Apache-2.0
       5                 :            :  */
       6                 :            : 
       7                 :            : /**
       8                 :            :  * @file
       9                 :            :  * @brief Architecture-independent private kernel APIs
      10                 :            :  *
      11                 :            :  * This file contains private kernel APIs that are not architecture-specific.
      12                 :            :  */
      13                 :            : 
      14                 :            : #ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
      15                 :            : #define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
      16                 :            : 
      17                 :            : #include <zephyr/kernel.h>
      18                 :            : #include <kernel_arch_interface.h>
      19                 :            : #include <string.h>
      20                 :            : 
      21                 :            : #ifndef _ASMLANGUAGE
      22                 :            : 
      23                 :            : #ifdef __cplusplus
      24                 :            : extern "C" {
      25                 :            : #endif
      26                 :            : 
      27                 :            : /* Initialize per-CPU kernel data */
      28                 :            : void z_init_cpu(int id);
      29                 :            : 
      30                 :            : /* Initialize a thread */
      31                 :            : void z_init_thread_base(struct _thread_base *thread_base, int priority,
      32                 :            :                         uint32_t initial_state, unsigned int options);
      33                 :            : 
      34                 :            : /* Early boot functions */
      35                 :            : void z_early_memset(void *dst, int c, size_t n);
      36                 :            : void z_early_memcpy(void *dst, const void *src, size_t n);
      37                 :            : 
      38                 :            : void z_bss_zero(void);
      39                 :            : #ifdef CONFIG_XIP
      40                 :            : void z_data_copy(void);
      41                 :            : #else
      42                 :            : static inline void z_data_copy(void)
      43                 :            : {
      44                 :            :         /* Do nothing */
      45                 :            : }
      46                 :            : #endif /* CONFIG_XIP */
      47                 :            : 
      48                 :            : #ifdef CONFIG_LINKER_USE_BOOT_SECTION
      49                 :            : void z_bss_zero_boot(void);
      50                 :            : #else
      51                 :            : static inline void z_bss_zero_boot(void)
      52                 :            : {
      53                 :            :         /* Do nothing */
      54                 :            : }
      55                 :            : #endif /* CONFIG_LINKER_USE_BOOT_SECTION */
      56                 :            : 
      57                 :            : #ifdef CONFIG_LINKER_USE_PINNED_SECTION
      58                 :            : void z_bss_zero_pinned(void);
      59                 :            : #else
      60                 :            : static inline void z_bss_zero_pinned(void)
      61                 :            : {
      62                 :            :         /* Do nothing */
      63                 :            : }
      64                 :            : #endif /* CONFIG_LINKER_USE_PINNED_SECTION */
      65                 :            : 
      66                 :            : FUNC_NORETURN void z_cstart(void);
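
For context, these early boot helpers are normally invoked from the architecture's prep_c-style entry code before the kernel proper starts; the sketch below is illustrative only (the function name z_prep_c and the exact ordering are assumptions, not taken from this header):

    /* Illustrative arch entry stub: clear .bss, copy .data out of ROM when
     * CONFIG_XIP is enabled (otherwise z_data_copy() is a no-op), then hand
     * control to the kernel. z_cstart() never returns.
     */
    void z_prep_c(void)
    {
            z_bss_zero();
            z_data_copy();
            z_cstart();
            CODE_UNREACHABLE;
    }
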
      67                 :            : 
      68                 :            : void z_device_state_init(void);
      69                 :            : 
      70                 :            : extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
      71                 :            :                           void *p1, void *p2, void *p3);
      72                 :            : 
      73                 :            : extern char *z_setup_new_thread(struct k_thread *new_thread,
      74                 :            :                                 k_thread_stack_t *stack, size_t stack_size,
      75                 :            :                                 k_thread_entry_t entry,
      76                 :            :                                 void *p1, void *p2, void *p3,
      77                 :            :                                 int prio, uint32_t options, const char *name);
      78                 :            : 
      79                 :            : /**
      80                 :            :  * @brief Allocate aligned memory from the current thread's resource pool
      81                 :            :  *
      82                 :            :  * Threads may be assigned a resource pool, which will be used to allocate
      83                 :            :  * memory on behalf of certain kernel and driver APIs. Memory reserved
      84                 :            :  * in this way should be freed with k_free().
      85                 :            :  *
      86                 :            :  * If called from an ISR, the k_malloc() system heap will be used if it exists.
      87                 :            :  *
      88                 :            :  * @param align Required memory alignment
      89                 :            :  * @param size Memory allocation size
      90                 :            :  * @return A pointer to the allocated memory, or NULL if there is insufficient
      91                 :            :  * RAM in the pool or there is no pool to draw memory from
      92                 :            :  */
      93                 :            : void *z_thread_aligned_alloc(size_t align, size_t size);
      94                 :            : 
      95                 :            : /**
      96                 :            :  * @brief Allocate some memory from the current thread's resource pool
      97                 :            :  *
      98                 :            :  * Threads may be assigned a resource pool, which will be used to allocate
      99                 :            :  * memory on behalf of certain kernel and driver APIs. Memory reserved
     100                 :            :  * in this way should be freed with k_free().
     101                 :            :  *
     102                 :            :  * If called from an ISR, the k_malloc() system heap will be used if it exists.
     103                 :            :  *
     104                 :            :  * @param size Memory allocation size
     105                 :            :  * @return A pointer to the allocated memory, or NULL if there is insufficient
     106                 :            :  * RAM in the pool or there is no pool to draw memory from
     107                 :            :  */
     108                 :            : static inline void *z_thread_malloc(size_t size)
     109                 :            : {
     110                 :            :         return z_thread_aligned_alloc(0, size);
     111                 :            : }
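
As a usage sketch, a kernel or driver API might draw a temporary buffer from the caller's resource pool and release it with k_free(); the helper name below is hypothetical:

    /* Hypothetical helper: allocate a scratch buffer from the current
     * thread's resource pool (or the k_malloc() system heap when called
     * from an ISR), use it, then return it to the pool.
     */
    static int copy_to_scratch(const void *src, size_t len)
    {
            void *buf = z_thread_malloc(len);

            if (buf == NULL) {
                    return -ENOMEM; /* no pool, or pool exhausted (errno.h) */
            }
            memcpy(buf, src, len);
            /* ... use buf ... */
            k_free(buf);
            return 0;
    }
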
     112                 :            : 
     113                 :            : 
     114                 :            : #ifdef CONFIG_USE_SWITCH
      115                 :            : /* Traditionally this is an arch function, but when the switch-based
      116                 :            :  * z_swap() is in use it is a simple inline provided by the kernel.
      117                 :            :  */
     118                 :            : static ALWAYS_INLINE void
     119                 :            : arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
     120                 :            : {
     121                 :            :         thread->swap_retval = value;
     122                 :            : }
     123                 :            : #endif
     124                 :            : 
     125                 :            : static ALWAYS_INLINE void
     126                 :          0 : z_thread_return_value_set_with_data(struct k_thread *thread,
     127                 :            :                                    unsigned int value,
     128                 :            :                                    void *data)
     129                 :            : {
     130                 :          0 :         arch_thread_return_value_set(thread, value);
     131                 :          0 :         thread->base.swap_data = data;
     132                 :          0 : }
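
A typical use is waking a thread pended on a kernel object and handing it both a status code and a payload pointer. In the sketch below, obj and data_ptr are hypothetical, and z_unpend_first_thread()/z_ready_thread() are other kernel internals referenced only for illustration:

    /* Illustrative wake path: pop the first waiter, give it a success
     * status and the data it was waiting for, then make it runnable.
     */
    struct k_thread *waiter = z_unpend_first_thread(&obj->wait_q);

    if (waiter != NULL) {
            z_thread_return_value_set_with_data(waiter, 0, data_ptr);
            z_ready_thread(waiter);
    }
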
     133                 :            : 
     134                 :            : #ifdef CONFIG_SMP
     135                 :            : extern void z_smp_init(void);
     136                 :            : #ifdef CONFIG_SYS_CLOCK_EXISTS
     137                 :            : extern void smp_timer_init(void);
     138                 :            : #endif /* CONFIG_SYS_CLOCK_EXISTS */
     139                 :            : #endif /* CONFIG_SMP */
     140                 :            : 
     141                 :            : extern void z_early_rand_get(uint8_t *buf, size_t length);
     142                 :            : 
     143                 :            : #if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
     144                 :            : extern int z_stack_adjust_initialized;
     145                 :            : #endif /* CONFIG_STACK_POINTER_RANDOM */
     146                 :            : 
     147                 :            : extern struct k_thread z_main_thread;
     148                 :            : 
     149                 :            : 
     150                 :            : #ifdef CONFIG_MULTITHREADING
     151                 :            : extern struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
     152                 :            : #endif /* CONFIG_MULTITHREADING */
     153                 :            : K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
     154                 :            :                                     CONFIG_ISR_STACK_SIZE);
     155                 :            : 
     156                 :            : #ifdef CONFIG_GEN_PRIV_STACKS
     157                 :            : extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
     158                 :            : #endif /* CONFIG_GEN_PRIV_STACKS */
     159                 :            : 
     160                 :            : /* Calculate stack usage. */
     161                 :            : int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr);
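
A brief usage sketch (stack_buf and stack_size are placeholders):

    /* Report how many bytes of a stack buffer were never written. */
    size_t unused = 0;

    if (z_stack_space_get(stack_buf, stack_size, &unused) == 0) {
            printk("unused stack: %zu bytes\n", unused);
    }
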
     162                 :            : 
     163                 :            : #ifdef CONFIG_USERSPACE
     164                 :            : bool z_stack_is_user_capable(k_thread_stack_t *stack);
     165                 :            : 
     166                 :            : /* Memory domain setup hook, called from z_setup_new_thread() */
     167                 :            : void z_mem_domain_init_thread(struct k_thread *thread);
     168                 :            : 
     169                 :            : /* Memory domain teardown hook, called from z_thread_abort() */
     170                 :            : void z_mem_domain_exit_thread(struct k_thread *thread);
     171                 :            : 
     172                 :            : /* This spinlock:
     173                 :            :  *
     174                 :            :  * - Protects the full set of active k_mem_domain objects and their contents
     175                 :            :  * - Serializes calls to arch_mem_domain_* APIs
     176                 :            :  *
     177                 :            :  * If architecture code needs to access k_mem_domain structures or the
     178                 :            :  * partitions they contain at any other point, this spinlock should be held.
     179                 :            :  * Uniprocessor systems can get away with just locking interrupts but this is
     180                 :            :  * not recommended.
     181                 :            :  */
     182                 :            : extern struct k_spinlock z_mem_domain_lock;
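
Architecture code touching memory domain state would therefore bracket the access with the standard spinlock API, for example:

    /* Hold z_mem_domain_lock while walking a domain's partition list. */
    k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);

    /* ... read or update struct k_mem_domain / its partitions ... */

    k_spin_unlock(&z_mem_domain_lock, key);
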
     183                 :            : #endif /* CONFIG_USERSPACE */
     184                 :            : 
     185                 :            : #ifdef CONFIG_GDBSTUB
     186                 :            : struct gdb_ctx;
     187                 :            : 
      188                 :            : /* Should be called by the arch layer. This is the gdbstub main loop;
      189                 :            :  * it synchronously communicates with gdb on the host.
      190                 :            :  */
     191                 :            : extern int z_gdb_main_loop(struct gdb_ctx *ctx);
     192                 :            : #endif /* CONFIG_GDBSTUB */
     193                 :            : 
     194                 :            : #ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
     195                 :            : void z_thread_mark_switched_in(void);
     196                 :            : void z_thread_mark_switched_out(void);
     197                 :            : #else
     198                 :            : 
     199                 :            : /**
     200                 :            :  * @brief Called after a thread has been selected to run
     201                 :            :  */
     202                 :            : #define z_thread_mark_switched_in()
     203                 :            : 
     204                 :            : /**
     205                 :            :  * @brief Called before a thread has been selected to run
     206                 :            :  */
     207                 :            : 
     208                 :            : #define z_thread_mark_switched_out()
     209                 :            : 
     210                 :            : #endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */
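
When CONFIG_INSTRUMENT_THREAD_SWITCHING is enabled, the scheduler's context-switch path brackets the actual switch with these hooks; a simplified sketch (the switch step itself is elided):

    /* Simplified context-switch instrumentation: note the outgoing
     * thread, perform the switch, then note the incoming thread.
     */
    z_thread_mark_switched_out();
    /* ... arch-specific context switch ... */
    z_thread_mark_switched_in();
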
     211                 :            : 
     212                 :            : /* Init hook for page frame management, invoked immediately upon entry of
     213                 :            :  * main thread, before POST_KERNEL tasks
     214                 :            :  */
     215                 :            : void z_mem_manage_init(void);
     216                 :            : 
     217                 :            : /**
     218                 :            :  * @brief Finalize page frame management at the end of boot process.
     219                 :            :  */
     220                 :            : void z_mem_manage_boot_finish(void);
     221                 :            : 
     222                 :            : 
     223                 :            : void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);
     224                 :            : 
     225                 :            : #ifdef CONFIG_PM
     226                 :            : 
     227                 :            : /* When the kernel is about to go idle, it calls this function to notify the
      228                 :            :  * power management subsystem that the kernel is ready to enter the idle state.
     229                 :            :  *
     230                 :            :  * At this point, the kernel has disabled interrupts and computed the maximum
      231                 :            :  * time the system can remain idle; that time is passed in via the ticks
      232                 :            :  * argument. The SOC interface performs power operations that can be done
     233                 :            :  * in the available time. The power management operations must halt execution of
     234                 :            :  * the CPU.
     235                 :            :  *
     236                 :            :  * This function assumes that a wake up event has already been set up by the
     237                 :            :  * application.
     238                 :            :  *
     239                 :            :  * This function is entered with interrupts disabled. It should re-enable
      240                 :            :  * interrupts if it entered a power state.
     241                 :            :  *
     242                 :            :  * @return True if the system suspended, otherwise return false
     243                 :            :  */
     244                 :            : bool pm_system_suspend(int32_t ticks);
     245                 :            : 
     246                 :            : #endif /* CONFIG_PM */
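
In outline, the kernel's idle path uses this as follows: with interrupts already locked it computes the allowable idle time in ticks, offers it to the PM subsystem, and falls back to a plain CPU idle when no power state was entered. A hedged sketch (names other than pm_system_suspend() and k_cpu_idle() are illustrative):

    /* Illustrative idle-loop body under CONFIG_PM; max_idle_ticks stands
     * in for the value the kernel computed with interrupts locked.
     */
    int32_t ticks = max_idle_ticks;

    if (!pm_system_suspend(ticks)) {
            /* No power state was entered: idle the CPU directly instead. */
            k_cpu_idle();
    }
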
     247                 :            : 
     248                 :            : #ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
     249                 :            : /**
     250                 :            :  * Initialize the timing histograms for demand paging.
     251                 :            :  */
     252                 :            : void z_paging_histogram_init(void);
     253                 :            : 
     254                 :            : /**
     255                 :            :  * Increment the counter in the timing histogram.
     256                 :            :  *
     257                 :            :  * @param hist The timing histogram to be updated.
     258                 :            :  * @param cycles Time spent in measured operation.
     259                 :            :  */
     260                 :            : void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
     261                 :            :                             uint32_t cycles);
     262                 :            : #endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
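
The demand-paging code measures an operation in hardware cycles and feeds the delta into the matching histogram; a minimal sketch (the histogram variable name is illustrative):

    /* Time a page-in style operation and record it in the histogram. */
    uint32_t start = k_cycle_get_32();

    /* ... perform the paging operation being measured ... */

    z_paging_histogram_inc(&demand_paging_histogram, k_cycle_get_32() - start);
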
     263                 :            : 
     264                 :            : #ifdef CONFIG_OBJ_CORE_STATS_THREAD
     265                 :            : int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats);
     266                 :            : int z_thread_stats_query(struct k_obj_core *obj_core, void *stats);
     267                 :            : int z_thread_stats_reset(struct k_obj_core *obj_core);
     268                 :            : int z_thread_stats_disable(struct k_obj_core *obj_core);
     269                 :            : int z_thread_stats_enable(struct k_obj_core *obj_core);
     270                 :            : #endif /* CONFIG_OBJ_CORE_STATS_THREAD */
     271                 :            : 
     272                 :            : #ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
     273                 :            : int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats);
     274                 :            : int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats);
     275                 :            : 
     276                 :            : int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats);
     277                 :            : int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
     278                 :            : #endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */
     279                 :            : 
     280                 :            : #if defined(CONFIG_THREAD_ABORT_NEED_CLEANUP)
     281                 :            : /**
     282                 :            :  * Perform cleanup at the end of k_thread_abort().
     283                 :            :  *
     284                 :            :  * This performs additional cleanup steps at the end of k_thread_abort()
     285                 :            :  * where these steps require that the thread is no longer running.
     286                 :            :  * If the target thread is not the current running thread, the cleanup
     287                 :            :  * steps will be performed immediately. However, if the target thread is
      288                 :            :  * the current running thread (e.g. k_thread_abort(_current)), the cleanup
      289                 :            :  * steps are deferred and finished later in another
      290                 :            :  * context.
     291                 :            :  *
     292                 :            :  * @param thread Pointer to thread to be cleaned up.
     293                 :            :  */
     294                 :            : void k_thread_abort_cleanup(struct k_thread *thread);
     295                 :            : 
     296                 :            : /**
     297                 :            :  * Check if thread is the same as the one waiting for cleanup.
     298                 :            :  *
      299                 :            :  * This is used to guard against reusing the same thread object
      300                 :            :  * before the previous cleanup has finished. If needed, it performs
      301                 :            :  * the remaining cleanup so that the thread object can be
      302                 :            :  * reused. It should mainly be used during thread creation.
     303                 :            :  *
     304                 :            :  * @param thread Pointer to thread to be checked.
     305                 :            :  */
     306                 :            : void k_thread_abort_cleanup_check_reuse(struct k_thread *thread);
     307                 :            : #endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */
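
Taken together, the intended call pattern is roughly: thread creation first makes sure any previous use of the same thread object has been fully cleaned up, and k_thread_abort() invokes the cleanup hook as its final step. Sketched below under that assumption, with hypothetical variable names:

    /* During thread setup: flush any deferred cleanup tied to this object. */
    k_thread_abort_cleanup_check_reuse(new_thread);

    /* At the end of k_thread_abort(): run (or defer) the final cleanup. */
    k_thread_abort_cleanup(aborted_thread);
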
     308                 :            : 
     309                 :            : #ifdef __cplusplus
     310                 :            : }
     311                 :            : #endif
     312                 :            : 
     313                 :            : #endif /* _ASMLANGUAGE */
     314                 :            : 
     315                 :            : #endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */

Generated by: LCOV version 1.14