/*
 * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Architecture-independent private kernel APIs
 *
 * This file contains private kernel APIs that are not architecture-specific.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_

#include <zephyr/kernel.h>
#include <kernel_arch_interface.h>
#include <kthread.h>
#include <string.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/* Initialize per-CPU kernel data */
void z_init_cpu(int id);

/* Initialize a thread */
void z_init_thread_base(struct _thread_base *thread_base, int priority,
			uint32_t initial_state, unsigned int options);

FUNC_NORETURN void z_cstart(void);

extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
					 void *p1, void *p2, void *p3);

extern char *z_setup_new_thread(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size,
				k_thread_entry_t entry,
				void *p1, void *p2, void *p3,
				int prio, uint32_t options, const char *name);

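/* Illustrative sketch (not part of this header): a thread-creation path
 * along the lines of k_thread_create() is expected to initialize the
 * thread object with z_setup_new_thread() before making it runnable.
 * Variable names here are hypothetical:
 *
 *	char *sp = z_setup_new_thread(new_thread, stack, stack_size,
 *				      entry, p1, p2, p3,
 *				      prio, options, "worker");
 *	ARG_UNUSED(sp);
 *
 * The returned pointer appears to be the thread's initial stack pointer;
 * the caller then arranges for the thread to be scheduled, possibly after
 * a start delay.
 */
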
/**
 * @brief Allocate aligned memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param align Required memory alignment
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 *         RAM in the pool or there is no pool to draw memory from
 */
void *z_thread_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate some memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 *         RAM in the pool or there is no pool to draw memory from
 */
void *z_thread_malloc(size_t size);

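/* Usage sketch (illustrative, not part of this header): a kernel or driver
 * API might allocate a per-request object from the caller's resource pool.
 * The struct name is hypothetical:
 *
 *	struct conn_ctx *ctx = z_thread_malloc(sizeof(*ctx));
 *
 *	if (ctx == NULL) {
 *		return -ENOMEM;
 *	}
 *	memset(ctx, 0, sizeof(*ctx));
 *
 * NULL means either no pool is assigned or the pool is exhausted; memory
 * obtained this way is returned with k_free(ctx).
 */
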
#ifdef CONFIG_USE_SWITCH
/* This is traditionally an arch function, but when the switch-based
 * z_swap() is in use it's a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	thread->swap_retval = value;
}
#endif

static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				    unsigned int value,
				    void *data)
{
	arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
}

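/* Illustrative sketch (hypothetical, simplified): how a kernel object's
 * "put" path might wake a pended thread and hand it both a return code and
 * a payload. z_unpend_first_thread() and z_ready_thread() are the internal
 * scheduler helpers assumed here:
 *
 *	struct k_thread *waiter = z_unpend_first_thread(&fifo->wait_q);
 *
 *	if (waiter != NULL) {
 *		z_thread_return_value_set_with_data(waiter, 0, data);
 *		z_ready_thread(waiter);
 *	}
 *
 * The waiter's blocked call then returns 0 and finds the payload in
 * waiter->base.swap_data.
 */
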
#ifdef CONFIG_SMP
extern void z_smp_init(void);
#ifdef CONFIG_SYS_CLOCK_EXISTS
extern void smp_timer_init(void);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
#endif /* CONFIG_SMP */

extern void z_early_rand_get(uint8_t *buf, size_t length);

#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
extern int z_stack_adjust_initialized;
#endif /* CONFIG_STACK_POINTER_RANDOM */

extern struct k_thread z_main_thread;

K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
				    CONFIG_ISR_STACK_SIZE);
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);

#ifdef CONFIG_GEN_PRIV_STACKS
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif /* CONFIG_GEN_PRIV_STACKS */

/* Calculate stack usage. */
int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr);

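/* Usage sketch (illustrative): computing how much of a thread's stack has
 * never been touched, assuming CONFIG_THREAD_STACK_INFO so that
 * thread->stack_info is available:
 *
 *	size_t unused;
 *	int ret = z_stack_space_get((const uint8_t *)thread->stack_info.start,
 *				    thread->stack_info.size, &unused);
 *
 *	if (ret == 0) {
 *		size_t used = thread->stack_info.size - unused;
 *	}
 */
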
#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack);

/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);

/* Memory domain teardown hook, called from z_thread_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);

/* This spinlock:
 *
 * - Protects the full set of active k_mem_domain objects and their contents
 * - Serializes calls to arch_mem_domain_* APIs
 *
 * If architecture code needs to access k_mem_domain structures or the
 * partitions they contain at any other point, this spinlock should be held.
 * Uniprocessor systems can get away with just locking interrupts, but this
 * is not recommended.
 */
extern struct k_spinlock z_mem_domain_lock;
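
/* Illustrative sketch for architecture code (program_mpu_region() is a
 * hypothetical arch routine): any walk over a domain's partitions outside
 * the arch_mem_domain_* callbacks should hold this lock:
 *
 *	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
 *
 *	for (int i = 0; i < CONFIG_MAX_DOMAIN_PARTITIONS; i++) {
 *		struct k_mem_partition *part = &domain->partitions[i];
 *
 *		if (part->size != 0U) {
 *			program_mpu_region(part);
 *		}
 *	}
 *	k_spin_unlock(&z_mem_domain_lock, key);
 */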
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/* Should be called by the arch layer. This is the gdbstub main loop,
 * which synchronously communicates with GDB on the host.
 */
extern int z_gdb_main_loop(struct gdb_ctx *ctx);
#endif /* CONFIG_GDBSTUB */

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void);
void z_thread_mark_switched_out(void);
#else

/**
 * @brief Called after a thread has been selected to run
 */
#define z_thread_mark_switched_in()

/**
 * @brief Called before a thread stops running
 */
#define z_thread_mark_switched_out()

#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

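/* Illustrative sketch: an instrumented context-switch path is expected to
 * bracket the switch with these hooks. The exact call sites are arch- and
 * kernel-specific; next_up() is a hypothetical scheduler pick:
 *
 *	z_thread_mark_switched_out();
 *	struct k_thread *new_thread = next_up();
 *	arch_switch(new_thread->switch_handle, &old_thread->switch_handle);
 *	z_thread_mark_switched_in();
 */
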
/* Init hook for page frame management, invoked immediately upon entry to
 * the main thread, before POST_KERNEL tasks
 */
void z_mem_manage_init(void);

/**
 * @brief Finalize page frame management at the end of the boot process.
 */
void z_mem_manage_boot_finish(void);

bool z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);

#ifdef CONFIG_PM

/* When the kernel is about to go idle, it calls this function to notify the
 * power management subsystem that the kernel is ready to enter the idle state.
 *
 * At this point, the kernel has disabled interrupts and computed the maximum
 * time the system can remain idle, which it passes in as the ticks argument.
 * The SoC interface performs the power operations that can be done in the
 * available time. The power management operations must halt execution of
 * the CPU.
 *
 * This function assumes that a wake-up event has already been set up by the
 * application.
 *
 * This function is entered with interrupts disabled. It should re-enable
 * interrupts if it had entered a power state.
 *
 * @return True if the system suspended, otherwise false
 */
bool pm_system_suspend(int32_t ticks);

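/* Usage sketch (simplified; details are assumptions, including the use of
 * z_get_next_timeout_expiry() to obtain the maximum idle time in ticks):
 *
 *	int32_t ticks = z_get_next_timeout_expiry();
 *
 *	(void)arch_irq_lock();
 *	if (!pm_system_suspend(ticks)) {
 *		k_cpu_idle();
 *	}
 *
 * If no power state was entered, the caller idles the CPU normally;
 * k_cpu_idle() re-enables interrupts as part of entering low-power idle.
 */
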
#endif /* CONFIG_PM */

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
/**
 * Initialize the timing histograms for demand paging.
 */
void z_paging_histogram_init(void);

/**
 * Increment the counter in the timing histogram.
 *
 * @param hist The timing histogram to be updated.
 * @param cycles Time spent in measured operation.
 */
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
			    uint32_t cycles);
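
/* Usage sketch (illustrative): timing a demand-paging operation with the
 * cycle counter and recording it. The histogram instance and the operation
 * are hypothetical:
 *
 *	uint32_t start = k_cycle_get_32();
 *
 *	evict_page_frame();
 *	z_paging_histogram_inc(&eviction_histogram,
 *			       k_cycle_get_32() - start);
 */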
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_thread_stats_query(struct k_obj_core *obj_core, void *stats);
int z_thread_stats_reset(struct k_obj_core *obj_core);
int z_thread_stats_disable(struct k_obj_core *obj_core);
int z_thread_stats_enable(struct k_obj_core *obj_core);
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */

#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats);

int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */

#if defined(CONFIG_THREAD_ABORT_NEED_CLEANUP)
/**
 * Perform cleanup at the end of k_thread_abort().
 *
 * This performs additional cleanup steps at the end of k_thread_abort()
 * that require the thread to no longer be running.
 * If the target thread is not the current running thread, the cleanup
 * steps are performed immediately. However, if the target thread is
 * the current running thread (e.g. k_thread_abort(_current)), the
 * cleanup steps are deferred and finished later in another context.
 *
 * @param thread Pointer to thread to be cleaned up.
 */
void k_thread_abort_cleanup(struct k_thread *thread);

/**
 * Check if a thread is the same as the one waiting for cleanup.
 *
 * This is used to guard against reusing the same thread object
 * before the previous cleanup has finished. It performs
 * the necessary cleanup before the thread object can be
 * reused, and should mainly be used during thread creation.
 *
 * @param thread Pointer to thread to be checked.
 */
void k_thread_abort_cleanup_check_reuse(struct k_thread *thread);
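
/* Usage sketch (illustrative): a thread-creation path can guard against a
 * k_thread object being recycled while its deferred abort cleanup is still
 * pending:
 *
 *	void setup_thread_object(struct k_thread *new_thread)
 *	{
 *		k_thread_abort_cleanup_check_reuse(new_thread);
 *	}
 *
 * setup_thread_object() is a hypothetical caller; the check finishes any
 * cleanup owed by a previous incarnation of the same thread object.
 */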
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */