/*
 * Copyright (c) 2010-2012, 2014-2015 Wind River Systems, Inc.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

/**
 * @file
 * @brief Architecture-independent private kernel APIs
 *
 * This file contains private kernel APIs that are not architecture-specific.
 */

#ifndef ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_
#define ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_

#include <zephyr/kernel.h>
#include <kernel_arch_interface.h>
#include <string.h>

#ifndef _ASMLANGUAGE

#ifdef __cplusplus
extern "C" {
#endif

/* Initialize per-CPU kernel data */
void z_init_cpu(int id);

/* Initialize a thread */
void z_init_thread_base(struct _thread_base *thread_base, int priority,
			uint32_t initial_state, unsigned int options);

/* Early boot functions */
void z_early_memset(void *dst, int c, size_t n);
void z_early_memcpy(void *dst, const void *src, size_t n);

void z_bss_zero(void);
#ifdef CONFIG_XIP
void z_data_copy(void);
#else
static inline void z_data_copy(void)
{
	/* Do nothing */
}
#endif /* CONFIG_XIP */

#ifdef CONFIG_LINKER_USE_BOOT_SECTION
void z_bss_zero_boot(void);
#else
static inline void z_bss_zero_boot(void)
{
	/* Do nothing */
}
#endif /* CONFIG_LINKER_USE_BOOT_SECTION */

#ifdef CONFIG_LINKER_USE_PINNED_SECTION
void z_bss_zero_pinned(void);
#else
static inline void z_bss_zero_pinned(void)
{
	/* Do nothing */
}
#endif /* CONFIG_LINKER_USE_PINNED_SECTION */

FUNC_NORETURN void z_cstart(void);

void z_device_state_init(void);

extern FUNC_NORETURN void z_thread_entry(k_thread_entry_t entry,
					 void *p1, void *p2, void *p3);

extern char *z_setup_new_thread(struct k_thread *new_thread,
				k_thread_stack_t *stack, size_t stack_size,
				k_thread_entry_t entry,
				void *p1, void *p2, void *p3,
				int prio, uint32_t options, const char *name);

/**
 * @brief Allocate aligned memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param align Required memory alignment
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 *         RAM in the pool or there is no pool to draw memory from
 */
void *z_thread_aligned_alloc(size_t align, size_t size);

/**
 * @brief Allocate some memory from the current thread's resource pool
 *
 * Threads may be assigned a resource pool, which will be used to allocate
 * memory on behalf of certain kernel and driver APIs. Memory reserved
 * in this way should be freed with k_free().
 *
 * If called from an ISR, the k_malloc() system heap will be used if it exists.
 *
 * @param size Memory allocation size
 * @return A pointer to the allocated memory, or NULL if there is insufficient
 *         RAM in the pool or there is no pool to draw memory from
 */
static inline void *z_thread_malloc(size_t size)
{
	return z_thread_aligned_alloc(0, size);
}
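
/* Usage sketch (illustrative, not part of this header's API): a kernel
 * or driver API allocating a scratch buffer from the calling thread's
 * resource pool and releasing it with k_free(). Assumes the thread was
 * assigned a pool, e.g. via k_thread_heap_assign(); "len" is a
 * hypothetical size.
 *
 *	uint8_t *buf = z_thread_malloc(len);
 *
 *	if (buf == NULL) {
 *		return -ENOMEM;
 *	}
 *	memset(buf, 0, len);
 *	k_free(buf);
 */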

#ifdef CONFIG_USE_SWITCH
/* Traditionally this is an arch function, but when the switch-based
 * z_swap() is in use it's a simple inline provided by the kernel.
 */
static ALWAYS_INLINE void
arch_thread_return_value_set(struct k_thread *thread, unsigned int value)
{
	thread->swap_retval = value;
}
#endif

static ALWAYS_INLINE void
z_thread_return_value_set_with_data(struct k_thread *thread,
				    unsigned int value,
				    void *data)
{
	arch_thread_return_value_set(thread, value);
	thread->base.swap_data = data;
}
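
/* Usage sketch (illustrative): how an IPC primitive might wake a thread
 * blocked on a wait queue, handing it both a swap return value and a
 * data pointer before readying it. "waiter" and "payload" are
 * hypothetical locals.
 *
 *	z_thread_return_value_set_with_data(waiter, 0, payload);
 *	z_ready_thread(waiter);
 */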

#ifdef CONFIG_SMP
extern void z_smp_init(void);
#ifdef CONFIG_SYS_CLOCK_EXISTS
extern void smp_timer_init(void);
#endif /* CONFIG_SYS_CLOCK_EXISTS */
#endif /* CONFIG_SMP */

extern void z_early_rand_get(uint8_t *buf, size_t length);

#if defined(CONFIG_STACK_POINTER_RANDOM) && (CONFIG_STACK_POINTER_RANDOM != 0)
extern int z_stack_adjust_initialized;
#endif /* CONFIG_STACK_POINTER_RANDOM */

extern struct k_thread z_main_thread;

#ifdef CONFIG_MULTITHREADING
extern struct k_thread z_idle_threads[CONFIG_MP_MAX_NUM_CPUS];
#endif /* CONFIG_MULTITHREADING */
K_KERNEL_PINNED_STACK_ARRAY_DECLARE(z_interrupt_stacks, CONFIG_MP_MAX_NUM_CPUS,
				    CONFIG_ISR_STACK_SIZE);
K_THREAD_STACK_DECLARE(z_main_stack, CONFIG_MAIN_STACK_SIZE);

#ifdef CONFIG_GEN_PRIV_STACKS
extern uint8_t *z_priv_stack_find(k_thread_stack_t *stack);
#endif /* CONFIG_GEN_PRIV_STACKS */

/* Calculate stack usage. */
int z_stack_space_get(const uint8_t *stack_start, size_t size, size_t *unused_ptr);
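
/* Usage sketch (illustrative): measuring the unused portion of a
 * thread's stack, assuming CONFIG_THREAD_STACK_INFO so the thread
 * struct carries its stack bounds. "thread" is a hypothetical
 * struct k_thread pointer.
 *
 *	size_t unused;
 *	int ret = z_stack_space_get((const uint8_t *)thread->stack_info.start,
 *				    thread->stack_info.size, &unused);
 *
 *	if (ret == 0) {
 *		printk("%zu bytes never used\n", unused);
 *	}
 */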

#ifdef CONFIG_USERSPACE
bool z_stack_is_user_capable(k_thread_stack_t *stack);

/* Memory domain setup hook, called from z_setup_new_thread() */
void z_mem_domain_init_thread(struct k_thread *thread);

/* Memory domain teardown hook, called from z_thread_abort() */
void z_mem_domain_exit_thread(struct k_thread *thread);

/* This spinlock:
 *
 * - Protects the full set of active k_mem_domain objects and their contents
 * - Serializes calls to arch_mem_domain_* APIs
 *
 * If architecture code needs to access k_mem_domain structures or the
 * partitions they contain at any other point, this spinlock should be held.
 * Uniprocessor systems can get away with just locking interrupts, but this
 * is not recommended.
 */
extern struct k_spinlock z_mem_domain_lock;
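
/* Usage sketch (illustrative): the locking pattern the comment above
 * prescribes for code touching memory domain structures.
 *
 *	k_spinlock_key_t key = k_spin_lock(&z_mem_domain_lock);
 *
 *	(inspect or update the k_mem_domain and its partitions here)
 *
 *	k_spin_unlock(&z_mem_domain_lock, key);
 */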
#endif /* CONFIG_USERSPACE */

#ifdef CONFIG_GDBSTUB
struct gdb_ctx;

/* Should be called by the arch layer. This is the gdbstub main loop;
 * it synchronously communicates with gdb on the host.
 */
extern int z_gdb_main_loop(struct gdb_ctx *ctx);
#endif /* CONFIG_GDBSTUB */

#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
void z_thread_mark_switched_in(void);
void z_thread_mark_switched_out(void);
#else

/**
 * @brief Called after a thread has been selected to run
 */
#define z_thread_mark_switched_in()

/**
 * @brief Called before a thread has been selected to run
 */
#define z_thread_mark_switched_out()

#endif /* CONFIG_INSTRUMENT_THREAD_SWITCHING */

/* Init hook for page frame management, invoked immediately upon entry to
 * the main thread, before POST_KERNEL tasks
 */
void z_mem_manage_init(void);

/**
 * @brief Finalize page frame management at the end of the boot process.
 */
void z_mem_manage_boot_finish(void);

void z_handle_obj_poll_events(sys_dlist_t *events, uint32_t state);

#ifdef CONFIG_PM

/* When the kernel is about to go idle, it calls this function to notify
 * the power management subsystem that the kernel is ready to enter the
 * idle state.
 *
 * At this point, the kernel has disabled interrupts and computed the
 * maximum time the system can remain idle. That time is passed in, and
 * the SoC interface performs whatever power operations can be completed
 * within it. The power management operations must halt execution of the
 * CPU.
 *
 * This function assumes that a wake up event has already been set up by
 * the application.
 *
 * This function is entered with interrupts disabled. It should re-enable
 * interrupts if it had entered a power state.
 *
 * @return true if the system suspended, otherwise false
 */
bool pm_system_suspend(int32_t ticks);
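
/* Usage sketch (illustrative, loosely modeled on the kernel idle path;
 * not the actual idle loop): interrupts are locked, then
 * pm_system_suspend() either enters a power state (re-enabling
 * interrupts itself) or returns false, in which case the caller falls
 * back to k_cpu_idle(), which also re-enables interrupts. "ticks" is
 * assumed to come from the timeout subsystem.
 *
 *	(void) arch_irq_lock();
 *
 *	if (!pm_system_suspend(ticks)) {
 *		k_cpu_idle();
 *	}
 */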

#endif /* CONFIG_PM */

#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
/**
 * Initialize the timing histograms for demand paging.
 */
void z_paging_histogram_init(void);

/**
 * Increment the counter in the timing histogram.
 *
 * @param hist The timing histogram to be updated.
 * @param cycles Time spent in measured operation.
 */
void z_paging_histogram_inc(struct k_mem_paging_histogram_t *hist,
			    uint32_t cycles);
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
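
/* Usage sketch (illustrative): timing a paging operation with the cycle
 * counter and recording it. "hist" and do_page_out() are hypothetical
 * stand-ins for a real histogram instance and paging operation.
 *
 *	uint32_t t0 = k_cycle_get_32();
 *
 *	do_page_out();
 *	z_paging_histogram_inc(&hist, k_cycle_get_32() - t0);
 */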

#ifdef CONFIG_OBJ_CORE_STATS_THREAD
int z_thread_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_thread_stats_query(struct k_obj_core *obj_core, void *stats);
int z_thread_stats_reset(struct k_obj_core *obj_core);
int z_thread_stats_disable(struct k_obj_core *obj_core);
int z_thread_stats_enable(struct k_obj_core *obj_core);
#endif /* CONFIG_OBJ_CORE_STATS_THREAD */

#ifdef CONFIG_OBJ_CORE_STATS_SYSTEM
int z_cpu_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_cpu_stats_query(struct k_obj_core *obj_core, void *stats);

int z_kernel_stats_raw(struct k_obj_core *obj_core, void *stats);
int z_kernel_stats_query(struct k_obj_core *obj_core, void *stats);
#endif /* CONFIG_OBJ_CORE_STATS_SYSTEM */

#if defined(CONFIG_THREAD_ABORT_NEED_CLEANUP)
/**
 * Perform cleanup at the end of k_thread_abort().
 *
 * This performs additional cleanup steps at the end of k_thread_abort()
 * where these steps require that the thread is no longer running.
 * If the target thread is not the current running thread, the cleanup
 * steps will be performed immediately. However, if the target thread is
 * the current running thread (e.g. k_thread_abort(arch_current_thread())),
 * the cleanup steps are deferred until later, when the work can be
 * finished in another context.
 *
 * @param thread Pointer to thread to be cleaned up.
 */
void k_thread_abort_cleanup(struct k_thread *thread);

/**
 * Check if thread is the same as the one waiting for cleanup.
 *
 * This is used to guard against reusing the same thread object
 * before the previous cleanup has finished. This will perform
 * the necessary cleanups before the thread object can be
 * reused. Should mainly be used during thread creation.
 *
 * @param thread Pointer to thread to be checked.
 */
void k_thread_abort_cleanup_check_reuse(struct k_thread *thread);
#endif /* CONFIG_THREAD_ABORT_NEED_CLEANUP */

#ifdef __cplusplus
}
#endif

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_KERNEL_INCLUDE_KERNEL_INTERNAL_H_ */