LCOV - code coverage report
Current view: top level - /home/runner/zephyrproject/zephyr/lib/heap - heap.h (source / functions) Coverage Total Hit
Test: lcov.info Lines: 0.0 % 85 0
Test Date: 2026-03-12 12:01:18 Functions: 0.0 % 26 0
Legend: Lines: hit not hit | Branches: + taken - not taken # not executed Branches: 0.0 % 20 0

             Branch data     Line data    Source code
       1                 :             : /*
       2                 :             :  * Copyright (c) 2019 Intel Corporation
       3                 :             :  *
       4                 :             :  * SPDX-License-Identifier: Apache-2.0
       5                 :             :  */
       6                 :             : #ifndef ZEPHYR_INCLUDE_LIB_OS_HEAP_H_
       7                 :             : #define ZEPHYR_INCLUDE_LIB_OS_HEAP_H_
       8                 :             : 
       9                 :             : /*
      10                 :             :  * Internal heap APIs
      11                 :             :  */
      12                 :             : 
      13                 :             : /* These validation checks are non-trivially expensive, so enable
      14                 :             :  * only when debugging the heap code.  They shouldn't be routine
      15                 :             :  * assertions.
      16                 :             :  */
      17                 :             : #ifdef CONFIG_SYS_HEAP_VALIDATE
      18                 :             : #define CHECK(x) __ASSERT(x, "")
      19                 :             : #else
      20                 :             : #define CHECK(x) /**/
      21                 :             : #endif
      22                 :             : 
      23                 :             : /* Heap hardening level predicates.  Each is true when the configured
      24                 :             :  * hardening level is at or above the named level.  The compiler
      25                 :             :  * eliminates dead code at lower levels.
      26                 :             :  */
      27                 :             : #define SYS_HEAP_HARDENING_BASIC    (CONFIG_SYS_HEAP_HARDENING_LEVEL >= 1)
      28                 :             : #define SYS_HEAP_HARDENING_MODERATE (CONFIG_SYS_HEAP_HARDENING_LEVEL >= 2)
      29                 :             : #define SYS_HEAP_HARDENING_FULL     (CONFIG_SYS_HEAP_HARDENING_LEVEL >= 3)
      30                 :             : #define SYS_HEAP_HARDENING_EXTREME (CONFIG_SYS_HEAP_HARDENING_LEVEL >= 4)
      31                 :             : 
      32                 :             : /* Chunks are identified by their offset in 8 byte units from the
      33                 :             :  * first address in the buffer (a zero-valued chunkid_t is used as a
      34                 :             :  * null; that chunk would always point into the metadata at the start
      35                 :             :  * of the heap and cannot be allocated).  They are prefixed by a
      36                 :             :  * variable size header that depends on the size of the heap.  Heaps
      37                 :             :  * with fewer than 2^15 units (256kb) of storage use shorts to store
      38                 :             :  * the fields, otherwise the units are 32 bit integers for a 16Gb heap
      39                 :             :  * space (larger spaces really aren't in scope for this code, but
      40                 :             :  * could be handled similarly I suppose).  Because of that design
      41                 :             :  * there's a certain amount of boilerplate API needed to expose the
      42                 :             :  * field accessors since we can't use natural syntax.
      43                 :             :  *
      44                 :             :  * The fields are:
      45                 :             :  *   LEFT_SIZE: The size of the left (next lower chunk in memory)
      46                 :             :  *              neighbor chunk.
      47                 :             :  *   SIZE_AND_USED: the total size (including header) of the chunk in
      48                 :             :  *                  8-byte units.  The bottom bit stores a "used" flag.
      49                 :             :  *   FREE_PREV: Chunk ID of the previous node in a free list.
      50                 :             :  *   FREE_NEXT: Chunk ID of the next node in a free list.
      51                 :             :  *
      52                 :             :  * The free lists are circular lists, one for each power-of-two size
      53                 :             :  * category.  The free list pointers exist only for free chunks,
      54                 :             :  * obviously.  This memory is part of the user's buffer when
      55                 :             :  * allocated.
      56                 :             :  *
      57                 :             :  * The field order is so that allocated buffers are immediately bounded
      58                 :             :  * by SIZE_AND_USED of the current chunk at the bottom, and LEFT_SIZE of
      59                 :             :  * the following chunk at the top. This ordering allows for quick buffer
      60                 :             :  * overflow detection by testing left_chunk(c + chunk_size(c)) == c.
      61                 :             :  */
      62                 :             : 
/* Indices of the per-chunk metadata fields; see the layout comment above. */
enum chunk_fields { LEFT_SIZE, SIZE_AND_USED, FREE_PREV, FREE_NEXT };

/* Allocation granularity: every size/offset in this file is in 8-byte units */
#define CHUNK_UNIT 8U

/* One 8-byte unit; the heap buffer is addressed as an array of these */
typedef struct { char bytes[CHUNK_UNIT]; } chunk_unit_t;

/* big_heap needs uint32_t, small_heap needs uint16_t */
typedef uint32_t chunkid_t;
typedef uint32_t chunksz_t;
      72                 :             : 
      73                 :             : #ifdef CONFIG_SYS_HEAP_CANARIES
      74                 :             : 
/* Trailer stored at the very end of each chunk when
 * CONFIG_SYS_HEAP_CANARIES is enabled; holds a canary word used to
 * detect overflows past the user data (see chunk_trailer()).
 * Aligned to CHUNK_UNIT so it occupies whole chunk units.
 */
struct z_heap_chunk_trailer {
        uint64_t canary;
} __aligned(CHUNK_UNIT);
      78                 :             : 
      79                 :             : #define CHUNK_TRAILER_SIZE (sizeof(struct z_heap_chunk_trailer) / CHUNK_UNIT)
      80                 :             : 
      81                 :             : #else
      82                 :             : 
      83                 :             : #define CHUNK_TRAILER_SIZE 0
      84                 :             : 
      85                 :             : #endif /* CONFIG_SYS_HEAP_CANARIES */
      86                 :             : 
/* Head of one free list (one list per power-of-two size bucket).
 * Per the layout comment above, the lists are circular; presumably a
 * zero/null chunk ID marks an empty bucket -- verify against heap.c.
 */
struct z_heap_bucket {
        chunkid_t next;
};
      90                 :             : 
/* Heap metadata.  This struct lives at the start of the heap buffer and
 * overlays chunk 0 (see chunk_buf()), which is why that chunk can never
 * be handed out to callers.
 */
struct z_heap {
        chunkid_t chunk0_hdr[2];  /* header fields of chunk 0, overlaid by this struct */
        chunkid_t end_chunk;      /* chunk ID one past the last usable chunk */
        uint32_t avail_buckets;   /* presumably a bitmask of non-empty buckets -- verify in heap.c */
#ifdef CONFIG_SYS_HEAP_RUNTIME_STATS
        /* Runtime statistics; maintained by code outside this header */
        size_t free_bytes;
        size_t allocated_bytes;
        size_t max_allocated_bytes;
#endif
        struct z_heap_bucket buckets[];  /* one free-list head per size bucket */
};
     102                 :             : 
     103                 :           0 : static inline bool big_heap_chunks(chunksz_t chunks)
     104                 :             : {
     105                 :           0 :         if (IS_ENABLED(CONFIG_SYS_HEAP_SMALL_ONLY)) {
     106                 :           0 :                 return false;
     107                 :             :         }
     108                 :             :         if (IS_ENABLED(CONFIG_SYS_HEAP_BIG_ONLY) || sizeof(void *) > 4U) {
     109                 :             :                 return true;
     110                 :             :         }
     111                 :             :         return chunks > 0x7fffU;
     112                 :             : }
     113                 :             : 
     114                 :           0 : static inline bool big_heap_bytes(size_t bytes)
     115                 :             : {
     116                 :           0 :         return big_heap_chunks(bytes / CHUNK_UNIT);
     117                 :             : }
     118                 :             : 
     119                 :           0 : static inline bool big_heap(struct z_heap *h)
     120                 :             : {
     121                 :           0 :         return big_heap_chunks(h->end_chunk);
     122                 :             : }
     123                 :             : 
/* Return the heap buffer viewed as an array of chunk units.  Valid
 * because struct z_heap overlays chunk 0 at the start of the buffer.
 */
static inline chunk_unit_t *chunk_buf(struct z_heap *h)
{
        /* the struct z_heap matches with the first chunk */
        return (chunk_unit_t *)h;
}
     129                 :             : 
     130                 :           0 : static inline chunkid_t chunk_field(struct z_heap *h, chunkid_t c,
     131                 :             :                                     enum chunk_fields f)
     132                 :             : {
     133                 :           0 :         chunk_unit_t *buf = chunk_buf(h);
     134                 :           0 :         void *cmem = &buf[c];
     135                 :             : 
     136         [ #  # ]:           0 :         if (big_heap(h)) {
     137                 :           0 :                 return ((uint32_t *)cmem)[f];
     138                 :             :         } else {
     139                 :           0 :                 return ((uint16_t *)cmem)[f];
     140                 :             :         }
     141                 :             : }
     142                 :             : 
/* Write @val into metadata field @f of chunk @c, using the 16- or
 * 32-bit field width appropriate for this heap.  The CHECKs are
 * compiled out unless CONFIG_SYS_HEAP_VALIDATE is set.
 */
static inline void chunk_set(struct z_heap *h, chunkid_t c,
                             enum chunk_fields f, chunkid_t val)
{
        CHECK(c <= h->end_chunk);

        chunk_unit_t *buf = chunk_buf(h);
        void *cmem = &buf[c];

        if (big_heap(h)) {
                /* value must be representable in the 32-bit field */
                CHECK(val == (uint32_t)val);
                ((uint32_t *)cmem)[f] = val;
        } else {
                /* small heaps store fields in 16 bits */
                CHECK(val == (uint16_t)val);
                ((uint16_t *)cmem)[f] = val;
        }
}
     159                 :             : 
     160                 :           0 : static inline bool chunk_used(struct z_heap *h, chunkid_t c)
     161                 :             : {
     162                 :           0 :         return chunk_field(h, c, SIZE_AND_USED) & 1U;
     163                 :             : }
     164                 :             : 
     165                 :           0 : static inline chunksz_t chunk_size(struct z_heap *h, chunkid_t c)
     166                 :             : {
     167                 :           0 :         return chunk_field(h, c, SIZE_AND_USED) >> 1;
     168                 :             : }
     169                 :             : 
/* Set or clear the "used" flag (bottom bit of SIZE_AND_USED) of chunk
 * @c without disturbing the size bits.  A read-modify-write is needed
 * here, unlike set_chunk_size() which may clobber the flag.
 */
static inline void set_chunk_used(struct z_heap *h, chunkid_t c, bool used)
{
        chunk_unit_t *buf = chunk_buf(h);
        void *cmem = &buf[c];

        if (big_heap(h)) {
                if (used) {
                        ((uint32_t *)cmem)[SIZE_AND_USED] |= 1U;
                } else {
                        ((uint32_t *)cmem)[SIZE_AND_USED] &= ~1U;
                }
        } else {
                if (used) {
                        ((uint16_t *)cmem)[SIZE_AND_USED] |= 1U;
                } else {
                        ((uint16_t *)cmem)[SIZE_AND_USED] &= ~1U;
                }
        }
}
     189                 :             : 
     190                 :             : /*
     191                 :             :  * Note: no need to preserve the used bit here as the chunk is never in use
     192                 :             :  * when its size is modified, and potential set_chunk_used() is always
     193                 :             :  * invoked after set_chunk_size().
     194                 :             :  */
     195                 :           0 : static inline void set_chunk_size(struct z_heap *h, chunkid_t c, chunksz_t size)
     196                 :             : {
     197                 :           0 :         chunk_set(h, c, SIZE_AND_USED, size << 1);
     198                 :           0 : }
     199                 :             : 
     200                 :           0 : static inline chunkid_t prev_free_chunk(struct z_heap *h, chunkid_t c)
     201                 :             : {
     202                 :           0 :         return chunk_field(h, c, FREE_PREV);
     203                 :             : }
     204                 :             : 
     205                 :           0 : static inline chunkid_t next_free_chunk(struct z_heap *h, chunkid_t c)
     206                 :             : {
     207                 :           0 :         return chunk_field(h, c, FREE_NEXT);
     208                 :             : }
     209                 :             : 
/* Link @prev as the free-list predecessor of chunk @c.  Only valid for
 * free chunks: the FREE_PREV field shares storage with user data.
 */
static inline void set_prev_free_chunk(struct z_heap *h, chunkid_t c,
                                       chunkid_t prev)
{
        chunk_set(h, c, FREE_PREV, prev);
}
     215                 :             : 
/* Link @next as the free-list successor of chunk @c.  Only valid for
 * free chunks: the FREE_NEXT field shares storage with user data.
 */
static inline void set_next_free_chunk(struct z_heap *h, chunkid_t c,
                                       chunkid_t next)
{
        chunk_set(h, c, FREE_NEXT, next);
}
     221                 :             : 
     222                 :           0 : static inline chunkid_t left_chunk(struct z_heap *h, chunkid_t c)
     223                 :             : {
     224                 :           0 :         return c - chunk_field(h, c, LEFT_SIZE);
     225                 :             : }
     226                 :             : 
     227                 :           0 : static inline chunkid_t right_chunk(struct z_heap *h, chunkid_t c)
     228                 :             : {
     229                 :           0 :         return c + chunk_size(h, c);
     230                 :             : }
     231                 :             : 
/* Record the size of @c's left neighbor so left_chunk() can walk
 * backwards; must be kept in sync whenever the left neighbor resizes.
 */
static inline void set_left_chunk_size(struct z_heap *h, chunkid_t c,
                                       chunksz_t size)
{
        chunk_set(h, c, LEFT_SIZE, size);
}
     237                 :             : 
     238                 :           0 : static inline size_t chunk_header_bytes(struct z_heap *h)
     239                 :             : {
     240         [ #  # ]:           0 :         return big_heap(h) ? 8 : 4;
     241                 :             : }
     242                 :             : 
     243                 :           0 : static inline size_t heap_footer_bytes(size_t size)
     244                 :             : {
     245         [ #  # ]:           0 :         return big_heap_bytes(size) ? 8 : 4;
     246                 :             : }
     247                 :             : 
     248                 :           0 : static inline chunksz_t chunksz(size_t bytes)
     249                 :             : {
     250                 :           0 :         return (bytes + CHUNK_UNIT - 1U) / CHUNK_UNIT;
     251                 :             : }
     252                 :             : 
     253                 :             : /**
     254                 :             :  * Convert the number of requested bytes to chunks and clamp it to facilitate
     255                 :             :  * error handling. As some of the heap is used for metadata, there will never
     256                 :             :  * be enough space for 'end_chunk' chunks. Also note that since 'size_t' may
     257                 :             :  * be 64-bits wide, clamping guards against overflow when converting to the
     258                 :             :  * 32-bit wide 'chunksz_t'.
     259                 :             :  */
     260                 :           0 : static ALWAYS_INLINE chunksz_t bytes_to_chunksz(struct z_heap *h, size_t bytes, size_t extra)
     261                 :             : {
     262                 :           0 :         size_t chunks = (bytes / CHUNK_UNIT) + (extra / CHUNK_UNIT);
     263                 :           0 :         size_t oddments = ((bytes % CHUNK_UNIT) + (extra % CHUNK_UNIT) +
     264                 :           0 :                            chunk_header_bytes(h) + CHUNK_UNIT - 1U) / CHUNK_UNIT;
     265                 :             : 
     266                 :           0 :         return (chunksz_t)min(chunks + oddments + CHUNK_TRAILER_SIZE, h->end_chunk);
     267                 :             : }
     268                 :             : 
     269                 :           0 : static inline chunksz_t min_chunk_size(struct z_heap *h)
     270                 :             : {
     271                 :           0 :         return chunksz(chunk_header_bytes(h) + 1) + CHUNK_TRAILER_SIZE;
     272                 :             : }
     273                 :             : 
     274                 :             : /*
     275                 :             :  * Return true if chunk is undersized, i.e. smaller than min_chunk_size().
     276                 :             :  * Such chunks are not added to the free list because:
     277                 :             :  * 1) they would be too small to be allocatable anyway, and
     278                 :             :  * 2) they might be too small to store free list pointers.
     279                 :             :  * It happens that min_chunk_size() is always >= the free pointer storage size.
     280                 :             :  * The initial conditions short-circuit the comparison with build-time constants
     281                 :             :  * when undersized chunks cannot occur.
     282                 :             :  */
/* See the explanatory comment above.  The constant-foldable predicate
 * (CHUNK_TRAILER_SIZE / big_heap) is evaluated first deliberately so
 * the whole expression becomes compile-time false on configurations
 * where undersized chunks cannot occur.
 */
static inline bool undersized_chunk(struct z_heap *h, chunkid_t c)
{
        return (CHUNK_TRAILER_SIZE != 0 || big_heap(h)) &&
                chunk_size(h, c) < min_chunk_size(h);
}
     288                 :             : 
     289                 :           0 : static inline size_t chunk_usable_bytes(struct z_heap *h, chunkid_t c)
     290                 :             : {
     291                 :           0 :         return chunk_size(h, c) * CHUNK_UNIT - chunk_header_bytes(h)
     292                 :           0 :                - CHUNK_TRAILER_SIZE * CHUNK_UNIT;
     293                 :             : }
     294                 :             : 
     295                 :           0 : static inline size_t mem_align_gap(struct z_heap *h, void *mem)
     296                 :             : {
     297         [ #  # ]:           0 :         if (chunk_header_bytes(h) == CHUNK_UNIT) {
     298                 :             :                 return 0;
     299                 :             :         }
     300                 :           0 :         return ((uintptr_t)mem - chunk_header_bytes(h)) & (CHUNK_UNIT - 1);
     301                 :             : }
     302                 :             : 
     303                 :           0 : static inline int bucket_idx(struct z_heap *h, chunksz_t sz)
     304                 :             : {
     305                 :           0 :         unsigned int usable_sz = sz - min_chunk_size(h) + 1;
     306                 :           0 :         return 31 - __builtin_clz(usable_sz);
     307                 :             : }
     308                 :             : 
     309                 :             : static inline void get_alloc_info(struct z_heap *h, size_t *alloc_bytes,
     310                 :             :                            size_t *free_bytes)
     311                 :             : {
     312                 :             :         chunkid_t c;
     313                 :             : 
     314                 :             :         *alloc_bytes = 0;
     315                 :             :         *free_bytes = 0;
     316                 :             : 
     317                 :             :         for (c = right_chunk(h, 0); c < h->end_chunk; c = right_chunk(h, c)) {
     318                 :             :                 if (chunk_used(h, c)) {
     319                 :             :                         *alloc_bytes += chunk_usable_bytes(h, c);
     320                 :             :                 } else if (!undersized_chunk(h, c)) {
     321                 :             :                         *free_bytes += chunk_usable_bytes(h, c);
     322                 :             :                 }
     323                 :             :         }
     324                 :             : }
     325                 :             : 
     326                 :             : #ifdef CONFIG_SYS_HEAP_CANARIES
     327                 :             : 
     328                 :             : /* Returns pointer to the chunk trailer (at the end of the chunk) */
     329                 :             : static inline struct z_heap_chunk_trailer *chunk_trailer(struct z_heap *h,
     330                 :             :                                                          chunkid_t c)
     331                 :             : {
     332                 :             :         chunk_unit_t *buf = chunk_buf(h);
     333                 :             : 
     334                 :             :         return (struct z_heap_chunk_trailer *)&buf[c + chunk_size(h, c)
     335                 :             :                                                    - CHUNK_TRAILER_SIZE];
     336                 :             : }
     337                 :             : 
     338                 :             : #endif /* CONFIG_SYS_HEAP_CANARIES */
     339                 :             : 
     340                 :             : #ifdef CONFIG_SYS_HEAP_VALIDATE
     341                 :             : bool z_heap_full_check(struct z_heap *h);
     342                 :             : #else
/* Stub used when CONFIG_SYS_HEAP_VALIDATE is disabled: validation
 * trivially succeeds without touching the heap.
 */
static inline bool z_heap_full_check(struct z_heap *h)
{
        ARG_UNUSED(h);
        return true;
}
     348                 :             : #endif
     349                 :             : 
     350                 :             : #endif /* ZEPHYR_INCLUDE_LIB_OS_HEAP_H_ */
        

Generated by: LCOV version 2.0-1