github.com/moontrade/nogc@v0.1.7/alloc/mimalloc/mimalloc-types.h (about)

     1  /* ----------------------------------------------------------------------------
     2  Copyright (c) 2018-2021, Microsoft Research, Daan Leijen
     3  This is free software; you can redistribute it and/or modify it under the
     4  terms of the MIT license. A copy of the license can be found in the file
     5  "LICENSE" at the root of this distribution.
     6  -----------------------------------------------------------------------------*/
     7  #pragma once
     8  #ifndef MIMALLOC_TYPES_H
     9  #define MIMALLOC_TYPES_H
    10  
    11  #include <stddef.h>   // ptrdiff_t
    12  #include <stdint.h>   // uintptr_t, uint16_t, etc
    13  #include "mimalloc-atomic.h"  // _Atomic
    14  
    15  #ifdef _MSC_VER
    16  #pragma warning(disable:4214) // bitfield is not int
    17  #endif 
    18  
// Minimal alignment necessary. On most platforms 16 bytes are needed
// due to SSE registers for example. This must be at least `MI_INTPTR_SIZE`
#ifndef MI_MAX_ALIGN_SIZE
#define MI_MAX_ALIGN_SIZE  16   // sizeof(max_align_t)
#endif

// ------------------------------------------------------
// Variants
// ------------------------------------------------------

// Define NDEBUG in the release version to disable assertions.
// #define NDEBUG

// Define MI_STAT as 1 to maintain statistics; set it to 2 to have detailed statistics (but costs some performance).
// #define MI_STAT 1

// Define MI_SECURE to enable security mitigations
// #define MI_SECURE 1  // guard page around metadata
// #define MI_SECURE 2  // guard page around each mimalloc page
// #define MI_SECURE 3  // encode free lists (detect corrupted free list (buffer overflow), and invalid pointer free)
// #define MI_SECURE 4  // checks for double free. (may be more expensive)

#if !defined(MI_SECURE)
#define MI_SECURE 0     // security mitigations are off by default
#endif

// Define MI_DEBUG for debug mode
// #define MI_DEBUG 1  // basic assertion checks and statistics, check double free, corrupted free list, and invalid pointer free.
// #define MI_DEBUG 2  // + internal assertion checks
// #define MI_DEBUG 3  // + extensive internal invariant checking (cmake -DMI_DEBUG_FULL=ON)
#if !defined(MI_DEBUG)
#if !defined(NDEBUG) || defined(_DEBUG)
#define MI_DEBUG 2      // default to internal assertion checks in (MSVC `_DEBUG` or non-NDEBUG) debug builds
#else
#define MI_DEBUG 0
#endif
#endif

// Reserve extra padding at the end of each block to be more resilient against heap block overflows.
// The padding can detect byte-precise buffer overflow on free.
// (Enabled automatically in any debug build unless explicitly overridden.)
#if !defined(MI_PADDING) && (MI_DEBUG>=1)
#define MI_PADDING  1
#endif


// Encoded free lists allow detection of corrupted free lists
// and can detect buffer overflows, modify after free, and double `free`s.
// (Implied by secure level >= 3, any debug build, or padding being enabled.)
#if (MI_SECURE>=3 || MI_DEBUG>=1 || MI_PADDING > 0)
#define MI_ENCODE_FREELIST  1
#endif
    69  
    70  // ------------------------------------------------------
    71  // Platform specific values
    72  // ------------------------------------------------------
    73  
    74  // ------------------------------------------------------
    75  // Size of a pointer.
    76  // We assume that `sizeof(void*)==sizeof(intptr_t)`
    77  // and it holds for all platforms we know of.
    78  //
    79  // However, the C standard only requires that:
    80  //  p == (void*)((intptr_t)p))
    81  // but we also need:
    82  //  i == (intptr_t)((void*)i)
    83  // or otherwise one might define an intptr_t type that is larger than a pointer...
    84  // ------------------------------------------------------
    85  
    86  #if INTPTR_MAX == 9223372036854775807LL
    87  # define MI_INTPTR_SHIFT (3)
    88  #elif INTPTR_MAX == 2147483647LL
    89  # define MI_INTPTR_SHIFT (2)
    90  #else
    91  #error platform must be 32 or 64 bits
    92  #endif
    93  
    94  #define MI_INTPTR_SIZE  (1<<MI_INTPTR_SHIFT)
    95  #define MI_INTPTR_BITS  (MI_INTPTR_SIZE*8)
    96  
    97  #define KiB     ((size_t)1024)
    98  #define MiB     (KiB*KiB)
    99  #define GiB     (MiB*KiB)
   100  
   101  
// ------------------------------------------------------
// Main internal data-structures
// ------------------------------------------------------

// Main tuning parameters for segment and page sizes
// Sizes for 64-bit, divide by two for 32-bit
#define MI_SMALL_PAGE_SHIFT               (13 + MI_INTPTR_SHIFT)      // 64kb
#define MI_MEDIUM_PAGE_SHIFT              ( 3 + MI_SMALL_PAGE_SHIFT)  // 512kb
#define MI_LARGE_PAGE_SHIFT               ( 3 + MI_MEDIUM_PAGE_SHIFT) // 4mb
#define MI_SEGMENT_SHIFT                  ( MI_LARGE_PAGE_SHIFT)      // 4mb -- one large page spans a whole segment

// Derived constants
#define MI_SEGMENT_SIZE                   (1UL<<MI_SEGMENT_SHIFT)
#define MI_SEGMENT_MASK                   ((uintptr_t)MI_SEGMENT_SIZE - 1)   // offset-within-segment mask

#define MI_SMALL_PAGE_SIZE                (1UL<<MI_SMALL_PAGE_SHIFT)
#define MI_MEDIUM_PAGE_SIZE               (1UL<<MI_MEDIUM_PAGE_SHIFT)
#define MI_LARGE_PAGE_SIZE                (1UL<<MI_LARGE_PAGE_SHIFT)

#define MI_SMALL_PAGES_PER_SEGMENT        (MI_SEGMENT_SIZE/MI_SMALL_PAGE_SIZE)   // 64
#define MI_MEDIUM_PAGES_PER_SEGMENT       (MI_SEGMENT_SIZE/MI_MEDIUM_PAGE_SIZE)  // 8
#define MI_LARGE_PAGES_PER_SEGMENT        (MI_SEGMENT_SIZE/MI_LARGE_PAGE_SIZE)   // 1

// The max object size are checked to not waste more than 12.5% internally over the page sizes.
// (Except for large pages since huge objects are allocated in 4MiB chunks)
#define MI_SMALL_OBJ_SIZE_MAX             (MI_SMALL_PAGE_SIZE/4)   // 16kb
#define MI_MEDIUM_OBJ_SIZE_MAX            (MI_MEDIUM_PAGE_SIZE/4)  // 128kb
#define MI_LARGE_OBJ_SIZE_MAX             (MI_LARGE_PAGE_SIZE/2)   // 2mb
#define MI_LARGE_OBJ_WSIZE_MAX            (MI_LARGE_OBJ_SIZE_MAX/MI_INTPTR_SIZE)  // max large object size in machine words
#define MI_HUGE_OBJ_SIZE_MAX              (2*MI_INTPTR_SIZE*MI_SEGMENT_SIZE)        // (must match MI_REGION_MAX_ALLOC_SIZE in memory.c)

// Maximum number of size classes. (spaced exponentially in 12.5% increments)
#define MI_BIN_HUGE  (73U)

// sanity check: the size classes above must be able to cover every large object
#if (MI_LARGE_OBJ_WSIZE_MAX >= 655360)
#error "define more bins"
#endif

// Used as a special value to encode block sizes in 32 bits.
#define MI_HUGE_BLOCK_SIZE   ((uint32_t)MI_HUGE_OBJ_SIZE_MAX)
   142  
// The free lists use encoded next fields
// (Only actually encodes when MI_ENCODE_FREELIST is defined.)
typedef uintptr_t mi_encoded_t;

// free lists contain blocks; each block stores the (possibly encoded)
// pointer to the next free block in-place
typedef struct mi_block_s {
  mi_encoded_t next;
} mi_block_t;
   151  
   152  
// The delayed flags are used for efficient multi-threaded free-ing
// (carried in the bottom 2 bits of a `mi_thread_free_t`, see below)
typedef enum mi_delayed_e {
  MI_USE_DELAYED_FREE   = 0, // push on the owning heap thread delayed list
  MI_DELAYED_FREEING    = 1, // temporary: another thread is accessing the owning heap
  MI_NO_DELAYED_FREE    = 2, // optimize: push on page local thread free queue if another block is already in the heap thread delayed free list
  MI_NEVER_DELAYED_FREE = 3  // sticky, only resets on page reclaim
} mi_delayed_t;
   160  
   161  
// The `in_full` and `has_aligned` page flags are put in a union to efficiently
// test if both are false (`full_aligned == 0`) in the `mi_free` routine.
#if !MI_TSAN
typedef union mi_page_flags_s {
  uint8_t full_aligned;       // both flags in one byte: enables a single zero test
  struct {
    uint8_t in_full : 1;      // mirrors bit 0 of `full_aligned`
    uint8_t has_aligned : 1;  // mirrors bit 1 of `full_aligned`
  } x;
} mi_page_flags_t;
#else
// under thread sanitizer, use a byte for each flag to suppress warning, issue #130
typedef union mi_page_flags_s {
  uint16_t full_aligned;      // both flag bytes at once, still allows a single zero test
  struct {
    uint8_t in_full;
    uint8_t has_aligned;
  } x;
} mi_page_flags_t;
#endif
   182  
// Thread free list.
// We use the bottom 2 bits of the pointer for mi_delayed_t flags
typedef uintptr_t mi_thread_free_t;

// A page contains blocks of one specific size (`block_size`).
// Each page has three lists of free blocks:
// `free` for blocks that can be allocated,
// `local_free` for freed blocks that are not yet available to `mi_malloc`
// `thread_free` for freed blocks by other threads
// The `local_free` and `thread_free` lists are migrated to the `free` list
// when it is exhausted. The separate `local_free` list is necessary to
// implement a monotonic heartbeat. The `thread_free` list is needed for
// avoiding atomic operations in the common case.
//
//
// `used - |thread_free|` == actual blocks that are in use (alive)
// `used - |thread_free| + |free| + |local_free| == capacity`
//
// We don't count `freed` (as |free|) but use `used` to reduce
// the number of memory accesses in the `mi_page_all_free` function(s).
//
// Notes:
// - Access is optimized for `mi_free` and `mi_page_alloc` (in `alloc.c`)
// - Using `uint16_t` does not seem to slow things down
// - The size is 8 words on 64-bit which helps the page index calculations
//   (and 10 words on 32-bit, and encoded free lists add 2 words. Sizes 10
//    and 12 are still good for address calculation)
// - To limit the structure size, the `xblock_size` is 32-bits only; for
//   blocks > MI_HUGE_BLOCK_SIZE the size is determined from the segment page size
// - `thread_free` uses the bottom bits as a delayed-free flags to optimize
//   concurrent frees where only the first concurrent free adds to the owning
//   heap `thread_delayed_free` list (see `alloc.c:mi_free_block_mt`).
//   The invariant is that no-delayed-free is only set if there is
//   at least one block that will be added, or has already been added, to
//   the owning heap `thread_delayed_free` list. This guarantees that pages
//   will be freed correctly even if only other threads free blocks.
typedef struct mi_page_s {
  // "owned" by the segment
  uint8_t               segment_idx;       // index in the segment `pages` array, `page == &segment->pages[page->segment_idx]`
  uint8_t               segment_in_use:1;  // `true` if the segment allocated this page
  uint8_t               is_reset:1;        // `true` if the page memory was reset
  uint8_t               is_committed:1;    // `true` if the page virtual memory is committed
  uint8_t               is_zero_init:1;    // `true` if the page was zero initialized

  // layout like this to optimize access in `mi_malloc` and `mi_free`
  uint16_t              capacity;          // number of blocks committed, must be the first field, see `segment.c:page_clear`
  uint16_t              reserved;          // number of blocks reserved in memory
  mi_page_flags_t       flags;             // `in_full` and `has_aligned` flags (8 bits)
  uint8_t               is_zero:1;         // `true` if the blocks in the free list are zero initialized
  uint8_t               retire_expire:7;   // expiration count for retired blocks

  mi_block_t*           free;              // list of available free blocks (`malloc` allocates from this list)
  #ifdef MI_ENCODE_FREELIST
  uintptr_t             keys[2];           // two random keys to encode the free lists (see `_mi_block_next`)
  #endif
  uint32_t              used;              // number of blocks in use (including blocks in `local_free` and `thread_free`)
  uint32_t              xblock_size;       // size available in each block (always `>0`)

  mi_block_t*           local_free;        // list of deferred free blocks by this thread (migrates to `free`)
  _Atomic(mi_thread_free_t) xthread_free;  // list of deferred free blocks freed by other threads (low 2 bits carry the `mi_delayed_t` state)
  _Atomic(uintptr_t)        xheap;         // owning heap stored as an integer; NOTE(review): presumably a `mi_heap_t*` (possibly encoded) -- confirm in `page.c`

  struct mi_page_s*     next;              // next page owned by this thread with the same `block_size`
  struct mi_page_s*     prev;              // previous page owned by this thread with the same `block_size`
} mi_page_t;
   248  
   249  
   250  
typedef enum mi_page_kind_e {
  MI_PAGE_SMALL,    // small blocks go into 64kb pages inside a segment
  MI_PAGE_MEDIUM,   // medium blocks go into 512kb pages inside a segment
  MI_PAGE_LARGE,    // larger blocks go into a single page spanning a whole segment
  MI_PAGE_HUGE      // huge blocks (>512kb) are put into a single page in a segment of the exact size (but still 2mb aligned)
                    // NOTE(review): per the constants above, the huge threshold appears to be MI_LARGE_OBJ_SIZE_MAX (2mb)
                    // and segments are MI_SEGMENT_SIZE (4mb) aligned; the "512kb"/"2mb" here look stale -- confirm in segment.c
} mi_page_kind_t;
   257  
// Segments are large allocated memory blocks (MI_SEGMENT_SIZE, 4mb on 64-bit) from
// the OS. Inside segments we allocate fixed size _pages_ that
// contain blocks.
typedef struct mi_segment_s {
  // memory fields
  size_t               memid;            // id for the os-level memory manager
  bool                 mem_is_pinned;    // `true` if we cannot decommit/reset/protect in this memory (i.e. when allocated using large OS pages)
  bool                 mem_is_committed; // `true` if the whole segment is eagerly committed

  // segment fields
  _Atomic(struct mi_segment_s*) abandoned_next;  // next segment in the abandoned list
  struct mi_segment_s* next;             // must be the first segment field after abandoned_next -- see `segment.c:segment_init`
  struct mi_segment_s* prev;

  size_t               abandoned;        // abandoned pages (i.e. the original owning thread stopped) (`abandoned <= used`)
  size_t               abandoned_visits; // count how often this segment is visited in the abandoned list (to force reclaim if it is too long)

  size_t               used;             // count of pages in use (`used <= capacity`)
  size_t               capacity;         // count of available pages (`#free + used`)
  size_t               segment_size;     // for huge pages this may be different from `MI_SEGMENT_SIZE`
  size_t               segment_info_size;// space we are using from the first page for segment meta-data and possible guard pages.
  uintptr_t            cookie;           // verify addresses in secure mode: `_mi_ptr_cookie(segment) == segment->cookie`

  // layout like this to optimize access in `mi_free`
  size_t               page_shift;       // `1 << page_shift` == the page sizes == `page->block_size * page->reserved` (unless the first page, then `-segment_info_size`).
  _Atomic(uintptr_t)   thread_id;        // unique id of the thread owning this segment
  mi_page_kind_t       page_kind;        // kind of pages: small, medium, large, or huge
  mi_page_t            pages[1];         // up to `MI_SMALL_PAGES_PER_SEGMENT` pages (trailing array, actual length fixed at segment creation)
} mi_segment_t;
   287  
   288  
// ------------------------------------------------------
// Heaps
// Provide first-class heaps to allocate from.
// A heap just owns a set of pages for allocation and
// can only be allocated/reallocated from the thread that created it.
// Freeing blocks can be done from any thread though.
// Per thread, the segments are shared among its heaps.
// Per thread, there is always a default heap that is
// used for allocation; it is initialized to statically
// point to an empty heap to avoid initialization checks
// in the fast path.
// ------------------------------------------------------

// Thread local data (defined at the end of this file)
typedef struct mi_tld_s mi_tld_t;

// Pages of a certain block size are held in a queue.
typedef struct mi_page_queue_s {
  mi_page_t* first;       // first page in the queue
  mi_page_t* last;        // last page in the queue
  size_t     block_size;  // the common block size of the pages in this queue
} mi_page_queue_t;
   311  
#define MI_BIN_FULL  (MI_BIN_HUGE+1)   // extra bin index for the queue of "full" pages

// Random context
// (note: the struct tag is spelled `mi_random_cxt_s` (sic); kept as-is since
//  other translation units may reference the tag)
typedef struct mi_random_cxt_s {
  uint32_t input[16];         // input state
  uint32_t output[16];        // generated output
  int      output_available;  // how much of `output` is still unconsumed -- see `random.c`
} mi_random_ctx_t;
   320  
   321  
// In debug mode there is a padding structure at the end of the blocks to check for buffer overflows
#if (MI_PADDING)
typedef struct mi_padding_s {
  uint32_t canary; // encoded block value to check validity of the padding (in case of overflow)
  uint32_t delta;  // padding bytes before the block. (mi_usable_size(p) - delta == exact allocated bytes)
} mi_padding_t;
#define MI_PADDING_SIZE   (sizeof(mi_padding_t))
#define MI_PADDING_WSIZE  ((MI_PADDING_SIZE + MI_INTPTR_SIZE - 1) / MI_INTPTR_SIZE)  // padding size in machine words, rounded up
#else
#define MI_PADDING_SIZE   0
#define MI_PADDING_WSIZE  0
#endif

// Number of entries in the `pages_free_direct` array of a heap (see below).
// (MI_SMALL_WSIZE_MAX is not defined in this header; presumably it comes from `mimalloc.h` -- verify)
#define MI_PAGES_DIRECT   (MI_SMALL_WSIZE_MAX + MI_PADDING_WSIZE + 1)
   336  
   337  
// A heap owns a set of pages.
struct mi_heap_s {
  mi_tld_t*             tld;                                 // thread-local data of the owning thread
  mi_page_t*            pages_free_direct[MI_PAGES_DIRECT];  // optimize: array where every entry points a page with possibly free blocks in the corresponding queue for that size.
  mi_page_queue_t       pages[MI_BIN_FULL + 1];              // queue of pages for each size class (or "bin")
  _Atomic(mi_block_t*)  thread_delayed_free;                 // blocks freed by other threads awaiting delayed free (see the `mi_page_t` notes above)
  uintptr_t             thread_id;                           // thread this heap belongs to
  uintptr_t             cookie;                              // random cookie to verify pointers (see `_mi_ptr_cookie`)
  uintptr_t             keys[2];                             // two random keys used to encode the `thread_delayed_free` list
  mi_random_ctx_t       random;                              // random number context used for secure allocation
  size_t                page_count;                          // total number of pages in the `pages` queues.
  size_t                page_retired_min;                    // smallest retired index (retired pages are fully free, but still in the page queues)
  size_t                page_retired_max;                    // largest retired index into the `pages` array.
  mi_heap_t*            next;                                // list of heaps per thread
  bool                  no_reclaim;                          // `true` if this heap should not reclaim abandoned pages
};
   354  
   355  
   356  
// ------------------------------------------------------
// Debug
// ------------------------------------------------------

// byte patterns used to fill uninitialized, freed, and padding memory in debug mode
#define MI_DEBUG_UNINIT     (0xD0)
#define MI_DEBUG_FREED      (0xDF)
#define MI_DEBUG_PADDING    (0xDE)

#if (MI_DEBUG)
// use our own assertion to print without memory allocation
void _mi_assert_fail(const char* assertion, const char* fname, unsigned int line, const char* func );
#define mi_assert(expr)     ((expr) ? (void)0 : _mi_assert_fail(#expr,__FILE__,__LINE__,__func__))
#else
// release: expands to nothing, so `x` must have no needed side effects
#define mi_assert(x)
#endif

// internal assertions, enabled at MI_DEBUG>=2
#if (MI_DEBUG>1)
#define mi_assert_internal    mi_assert
#else
#define mi_assert_internal(x)
#endif

// expensive invariant checks, enabled at MI_DEBUG>=3 only
#if (MI_DEBUG>2)
#define mi_assert_expensive   mi_assert
#else
#define mi_assert_expensive(x)
#endif
   384  
// ------------------------------------------------------
// Statistics
// ------------------------------------------------------

// MI_STAT: 0 = off, 1 = basic, 2 = detailed (defaults to detailed in debug builds)
#ifndef MI_STAT
#if (MI_DEBUG>0)
#define MI_STAT 2
#else
#define MI_STAT 0
#endif
#endif

// A statistic tracking totals and the current/peak level
typedef struct mi_stat_count_s {
  int64_t allocated;
  int64_t freed;
  int64_t peak;
  int64_t current;
} mi_stat_count_t;

// A simple event counter: accumulated `total` over `count` events
typedef struct mi_stat_counter_s {
  int64_t total;
  int64_t count;
} mi_stat_counter_t;

// All statistics; one instance is kept per thread in `mi_tld_t` (see below)
typedef struct mi_stats_s {
  mi_stat_count_t segments;
  mi_stat_count_t pages;
  mi_stat_count_t reserved;
  mi_stat_count_t committed;
  mi_stat_count_t reset;
  mi_stat_count_t page_committed;
  mi_stat_count_t segments_abandoned;
  mi_stat_count_t pages_abandoned;
  mi_stat_count_t threads;
  mi_stat_count_t normal;
  mi_stat_count_t huge;
  mi_stat_count_t giant;
  mi_stat_count_t malloc;
  mi_stat_count_t segments_cache;
  mi_stat_counter_t pages_extended;
  mi_stat_counter_t mmap_calls;
  mi_stat_counter_t commit_calls;
  mi_stat_counter_t page_no_retire;
  mi_stat_counter_t searches;
  mi_stat_counter_t normal_count;
  mi_stat_counter_t huge_count;
  mi_stat_counter_t giant_count;
#if MI_STAT>1
  mi_stat_count_t normal_bins[MI_BIN_HUGE+1];  // per size-class counts (detailed statistics only)
#endif
} mi_stats_t;
   436  
   437  
// statistic update routines (always declared; the macros below compile away when MI_STAT is off)
void _mi_stat_increase(mi_stat_count_t* stat, size_t amount);
void _mi_stat_decrease(mi_stat_count_t* stat, size_t amount);
void _mi_stat_counter_increase(mi_stat_counter_t* stat, size_t amount);

#if (MI_STAT)
#define mi_stat_increase(stat,amount)         _mi_stat_increase( &(stat), amount)
#define mi_stat_decrease(stat,amount)         _mi_stat_decrease( &(stat), amount)
#define mi_stat_counter_increase(stat,amount) _mi_stat_counter_increase( &(stat), amount)
#else
// when statistics are disabled the arguments are NOT evaluated -- avoid side effects in `amount`
#define mi_stat_increase(stat,amount)         (void)0
#define mi_stat_decrease(stat,amount)         (void)0
#define mi_stat_counter_increase(stat,amount) (void)0
#endif

// heap-level convenience wrappers; `stat` is the name of a field of `mi_stats_t`
#define mi_heap_stat_counter_increase(heap,stat,amount)  mi_stat_counter_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_increase(heap,stat,amount)  mi_stat_increase( (heap)->tld->stats.stat, amount)
#define mi_heap_stat_decrease(heap,stat,amount)  mi_stat_decrease( (heap)->tld->stats.stat, amount)
   455  
// ------------------------------------------------------
// Thread Local data
// ------------------------------------------------------

// time in milliseconds (signed 64-bit)
typedef int64_t  mi_msecs_t;

// Queue of segments
typedef struct mi_segment_queue_s {
  mi_segment_t* first;
  mi_segment_t* last;
} mi_segment_queue_t;

// OS thread local data
typedef struct mi_os_tld_s {
  size_t                region_idx;   // start point for next allocation
  mi_stats_t*           stats;        // points to tld stats
} mi_os_tld_t;

// Segments thread local data
typedef struct mi_segments_tld_s {
  mi_segment_queue_t  small_free;   // queue of segments with free small pages
  mi_segment_queue_t  medium_free;  // queue of segments with free medium pages
  mi_page_queue_t     pages_reset;  // queue of freed pages that can be reset
  size_t              count;        // current number of segments
  size_t              peak_count;   // peak number of segments
  size_t              current_size; // current size of all segments
  size_t              peak_size;    // peak size of all segments
  size_t              cache_count;  // number of segments in the cache
  size_t              cache_size;   // total size of all segments in the cache
  mi_segment_t*       cache;        // (small) cache of segments
  mi_stats_t*         stats;        // points to tld stats
  mi_os_tld_t*        os;           // points to os stats
} mi_segments_tld_t;

// Thread local data
struct mi_tld_s {
  unsigned long long  heartbeat;     // monotonic heartbeat count
  bool                recurse;       // true if deferred was called; used to prevent infinite recursion.
  mi_heap_t*          heap_backing;  // backing heap of this thread (cannot be deleted)
  mi_heap_t*          heaps;         // list of heaps in this thread (so we can abandon all when the thread terminates)
  mi_segments_tld_t   segments;      // segment tld
  mi_os_tld_t         os;            // os tld
  mi_stats_t          stats;         // statistics
};
   500  
   501  #endif