X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3e170ce000f1506b7b5d2c5c7faec85ceabb573d..5ba3f43ea354af8ad55bea84372a2bc834d8757c:/osfmk/kern/zalloc.h

diff --git a/osfmk/kern/zalloc.h b/osfmk/kern/zalloc.h
index 544689055..6a585b83f 100644
--- a/osfmk/kern/zalloc.h
+++ b/osfmk/kern/zalloc.h
@@ -77,6 +77,12 @@
 #include
 #include
 #include
+#include
+
+#if KASAN
+#include
+#include
+#endif
 
 #if CONFIG_GZALLOC
 typedef struct gzalloc_data {
@@ -106,6 +112,7 @@ struct zone {
 	} pages;		/* list of zone_page_metadata structs, which maintain per-page free element lists */
 	int		count;		/* Number of elements used now */
 	int		countfree;	/* Number of free elements */
+	int		count_all_free_pages;	/* Number of pages collectable by GC */
 	lck_attr_t	lock_attr;	/* zone lock attribute */
 	decl_lck_mtx_data(,lock)	/* zone lock */
 	lck_mtx_ext_t	lock_ext;	/* placeholder for indirect mutex */
@@ -126,21 +133,22 @@ struct zone {
 	/* boolean_t */	async_pending      :1,	/* asynchronous allocation pending? */
 	/* boolean_t */	zleak_on           :1,	/* Are we collecting allocation information? */
 	/* boolean_t */	caller_acct        :1,	/* do we account allocation/free to the caller? */
-	/* boolean_t */	doing_gc           :1,	/* garbage collect in progress? */
 	/* boolean_t */	noencrypt          :1,
 	/* boolean_t */	no_callout         :1,
 	/* boolean_t */	async_prio_refill  :1,
 	/* boolean_t */	gzalloc_exempt     :1,
 	/* boolean_t */	alignment_required :1,
-	/* boolean_t */	use_page_list      :1,
-	/* future    */	_reserved          :15;
+	/* boolean_t */	zone_logging       :1,	/* Enable zone logging for this zone. */
+	/* boolean_t */	zone_replenishing  :1,
+	/* boolean_t */	kasan_quarantine   :1,
+	/* boolean_t */	tags               :1,
+	/* boolean_t */	tags_inline        :1,
+	/* future    */	tag_zone_index     :6,
+	/* boolean_t */	zone_valid         :1,
+	/* future    */	_reserved          :5;
 
 	int		index;		/* index into zone_info arrays for this zone */
-	struct zone	*next_zone;	/* Link for all-zones list */
 	const char	*zone_name;	/* a name for the zone */
-#if	ZONE_DEBUG
-	queue_head_t	active_zones;	/* active elements */
-#endif	/* ZONE_DEBUG */
 
 #if CONFIG_ZLEAKS
 	uint32_t zleak_capture;		/* per-zone counter for capturing every N allocations */
@@ -151,6 +159,12 @@ struct zone {
 #if	CONFIG_GZALLOC
 	gzalloc_data_t	gz;
 #endif /* CONFIG_GZALLOC */
+
+#if	KASAN_ZALLOC
+	vm_size_t	kasan_redzone;
+#endif
+
+	btlog_t		*zlog_btlog;	/* zone logging structure to hold stacks and element references to those stacks. */
 };
 
 /*
@@ -162,13 +176,26 @@ typedef struct zinfo_usage_store_t {
 	uint64_t	alloc __attribute__((aligned(8)));	/* allocation counter */
 	uint64_t	free __attribute__((aligned(8)));	/* free counter */
 } zinfo_usage_store_t;
-typedef zinfo_usage_store_t *zinfo_usage_t;
 
-extern void		zone_gc(boolean_t);
-extern void		consider_zone_gc(boolean_t);
+/*
+ * For sysctl kern.zones_collectable_bytes used by memory_maintenance to check if a
+ * userspace reboot is needed. The only other way to query for this information
+ * is via mach_memory_info() which is unavailable on release kernels.
+ */
+extern uint64_t get_zones_collectable_bytes(void);
+
+/*
+ * zone_gc also checks if the zone_map is getting close to full and triggers jetsams if needed, provided
+ * consider_jetsams is set to TRUE. To avoid deadlocks, we only pass a value of TRUE from within the
+ * vm_pageout_garbage_collect thread.
+ */
+extern void		zone_gc(boolean_t consider_jetsams);
+extern void		consider_zone_gc(boolean_t consider_jetsams);
+extern void		drop_free_elements(zone_t z);
 
-/* Steal memory for zone module */
-extern void		zone_steal_memory(void);
+/* Debug logging for zone-map-exhaustion jetsams. */
+extern void		get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
+extern void		get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size);
 
 /* Bootstrap zone module (create zone zone) */
 extern void		zone_bootstrap(void);
@@ -177,11 +204,6 @@ extern void zone_bootstrap(void);
 extern void		zone_init(
 					vm_size_t	map_size);
 
-/* Handle per-task zone info */
-extern void		zinfo_task_init(task_t task);
-extern void		zinfo_task_free(task_t task);
-
-
 /* Stack use statistics */
 extern void		stack_fake_zone_init(int zone_index);
 extern void		stack_fake_zone_info(
@@ -208,46 +230,66 @@ extern void zone_debug_disable(
 #define ZONE_DEBUG_OFFSET	ROUNDUP(sizeof(queue_chain_t),16)
 #endif	/* ZONE_DEBUG */
 
+extern unsigned int		num_zones;
+extern struct zone		zone_array[];
+
+/* zindex and page_count must pack into 16 bits
+ * update tools/lldbmacros/memory.py:GetRealMetadata
+ * when these values change */
+
+#define ZINDEX_BITS		(10U)
+#define PAGECOUNT_BITS		(16U - ZINDEX_BITS)
+#define MULTIPAGE_METADATA_MAGIC	((1UL << ZINDEX_BITS) - 1)
+#define ZONE_CHUNK_MAXPAGES	((1UL << PAGECOUNT_BITS) - 1)
+
+/*
+ * The max # of elements in a chunk should fit into zone_page_metadata.free_count (uint16_t).
+ * Update this if the type of free_count changes.
+ */
+#define ZONE_CHUNK_MAXELEMENTS	(UINT16_MAX)
+
 #endif	/* MACH_KERNEL_PRIVATE */
 
 __BEGIN_DECLS
 
+
+/* Item definitions for zalloc/zinit/zone_change */
+#define Z_EXHAUST	1	/* Make zone exhaustible	*/
+#define Z_COLLECT	2	/* Make zone collectable	*/
+#define Z_EXPAND	3	/* Make zone expandable		*/
+#define Z_FOREIGN	4	/* Allow collectable zone to contain foreign elements */
+#define Z_CALLERACCT	5	/* Account alloc/free against the caller */
+#define Z_NOENCRYPT	6	/* Don't encrypt zone during hibernation */
+#define Z_NOCALLOUT	7	/* Don't asynchronously replenish the zone via callouts */
+#define Z_ALIGNMENT_REQUIRED	8
+#define Z_GZALLOC_EXEMPT	9	/* Not tracked in guard allocation mode */
+#define Z_KASAN_QUARANTINE	10	/* Allow zone elements to be quarantined on free */
+#ifdef	XNU_KERNEL_PRIVATE
+#define Z_TAGS_ENABLED	11	/* Store tags */
+#endif	/* XNU_KERNEL_PRIVATE */
+
 #ifdef	XNU_KERNEL_PRIVATE
 
 extern vm_offset_t	zone_map_min_address;
 extern vm_offset_t	zone_map_max_address;
 
-/* Allocate from zone */
-extern void *	zalloc(
-					zone_t		zone);
-
-/* Free zone element */
-extern void		zfree(
-					zone_t		zone,
-					void		*elem);
-
-/* Create zone */
-extern zone_t	zinit(
-					vm_size_t	size,		/* the size of an element */
-					vm_size_t	maxmem,		/* maximum memory to use */
-					vm_size_t	alloc,		/* allocation size */
-					const char	*name);		/* a name for the zone */
-
-
 /* Non-waiting for memory version of zalloc */
 extern void *	zalloc_nopagewait(
					zone_t		zone);
 
-/* Non-blocking version of zalloc */
-extern void *	zalloc_noblock(
-					zone_t		zone);
-
 /* selective version of zalloc */
 extern void *	zalloc_canblock(
 					zone_t		zone,
 					boolean_t	canblock);
 
+/* selective version of zalloc */
+extern void *	zalloc_canblock_tag(
+					zone_t		zone,
+					boolean_t	canblock,
+					vm_size_t	reqsize,
+					vm_tag_t	tag);
+
 /* Get from zone free list */
 extern void *	zget(
 					zone_t		zone);
@@ -263,24 +305,9 @@ extern int zfill(
 					zone_t		zone,
 					int		nelem);
 
-/* Change zone parameters */
-extern void		zone_change(
-					zone_t		zone,
-					unsigned int	item,
-					boolean_t	value);
 
 extern void		zone_prio_refill_configure(zone_t, vm_size_t);
-/* Item definitions */
-#define Z_EXHAUST	1	/* Make zone exhaustible	*/
-#define Z_COLLECT	2	/* Make zone collectable	*/
-#define Z_EXPAND	3	/* Make zone expandable		*/
-#define Z_FOREIGN	4	/* Allow collectable zone to contain foreign elements */
-#define Z_CALLERACCT	5	/* Account alloc/free against the caller */
-#define Z_NOENCRYPT	6	/* Don't encrypt zone during hibernation */
-#define Z_NOCALLOUT	7	/* Don't asynchronously replenish the zone via
-				 * callouts
-				 */
-#define Z_ALIGNMENT_REQUIRED	8
-#define Z_GZALLOC_EXEMPT	9	/* Not tracked in guard allocation mode */
+
+/* See above/top of file. Z_* definitions moved so they would be usable by kexts */
 
 /* Preallocate space for zone from zone map */
 extern void		zprealloc(
@@ -290,6 +317,10 @@ extern void zprealloc(
 extern integer_t	zone_free_count(
 						zone_t		zone);
 
+extern vm_size_t	zone_element_size(
+						void		*addr,
+						zone_t		*z);
+
 /*
  * MAX_ZTRACE_DEPTH configures how deep of a stack trace is taken on each zalloc in the zone of interest.  15
  * levels is usually enough to get past all the layers of code in kalloc and IOKit and see who the actual
@@ -326,15 +357,25 @@ extern int get_zleak_state(void);
 
 #endif	/* CONFIG_ZLEAKS */
 
+#ifndef VM_MAX_TAG_ZONES
+#error MAX_TAG_ZONES
+#endif
+
+#if VM_MAX_TAG_ZONES
+
+extern boolean_t zone_tagging_on;
+extern uint32_t zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size);
+
+#endif /* VM_MAX_TAG_ZONES */
+
 /* These functions used for leak detection both in zalloc.c and mbuf.c */
-extern uint32_t fastbacktrace(uintptr_t* bt, uint32_t max_frames) __attribute__((noinline));
 extern uintptr_t hash_mix(uintptr_t);
 extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t);
 extern uint32_t hashaddr(uintptr_t, uint32_t);
 
 #define lock_zone(zone)					\
 MACRO_BEGIN						\
-	lck_mtx_lock_spin(&(zone)->lock);		\
+	lck_mtx_lock_spin_always(&(zone)->lock);	\
 MACRO_END
 
 #define unlock_zone(zone)				\
@@ -347,14 +388,60 @@ void gzalloc_init(vm_size_t);
 void gzalloc_zone_init(zone_t);
 void gzalloc_configure(void);
 void gzalloc_reconfigure(zone_t);
+void gzalloc_empty_free_cache(zone_t);
 boolean_t gzalloc_enabled(void);
 
 vm_offset_t gzalloc_alloc(zone_t, boolean_t);
 boolean_t gzalloc_free(zone_t, void *);
+boolean_t gzalloc_element_size(void *, zone_t *, vm_size_t *);
 #endif /* CONFIG_GZALLOC */
 
+/* Callbacks for btlog lock/unlock */
+void zlog_btlog_lock(__unused void *);
+void zlog_btlog_unlock(__unused void *);
+
+#ifdef MACH_KERNEL_PRIVATE
+#define MAX_ZONE_NAME	32	/* max length of a zone name we can take from the boot-args */
+int track_this_zone(const char *zonename, const char *logname);
+#endif
+
+#if DEBUG || DEVELOPMENT
+extern boolean_t run_zone_test(void);
+extern vm_size_t zone_element_info(void *addr, vm_tag_t * ptag);
+#endif /* DEBUG || DEVELOPMENT */
+
 #endif	/* XNU_KERNEL_PRIVATE */
 
+/* Allocate from zone */
+extern void *	zalloc(
+					zone_t		zone);
+
+/* Non-blocking version of zalloc */
+extern void *	zalloc_noblock(
+					zone_t		zone);
+
+/* Free zone element */
+extern void		zfree(
+					zone_t		zone,
+					void		*elem);
+
+/* Create zone */
+extern zone_t	zinit(
+					vm_size_t	size,		/* the size of an element */
+					vm_size_t	maxmem,		/* maximum memory to use */
+					vm_size_t	alloc,		/* allocation size */
+					const char	*name);		/* a name for the zone */
+
+/* Change zone parameters */
+extern void		zone_change(
+					zone_t		zone,
+					unsigned int	item,
+					boolean_t	value);
+
+/* Destroy the zone */
+extern void		zdestroy(
+					zone_t		zone);
+
 __END_DECLS
 
 #endif	/* _KERN_ZALLOC_H_ */
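
Usage sketch: with this revision the core zone interface (zinit, zone_change, zalloc, zalloc_noblock, zfree, and the new zdestroy) and the Z_* item definitions sit outside the XNU_KERNEL_PRIVATE block, so kexts can use them directly. The fragment below only illustrates that interface as declared in this header; the element type my_widget, the zone name, and the sizing values are hypothetical, not taken from this diff.

#include <stdint.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <kern/zalloc.h>

/* Hypothetical fixed-size element; any struct carved out of a private zone works. */
struct my_widget {
	uint32_t id;
	uint64_t payload;
};

static zone_t my_widget_zone;

static void
my_widget_zone_setup(void)
{
	/* Create the zone: element size, memory cap, growth increment, name. */
	my_widget_zone = zinit(sizeof(struct my_widget),
	    16 * PAGE_SIZE,		/* maxmem: cap the zone at 16 pages */
	    PAGE_SIZE,			/* alloc: grow one page at a time */
	    "hypothetical.my_widget");

	/* Z_* items are passed through zone_change(); e.g. skip hibernation encryption. */
	zone_change(my_widget_zone, Z_NOENCRYPT, TRUE);
}

static void
my_widget_zone_exercise(void)
{
	struct my_widget *w = (struct my_widget *)zalloc(my_widget_zone);

	if (w != NULL) {
		w->id = 1;
		w->payload = 0;
		zfree(my_widget_zone, w);
	}

	/* zdestroy() is new in this revision: a zone can now be torn down again. */
	zdestroy(my_widget_zone);
}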