X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/55e303ae13a4cf49d70f2294092726f2fffb9ef2..5ba3f43ea354af8ad55bea84372a2bc834d8757c:/osfmk/kern/zalloc.h

diff --git a/osfmk/kern/zalloc.h b/osfmk/kern/zalloc.h
index 045c1523a..6a585b83f 100644
--- a/osfmk/kern/zalloc.h
+++ b/osfmk/kern/zalloc.h
@@ -1,16 +1,19 @@
 /*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
  * This file contains Original Code and/or Modifications of Original Code
  * as defined in and that are subject to the Apple Public Source License
  * Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
  *
  * The Original Code and all software distributed under the License are
  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
@@ -20,7 +23,7 @@
  * Please see the License for the specific language governing rights and
  * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  */
 /*
  * @OSF_COPYRIGHT@
@@ -59,23 +62,34 @@
  *
  */
 
+#ifdef	KERNEL_PRIVATE
+
 #ifndef	_KERN_ZALLOC_H_
 #define _KERN_ZALLOC_H_
 
 #include
 #include
-
-#include
-
-#ifdef	__APPLE_API_PRIVATE
+#include
 
 #ifdef	MACH_KERNEL_PRIVATE
 
 #include
-#include
-#include
+#include
 #include
-#include
+#include
+#include
+
+#if	KASAN
+#include
+#include
+#endif
+
+#if	CONFIG_GZALLOC
+typedef struct gzalloc_data {
+	uint32_t	gzfc_index;
+	vm_offset_t	*gzfc;
+} gzalloc_data_t;
+#endif
 
 /*
  *	A zone is a collection of fixed size blocks for which there
@@ -85,136 +99,351 @@
  *
  */
 
+struct zone_free_element;
+struct zone_page_metadata;
+
 struct zone {
+	struct zone_free_element *free_elements;	/* free elements directly linked */
+	struct {
+		queue_head_t	any_free_foreign;	/* foreign pages crammed into zone */
+		queue_head_t	all_free;
+		queue_head_t	intermediate;
+		queue_head_t	all_used;
+	} pages;		/* list of zone_page_metadata structs, which maintain per-page free element lists */
 	int		count;		/* Number of elements used now */
-	vm_offset_t	free_elements;
+	int		countfree;	/* Number of free elements */
+	int		count_all_free_pages;	/* Number of pages collectable by GC */
+	lck_attr_t	lock_attr;	/* zone lock attribute */
+	decl_lck_mtx_data(,lock)	/* zone lock */
+	lck_mtx_ext_t	lock_ext;	/* placeholder for indirect mutex */
 	vm_size_t	cur_size;	/* current memory utilization */
 	vm_size_t	max_size;	/* how large can this zone grow */
 	vm_size_t	elem_size;	/* size of an element */
 	vm_size_t	alloc_size;	/* size used for more memory */
-	char		*zone_name;	/* a name for the zone */
-	unsigned int
-	/* boolean_t */	exhaustible :1,	/* (F) merely return if empty? */
-	/* boolean_t */	collectable :1,	/* (F) garbage collect empty pages */
-	/* boolean_t */	expandable :1,	/* (T) expand zone (with message)? */
-	/* boolean_t */	allows_foreign :1,/* (F) allow non-zalloc space */
-	/* boolean_t */	doing_alloc :1,	/* is zone expanding now? */
-	/* boolean_t */	waiting :1,	/* is thread waiting for expansion? */
-	/* boolean_t */	async_pending :1;	/* asynchronous allocation pending? */
-	struct zone *	next_zone;	/* Link for all-zones list */
-	call_entry_data_t	call_async_alloc;	/* callout for asynchronous alloc */
-#if	ZONE_DEBUG
-	queue_head_t	active_zones;	/* active elements */
-#endif	/* ZONE_DEBUG */
-	decl_simple_lock_data(,lock)	/* generic lock */
+	uint64_t	page_count __attribute__((aligned(8)));	/* number of pages used by this zone */
+	uint64_t	sum_count;	/* count of allocs (life of zone) */
+	uint32_t
+	/* boolean_t */	exhaustible :1,	/* (F) merely return if empty? */
+	/* boolean_t */	collectable :1,	/* (F) garbage collect empty pages */
+	/* boolean_t */	expandable :1,	/* (T) expand zone (with message)? */
+	/* boolean_t */	allows_foreign :1,	/* (F) allow non-zalloc space */
+	/* boolean_t */	doing_alloc_without_vm_priv :1,	/* is zone expanding now via a non-vm_privileged thread? */
+	/* boolean_t */	doing_alloc_with_vm_priv :1,	/* is zone expanding now via a vm_privileged thread? */
+	/* boolean_t */	waiting :1,	/* is thread waiting for expansion? */
+	/* boolean_t */	async_pending :1,	/* asynchronous allocation pending? */
+	/* boolean_t */	zleak_on :1,	/* Are we collecting allocation information? */
+	/* boolean_t */	caller_acct :1,	/* do we account allocation/free to the caller? */
+	/* boolean_t */	noencrypt :1,
+	/* boolean_t */	no_callout :1,
+	/* boolean_t */	async_prio_refill :1,
+	/* boolean_t */	gzalloc_exempt :1,
+	/* boolean_t */	alignment_required :1,
+	/* boolean_t */	zone_logging :1,	/* Enable zone logging for this zone. */
+	/* boolean_t */	zone_replenishing :1,
+	/* boolean_t */	kasan_quarantine :1,
+	/* boolean_t */	tags :1,
+	/* boolean_t */	tags_inline :1,
+	/* future    */	tag_zone_index :6,
+	/* boolean_t */	zone_valid :1,
+	/* future    */	_reserved :5;
+
+	int		index;		/* index into zone_info arrays for this zone */
+	const char	*zone_name;	/* a name for the zone */
+
+#if CONFIG_ZLEAKS
+	uint32_t	zleak_capture;	/* per-zone counter for capturing every N allocations */
+#endif /* CONFIG_ZLEAKS */
+	uint32_t	zp_count;	/* counter for poisoning every N frees */
+	vm_size_t	prio_refill_watermark;
+	thread_t	zone_replenish_thread;
+#if	CONFIG_GZALLOC
+	gzalloc_data_t	gz;
+#endif /* CONFIG_GZALLOC */
+
+#if	KASAN_ZALLOC
+	vm_size_t	kasan_redzone;
+#endif
+
+	btlog_t		*zlog_btlog;	/* zone logging structure to hold stacks and element references to those stacks. */
 };
 
-extern void		zone_gc(void);
-extern void		consider_zone_gc(void);
+/*
+ *	structure for tracking zone usage
+ *	Used either one per task/thread for all zones or .
+ */
+typedef struct zinfo_usage_store_t {
+	/* These fields may be updated atomically, and so must be 8 byte aligned */
+	uint64_t	alloc __attribute__((aligned(8)));	/* allocation counter */
+	uint64_t	free __attribute__((aligned(8)));	/* free counter */
+} zinfo_usage_store_t;
 
-/* Steal memory for zone module */
-extern void		zone_steal_memory(void);
+/*
+ * For sysctl kern.zones_collectable_bytes used by memory_maintenance to check if a
+ * userspace reboot is needed. The only other way to query for this information
+ * is via mach_memory_info() which is unavailable on release kernels.
+ */
+extern uint64_t get_zones_collectable_bytes(void);
+
+/*
+ * zone_gc also checks if the zone_map is getting close to full and triggers jetsams if needed, provided
+ * consider_jetsams is set to TRUE. To avoid deadlocks, we only pass a value of TRUE from within the
+ * vm_pageout_garbage_collect thread.
+ */
+extern void		zone_gc(boolean_t consider_jetsams);
+extern void		consider_zone_gc(boolean_t consider_jetsams);
+extern void		drop_free_elements(zone_t z);
+
+/* Debug logging for zone-map-exhaustion jetsams. */
+extern void		get_zone_map_size(uint64_t *current_size, uint64_t *capacity);
+extern void		get_largest_zone_info(char *zone_name, size_t zone_name_len, uint64_t *zone_size);
 
 /* Bootstrap zone module (create zone zone) */
 extern void		zone_bootstrap(void);
 
 /* Init zone module */
-extern void		zone_init(vm_size_t);
+extern void		zone_init(
+				vm_size_t	map_size);
+
+/* Stack use statistics */
+extern void		stack_fake_zone_init(int zone_index);
+extern void		stack_fake_zone_info(
+				int		*count,
+				vm_size_t	*cur_size,
+				vm_size_t	*max_size,
+				vm_size_t	*elem_size,
+				vm_size_t	*alloc_size,
+				uint64_t	*sum_size,
+				int		*collectable,
+				int		*exhaustable,
+				int		*caller_acct);
+
+#if	ZONE_DEBUG
 
-#endif	/* MACH_KERNEL_PRIVATE */
+extern void		zone_debug_enable(
+				zone_t		z);
+
+extern void		zone_debug_disable(
+				zone_t		z);
 
-#endif	/* __APPLE_API_PRIVATE */
+#define zone_debug_enabled(z)	z->active_zones.next
+#define	ROUNDUP(x,y)		((((x)+(y)-1)/(y))*(y))
+#define ZONE_DEBUG_OFFSET	ROUNDUP(sizeof(queue_chain_t),16)
+#endif	/* ZONE_DEBUG */
 
-/* Allocate from zone */
-extern vm_offset_t	zalloc(
-				zone_t		zone);
+extern unsigned int	num_zones;
+extern struct zone	zone_array[];
 
-/* Non-blocking version of zalloc */
-extern vm_offset_t	zalloc_noblock(
-				zone_t		zone);
+/* zindex and page_count must pack into 16 bits
+ * update tools/lldbmacros/memory.py:GetRealMetadata
+ * when these values change */
 
-/* Get from zone free list */
-extern vm_offset_t	zget(
-				zone_t		zone);
+#define ZINDEX_BITS		(10U)
+#define PAGECOUNT_BITS		(16U - ZINDEX_BITS)
+#define MULTIPAGE_METADATA_MAGIC	((1UL << ZINDEX_BITS) - 1)
+#define ZONE_CHUNK_MAXPAGES	((1UL << PAGECOUNT_BITS) - 1)
 
-/* Create zone */
-extern zone_t		zinit(
-				vm_size_t	size,		/* the size of an element */
-				vm_size_t	max,		/* maximum memory to use */
-				vm_size_t	alloc,		/* allocation size */
-				char		*name);		/* a name for the zone */
+/*
+ * The max # of elements in a chunk should fit into zone_page_metadata.free_count (uint16_t).
+ * Update this if the type of free_count changes.
+ */
+#define ZONE_CHUNK_MAXELEMENTS	(UINT16_MAX)
 
-/* Free zone element */
-extern void		zfree(
-				zone_t		zone,
-				vm_offset_t	elem);
+#endif	/* MACH_KERNEL_PRIVATE */
+
+__BEGIN_DECLS
+
+
+/* Item definitions for zalloc/zinit/zone_change */
+#define Z_EXHAUST	1	/* Make zone exhaustible */
+#define Z_COLLECT	2	/* Make zone collectable */
+#define Z_EXPAND	3	/* Make zone expandable */
+#define Z_FOREIGN	4	/* Allow collectable zone to contain foreign elements */
+#define Z_CALLERACCT	5	/* Account alloc/free against the caller */
+#define Z_NOENCRYPT	6	/* Don't encrypt zone during hibernation */
+#define Z_NOCALLOUT	7	/* Don't asynchronously replenish the zone via callouts */
+#define Z_ALIGNMENT_REQUIRED	8
+#define Z_GZALLOC_EXEMPT	9	/* Not tracked in guard allocation mode */
+#define Z_KASAN_QUARANTINE	10	/* Allow zone elements to be quarantined on free */
+#ifdef	XNU_KERNEL_PRIVATE
+#define Z_TAGS_ENABLED	11	/* Store tags */
+#endif	/* XNU_KERNEL_PRIVATE */
+
+#ifdef	XNU_KERNEL_PRIVATE
+
+extern vm_offset_t	zone_map_min_address;
+extern vm_offset_t	zone_map_max_address;
+
+
+/* Non-waiting for memory version of zalloc */
+extern void *	zalloc_nopagewait(
+				zone_t		zone);
+
+/* selective version of zalloc */
+extern void *	zalloc_canblock(
+				zone_t		zone,
+				boolean_t	canblock);
+
+/* selective version of zalloc */
+extern void *	zalloc_canblock_tag(
+				zone_t		zone,
+				boolean_t	canblock,
+				vm_size_t	reqsize,
+				vm_tag_t	tag);
+
+/* Get from zone free list */
+extern void *	zget(
+				zone_t		zone);
 
 /* Fill zone with memory */
 extern void		zcram(
-				zone_t		zone,
-				vm_offset_t	newmem,
-				vm_size_t	size);
+				zone_t		zone,
+				vm_offset_t	newmem,
+				vm_size_t	size);
 
 /* Initially fill zone with specified number of elements */
 extern int		zfill(
-				zone_t		zone,
-				int		nelem);
-/* Change zone parameters */
-extern void		zone_change(
-				zone_t		zone,
-				unsigned int	item,
-				boolean_t	value);
+				zone_t		zone,
+				int		nelem);
+
+extern void		zone_prio_refill_configure(zone_t, vm_size_t);
+
+/* See above/top of file. Z_* definitions moved so they would be usable by kexts */
 
 /* Preallocate space for zone from zone map */
 extern void		zprealloc(
-				zone_t		zone,
-				vm_size_t	size);
+				zone_t		zone,
+				vm_size_t	size);
+
+extern integer_t	zone_free_count(
+				zone_t		zone);
+
+extern vm_size_t	zone_element_size(
+				void		*addr,
+				zone_t		*z);
 
 /*
- * zone_free_count returns a hint as to the current number of free elements
- * in the zone.  By the time it returns, it may no longer be true (a new
- * element might have been added, or an element removed).
- * This routine may be used in conjunction with zcram and a lock to regulate
- * adding memory to a non-expandable zone.
+ * MAX_ZTRACE_DEPTH configures how deep of a stack trace is taken on each zalloc in the zone of interest. 15
+ * levels is usually enough to get past all the layers of code in kalloc and IOKit and see who the actual
+ * caller is up above these lower levels.
+ *
+ * This is used both for the zone leak detector and the zone corruption log.
  */
-extern integer_t	zone_free_count(zone_t zone);
-/*
- *  Item definitions for zone_change:
+#define MAX_ZTRACE_DEPTH	15
+
+/*
+ * Structure for keeping track of a backtrace, used for leak detection.
+ * This is in the .h file because it is used during panic, see kern/debug.c
+ * A non-zero size indicates that the trace is in use.
  */
+struct ztrace {
+	vm_size_t		zt_size;			/* How much memory are all the allocations referring to this trace taking up? */
+	uint32_t		zt_depth;			/* depth of stack (0 to MAX_ZTRACE_DEPTH) */
+	void*			zt_stack[MAX_ZTRACE_DEPTH];	/* series of return addresses from OSBacktrace */
+	uint32_t		zt_collisions;			/* How many times did a different stack land here while it was occupied? */
+	uint32_t		zt_hit_count;			/* for determining effectiveness of hash function */
+};
 
-#define Z_EXHAUST	1	/* Make zone exhaustible	*/
-#define Z_COLLECT	2	/* Make zone collectable	*/
-#define Z_EXPAND	3	/* Make zone expandable		*/
-#define	Z_FOREIGN	4	/* Allow collectable zone to contain foreign */
-				/* (not allocated via zalloc) elements. */
+#if	CONFIG_ZLEAKS
 
-#ifdef	__APPLE_API_PRIVATE
+/* support for the kern.zleak.* sysctls */
 
-#ifdef	MACH_KERNEL_PRIVATE
+extern kern_return_t zleak_activate(void);
+extern vm_size_t zleak_max_zonemap_size;
+extern vm_size_t zleak_global_tracking_threshold;
+extern vm_size_t zleak_per_zone_tracking_threshold;
 
-#if	ZONE_DEBUG
+extern int get_zleak_state(void);
 
-#if	MACH_KDB
+#endif	/* CONFIG_ZLEAKS */
 
-extern vm_offset_t	next_element(
-				zone_t		z,
-				vm_offset_t	elt);
+#ifndef VM_MAX_TAG_ZONES
+#error MAX_TAG_ZONES
+#endif
 
-extern vm_offset_t	first_element(
-				zone_t		z);
+#if VM_MAX_TAG_ZONES
 
-#endif	/* MACH_KDB */
+extern boolean_t zone_tagging_on;
+extern uint32_t zone_index_from_tag_index(uint32_t tag_zone_index, vm_size_t * elem_size);
 
-extern void		zone_debug_enable(
-				zone_t		z);
+#endif /* VM_MAX_TAG_ZONES */
 
-extern void		zone_debug_disable(
-				zone_t		z);
+/* These functions used for leak detection both in zalloc.c and mbuf.c */
+extern uintptr_t hash_mix(uintptr_t);
+extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t);
+extern uint32_t hashaddr(uintptr_t, uint32_t);
 
-#endif	/* ZONE_DEBUG */
+#define lock_zone(zone)					\
+MACRO_BEGIN						\
+	lck_mtx_lock_spin_always(&(zone)->lock);	\
+MACRO_END
 
-#endif	/* MACH_KERNEL_PRIVATE */
+#define unlock_zone(zone)				\
+MACRO_BEGIN						\
+	lck_mtx_unlock(&(zone)->lock);			\
+MACRO_END
 
-#endif	/* __APPLE_API_PRIVATE */
+
+#if	CONFIG_GZALLOC
+void gzalloc_init(vm_size_t);
+void gzalloc_zone_init(zone_t);
+void gzalloc_configure(void);
+void gzalloc_reconfigure(zone_t);
+void gzalloc_empty_free_cache(zone_t);
+boolean_t gzalloc_enabled(void);
+
+vm_offset_t gzalloc_alloc(zone_t, boolean_t);
+boolean_t gzalloc_free(zone_t, void *);
+boolean_t gzalloc_element_size(void *, zone_t *, vm_size_t *);
+#endif /* CONFIG_GZALLOC */
+
+/* Callbacks for btlog lock/unlock */
+void zlog_btlog_lock(__unused void *);
+void zlog_btlog_unlock(__unused void *);
+
+#ifdef	MACH_KERNEL_PRIVATE
+#define MAX_ZONE_NAME	32	/* max length of a zone name we can take from the boot-args */
+int track_this_zone(const char *zonename, const char *logname);
+#endif
+
+#if	DEBUG || DEVELOPMENT
+extern boolean_t run_zone_test(void);
+extern vm_size_t zone_element_info(void *addr, vm_tag_t * ptag);
+#endif	/* DEBUG || DEVELOPMENT */
+
+#endif	/* XNU_KERNEL_PRIVATE */
+
+/* Allocate from zone */
+extern void *	zalloc(
+				zone_t		zone);
+
+/* Non-blocking version of zalloc */
+extern void *	zalloc_noblock(
+				zone_t		zone);
+
+/* Free zone element */
+extern void		zfree(
+				zone_t		zone,
+				void		*elem);
+
+/* Create zone */
+extern zone_t		zinit(
+				vm_size_t	size,		/* the size of an element */
+				vm_size_t	maxmem,		/* maximum memory to use */
+				vm_size_t	alloc,		/* allocation size */
+				const char	*name);		/* a name for the zone */
+
+/* Change zone parameters */
+extern void		zone_change(
+				zone_t		zone,
+				unsigned int	item,
+				boolean_t	value);
+
+/* Destroy the zone */
+extern void		zdestroy(
+				zone_t		zone);
+
+__END_DECLS
 
 #endif	/* _KERN_ZALLOC_H_ */
+
+#endif	/* KERNEL_PRIVATE */
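
Usage sketch (illustrative, not part of the diff above): outside the MACH_KERNEL_PRIVATE and XNU_KERNEL_PRIVATE sections, the KERNEL_PRIVATE surface of this header is the classic zinit()/zone_change()/zalloc()/zfree()/zdestroy() set. The element type struct my_widget, the zone name, and the sizing choices below are hypothetical, shown only to connect the declarations above.

/*
 * Hypothetical kernel-private client of the zalloc.h interface above.
 * "struct my_widget", the sizing constants, and the zone name are invented
 * for illustration; only the zinit/zone_change/zalloc/zfree calls come from
 * the header.
 */
#include <kern/zalloc.h>

struct my_widget {
	int	id;
	void	*payload;
};

static zone_t my_widget_zone;

void
my_widget_zone_setup(void)
{
	/* zinit(elem_size, max_memory, alloc_size, name) */
	my_widget_zone = zinit(sizeof(struct my_widget),
	    8192 * sizeof(struct my_widget),	/* cap total memory for this zone */
	    16 * sizeof(struct my_widget),	/* grow in chunks of 16 elements */
	    "my.widget");

	/* zone_change() takes the Z_* items defined in the header */
	zone_change(my_widget_zone, Z_NOENCRYPT, TRUE);
}

struct my_widget *
my_widget_alloc(void)
{
	/* zalloc() may block while the zone grows; zalloc_noblock() does not */
	return (struct my_widget *)zalloc(my_widget_zone);
}

void
my_widget_free(struct my_widget *w)
{
	zfree(my_widget_zone, w);
}

The zdestroy() declaration added in this revision lets such a zone be torn down again (for example by unloadable code), on the expectation that all of its elements have already been returned with zfree().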
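
Worked example of the 16-bit budget called out above ("zindex and page_count must pack into 16 bits"): with ZINDEX_BITS = 10 and PAGECOUNT_BITS = 6, MULTIPAGE_METADATA_MAGIC works out to 1023 (the reserved all-ones zone index) and ZONE_CHUNK_MAXPAGES to 63. The standalone pack() helper below is hypothetical; in xnu the two values live in bitfields of the per-page zone_page_metadata structure maintained by zalloc.c.

/*
 * Standalone (userspace) illustration of the 16-bit zindex/page_count budget.
 * The packing layout chosen here is hypothetical; only the constants mirror
 * the header above.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ZINDEX_BITS              (10U)
#define PAGECOUNT_BITS           (16U - ZINDEX_BITS)            /* 6 bits */
#define MULTIPAGE_METADATA_MAGIC ((1UL << ZINDEX_BITS) - 1)     /* 1023: all-ones zone index */
#define ZONE_CHUNK_MAXPAGES      ((1UL << PAGECOUNT_BITS) - 1)  /* 63: largest page_count */

static uint16_t
pack(unsigned zindex, unsigned page_count)
{
	assert(zindex < MULTIPAGE_METADATA_MAGIC);	/* all-ones index is reserved */
	assert(page_count <= ZONE_CHUNK_MAXPAGES);
	return (uint16_t)((page_count << ZINDEX_BITS) | zindex);
}

int
main(void)
{
	uint16_t w = pack(42, 3);	/* zone index 42, 3-page chunk */
	printf("zindex=%u page_count=%u\n",
	    (unsigned)(w & ((1U << ZINDEX_BITS) - 1)),	/* low 10 bits -> 42 */
	    (unsigned)(w >> ZINDEX_BITS));		/* high 6 bits -> 3  */
	return 0;
}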