/*
- * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#ifdef MACH_KERNEL_PRIVATE
#include <zone_debug.h>
-#include <mach_kdb.h>
-#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/queue.h>
-#include <kern/call_entry.h>
+#include <kern/thread_call.h>
+
+#if CONFIG_GZALLOC
+typedef struct gzalloc_data {
+ uint32_t gzfc_index;
+ vm_offset_t *gzfc;
+} gzalloc_data_t;
+#endif
/*
* A zone is a collection of fixed size blocks for which there
* is fast allocation/deallocation access. Kernel routines can
* use zones to manage data structures dynamically, creating a
* zone for each type of data structure to be managed.
*
*/
+struct zone_free_element;
+struct zone_page_metadata;
+
struct zone {
+ struct zone_free_element *free_elements; /* free elements directly linked */
+ struct {
+ queue_head_t any_free_foreign; /* foreign pages crammed into zone */
+ queue_head_t all_free;
+ queue_head_t intermediate;
+ queue_head_t all_used;
+ } pages; /* list of zone_page_metadata structs, which maintain per-page free element lists */
int count; /* Number of elements used now */
- vm_offset_t free_elements;
+ int countfree; /* Number of free elements */
+ lck_attr_t lock_attr; /* zone lock attribute */
decl_lck_mtx_data(,lock) /* zone lock */
lck_mtx_ext_t lock_ext; /* placeholder for indirect mutex */
- lck_attr_t lock_attr; /* zone lock attribute */
- lck_grp_t lock_grp; /* zone lock group */
- lck_grp_attr_t lock_grp_attr; /* zone lock group attribute */
vm_size_t cur_size; /* current memory utilization */
vm_size_t max_size; /* how large can this zone grow */
vm_size_t elem_size; /* size of an element */
vm_size_t alloc_size; /* size used for more memory */
+ uint64_t page_count __attribute__((aligned(8))); /* number of pages used by this zone */
uint64_t sum_count; /* count of allocs (life of zone) */
- unsigned int
- /* boolean_t */ exhaustible :1, /* (F) merely return if empty? */
- /* boolean_t */ collectable :1, /* (F) garbage collect empty pages */
- /* boolean_t */ expandable :1, /* (T) expand zone (with message)? */
- /* boolean_t */ allows_foreign :1,/* (F) allow non-zalloc space */
- /* boolean_t */ doing_alloc :1, /* is zone expanding now? */
- /* boolean_t */ waiting :1, /* is thread waiting for expansion? */
- /* boolean_t */ async_pending :1, /* asynchronous allocation pending? */
-#if CONFIG_ZLEAKS
- /* boolean_t */ zleak_on :1, /* Are we collecting allocation information? */
-#endif /* ZONE_DEBUG */
- /* boolean_t */ caller_acct: 1, /* do we account allocation/free to the caller? */
- /* boolean_t */ doing_gc :1, /* garbage collect in progress? */
- /* boolean_t */ noencrypt :1;
+ uint32_t
+ /* boolean_t */ exhaustible :1, /* (F) merely return if empty? */
+ /* boolean_t */ collectable :1, /* (F) garbage collect empty pages */
+ /* boolean_t */ expandable :1, /* (T) expand zone (with message)? */
+ /* boolean_t */ allows_foreign :1, /* (F) allow non-zalloc space */
+ /* boolean_t */ doing_alloc_without_vm_priv:1, /* is zone expanding now via a non-vm_privileged thread? */
+ /* boolean_t */ doing_alloc_with_vm_priv:1, /* is zone expanding now via a vm_privileged thread? */
+ /* boolean_t */ waiting :1, /* is thread waiting for expansion? */
+ /* boolean_t */ async_pending :1, /* asynchronous allocation pending? */
+ /* boolean_t */ zleak_on :1, /* Are we collecting allocation information? */
+ /* boolean_t */ caller_acct :1, /* do we account allocation/free to the caller? */
+ /* boolean_t */ doing_gc :1, /* garbage collect in progress? */
+ /* boolean_t */ noencrypt :1,
+ /* boolean_t */ no_callout :1,
+ /* boolean_t */ async_prio_refill :1,
+ /* boolean_t */ gzalloc_exempt :1,
+ /* boolean_t */ alignment_required :1,
+ /* boolean_t */ use_page_list :1,
+ /* future */ _reserved :15;
+
int index; /* index into zone_info arrays for this zone */
- struct zone * next_zone; /* Link for all-zones list */
- call_entry_data_t call_async_alloc; /* callout for asynchronous alloc */
+ struct zone *next_zone; /* Link for all-zones list */
const char *zone_name; /* a name for the zone */
#if ZONE_DEBUG
queue_head_t active_zones; /* active elements */
#endif /* ZONE_DEBUG */
#if CONFIG_ZLEAKS
- uint32_t num_allocs; /* alloc stats for zleak benchmarks */
- uint32_t num_frees; /* free stats for zleak benchmarks */
- uint32_t zleak_capture; /* per-zone counter for capturing every N allocations */
+ uint32_t zleak_capture; /* per-zone counter for capturing every N allocations */
#endif /* CONFIG_ZLEAKS */
+ uint32_t zp_count; /* counter for poisoning every N frees */
+ vm_size_t prio_refill_watermark;
+ thread_t zone_replenish_thread;
+#if CONFIG_GZALLOC
+ gzalloc_data_t gz;
+#endif /* CONFIG_GZALLOC */
};
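/*
 * Illustrative sketch (not part of this header): a typical lifecycle of a
 * zone built on the struct above.  "struct widget" and "widget_zone" are
 * hypothetical names used only for the example; zinit() and zfree() are
 * declared elsewhere in this header.
 *
 *	static zone_t widget_zone;
 *
 *	widget_zone = zinit(sizeof(struct widget),		// elem_size
 *			    1024 * sizeof(struct widget),	// max_size
 *			    PAGE_SIZE,				// alloc_size
 *			    "widgets");				// zone_name
 *	struct widget *w = (struct widget *)zalloc(widget_zone);
 *	...
 *	zfree(widget_zone, w);
 */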
/*
} zinfo_usage_store_t;
typedef zinfo_usage_store_t *zinfo_usage_t;
-extern void zone_gc(void);
+extern void zone_gc(boolean_t);
extern void consider_zone_gc(boolean_t);
/* Steal memory for zone module */
extern void zone_steal_memory(void);
/* Bootstrap zone module (create zone zone) */
-extern void zone_bootstrap(void) __attribute__((section("__TEXT, initcode")));
+extern void zone_bootstrap(void);
/* Init zone module */
extern void zone_init(
- vm_size_t map_size) __attribute__((section("__TEXT, initcode")));
+ vm_size_t map_size);
/* Handle per-task zone info */
extern void zinfo_task_init(task_t task);
#if ZONE_DEBUG
-#if MACH_KDB
-
-extern void * next_element(
- zone_t z,
- void *elt);
-
-extern void * first_element(
- zone_t z);
-
-#endif /* MACH_KDB */
-
extern void zone_debug_enable(
zone_t z);
extern void zone_debug_disable(
zone_t z);
+#define zone_debug_enabled(z) z->active_zones.next
+#define ROUNDUP(x,y) ((((x)+(y)-1)/(y))*(y))
+#define ZONE_DEBUG_OFFSET ROUNDUP(sizeof(queue_chain_t),16)
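/*
 * Note (editorial): with ZONE_DEBUG enabled, each element carries a
 * queue_chain_t prefix that links it onto active_zones above;
 * ZONE_DEBUG_OFFSET is that prefix rounded up to 16 bytes.  On LP64,
 * sizeof(queue_chain_t) == 16, so ROUNDUP(16,16) == 16 and the caller's
 * payload starts 16 bytes into the raw element.
 */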
#endif /* ZONE_DEBUG */
#endif /* MACH_KERNEL_PRIVATE */
#ifdef XNU_KERNEL_PRIVATE
+extern vm_offset_t zone_map_min_address;
+extern vm_offset_t zone_map_max_address;
+
+
/* Allocate from zone */
extern void * zalloc(
zone_t zone);
const char *name); /* a name for the zone */
+/* Non-waiting for memory version of zalloc */
+extern void * zalloc_nopagewait(
+ zone_t zone);
+
/* Non-blocking version of zalloc */
extern void * zalloc_noblock(
zone_t zone);
-/* direct (non-wrappered) interface */
+/* selective version of zalloc */
extern void * zalloc_canblock(
zone_t zone,
boolean_t canblock);
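/*
 * Illustrative sketch (not part of this header): picking an allocation
 * variant.  The "can_block" flag is hypothetical caller context.
 *
 *	void *elem;
 *	if (can_block)
 *		elem = zalloc(zone);		// may block waiting for memory
 *	else
 *		elem = zalloc_noblock(zone);	// may return NULL instead of blocking
 *	// zalloc_canblock(zone, can_block) expresses the same choice in one call.
 */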
/* Fill zone with memory */
extern void zcram(
zone_t zone,
- void *newmem,
+ vm_offset_t newmem,
vm_size_t size);
/* Initially fill zone with specified number of elements */
zone_t zone,
unsigned int item,
boolean_t value);
-
+extern void zone_prio_refill_configure(zone_t, vm_size_t);
/* Item definitions */
#define Z_EXHAUST 1 /* Make zone exhaustible */
#define Z_COLLECT 2 /* Make zone collectable */
#define Z_FOREIGN 4 /* Allow collectable zone to contain foreign elements */
#define Z_CALLERACCT 5 /* Account alloc/free against the caller */
#define Z_NOENCRYPT 6 /* Don't encrypt zone during hibernation */
+#define Z_NOCALLOUT 7 /* Don't asynchronously replenish the zone via
+ * callouts
+ */
+#define Z_ALIGNMENT_REQUIRED 8
+#define Z_GZALLOC_EXEMPT 9 /* Not tracked in guard allocation mode */
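/*
 * Illustrative sketch (not part of this header): the item definitions above
 * are the second argument to zone_change(), e.g. for a hypothetical zone
 * "my_zone":
 *
 *	zone_change(my_zone, Z_NOENCRYPT, TRUE);	// skip hibernation encryption
 *	zone_change(my_zone, Z_CALLERACCT, FALSE);	// bill alloc/free to the kernel
 */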
/* Preallocate space for zone from zone map */
extern void zprealloc(
#endif /* CONFIG_ZLEAKS */
/* These functions used for leak detection both in zalloc.c and mbuf.c */
-extern uint32_t fastbacktrace(uintptr_t* bt, uint32_t max_frames);
-extern uintptr_t hash_mix(uintptr_t x);
-extern uint32_t hashbacktrace(uintptr_t* bt, uint32_t depth, uint32_t max_size);
-extern uint32_t hashaddr(uintptr_t pt, uint32_t max_size);
+extern uint32_t fastbacktrace(uintptr_t* bt, uint32_t max_frames) __attribute__((noinline));
+extern uintptr_t hash_mix(uintptr_t);
+extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t);
+extern uint32_t hashaddr(uintptr_t, uint32_t);
+
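/*
 * Illustrative sketch (not part of this header): the leak-detection helpers
 * above are typically combined to bucket an allocation's backtrace;
 * "table_size" is a hypothetical power-of-two table size.
 *
 *	uintptr_t bt[16];
 *	uint32_t depth = fastbacktrace(bt, 16);
 *	uint32_t slot  = hashbacktrace(bt, depth, table_size);
 */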
+#define lock_zone(zone) \
+MACRO_BEGIN \
+ lck_mtx_lock_spin(&(zone)->lock); \
+MACRO_END
+
+#define unlock_zone(zone) \
+MACRO_BEGIN \
+ lck_mtx_unlock(&(zone)->lock); \
+MACRO_END
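/*
 * Note (editorial): lock_zone() takes the zone mutex in spin mode
 * (lck_mtx_lock_spin), which pairs with the full lck_mtx_unlock() in
 * unlock_zone(); critical sections are expected to stay short, e.g.
 *
 *	lock_zone(z);
 *	z->countfree--;
 *	unlock_zone(z);
 */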
+
+#if CONFIG_GZALLOC
+void gzalloc_init(vm_size_t);
+void gzalloc_zone_init(zone_t);
+void gzalloc_configure(void);
+void gzalloc_reconfigure(zone_t);
+boolean_t gzalloc_enabled(void);
+
+vm_offset_t gzalloc_alloc(zone_t, boolean_t);
+boolean_t gzalloc_free(zone_t, void *);
+#endif /* CONFIG_GZALLOC */
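/*
 * Illustrative sketch (not part of this header), assuming the allocator
 * consults the guard-mode hooks before its normal free lists; the control
 * flow here is simplified.
 *
 *	vm_offset_t addr = 0;
 *	#if CONFIG_GZALLOC
 *	addr = gzalloc_alloc(zone, canblock);	// 0 when this zone is not guarded
 *	#endif
 *	if (addr == 0) {
 *		// fall back to the zone's regular free-element path
 *	}
 */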
#endif /* XNU_KERNEL_PRIVATE */