/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr.
 */
#ifdef KERNEL_PRIVATE

#ifndef _KERN_ZALLOC_H_
#define _KERN_ZALLOC_H_

#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <kern/kern_types.h>
#include <sys/cdefs.h>

#if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
#define __zalloc_deprecated(msg)        __deprecated_msg(msg)
#else
#define __zalloc_deprecated(msg)
#endif
/*!
 * @typedef zone_id_t
 *
 * @abstract
 * The type for a zone ID.
 */
typedef uint16_t zone_id_t;
/*!
 * @enum zone_create_flags_t
 *
 * @abstract
 * Set of flags to pass to zone_create().
 *
 * @discussion
 * Some kernel-wide policies affect all possible created zones.
 * Explicit @c ZC_* flags win over such policies.
 */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** Force the created zone to use VA sequestering */
	ZC_SEQUESTER            = 0x00000001,
	/** Force the created zone @b NOT to use VA sequestering */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,

	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** Do not track this zone when gzalloc is engaged */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed, not default unlike zinit() */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE
	/** This zone will back a kalloc heap */
	ZC_KALLOC_HEAP          = 0x0800000000000000,

	/** This zone can be crammed with foreign pages */
	ZC_ALLOW_FOREIGN        = 0x1000000000000000,

	/** This zone contains bytes / data buffers only */
	ZC_DATA_BUFFERS         = 0x2000000000000000,

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
#endif /* XNU_KERNEL_PRIVATE */
});
/*!
 * @union zone_or_view
 *
 * @abstract
 * A type used for calls that admit both a zone or a zone view.
 *
 * @discussion
 * @c zalloc() and @c zfree() and their variants can act on both
 * zones and zone views.
 */
union zone_or_view {
	struct zone_view *zov_view;
	struct zone      *zov_zone;
#ifdef __cplusplus
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif
/*!
 * @function zone_create
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * A Zone is a slab allocator that returns objects of a given size very quickly.
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 *
 * @returns             the created zone, this call never fails.
 */
extern zone_t zone_create(
	const char             *name,
	vm_size_t               size,
	zone_create_flags_t     flags);
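
/*
 * Usage sketch (illustrative only, not part of this header): creating a zone
 * for a hypothetical fixed-size object type at subsystem initialization.
 * The names `widget_zone` and `struct widget` are assumptions for the example.
 *
 *	static zone_t widget_zone;
 *
 *	void
 *	widget_subsystem_init(void)
 *	{
 *		widget_zone = zone_create("widgets", sizeof(struct widget),
 *		    ZC_ZFREE_CLEARMEM);
 *	}
 */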
/*!
 * @function zdestroy
 *
 * @abstract
 * Destroys a zone previously made with zone_create.
 *
 * @discussion
 * Zones must have been made destructible for @c zdestroy() to be allowed,
 * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
 *
 * @param zone          the zone to destroy.
 */
extern void zdestroy(
	zone_t          zone);
/*!
 * @function zone_require
 *
 * @abstract
 * Requires a given pointer to belong to the specified zone.
 *
 * @discussion
 * The function panics if the check fails, as a failure indicates that the
 * kernel internals have been compromised.
 *
 * Note that zone_require() can only work with:
 * - zones not allowing foreign memory,
 * - zones in the general submap.
 *
 * @param zone          the zone the address needs to belong to.
 * @param addr          the element address to check.
 */
extern void zone_require(
	zone_t          zone,
	void           *addr);
/*!
 * @enum zalloc_flags_t
 *
 * @abstract
 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
 *
 * @discussion
 * It is encouraged that any callsite passing flags uses exactly one of:
 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
 * if nothing else was specified.
 *
 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
 * then @c Z_WAITOK is ignored.
 *
 * @const Z_WAITOK
 * Means that it's OK for zalloc() to block to wait for memory;
 * when Z_WAITOK is passed, zalloc will never return NULL.
 *
 * @const Z_NOWAIT
 * Passing this flag means that zalloc is not allowed to ever block.
 *
 * @const Z_NOPAGEWAIT
 * Passing this flag means that zalloc is allowed to wait due to lock
 * contention, but will not wait on the VM for pages when
 * under memory pressure.
 *
 * @const Z_ZERO
 * Passing this flag means that the returned memory has been zeroed out.
 *
 * @const Z_NOFAIL
 * Passing this flag means that the caller expects the allocation to always
 * succeed. This will result in a panic if this assumption isn't correct.
 *
 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
 * be used on exhaustible zones.
 *
 #if XNU_KERNEL_PRIVATE
 *
 * @const Z_VM_TAG_MASK
 * Represents bits in which a vm_tag_t for the allocation can be passed.
 * (used by kalloc for the zone tagging debugging feature).
 #endif
 */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,

	Z_NOFAIL        = 0x8000,
#if XNU_KERNEL_PRIVATE
	/** used by kalloc to propagate vm tags for -zt */
	Z_VM_TAG_MASK   = 0xffff0000,

#define Z_VM_TAG_SHIFT  16
#define Z_VM_TAG(tag)   ((zalloc_flags_t)(tag) << Z_VM_TAG_SHIFT)
#endif
});
/*!
 * @function zalloc
 *
 * @abstract
 * Allocates an element from a specified zone.
 *
 * @discussion
 * If the zone isn't exhaustible and is expandable, this call never fails.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
extern void *zalloc(
	zone_or_view_t  zone_or_view);
/*!
 * @function zalloc_noblock
 *
 * @abstract
 * Allocates an element from a specified zone, but never blocks.
 *
 * @discussion
 * This call is suitable for preemptible code; however, allocation
 * isn't allowed from interrupt context.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
extern void *zalloc_noblock(
	zone_or_view_t  zone_or_view);
/*!
 * @function zalloc_flags()
 *
 * @abstract
 * Allocates an element from a specified zone, with flags.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void *zalloc_flags(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);
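
/*
 * Usage sketch (illustrative only): a blocking, zero-filled allocation through
 * zalloc_flags(). `widget_zone` and `struct widget` are hypothetical names
 * carried over from the zone_create() example above.
 *
 *	struct widget *w;
 *
 *	w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 */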
/*!
 * @function zfree
 *
 * @abstract
 * Frees an element allocated with @c zalloc*.
 *
 * @discussion
 * If the element being freed doesn't belong to the specified zone,
 * then this call will panic.
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param elem          the element to free
 */
extern void zfree(
	zone_or_view_t  zone_or_view,
	void           *elem);
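
/*
 * Usage sketch (illustrative only): a full allocate/use/free cycle against the
 * hypothetical `widget_zone` above. With the default Z_WAITOK semantics the
 * allocation only returns NULL for exhaustible or non-expandable zones.
 *
 *	struct widget *w = zalloc(widget_zone);
 *
 *	if (w != NULL) {
 *		// ... use the element ...
 *		zfree(widget_zone, w);
 *	}
 */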
/* deprecated KPIs */

__zalloc_deprecated("use zone_create()")
extern zone_t zinit(
	vm_size_t       size,           /* the size of an element */
	vm_size_t       maxmem,         /* maximum memory to use */
	vm_size_t       alloc,          /* allocation size */
	const char     *name);          /* a name for the zone */
#ifdef XNU_KERNEL_PRIVATE
#pragma mark - XNU only interfaces

#include <kern/startup.h>
#include <kern/cpu_number.h>

#pragma GCC visibility push(hidden)

#pragma mark XNU only: zalloc (extended)

#define ZALIGN_NONE     (sizeof(uint8_t)  - 1)
#define ZALIGN_16       (sizeof(uint16_t) - 1)
#define ZALIGN_32       (sizeof(uint32_t) - 1)
#define ZALIGN_PTR      (sizeof(void *)   - 1)
#define ZALIGN_64       (sizeof(uint64_t) - 1)
#define ZALIGN(t)       (_Alignof(t)      - 1)
/*!
 * @function zalloc_permanent()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation cannot be determined
 * by zone_element_size, so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
extern void *zalloc_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);
/*!
 * @function zalloc_permanent_type()
 *
 * @abstract
 * Allocates a permanent element of a given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_permanent_type(type_t) \
	((type_t *)zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
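
/*
 * Usage sketch (illustrative only): permanently allocating a zero-filled
 * record with its natural alignment. `struct boot_config` is a hypothetical
 * type; permanent allocations are never freed.
 *
 *	struct boot_config *cfg = zalloc_permanent_type(struct boot_config);
 */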
/*!
 * @function zalloc_first_proc_made()
 *
 * @abstract
 * Declare that the "early" allocation phase is done.
 */
extern void zalloc_first_proc_made(void);
#pragma mark XNU only: per-cpu allocations

/*!
 * @macro __zpercpu
 *
 * @abstract
 * Annotation that helps denote a per-cpu pointer that requires usage of
 * @c zpercpu_*() for access.
 */
#define __zpercpu
/*!
 * @macro zpercpu_get_cpu()
 *
 * @abstract
 * Get a pointer to a specific CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 * @param cpu           the specified CPU number as returned by @c cpu_number()
 *
 * @returns             the per-CPU slot for @c ptr for the specified CPU.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)cpu))
/*!
 * @macro zpercpu_get()
 *
 * @abstract
 * Get a pointer to the current CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 *
 * @returns             the per-CPU slot for @c ptr for the current CPU.
 */
#define zpercpu_get(ptr) \
	zpercpu_get_cpu(ptr, cpu_number())
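
/*
 * Usage sketch (illustrative only): bumping the current CPU's slot of a
 * per-cpu counter. `counters` is a hypothetical per-cpu pointer obtained
 * from @c zalloc_percpu*().
 *
 *	uint64_t *__zpercpu counters;
 *
 *	*zpercpu_get(counters) += 1;
 */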
/*!
 * @macro zpercpu_foreach()
 *
 * @abstract
 * Enumerate all per-CPU slots by address.
 *
 * @param it            the name for the iterator
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 */
#define zpercpu_foreach(it, ptr) \
	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
	    it < __end_##it; it = __zpcpu_next(it))
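
/*
 * Usage sketch (illustrative only): summing every CPU slot of the hypothetical
 * `counters` pointer from the previous example.
 *
 *	uint64_t total = 0;
 *
 *	zpercpu_foreach(it, counters) {
 *		total += *it;
 *	}
 */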
/*!
 * @macro zpercpu_foreach_cpu()
 *
 * @abstract
 * Enumerate all per-CPU slots by CPU slot number.
 *
 * @param cpu           the name for the cpu number iterator.
 */
#define zpercpu_foreach_cpu(cpu) \
	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
/*!
 * @function zalloc_percpu()
 *
 * @abstract
 * Allocates an element from a per-cpu zone.
 *
 * @discussion
 * The returned pointer cannot be used directly and must be manipulated
 * through the @c zpercpu_get*() interfaces.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void *zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);
/*!
 * @function zfree_percpu()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_percpu().
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param addr          the address to free
 */
extern void zfree_percpu(
	zone_or_view_t  zone_or_view,
	void           *addr);
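
/*
 * Usage sketch (illustrative only): lifecycle of a per-cpu allocation backed
 * by a hypothetical `counters_zone` created with ZC_PERCPU.
 *
 *	uint64_t *__zpercpu counters;
 *
 *	counters = zalloc_percpu(counters_zone, Z_WAITOK | Z_ZERO);
 *	// ... access via zpercpu_get() / zpercpu_foreach() ...
 *	zfree_percpu(counters_zone, counters);
 */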
/*!
 * @function zalloc_percpu_permanent()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
extern void *zalloc_percpu_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);
/*!
 * @function zalloc_percpu_permanent_type()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone of a
 * given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_percpu_permanent_type(type_t) \
	((type_t *)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
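
/*
 * Usage sketch (illustrative only): a permanent, zero-filled per-cpu
 * statistics block with natural alignment. `struct foo_stats` is a
 * hypothetical type.
 *
 *	struct foo_stats *__zpercpu stats;
 *
 *	stats = zalloc_percpu_permanent_type(struct foo_stats);
 */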
#pragma mark XNU only: zone views

/*!
 * @enum zone_kheap_id_t
 *
 * @abstract
 * Enumerate a particular kalloc heap.
 *
 * @discussion
 * More documentation about heaps is available in @c <kern/kalloc.h>.
 *
 * @const KHEAP_ID_NONE
 * This value denotes regular zones, not used by kalloc.
 *
 * @const KHEAP_ID_DEFAULT
 * Indicates zones part of the KHEAP_DEFAULT heap.
 *
 * @const KHEAP_ID_DATA_BUFFERS
 * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
 *
 * @const KHEAP_ID_KEXT
 * Indicates zones part of the KHEAP_KEXT heap.
 */
__enum_decl(zone_kheap_id_t, uint32_t, {
	KHEAP_ID_NONE,
	KHEAP_ID_DEFAULT,
	KHEAP_ID_DATA_BUFFERS,
	KHEAP_ID_KEXT,

#define KHEAP_ID_COUNT (KHEAP_ID_KEXT + 1)
});
/*!
 * @typedef zone_stats_t
 *
 * @abstract
 * The opaque type for per-cpu zone stats that are accumulated per zone
 * or per zone view.
 */
typedef struct zone_stats *__zpercpu zone_stats_t;
/*!
 * @typedef zone_view_t
 *
 * @abstract
 * A view on a zone for accounting purposes.
 *
 * @discussion
 * A zone view uses the zone it references for the allocations backing store,
 * but does the allocation accounting at the view level.
 *
 * This accounting is surfaced by @b zprint(1) and similar tools,
 * which allow for cheap but finer grained understanding of allocations
 * without any fragmentation cost.
 *
 * Zone views are protected by the kernel lockdown and can't be initialized
 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
 */
typedef struct zone_view *zone_view_t;

struct zone_view {
	zone_t          zv_zone;
	zone_stats_t    zv_stats;
	const char     *zv_name;
	zone_view_t     zv_next;
};
/*!
 * @macro ZONE_VIEW_DECLARE
 *
 * @abstract
 * (optionally) declares a zone view (in a header).
 *
 * @param var           the name for the zone view.
 */
#define ZONE_VIEW_DECLARE(var) \
	extern struct zone_view var[1]
/*!
 * @macro ZONE_VIEW_DEFINE
 *
 * @abstract
 * Defines a given zone view and what it points to.
 *
 * @discussion
 * Zone views can either share a pre-existing zone,
 * or perform a lookup into a kalloc heap for the zone
 * backing the bucket of the proper size.
 *
 * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
 * as the last rank. If views on zones are created, these must have been
 * created before this stage.
 *
 * @param var           the name for the zone view.
 * @param name          a string describing the zone view.
 * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
 * @param size          the element size to be allocated from this view.
 */
#define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
	    .zv_name = name, \
	} }; \
	static __startup_data struct zone_view_startup_spec \
	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
	    &__startup_zone_view_spec_ ## var)
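
/*
 * Usage sketch (illustrative only): defining a view that accounts 64-byte
 * allocations served by the KHEAP_DEFAULT heap under a dedicated name.
 * `V_FOO` and the "foo.objects" label are assumptions for the example.
 *
 *	ZONE_VIEW_DEFINE(V_FOO, "foo.objects", KHEAP_ID_DEFAULT, 64);
 *
 * Elements are then obtained with zalloc(V_FOO) and released with
 * zfree(V_FOO, elem); the backing zone is shared, but the view's usage is
 * reported separately by zprint(1).
 */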
#pragma mark XNU only: zone creation (extended)

/*!
 * @enum zone_reserved_id_t
 *
 * @abstract
 * Well known pre-registered zones, allowing use of zone_id_require().
 *
 * @discussion
 * @c ZONE_ID__* aren't real zone IDs.
 *
 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
 * easy a value to produce (by malice or accident).
 *
 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
 * @c zone_create_ext().
 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,

	ZONE_ID_PERCPU_PERMANENT,

	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_VOUCHERS,

	ZONE_ID__FIRST_DYNAMIC,
});
/*!
 * @macro ZONE_ID_ANY
 *
 * @abstract
 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
 * zone ID.
 */
#define ZONE_ID_ANY ((zone_id_t)-1)
/*!
 * @function zone_name
 *
 * @param zone          the specified zone
 * @returns             the name of the specified zone.
 */
const char *zone_name(
	zone_t          zone);
/*!
 * @function zone_heap_name
 *
 * @param zone          the specified zone
 * @returns             the name of the heap this zone is part of, or "".
 */
const char *zone_heap_name(
	zone_t          zone);
/*!
 * @function zone_submap
 *
 * @param zone          the specified zone
 * @returns             the zone (sub)map this zone allocates from.
 */
extern vm_map_t zone_submap(
	zone_t          zone);
/*!
 * @function zone_create_ext
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * This is an extended version of @c zone_create().
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 *
 * @param extra_setup   a block that can perform non trivial initialization
 *                      on the zone before it is marked valid.
 *                      This block can call advanced setups like:
 *                      - zone_set_submap_idx()
 *                      - zone_set_exhaustible()
 *                      - zone_set_noexpand()
 *
 * @returns             the created zone, this call never fails.
 */
extern zone_t zone_create_ext(
	const char             *name,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_id_t               desired_zid,
	void                  (^extra_setup)(zone_t));
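
/*
 * Usage sketch (illustrative only): creating a zone with a setup block that
 * makes it exhaustible before it is marked valid. All names and the element
 * limit are assumptions for the example.
 *
 *	zone_t widget_zone = zone_create_ext("widgets", sizeof(struct widget),
 *	    ZC_ZFREE_CLEARMEM, ZONE_ID_ANY, ^(zone_t z) {
 *		zone_set_exhaustible(z, 1024);
 *	});
 */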
/*!
 * @macro ZONE_DECLARE
 *
 * @abstract
 * Declares a zone variable to automatically initialize with the specified
 * parameters.
 *
 * @param var           the name of the variable to declare.
 * @param name          the name for the zone
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 */
#define ZONE_DECLARE(var, name, size, flags) \
	SECURITY_READ_ONLY_LATE(zone_t) var; \
	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## var = { &var, name, size, flags, \
	    ZONE_ID_ANY, NULL }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
	    &__startup_zone_spec_ ## var)
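
/*
 * Usage sketch (illustrative only): declaring a zone that is created
 * automatically during the STARTUP_SUB_ZALLOC phase. `widget_zone` and
 * `struct widget` are hypothetical names.
 *
 *	ZONE_DECLARE(widget_zone, "widgets", sizeof(struct widget),
 *	    ZC_ZFREE_CLEARMEM);
 */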
/*!
 * @macro ZONE_INIT
 *
 * @abstract
 * Initializes a given zone automatically during startup with the specified
 * parameters.
 *
 * @param var           the name of the variable to initialize.
 * @param name          the name for the zone
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 * @param extra_setup   a block that can perform non trivial initialization
 *                      (@see @c zone_create_ext()).
 */
#define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
/*!
 * @function zone_id_require
 *
 * @abstract
 * Requires a given pointer to belong to the specified zone, by ID and size.
 *
 * @discussion
 * The function panics if the check fails, as a failure indicates that the
 * kernel internals have been compromised.
 *
 * This is a variant of @c zone_require() which:
 * - isn't sensitive to @c zone_t::elem_size being compromised,
 * - is slightly faster as it saves one load and a multiplication.
 *
 * @warning: zones using foreign memory can't use this interface.
 *
 * @param zone_id       the zone ID the address needs to belong to.
 * @param elem_size     the size of elements for this zone.
 * @param addr          the element address to check.
 */
extern void zone_id_require(
	zone_id_t       zone_id,
	vm_size_t       elem_size,
	void           *addr);
/*!
 * @function zone_id_require_allow_foreign
 *
 * @abstract
 * Requires a given pointer to belong to the specified zone, by ID and size.
 *
 * @discussion
 * This is a version of @c zone_id_require() that works with zones allowing
 * foreign memory.
 */
extern void zone_id_require_allow_foreign(
	zone_id_t       zone_id,
	vm_size_t       elem_size,
	void           *addr);
/*
 * Zone submap indices
 *
 * Z_SUBMAP_IDX_VA_RESTRICTED (LP64)
 * used to restrict VM allocations lower in the kernel VA space,
 * for pointer packing
 *
 * Z_SUBMAP_IDX_VA_RESERVE (ILP32)
 * used to keep a reserve of VA space for the urgent allocations
 * backing allocations of crucial VM types (fictitious pages, holes, ...)
 *
 * Z_SUBMAP_IDX_GENERAL
 * used for unrestricted allocations
 *
 * Z_SUBMAP_IDX_BAG_OF_BYTES
 * used to sequester bags of bytes from all other allocations and allow VA reuse
 */
#if defined(__LP64__)
#define Z_SUBMAP_IDX_VA_RESTRICTED      0
#else
#define Z_SUBMAP_IDX_VA_RESERVE         0
#endif
#define Z_SUBMAP_IDX_GENERAL            1
#define Z_SUBMAP_IDX_BAG_OF_BYTES       2
#define Z_SUBMAP_IDX_COUNT              3
/* Change the zone sub-map, to be called from the zone_create_ext() setup hook */
extern void zone_set_submap_idx(
	zone_t          zone,
	unsigned int    submap_idx);

/* Make the zone non-expandable, to be called from the zone_create_ext() setup hook */
extern void zone_set_noexpand(
	zone_t          zone,
	vm_size_t       max_elements);

/* Make the zone exhaustible, to be called from the zone_create_ext() setup hook */
extern void zone_set_exhaustible(
	zone_t          zone,
	vm_size_t       max_elements);
/*!
 * @function zone_fill_initially
 *
 * @abstract
 * Initially fill a non collectable zone to have the specified amount of
 * elements.
 *
 * @discussion
 * This function must be called on a non collectable permanent zone before it
 * is used for allocations.
 *
 * @param zone          The zone to fill.
 * @param nelems        The number of elements to be able to hold.
 */
extern void zone_fill_initially(
	zone_t          zone,
	vm_size_t       nelems);
#pragma mark XNU only: misc & implementation details

/*
 * This macro sets "elem" to NULL on free.
 *
 * Note: all values passed to zfree() might be in the element to be freed,
 *       so temporaries must be taken, and the reset to NULL must happen
 *       prior to the actual free.
 */
#define zfree(zone, elem) ({ \
	_Static_assert(sizeof(elem) == sizeof(void *), "elem isn't pointer sized"); \
	__auto_type __zfree_zone = (zone); \
	__auto_type __zfree_eptr = &(elem); \
	__auto_type __zfree_elem = *__zfree_eptr; \
	*__zfree_eptr = (__typeof__(__zfree_elem))NULL; \
	(zfree)(__zfree_zone, (void *)__zfree_elem); \
})
struct zone_create_startup_spec {
	zone_t                 *z_var;
	const char             *z_name;
	vm_size_t               z_size;
	zone_create_flags_t     z_flags;
	zone_id_t               z_zid;
	void                  (^z_setup)(zone_t);
};

extern void zone_create_startup(
	struct zone_create_startup_spec *spec);
#define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
	    &__startup_zone_spec_ ## ns)

#define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
	__ZONE_INIT1(ns, var, name, size, flags, zid, setup)
struct zone_view_startup_spec {
	zone_view_t             zv_view;
	union {
		zone_kheap_id_t zv_heapid;
		zone_t          zv_zone;
	};
	vm_size_t               zv_size;
};

extern void zone_view_startup_init(
	struct zone_view_startup_spec *spec);
#if DEBUG || DEVELOPMENT
# if __LP64__
#  define ZPCPU_MANGLE_BIT      (1ul << 63)
# else /* !__LP64__ */
#  define ZPCPU_MANGLE_BIT      (1ul << 31)
# endif /* !__LP64__ */
#else /* !(DEBUG || DEVELOPMENT) */
# define ZPCPU_MANGLE_BIT       0ul
#endif /* !(DEBUG || DEVELOPMENT) */

#define __zpcpu_mangle(ptr)     (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT)
#define __zpcpu_demangle(ptr)   (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT)
#define __zpcpu_addr(e)         ((vm_address_t)(e))
#define __zpcpu_cast(ptr, e)    ((typeof(ptr))(e))
#define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
/*!
 * @macro __zpcpu_mangle_for_boot()
 *
 * @discussion
 * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
 * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
 * storage marked @c __startup_data and replace it with the proper allocation
 * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
 *
 * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
 * provides the proper mangling of the storage into a "fake" percpu pointer so
 * that accesses through @c zpercpu_get() function properly.
 *
 * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
 */
#define __zpcpu_mangle_for_boot(ptr) ({ \
	assert(startup_phase < STARTUP_SUB_ZALLOC); \
	__zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
})
extern unsigned zpercpu_count(void) __pure2;
/* These functions are used for leak detection both in zalloc.c and mbuf.c */
extern uintptr_t hash_mix(uintptr_t);
extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t);
extern uint32_t hashaddr(uintptr_t, uint32_t);
#if CONFIG_ZLEAKS
/* support for the kern.zleak.* sysctls */

extern kern_return_t zleak_activate(void);
extern vm_size_t zleak_max_zonemap_size;
extern vm_size_t zleak_global_tracking_threshold;
extern vm_size_t zleak_per_zone_tracking_threshold;

extern int get_zleak_state(void);

#endif /* CONFIG_ZLEAKS */
#if DEBUG || DEVELOPMENT
extern boolean_t run_zone_test(void);
extern void zone_gc_replenish_test(void);
extern void zone_alloc_replenish_test(void);
#endif /* DEBUG || DEVELOPMENT */
#pragma GCC visibility pop

#endif /* XNU_KERNEL_PRIVATE */

#endif /* _KERN_ZALLOC_H_ */

#endif /* KERNEL_PRIVATE */