/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	zalloc.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 */

#ifdef KERNEL_PRIVATE

#ifndef _KERN_ZALLOC_H_
#define _KERN_ZALLOC_H_

#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <kern/kern_types.h>
#include <sys/cdefs.h>

#if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
#define __zalloc_deprecated(msg)        __deprecated_msg(msg)
#else
#define __zalloc_deprecated(msg)
#endif

__BEGIN_DECLS

/*!
 * @typedef zone_id_t
 *
 * @abstract
 * The type for a zone ID.
 */
typedef uint16_t zone_id_t;

/**
 * @enum zone_create_flags_t
 *
 * @abstract
 * Set of flags to pass to zone_create().
 *
 * @discussion
 * Some kernel-wide policies affect all possible created zones.
 * Explicit @c ZC_* flags win over such policies.
 */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** Force the created zone to use VA sequestering */
	ZC_SEQUESTER            = 0x00000001,
	/** Force the created zone @b NOT to use VA sequestering */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,


	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** Do not track this zone when gzalloc is engaged */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed; unlike with zinit(), this is not the default */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE

	/** This zone will back a kalloc heap */
	ZC_KALLOC_HEAP          = 0x0800000000000000,

	/** This zone can be crammed with foreign pages */
	ZC_ALLOW_FOREIGN        = 0x1000000000000000,

	/** This zone contains bytes / data buffers only */
	ZC_DATA_BUFFERS         = 0x2000000000000000,

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
#endif
});

/*!
 * @union zone_or_view
 *
 * @abstract
 * A type used for calls that admit either a zone or a zone view.
 *
 * @discussion
 * @c zalloc() and @c zfree() and their variants can act on both
 * zones and zone views.
 */
union zone_or_view {
	struct zone_view *zov_view;
	struct zone      *zov_zone;
#ifdef __cplusplus
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif

/*!
 * @function zone_create
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * A zone is a slab allocator that returns objects of a given size very quickly.
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 *
 * @returns             the created zone; this call never fails.
 */
extern zone_t   zone_create(
	const char             *name,
	vm_size_t               size,
	zone_create_flags_t     flags);
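
/*
 * Example (hypothetical usage sketch, not part of this interface):
 * creating a zone for a caller-defined "struct widget" type.  The struct,
 * variable and zone names below are illustrative only.
 *
 *	struct widget { uint64_t w_id; void *w_data; };
 *	static zone_t widget_zone;
 *
 *	void
 *	widget_zone_init(void)
 *	{
 *		widget_zone = zone_create("widgets", sizeof(struct widget),
 *		    ZC_ZFREE_CLEARMEM);
 *	}
 */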

/*!
 * @function zdestroy
 *
 * @abstract
 * Destroys a zone previously made with zone_create.
 *
 * @discussion
 * Zones must have been made destructible, by passing @c ZC_DESTRUCTIBLE at
 * @c zone_create() time, for @c zdestroy() to be allowed.
 *
 * @param zone          the zone to destroy.
 */
extern void     zdestroy(
	zone_t          zone);

/*!
 * @function zone_require
 *
 * @abstract
 * Requires that a given pointer belong to the specified zone.
 *
 * @discussion
 * The function panics if the check fails as it indicates that the kernel
 * internals have been compromised.
 *
 * Note that zone_require() can only work with:
 * - zones not allowing foreign memory
 * - zones in the general submap.
 *
 * @param zone          the zone the address needs to belong to.
 * @param addr          the element address to check.
 */
extern void     zone_require(
	zone_t          zone,
	void           *addr);
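
/*
 * Example (hypothetical sketch): a subsystem can validate that a pointer it
 * was handed really is one of its own allocations before trusting it.
 * "widget_zone" and "struct widget" are the illustrative names used above.
 *
 *	void
 *	widget_consume(struct widget *w)
 *	{
 *		zone_require(widget_zone, w);
 *		// w is now known to point into widget_zone's element space
 *	}
 */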

/*!
 * @enum zalloc_flags_t
 *
 * @brief
 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
 *
 * @discussion
 * Any callsite passing flags is encouraged to use exactly one of
 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT; the default is @c Z_WAITOK
 * if none is specified.
 *
 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
 * then @c Z_WAITOK is ignored.
 *
 * @const Z_WAITOK
 * Means that it's OK for zalloc() to block and wait for memory;
 * when Z_WAITOK is passed, zalloc() will never return NULL.
 *
 * @const Z_NOWAIT
 * Passing this flag means that zalloc is not allowed to ever block.
 *
 * @const Z_NOPAGEWAIT
 * Passing this flag means that zalloc is allowed to wait due to lock
 * contention, but will not wait on the VM for pages when
 * under memory pressure.
 *
 * @const Z_ZERO
 * Passing this flag means that the returned memory has been zeroed out.
 *
 * @const Z_NOFAIL
 * Passing this flag means that the caller expects the allocation to always
 * succeed. This will result in a panic if this assumption isn't correct.
 *
 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
 * be used on exhaustible zones.
 *
 #if XNU_KERNEL_PRIVATE
 *
 * @const Z_VM_TAG_MASK
 * Represents bits in which a vm_tag_t for the allocation can be passed.
 * (used by kalloc for the zone tagging debugging feature).
 #endif
 */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,

	Z_NOFAIL        = 0x8000,
#if XNU_KERNEL_PRIVATE
	/** used by kalloc to propagate vm tags for -zt */
	Z_VM_TAG_MASK   = 0xffff0000,

#define Z_VM_TAG_SHIFT  16
#define Z_VM_TAG(tag)   ((zalloc_flags_t)(tag) << Z_VM_TAG_SHIFT)
#endif
});

/*!
 * @function zalloc
 *
 * @abstract
 * Allocates an element from a specified zone.
 *
 * @discussion
 * If the zone isn't exhaustible and is expandable, this call never fails.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
extern void    *zalloc(
	zone_or_view_t  zone_or_view);

/*!
 * @function zalloc_noblock
 *
 * @abstract
 * Allocates an element from a specified zone, but never blocks.
 *
 * @discussion
 * This call is suitable for preemptible code; however, allocation
 * from interrupt context isn't allowed.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
extern void    *zalloc_noblock(
	zone_or_view_t  zone_or_view);

/*!
 * @function zalloc_flags()
 *
 * @abstract
 * Allocates an element from a specified zone, with flags.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void    *zalloc_flags(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);

/*!
 * @function zfree
 *
 * @abstract
 * Frees an element allocated with @c zalloc*.
 *
 * @discussion
 * If the element being freed doesn't belong to the specified zone,
 * then this call will panic.
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param elem          the element to free
 */
extern void     zfree(
	zone_or_view_t  zone_or_view,
	void           *elem);
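
/*
 * Example (hypothetical sketch): a typical allocate/free pair against the
 * illustrative "widget_zone" above.  Z_WAITOK | Z_ZERO requests a blocking,
 * zero-filled allocation, so the result doesn't need a NULL check.
 *
 *	struct widget *w = zalloc_flags(widget_zone, Z_WAITOK | Z_ZERO);
 *
 *	w->w_id = 42;
 *	...
 *	zfree(widget_zone, w);
 */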

/* deprecated KPIs */

__zalloc_deprecated("use zone_create()")
extern zone_t   zinit(
	vm_size_t       size,           /* the size of an element */
	vm_size_t       maxmem,         /* maximum memory to use */
	vm_size_t       alloc,          /* allocation size */
	const char     *name);          /* a name for the zone */

#ifdef XNU_KERNEL_PRIVATE
#pragma mark - XNU only interfaces
#include <kern/startup.h>
#include <kern/cpu_number.h>

#pragma GCC visibility push(hidden)

#pragma mark XNU only: zalloc (extended)

#define ZALIGN_NONE     (sizeof(uint8_t)  - 1)
#define ZALIGN_16       (sizeof(uint16_t) - 1)
#define ZALIGN_32       (sizeof(uint32_t) - 1)
#define ZALIGN_PTR      (sizeof(void *)   - 1)
#define ZALIGN_64       (sizeof(uint64_t) - 1)
#define ZALIGN(t)       (_Alignof(t)      - 1)


/*!
 * @function zalloc_permanent()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation cannot be determined
 * by zone_element_size(), so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
extern void    *zalloc_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);

/*!
 * @function zalloc_permanent_type()
 *
 * @abstract
 * Allocates a permanent element of a given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_permanent_type(type_t) \
	((type_t *)zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
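
/*
 * Example (hypothetical sketch): allocating a never-freed, zero-initialized
 * control structure during early boot.  "struct foo_globals" and "foo_state"
 * are illustrative names.
 *
 *	static struct foo_globals *foo_state;
 *
 *	void
 *	foo_init(void)
 *	{
 *		foo_state = zalloc_permanent_type(struct foo_globals);
 *	}
 */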

#pragma mark XNU only: per-cpu allocations

/*!
 * @macro __zpercpu
 *
 * @abstract
 * Annotation denoting a per-cpu pointer that must be accessed through the
 * @c zpercpu_*() interfaces.
 */
#define __zpercpu

/*!
 * @macro zpercpu_get_cpu()
 *
 * @abstract
 * Get a pointer to a specific CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 * @param cpu           the specified CPU number as returned by @c cpu_number()
 *
 * @returns             the per-CPU slot for @c ptr for the specified CPU.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)cpu))

/*!
 * @macro zpercpu_get()
 *
 * @abstract
 * Get a pointer to the current CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 *
 * @returns             the per-CPU slot for @c ptr for the current CPU.
 */
#define zpercpu_get(ptr) \
	zpercpu_get_cpu(ptr, cpu_number())

/*!
 * @macro zpercpu_foreach()
 *
 * @abstract
 * Enumerate all per-CPU slots by address.
 *
 * @param it            the name for the iterator
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 */
#define zpercpu_foreach(it, ptr) \
	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
	    it < __end_##it; it = __zpcpu_next(it))

/*!
 * @macro zpercpu_foreach_cpu()
 *
 * @abstract
 * Enumerate all per-CPU slots by CPU slot number.
 *
 * @param cpu           the name for the cpu number iterator.
 */
#define zpercpu_foreach_cpu(cpu) \
	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)

/*!
 * @function zalloc_percpu()
 *
 * @abstract
 * Allocates an element from a per-cpu zone.
 *
 * @discussion
 * The returned pointer cannot be used directly and must be manipulated
 * through the @c zpercpu_get*() interfaces.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void    *zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);

/*!
 * @function zfree_percpu()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_percpu().
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_percpu(
	zone_or_view_t  zone_or_view,
	void           *addr);
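
/*
 * Example (hypothetical sketch): a lockless per-CPU counter.  "counter_zone"
 * is an illustrative per-cpu zone of size sizeof(uint64_t) created with
 * ZC_PERCPU | ZC_ZFREE_CLEARMEM; the counter is bumped on the local CPU and
 * summed over all CPUs when read.
 *
 *	uint64_t *__zpercpu counters;
 *	uint64_t total = 0;
 *
 *	counters = zalloc_percpu(counter_zone, Z_WAITOK | Z_ZERO);
 *
 *	*zpercpu_get(counters) += 1;       // the current CPU's slot
 *
 *	zpercpu_foreach(it, counters) {    // every CPU's slot
 *		total += *it;
 *	}
 *
 *	zfree_percpu(counter_zone, counters);
 */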

/*!
 * @function zalloc_percpu_permanent()
 *
 * @abstract
 * Allocates a permanent per-cpu element from the permanent percpu zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
extern void    *zalloc_percpu_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);

/*!
 * @function zalloc_percpu_permanent_type()
 *
 * @abstract
 * Allocates a permanent per-cpu element from the permanent percpu zone,
 * of a given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_percpu_permanent_type(type_t) \
	((type_t *)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))

#pragma mark XNU only: zone views

/*!
 * @enum zone_kheap_id_t
 *
 * @brief
 * Enumerate a particular kalloc heap.
 *
 * @discussion
 * More documentation about heaps is available in @c <kern/kalloc.h>.
 *
 * @const KHEAP_ID_NONE
 * This value denotes regular zones, not used by kalloc.
 *
 * @const KHEAP_ID_DEFAULT
 * Indicates zones part of the KHEAP_DEFAULT heap.
 *
 * @const KHEAP_ID_DATA_BUFFERS
 * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
 *
 * @const KHEAP_ID_KEXT
 * Indicates zones part of the KHEAP_KEXT heap.
 */
__enum_decl(zone_kheap_id_t, uint32_t, {
	KHEAP_ID_NONE,
	KHEAP_ID_DEFAULT,
	KHEAP_ID_DATA_BUFFERS,
	KHEAP_ID_KEXT,

#define KHEAP_ID_COUNT (KHEAP_ID_KEXT + 1)
});

/*!
 * @typedef zone_stats_t
 *
 * @abstract
 * The opaque type for per-cpu zone stats that are accumulated per zone
 * or per zone-view.
 */
typedef struct zone_stats *__zpercpu zone_stats_t;

/*!
 * @typedef zone_view_t
 *
 * @abstract
 * A view on a zone for accounting purposes.
 *
 * @discussion
 * A zone view uses the zone it references for the allocations backing store,
 * but does the allocation accounting at the view level.
 *
 * This accounting is surfaced by @b zprint(1) and similar tools,
 * which allows for a cheap but finer-grained understanding of allocations
 * without any fragmentation cost.
 *
 * Zone views are protected by the kernel lockdown and can't be initialized
 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
 */
typedef struct zone_view *zone_view_t;
struct zone_view {
	zone_t          zv_zone;
	zone_stats_t    zv_stats;
	const char     *zv_name;
	zone_view_t     zv_next;
};

/*!
 * @macro ZONE_VIEW_DECLARE
 *
 * @abstract
 * (optionally) declares a zone view (in a header).
 *
 * @param var           the name for the zone view.
 */
#define ZONE_VIEW_DECLARE(var) \
	extern struct zone_view var[1]

/*!
 * @macro ZONE_VIEW_DEFINE
 *
 * @abstract
 * Defines a given zone view and what it points to.
 *
 * @discussion
 * Zone views can either share a pre-existing zone,
 * or perform a lookup into a kalloc heap for the zone
 * backing the bucket of the proper size.
 *
 * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
 * as the last rank. If views are created on zones, those zones must have
 * been created before this stage.
 *
 * @param var           the name for the zone view.
 * @param name          a string describing the zone view.
 * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
 * @param size          the element size to be allocated from this view.
 */
#define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
	    .zv_name = name, \
	} }; \
	static __startup_data struct zone_view_startup_spec \
	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
	    &__startup_zone_view_spec_ ## var)

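/*
 * Example (hypothetical sketch): a view that accounts 64-byte allocations
 * made by a "foo" subsystem under their own name, while sharing the backing
 * zone of the default kalloc heap.  The view and string names are
 * illustrative only.
 *
 *	ZONE_VIEW_DEFINE(ZV_FOO_SMALL, "foo.small", KHEAP_ID_DEFAULT, 64);
 *
 *	void *elem = zalloc_flags(ZV_FOO_SMALL, Z_WAITOK | Z_ZERO);
 *	...
 *	zfree(ZV_FOO_SMALL, elem);
 */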

#pragma mark XNU only: zone creation (extended)

/*!
 * @enum zone_reserved_id_t
 *
 * @abstract
 * Well known pre-registered zones, allowing use of zone_id_require()
 *
 * @discussion
 * @c ZONE_ID__* aren't real zone IDs.
 *
 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
 * easy a value to produce (by malice or accident).
 *
 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
 * @c zone_create().
 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,

	ZONE_ID_PERMANENT,
	ZONE_ID_PERCPU_PERMANENT,

	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_TASK,
	ZONE_ID_PROC,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_PMAP,

	ZONE_ID__FIRST_DYNAMIC,
});

/*!
 * @const ZONE_ID_ANY
 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
 * Zone ID.
 */
#define ZONE_ID_ANY ((zone_id_t)-1)

/*!
 * @function zone_name
 *
 * @param zone          the specified zone
 * @returns             the name of the specified zone.
 */
const char     *zone_name(
	zone_t          zone);

/*!
 * @function zone_heap_name
 *
 * @param zone          the specified zone
 * @returns             the name of the heap this zone is part of, or "".
 */
const char     *zone_heap_name(
	zone_t          zone);

/*!
 * @function zone_submap
 *
 * @param zone          the specified zone
 * @returns             the zone (sub)map this zone allocates from.
 */
extern vm_map_t zone_submap(
	zone_t          zone);

/*!
 * @function zone_create_ext
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * This is an extended version of @c zone_create().
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 *
 * @param extra_setup   a block that can perform non-trivial initialization
 *                      on the zone before it is marked valid.
 *                      This block can call advanced setups like:
 *                      - zone_set_submap_idx()
 *                      - zone_set_exhaustible()
 *                      - zone_set_noexpand()
 *
 * @returns             the created zone; this call never fails.
 */
extern zone_t   zone_create_ext(
	const char             *name,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_id_t               desired_zid,
	void                  (^extra_setup)(zone_t));
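
/*
 * Example (hypothetical sketch): creating a zone and capping its footprint
 * from the setup block, before the zone is made visible to the rest of the
 * system.  "struct foo" and FOO_ZONE_MAX are illustrative names.
 *
 *	zone_t foo_zone = zone_create_ext("foo", sizeof(struct foo),
 *	    ZC_ZFREE_CLEARMEM, ZONE_ID_ANY, ^(zone_t z) {
 *		zone_set_exhaustible(z, FOO_ZONE_MAX);
 *	});
 */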

/*!
 * @macro ZONE_DECLARE
 *
 * @abstract
 * Declares a zone variable that is automatically initialized with the
 * specified parameters.
 *
 * @param var           the name of the variable to declare.
 * @param name          the name for the zone
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 */
#define ZONE_DECLARE(var, name, size, flags) \
	SECURITY_READ_ONLY_LATE(zone_t) var; \
	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## var = { &var, name, size, flags, \
	    ZONE_ID_ANY, NULL }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
	    &__startup_zone_spec_ ## var)
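
/*
 * Example (hypothetical sketch): a zone defined entirely at compile time;
 * it is created during the STARTUP_SUB_ZALLOC phase with no explicit init
 * call.  "foo_zone" and "struct foo" are illustrative names.
 *
 *	ZONE_DECLARE(foo_zone, "foo", sizeof(struct foo), ZC_ZFREE_CLEARMEM);
 */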

/*!
 * @macro ZONE_INIT
 *
 * @abstract
 * Initializes a given zone automatically during startup with the specified
 * parameters.
 *
 * @param var           the name of the variable to initialize.
 * @param name          the name for the zone
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 * @param extra_setup   a block that can perform non-trivial initialization
 *                      (@see @c zone_create_ext()).
 */
#define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
	__ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)

/*!
 * @function zone_id_require
 *
 * @abstract
 * Requires that a given pointer belong to the specified zone, by ID and size.
 *
 * @discussion
 * The function panics if the check fails as it indicates that the kernel
 * internals have been compromised.
 *
 * This is a variant of @c zone_require() which:
 * - isn't sensitive to @c zone_t::elem_size being compromised,
 * - is slightly faster as it saves one load and a multiplication.
 *
 * @param zone_id       the zone ID the address needs to belong to.
 * @param elem_size     the size of elements for this zone.
 * @param addr          the element address to check.
 */
extern void     zone_id_require(
	zone_id_t       zone_id,
	vm_size_t       elem_size,
	void           *addr);
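
/*
 * Example (hypothetical sketch): validating a port pointer against its
 * well-known reserved zone ID, without having to load a zone pointer first.
 * "port" is an illustrative variable of type struct ipc_port *.
 *
 *	zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
 */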

/*
 * Zone submap indices
 *
 * Z_SUBMAP_IDX_VA_RESTRICTED_MAP (LP64)
 *      used to restrict VM allocations lower in the kernel VA space,
 *      for pointer packing
 *
 * Z_SUBMAP_IDX_GENERAL_MAP
 *      used for unrestricted allocations
 *
 * Z_SUBMAP_IDX_BAG_OF_BYTES_MAP
 *      used to sequester bags of bytes from all other allocations and allow VA reuse
 *      within the map
 */
#if !defined(__LP64__)
#define Z_SUBMAP_IDX_GENERAL_MAP        0
#define Z_SUBMAP_IDX_BAG_OF_BYTES_MAP   1
#define Z_SUBMAP_IDX_COUNT              2
#else
#define Z_SUBMAP_IDX_VA_RESTRICTED_MAP  0
#define Z_SUBMAP_IDX_GENERAL_MAP        1
#define Z_SUBMAP_IDX_BAG_OF_BYTES_MAP   2
#define Z_SUBMAP_IDX_COUNT              3
#endif

/* Change zone sub-map, to be called from the zone_create_ext() setup hook */
extern void     zone_set_submap_idx(
	zone_t          zone,
	unsigned int    submap_idx);

/* Make the zone non-expandable, to be called from the zone_create_ext() setup hook */
extern void     zone_set_noexpand(
	zone_t          zone,
	vm_size_t       maxsize);

/* Make the zone exhaustible, to be called from the zone_create_ext() setup hook */
extern void     zone_set_exhaustible(
	zone_t          zone,
	vm_size_t       maxsize);

/* Initially fill the zone with the specified number of elements */
extern int      zfill(
	zone_t          zone,
	int             nelem);

/* Fill the zone with memory */
extern void     zcram(
	zone_t          zone,
	vm_offset_t     newmem,
	vm_size_t       size);

#pragma mark XNU only: misc & implementation details

/*
 * This macro sets "elem" to NULL on free.
 *
 * Note: any value passed to zfree() might live inside the element being
 *       freed, so a temporary copy of the pointer is taken and reset to
 *       NULL before the actual free happens.
 */
#define zfree(zone, elem) ({ \
	_Static_assert(sizeof(elem) == sizeof(void *), "elem isn't pointer sized"); \
	__auto_type __zfree_zone = (zone); \
	__auto_type __zfree_eptr = &(elem); \
	__auto_type __zfree_elem = *__zfree_eptr; \
	*__zfree_eptr = (__typeof__(__zfree_elem))NULL; \
	(zfree)(__zfree_zone, (void *)__zfree_elem); \
})
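
/*
 * Example (illustrative): with the macro above, the caller's pointer variable
 * is cleared as a side effect of freeing, which helps catch use-after-free.
 *
 *	zfree(widget_zone, w);
 *	assert(w == NULL);
 */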

struct zone_create_startup_spec {
	zone_t                 *z_var;
	const char             *z_name;
	vm_size_t               z_size;
	zone_create_flags_t     z_flags;
	zone_id_t               z_zid;
	void                  (^z_setup)(zone_t);
};

extern void     zone_create_startup(
	struct zone_create_startup_spec *spec);

#define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
	    &__startup_zone_spec_ ## ns)

#define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
	__ZONE_INIT1(ns, var, name, size, flags, zid, setup) \

struct zone_view_startup_spec {
	zone_view_t         zv_view;
	union {
		zone_kheap_id_t zv_heapid;
		zone_t          zv_zone;
	};
	vm_size_t           zv_size;
};

extern void zone_view_startup_init(
	struct zone_view_startup_spec *spec);


#if DEBUG || DEVELOPMENT
# if __LP64__
#  define ZPCPU_MANGLE_BIT      (1ul << 63)
# else /* !__LP64__ */
#  define ZPCPU_MANGLE_BIT      (1ul << 31)
# endif /* !__LP64__ */
#else /* !(DEBUG || DEVELOPMENT) */
# define ZPCPU_MANGLE_BIT       0ul
#endif /* !(DEBUG || DEVELOPMENT) */

#define __zpcpu_mangle(ptr)     (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT)
#define __zpcpu_demangle(ptr)   (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT)
#define __zpcpu_addr(e)         ((vm_address_t)(e))
#define __zpcpu_cast(ptr, e)    ((typeof(ptr))(e))
#define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)

extern unsigned zpercpu_count(void) __pure2;


/* These functions are used for leak detection in both zalloc.c and mbuf.c */
extern uintptr_t hash_mix(uintptr_t);
extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t);
extern uint32_t hashaddr(uintptr_t, uint32_t);

#if CONFIG_ZLEAKS
/* support for the kern.zleak.* sysctls */

extern kern_return_t zleak_activate(void);
extern vm_size_t zleak_max_zonemap_size;
extern vm_size_t zleak_global_tracking_threshold;
extern vm_size_t zleak_per_zone_tracking_threshold;

extern int get_zleak_state(void);

#endif /* CONFIG_ZLEAKS */
#if DEBUG || DEVELOPMENT

extern boolean_t run_zone_test(void);
extern void zone_gc_replenish_test(void);
extern void zone_alloc_replenish_test(void);

#endif /* DEBUG || DEVELOPMENT */

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_ZALLOC_H_ */

#endif /* KERNEL_PRIVATE */