/*
 * osfmk/kern/zalloc.h -- Apple xnu-7195.101.1
 * (retrieved from the git.saurik.com apple/xnu mirror)
 */
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: zalloc.h
60 * Author: Avadis Tevanian, Jr.
61 * Date: 1985
62 *
63 */
64
65 #ifdef KERNEL_PRIVATE
66
67 #ifndef _KERN_ZALLOC_H_
68 #define _KERN_ZALLOC_H_
69
70 #include <mach/machine/vm_types.h>
71 #include <mach_debug/zone_info.h>
72 #include <kern/kern_types.h>
73 #include <sys/cdefs.h>
74
75 #if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
76 #define __zalloc_deprecated(msg) __deprecated_msg(msg)
77 #else
78 #define __zalloc_deprecated(msg)
79 #endif
80
81 __BEGIN_DECLS
82
83 /*!
84 * @typedef zone_id_t
85 *
86 * @abstract
87 * The type for a zone ID.
88 */
89 typedef uint16_t zone_id_t;
90
91 /**
92 * @enum zone_create_flags_t
93 *
94 * @abstract
95 * Set of flags to pass to zone_create().
96 *
97 * @discussion
98 * Some kernel-wide policies affect all possible created zones.
99 * Explicit @c ZC_* win over such policies.
100 */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** Force the created zone to use VA sequestering */
	ZC_SEQUESTER            = 0x00000001,
	/** Force the created zone @b NOT to use VA sequestering */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,


	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** Do not track this zone when gzalloc is engaged */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed, not default unlike zinit() */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE
	/* Flags in the high 32 bits are reserved for XNU-internal use. */

	/** This zone will back a kalloc heap */
	ZC_KALLOC_HEAP          = 0x0800000000000000,

	/** This zone can be crammed with foreign pages */
	ZC_ALLOW_FOREIGN        = 0x1000000000000000,

	/** This zone contains bytes / data buffers only */
	ZC_DATA_BUFFERS         = 0x2000000000000000,

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,
#endif
});
158
159 /*!
160 * @union zone_or_view
161 *
162 * @abstract
163 * A type used for calls that admit both a zone or a zone view.
164 *
165 * @discussion
166 * @c zalloc() and @c zfree() and their variants can act on both
167 * zones and zone views.
168 */
union zone_or_view {
	struct zone_view *zov_view;
	struct zone      *zov_zone;
#ifdef __cplusplus
	/* converting constructors: let C++ callers pass a zone_view_t directly */
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	/* ... or a zone_t directly */
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
/*
 * transparent_union: C callers can pass a zone_t or a zone_view_t where a
 * zone_or_view_t parameter is expected, with no explicit construction.
 */
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif
184
185 /*!
186 * @function zone_create
187 *
188 * @abstract
189 * Creates a zone with the specified parameters.
190 *
191 * @discussion
192 * A Zone is a slab allocator that returns objects of a given size very quickly.
193 *
194 * @param name the name for the new zone.
195 * @param size the size of the elements returned by this zone.
196 * @param flags a set of @c zone_create_flags_t flags.
197 *
198 * @returns the created zone, this call never fails.
199 */
200 extern zone_t zone_create(
201 const char *name,
202 vm_size_t size,
203 zone_create_flags_t flags);
204
205 /*!
206 * @function zdestroy
207 *
208 * @abstract
209 * Destroys a zone previously made with zone_create.
210 *
211 * @discussion
212 * Zones must have been made destructible for @c zdestroy() to be allowed,
213 * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
214 *
215 * @param zone the zone to destroy.
216 */
217 extern void zdestroy(
218 zone_t zone);
219
/*!
 * @function zone_require
 *
 * @abstract
 * Requires that a given pointer belong to the specified zone.
 *
 * @discussion
 * The function panics if the check fails as it indicates that the kernel
 * internals have been compromised.
 *
 * Note that zone_require() can only work with:
 * - zones not allowing foreign memory
 * - zones in the general submap.
 *
 * @param zone          the zone the address needs to belong to.
 * @param addr          the element address to check.
 */
extern void zone_require(
	zone_t          zone,
	void           *addr);
240
/*!
 * @enum zalloc_flags_t
 *
 * @brief
 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
 *
 * @discussion
 * It is encouraged that any callsite passing flags uses exactly one of:
 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
 * if nothing else was specified.
 *
 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
 * then @c Z_WAITOK is ignored.
 *
 * @const Z_WAITOK
 * Means that it's OK for zalloc() to block to wait for memory,
 * when Z_WAITOK is passed, zalloc will never return NULL.
 *
 * @const Z_NOWAIT
 * Passing this flag means that zalloc is not allowed to ever block.
 *
 * @const Z_NOPAGEWAIT
 * Passing this flag means that zalloc is allowed to wait due to lock
 * contention, but will not wait for the VM to wait for pages when
 * under memory pressure.
 *
 * @const Z_ZERO
 * Passing this flag means that the returned memory has been zeroed out.
 *
 * @const Z_NOFAIL
 * Passing this flag means that the caller expects the allocation to always
 * succeed. This will result in a panic if this assumption isn't correct.
 *
 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
 * be used on exhaustible zones.
 *
#if XNU_KERNEL_PRIVATE
 *
 * @const Z_VM_TAG_MASK
 * Represents bits in which a vm_tag_t for the allocation can be passed.
 * (used by kalloc for the zone tagging debugging feature).
#endif
 */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,  /* zero: the implicit default */
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,

	Z_NOFAIL        = 0x8000,
#if XNU_KERNEL_PRIVATE
	/** used by kalloc to propagate vm tags for -zt */
	Z_VM_TAG_MASK   = 0xffff0000,

#define Z_VM_TAG_SHIFT  16
#define Z_VM_TAG(tag)   ((zalloc_flags_t)(tag) << Z_VM_TAG_SHIFT)
#endif
});
300
301 /*!
302 * @function zalloc
303 *
304 * @abstract
305 * Allocates an element from a specified zone.
306 *
307 * @discussion
308 * If the zone isn't exhaustible and is expandable, this call never fails.
309 *
310 * @param zone_or_view the zone or zone view to allocate from
311 *
312 * @returns NULL or the allocated element
313 */
314 extern void *zalloc(
315 zone_or_view_t zone_or_view);
316
317 /*!
318 * @function zalloc_noblock
319 *
320 * @abstract
321 * Allocates an element from a specified zone, but never blocks.
322 *
323 * @discussion
324 * This call is suitable for preemptible code, however allocation
325 * isn't allowed from interrupt context.
326 *
327 * @param zone_or_view the zone or zone view to allocate from
328 *
329 * @returns NULL or the allocated element
330 */
331 extern void *zalloc_noblock(
332 zone_or_view_t zone_or_view);
333
334 /*!
335 * @function zalloc_flags()
336 *
337 * @abstract
338 * Allocates an element from a specified zone, with flags.
339 *
340 * @param zone_or_view the zone or zone view to allocate from
341 * @param flags a collection of @c zalloc_flags_t.
342 *
343 * @returns NULL or the allocated element
344 */
345 extern void *zalloc_flags(
346 zone_or_view_t zone_or_view,
347 zalloc_flags_t flags);
348
349 /*!
350 * @function zfree
351 *
352 * @abstract
353 * Frees an element allocated with @c zalloc*.
354 *
355 * @discussion
356 * If the element being freed doesn't belong to the specified zone,
357 * then this call will panic.
358 *
359 * @param zone_or_view the zone or zone view to free the element to.
360 * @param elem the element to free
361 */
362 extern void zfree(
363 zone_or_view_t zone_or_view,
364 void *elem);
365
366 /* deprecated KPIS */
367
368 __zalloc_deprecated("use zone_create()")
369 extern zone_t zinit(
370 vm_size_t size, /* the size of an element */
371 vm_size_t maxmem, /* maximum memory to use */
372 vm_size_t alloc, /* allocation size */
373 const char *name); /* a name for the zone */
374
375 #ifdef XNU_KERNEL_PRIVATE
376 #pragma mark - XNU only interfaces
377 #include <kern/startup.h>
378 #include <kern/cpu_number.h>
379
380 #pragma GCC visibility push(hidden)
381
382 #pragma mark XNU only: zalloc (extended)
383
/*
 * Alignment masks (alignment minus one, NOT the alignment itself) suitable
 * for the @c align_mask argument of @c zalloc_permanent() and friends.
 */
#define ZALIGN_NONE     (sizeof(uint8_t)  - 1)
#define ZALIGN_16       (sizeof(uint16_t) - 1)
#define ZALIGN_32       (sizeof(uint32_t) - 1)
#define ZALIGN_PTR      (sizeof(void *)   - 1)
#define ZALIGN_64       (sizeof(uint64_t) - 1)
#define ZALIGN(t)       (_Alignof(t)      - 1)
390
391
392 /*!
393 * @function zalloc_permanent()
394 *
395 * @abstract
396 * Allocates a permanent element from the permanent zone
397 *
398 * @discussion
399 * Memory returned by this function is always 0-initialized.
400 * Note that the size of this allocation can not be determined
401 * by zone_element_size so it should not be used for copyio.
402 *
403 * @param size the element size (must be smaller than PAGE_SIZE)
404 * @param align_mask the required alignment for this allocation
405 *
406 * @returns the allocated element
407 */
408 extern void *zalloc_permanent(
409 vm_size_t size,
410 vm_offset_t align_mask);
411
412 /*!
413 * @function zalloc_permanent_type()
414 *
415 * @abstract
416 * Allocates a permanent element of a given type with its natural alignment.
417 *
418 * @discussion
419 * Memory returned by this function is always 0-initialized.
420 *
421 * @param type_t the element type
422 *
423 * @returns the allocated element
424 */
425 #define zalloc_permanent_type(type_t) \
426 ((type_t *)zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))
427
428 /*!
429 * @function zalloc_first_proc_made()
430 *
431 * @abstract
432 * Declare that the "early" allocation phase is done.
433 */
434 extern void
435 zalloc_first_proc_made(void);
436
437 #pragma mark XNU only: per-cpu allocations
438
439 /*!
440 * @macro __zpercpu
441 *
442 * @abstract
443 * Annotation that helps denoting a per-cpu pointer that requires usage of
444 * @c zpercpu_*() for access.
445 */
446 #define __zpercpu
447
448 /*!
449 * @macro zpercpu_get_cpu()
450 *
451 * @abstract
452 * Get a pointer to a specific CPU slot of a given per-cpu variable.
453 *
454 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
455 * @param cpu the specified CPU number as returned by @c cpu_number()
456 *
457 * @returns the per-CPU slot for @c ptr for the specified CPU.
458 */
459 #define zpercpu_get_cpu(ptr, cpu) \
460 __zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)cpu))
461
462 /*!
463 * @macro zpercpu_get()
464 *
465 * @abstract
466 * Get a pointer to the current CPU slot of a given per-cpu variable.
467 *
468 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
469 *
470 * @returns the per-CPU slot for @c ptr for the current CPU.
471 */
472 #define zpercpu_get(ptr) \
473 zpercpu_get_cpu(ptr, cpu_number())
474
475 /*!
476 * @macro zpercpu_foreach()
477 *
478 * @abstract
479 * Enumerate all per-CPU slots by address.
480 *
481 * @param it the name for the iterator
482 * @param ptr the per-cpu pointer (returned by @c zalloc_percpu*()).
483 */
484 #define zpercpu_foreach(it, ptr) \
485 for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
486 __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
487 it < __end_##it; it = __zpcpu_next(it))
488
489 /*!
490 * @macro zpercpu_foreach_cpu()
491 *
492 * @abstract
493 * Enumerate all per-CPU slots by CPU slot number.
494 *
495 * @param cpu the name for cpu number iterator.
496 */
497 #define zpercpu_foreach_cpu(cpu) \
498 for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)
499
500 /*!
501 * @function zalloc_percpu()
502 *
503 * @abstract
504 * Allocates an element from a per-cpu zone.
505 *
506 * @discussion
507 * The returned pointer cannot be used directly and must be manipulated
508 * through the @c zpercpu_get*() interfaces.
509 *
510 * @param zone_or_view the zone or zone view to allocate from
511 * @param flags a collection of @c zalloc_flags_t.
512 *
513 * @returns NULL or the allocated element
514 */
515 extern void *zalloc_percpu(
516 zone_or_view_t zone_or_view,
517 zalloc_flags_t flags);
518
519 /*!
520 * @function zfree_percpu()
521 *
522 * @abstract
523 * Frees an element previously allocated with @c zalloc_percpu().
524 *
525 * @param zone_or_view the zone or zone view to free the element to.
526 * @param addr the address to free
527 */
528 extern void zfree_percpu(
529 zone_or_view_t zone_or_view,
530 void *addr);
531
532 /*!
533 * @function zalloc_percpu_permanent()
534 *
535 * @abstract
536 * Allocates a permanent percpu-element from the permanent percpu zone.
537 *
538 * @discussion
539 * Memory returned by this function is always 0-initialized.
540 *
541 * @param size the element size (must be smaller than PAGE_SIZE)
542 * @param align_mask the required alignment for this allocation
543 *
544 * @returns the allocated element
545 */
546 extern void *zalloc_percpu_permanent(
547 vm_size_t size,
548 vm_offset_t align_mask);
549
550 /*!
551 * @function zalloc_percpu_permanent_type()
552 *
553 * @abstract
554 * Allocates a permanent percpu-element from the permanent percpu zone of a given
555 * type with its natural alignment.
556 *
557 * @discussion
558 * Memory returned by this function is always 0-initialized.
559 *
560 * @param type_t the element type
561 *
562 * @returns the allocated element
563 */
564 #define zalloc_percpu_permanent_type(type_t) \
565 ((type_t *)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))
566
567 #pragma mark XNU only: zone views
568
569 /*!
570 * @enum zone_kheap_id_t
571 *
572 * @brief
573 * Enumerate a particular kalloc heap.
574 *
575 * @discussion
576 * More documentation about heaps is available in @c <kern/kalloc.h>.
577 *
578 * @const KHEAP_ID_NONE
579 * This value denotes regular zones, not used by kalloc.
580 *
581 * @const KHEAP_ID_DEFAULT
582 * Indicates zones part of the KHEAP_DEFAULT heap.
583 *
584 * @const KHEAP_ID_DATA_BUFFERS
585 * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
586 *
587 * @const KHEAP_ID_KEXT
588 * Indicates zones part of the KHEAP_KEXT heap.
589 */
__enum_decl(zone_kheap_id_t, uint32_t, {
	KHEAP_ID_NONE,          /* regular zones, not used by kalloc */
	KHEAP_ID_DEFAULT,       /* zones part of the KHEAP_DEFAULT heap */
	KHEAP_ID_DATA_BUFFERS,  /* zones part of the KHEAP_DATA_BUFFERS heap */
	KHEAP_ID_KEXT,          /* zones part of the KHEAP_KEXT heap */

#define KHEAP_ID_COUNT (KHEAP_ID_KEXT + 1)
});
598
599 /*!
600 * @typedef zone_stats_t
601 *
602 * @abstract
603 * The opaque type for per-cpu zone stats that are accumulated per zone
604 * or per zone-view.
605 */
606 typedef struct zone_stats *__zpercpu zone_stats_t;
607
608 /*!
609 * @typedef zone_view_t
610 *
611 * @abstract
612 * A view on a zone for accounting purposes.
613 *
614 * @discussion
615 * A zone view uses the zone it references for the allocations backing store,
616 * but does the allocation accounting at the view level.
617 *
618 * These accounting are surfaced by @b zprint(1) and similar tools,
619 * which allow for cheap but finer grained understanding of allocations
620 * without any fragmentation cost.
621 *
622 * Zone views are protected by the kernel lockdown and can't be initialized
623 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
624 */
typedef struct zone_view *zone_view_t;
struct zone_view {
	zone_t          zv_zone;   /* zone backing this view's allocations */
	zone_stats_t    zv_stats;  /* per-cpu accounting for this view */
	const char     *zv_name;   /* name surfaced by zprint(1) and similar tools */
	zone_view_t     zv_next;   /* next zone view (list link) */
};
632
633 /*!
634 * @macro ZONE_VIEW_DECLARE
635 *
636 * @abstract
637 * (optionally) declares a zone view (in a header).
638 *
639 * @param var the name for the zone view.
640 */
641 #define ZONE_VIEW_DECLARE(var) \
642 extern struct zone_view var[1]
643
644 /*!
645 * @macro ZONE_VIEW_DEFINE
646 *
647 * @abstract
648 * Defines a given zone view and what it points to.
649 *
650 * @discussion
651 * Zone views can either share a pre-existing zone,
652 * or perform a lookup into a kalloc heap for the zone
653 * backing the bucket of the proper size.
654 *
655 * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
656 * as the last rank. If views on zones are created, these must have been
657 * created before this stage.
658 *
659 * @param var the name for the zone view.
660 * @param name a string describing the zone view.
661 * @param heap_or_zone a @c KHEAP_ID_* constant or a pointer to a zone.
662 * @param size the element size to be allocated from this view.
663 */
664 #define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
665 SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
666 .zv_name = name, \
667 } }; \
668 static __startup_data struct zone_view_startup_spec \
669 __startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
670 STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
671 &__startup_zone_view_spec_ ## var)
672
673
674 #pragma mark XNU only: zone creation (extended)
675
676 /*!
677 * @enum zone_reserved_id_t
678 *
679 * @abstract
680 * Well known pre-registered zones, allowing use of zone_id_require()
681 *
682 * @discussion
683 * @c ZONE_ID__* aren't real zone IDs.
684 *
685 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
686 * easy a value to produce (by malice or accident).
687 *
688 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
689 * @c zone_create().
690 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,             /* reserved: makes a zone ID of 0 invalid */

	ZONE_ID_PERMANENT,
	ZONE_ID_PERCPU_PERMANENT,

	/* pre-registered zones for well known kernel objects,
	 * enabling zone_id_require() on them */
	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_TASK,
	ZONE_ID_PROC,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_PMAP,
	ZONE_ID_VM_MAP,

	ZONE_ID__FIRST_DYNAMIC,    /* first ID usable by zone_create() */
});
708
709 /*!
710 * @const ZONE_ID_ANY
711 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
712 * Zone ID.
713 */
714 #define ZONE_ID_ANY ((zone_id_t)-1)
715
716 /**!
717 * @function zone_name
718 *
719 * @param zone the specified zone
720 * @returns the name of the specified zone.
721 */
722 const char *zone_name(
723 zone_t zone);
724
725 /**!
726 * @function zone_heap_name
727 *
728 * @param zone the specified zone
729 * @returns the name of the heap this zone is part of, or "".
730 */
731 const char *zone_heap_name(
732 zone_t zone);
733
/*!
 * @function zone_submap
 *
 * @param zone          the specified zone
 * @returns             the zone (sub)map this zone allocates from.
 */
__pure2
extern vm_map_t zone_submap(
	zone_t          zone);
743
744 /*!
745 * @function zone_create_ext
746 *
747 * @abstract
748 * Creates a zone with the specified parameters.
749 *
750 * @discussion
751 * This is an extended version of @c zone_create().
752 *
753 * @param name the name for the new zone.
754 * @param size the size of the elements returned by this zone.
755 * @param flags a set of @c zone_create_flags_t flags.
756 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
757 *
758 * @param extra_setup a block that can perform non trivial initialization
759 * on the zone before it is marked valid.
760 * This block can call advanced setups like:
761 * - zone_set_submap_idx()
762 * - zone_set_exhaustible()
763 * - zone_set_noexpand()
764 *
765 * @returns the created zone, this call never fails.
766 */
767 extern zone_t zone_create_ext(
768 const char *name,
769 vm_size_t size,
770 zone_create_flags_t flags,
771 zone_id_t desired_zid,
772 void (^extra_setup)(zone_t));
773
774 /*!
775 * @macro ZONE_DECLARE
776 *
777 * @abstract
778 * Declares a zone variable to automatically initialize with the specified
779 * parameters.
780 *
781 * @param var the name of the variable to declare.
782 * @param name the name for the zone
783 * @param size the size of the elements returned by this zone.
784 * @param flags a set of @c zone_create_flags_t flags.
785 */
786 #define ZONE_DECLARE(var, name, size, flags) \
787 SECURITY_READ_ONLY_LATE(zone_t) var; \
788 static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
789 static __startup_data struct zone_create_startup_spec \
790 __startup_zone_spec_ ## var = { &var, name, size, flags, \
791 ZONE_ID_ANY, NULL }; \
792 STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
793 &__startup_zone_spec_ ## var)
794
795 /*!
796 * @macro ZONE_INIT
797 *
798 * @abstract
799 * Initializes a given zone automatically during startup with the specified
800 * parameters.
801 *
802 * @param var the name of the variable to initialize.
803 * @param name the name for the zone
804 * @param size the size of the elements returned by this zone.
805 * @param flags a set of @c zone_create_flags_t flags.
806 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
807 * @param extra_setup a block that can perform non trivial initialization
808 * (@see @c zone_create_ext()).
809 */
810 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \
811 __ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup)
812
/*!
 * @function zone_id_require
 *
 * @abstract
 * Requires that a given pointer belong to the specified zone, by ID and size.
 *
 * @discussion
 * The function panics if the check fails as it indicates that the kernel
 * internals have been compromised.
 *
 * This is a variant of @c zone_require() which:
 * - isn't sensitive to @c zone_t::elem_size being compromised,
 * - is slightly faster as it saves one load and a multiplication.
 *
 * @warning: zones using foreign memory can't use this interface.
 *
 * @param zone_id       the zone ID the address needs to belong to.
 * @param elem_size     the size of elements for this zone.
 * @param addr          the element address to check.
 */
extern void zone_id_require(
	zone_id_t       zone_id,
	vm_size_t       elem_size,
	void           *addr);
837
/*!
 * @function zone_id_require_allow_foreign
 *
 * @abstract
 * Requires that a given pointer belong to the specified zone, by ID and size.
 *
 * @discussion
 * This is a version of @c zone_id_require() that works with zones allowing
 * foreign memory.
 *
 * @param zone_id       the zone ID the address needs to belong to.
 * @param elem_size     the size of elements for this zone.
 * @param addr          the element address to check.
 */
extern void zone_id_require_allow_foreign(
	zone_id_t       zone_id,
	vm_size_t       elem_size,
	void           *addr);
852
/*
 * Zone submap indices
 *
 * Z_SUBMAP_IDX_VA_RESTRICTED (LP64)
 * used to restrict VM allocations lower in the kernel VA space,
 * for pointer packing
 *
 * Z_SUBMAP_IDX_VA_RESERVE (ILP32)
 * used to keep a reserve of VA space for the urgent allocations
 * backing allocations of crucial VM types (fictitious pages, holes, ...)
 *
 * Z_SUBMAP_IDX_GENERAL
 * used for unrestricted allocations
 *
 * Z_SUBMAP_IDX_BAG_OF_BYTES
 * used to sequester bags of bytes from all other allocations and allow VA
 * reuse within the map
 */
#if defined(__LP64__)
#define Z_SUBMAP_IDX_VA_RESTRICTED  0
#else
#define Z_SUBMAP_IDX_VA_RESERVE     0
#endif
#define Z_SUBMAP_IDX_GENERAL        1
#define Z_SUBMAP_IDX_BAG_OF_BYTES   2
#define Z_SUBMAP_IDX_COUNT          3
879
/* Change zone sub-map; to be called from the zone_create_ext() setup hook */
extern void zone_set_submap_idx(
	zone_t          zone,
	unsigned int    submap_idx);

/* Mark zone as non-expandable; to be called from the zone_create_ext() setup hook */
extern void zone_set_noexpand(
	zone_t          zone,
	vm_size_t       max_elements);

/* Make zone exhaustible; to be called from the zone_create_ext() setup hook */
extern void zone_set_exhaustible(
	zone_t          zone,
	vm_size_t       max_elements);
894
895 /*!
896 * @function zone_fill_initially
897 *
898 * @brief
899 * Initially fill a non collectable zone to have the specified amount of
900 * elements.
901 *
902 * @discussion
903 * This function must be called on a non collectable permanent zone before it
904 * has been used yet.
905 *
906 * @param zone The zone to fill.
907 * @param nelems The number of elements to be able to hold.
908 */
909 extern void zone_fill_initially(
910 zone_t zone,
911 vm_size_t nelems);
912
913 #pragma mark XNU only: misc & implementation details
914
/*
 * This macro sets "elem" to NULL on free.
 *
 * Note: all values passed to zfree() might be in the element to be freed,
 *       temporaries must be taken, and the resetting to be done prior to free.
 *
 * It shadows the zfree() function declared above; the parenthesized
 * (zfree) call below bypasses the macro and reaches the real function.
 */
#define zfree(zone, elem) ({ \
	_Static_assert(sizeof(elem) == sizeof(void *), "elem isn't pointer sized"); \
	__auto_type __zfree_zone = (zone); \
	/* take the address so the caller's pointer can be cleared in place */ \
	__auto_type __zfree_eptr = &(elem); \
	__auto_type __zfree_elem = *__zfree_eptr; \
	/* clear the caller's pointer BEFORE freeing (see note above) */ \
	*__zfree_eptr = (__typeof__(__zfree_elem))NULL; \
	(zfree)(__zfree_zone, (void *)__zfree_elem); \
})
929
/*
 * Startup record consumed by zone_create_startup() during the
 * STARTUP_SUB_ZALLOC phase (see ZONE_DECLARE() / ZONE_INIT()).
 */
struct zone_create_startup_spec {
	zone_t         *z_var;    /* variable that receives the created zone */
	const char     *z_name;   /* name for the zone */
	vm_size_t       z_size;   /* element size */
	zone_create_flags_t z_flags; /* ZC_* creation flags */
	zone_id_t       z_zid;    /* desired zone ID, or ZONE_ID_ANY */
	void          (^z_setup)(zone_t); /* optional extra setup block, may be NULL */
};
938
939 extern void zone_create_startup(
940 struct zone_create_startup_spec *spec);
941
942 #define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
943 static __startup_data struct zone_create_startup_spec \
944 __startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
945 STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
946 &__startup_zone_spec_ ## ns)
947
948 #define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
949 __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
950
/*
 * Startup record consumed by zone_view_startup_init() during the
 * STARTUP_SUB_ZALLOC phase (see ZONE_VIEW_DEFINE()).
 */
struct zone_view_startup_spec {
	zone_view_t     zv_view;        /* the view to initialize */
	union {
		zone_kheap_id_t zv_heapid;  /* kalloc heap to look the zone up in, ... */
		zone_t          zv_zone;    /* ... or an explicit pre-existing zone */
	};
	vm_size_t       zv_size;        /* element size (for the heap lookup) */
};
959
960 extern void zone_view_startup_init(
961 struct zone_view_startup_spec *spec);
962
963
#if DEBUG || DEVELOPMENT
# if __LP64__
# define ZPCPU_MANGLE_BIT       (1ul << 63)
# else /* !__LP64__ */
# define ZPCPU_MANGLE_BIT       (1ul << 31)
# endif /* !__LP64__ */
#else /* !(DEBUG || DEVELOPMENT) */
# define ZPCPU_MANGLE_BIT       0ul
#endif /* !(DEBUG || DEVELOPMENT) */

/*
 * On DEBUG/DEVELOPMENT builds per-cpu handles are "mangled" by clearing the
 * top address bit, and the zpercpu_get*() accessors restore it via
 * __zpcpu_demangle().  On release builds ZPCPU_MANGLE_BIT is 0 and both
 * operations are the identity.
 * NOTE(review): this presumes kernel VAs have the top bit set on these
 * configurations, so a raw dereference of a mangled handle faults -- confirm
 * against the VM layout.
 */
#define __zpcpu_mangle(ptr)     (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT)
#define __zpcpu_demangle(ptr)   (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT)
#define __zpcpu_addr(e)         ((vm_address_t)(e))  /* pointer -> address */
#define __zpcpu_cast(ptr, e)    ((typeof(ptr))(e))   /* address -> pointer of ptr's type */
#define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)
979
980 /**
981 * @macro __zpcpu_mangle_for_boot()
982 *
983 * @discussion
984 * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
985 * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
986 * storage marked @c __startup_data and replace it with the proper allocation
987 * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
988 *
989 * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
990 * provides the proper mangling of the storage into a "fake" percpu pointer so
991 * that accesses through @c zpercpu_get() functions properly.
992 *
993 * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
994 */
995 #define __zpcpu_mangle_for_boot(ptr) ({ \
996 assert(startup_phase < STARTUP_SUB_ZALLOC); \
997 __zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
998 })
999
1000 extern unsigned zpercpu_count(void) __pure2;
1001
1002
1003 /* These functions used for leak detection both in zalloc.c and mbuf.c */
1004 extern uintptr_t hash_mix(uintptr_t);
1005 extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t);
1006 extern uint32_t hashaddr(uintptr_t, uint32_t);
1007
1008 #if CONFIG_ZLEAKS
1009 /* support for the kern.zleak.* sysctls */
1010
1011 extern kern_return_t zleak_activate(void);
1012 extern vm_size_t zleak_max_zonemap_size;
1013 extern vm_size_t zleak_global_tracking_threshold;
1014 extern vm_size_t zleak_per_zone_tracking_threshold;
1015
1016 extern int get_zleak_state(void);
1017
1018 #endif /* CONFIG_ZLEAKS */
1019 #if DEBUG || DEVELOPMENT
1020
1021 extern boolean_t run_zone_test(void);
1022 extern void zone_gc_replenish_test(void);
1023 extern void zone_alloc_replenish_test(void);
1024
1025 #endif /* DEBUG || DEVELOPMENT */
1026
1027 #pragma GCC visibility pop
1028 #endif /* XNU_KERNEL_PRIVATE */
1029
1030 __END_DECLS
1031
1032 #endif /* _KERN_ZALLOC_H_ */
1033
1034 #endif /* KERNEL_PRIVATE */