/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 *
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 *
 * Author: Avadis Tevanian, Jr.
 *
 * General kernel memory allocator.  This allocator is designed
 * to be used by the kernel to manage dynamic memory quickly.
 */
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc_internal.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>
#include <kern/backtrace.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <sys/kdebug.h>

#include <san/kasan.h>
#include <libkern/section_keywords.h>
/* #define KALLOC_DEBUG 1 */

#define KALLOC_MAP_SIZE_MIN  (16 * 1024 * 1024)
#define KALLOC_MAP_SIZE_MAX  (128 * 1024 * 1024)
static SECURITY_READ_ONLY_LATE(vm_offset_t) kalloc_map_min;
static SECURITY_READ_ONLY_LATE(vm_offset_t) kalloc_map_max;
static SECURITY_READ_ONLY_LATE(vm_size_t) kalloc_max;
SECURITY_READ_ONLY_LATE(vm_size_t) kalloc_max_prerounded;
/* size of kallocs that can come from kernel map */
SECURITY_READ_ONLY_LATE(vm_size_t) kalloc_kernmap_size;
SECURITY_READ_ONLY_LATE(vm_map_t) kalloc_map;
#if DEBUG || DEVELOPMENT
static TUNABLE(bool, kheap_temp_debug, "kheap_temp_debug", false);

#define KHT_BT_COUNT 14

struct kheap_temp_header {
    queue_chain_t kht_hdr_link;
    uintptr_t     kht_hdr_pcs[KHT_BT_COUNT];
};
#endif /* DEBUG || DEVELOPMENT */
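
/*
 * Note on the debug header above: when the kheap_temp_debug boot-arg is set,
 * kalloc_ext()/kalloc_large() grow every KHEAP_TEMP request by
 * sizeof(struct kheap_temp_header), record up to KHT_BT_COUNT backtrace
 * frames in kht_hdr_pcs, and link the header onto the owning thread's
 * t_temp_alloc_list, which is what kheap_temp_leak_panic() walks to report
 * leaked allocations with their allocation site.
 */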
/* how many times we couldn't allocate out of kalloc_map and fell back to kernel_map */
unsigned long kalloc_fallback_count;

uint_t     kalloc_large_inuse;
vm_size_t  kalloc_large_total;
vm_size_t  kalloc_large_max;
vm_size_t  kalloc_largest_allocated = 0;
uint64_t   kalloc_large_sum;
LCK_GRP_DECLARE(kalloc_lck_grp, "kalloc.large");
LCK_SPIN_DECLARE(kalloc_lock, &kalloc_lck_grp);

#define kalloc_spin_lock()   lck_spin_lock(&kalloc_lock)
#define kalloc_unlock()      lck_spin_unlock(&kalloc_lock)

#pragma mark initialization
/*
 * All allocations of size less than kalloc_max are rounded to the next nearest
 * sized zone.  This allocator is built on top of the zone allocator.  A zone
 * is created for each potential size that we are willing to get in small
 * blocks.
 *
 * We assume that kalloc_max is not greater than 64K.
 *
 * Note that kalloc_max is somewhat confusingly named.  It represents the first
 * power of two for which no zone exists.  kalloc_max_prerounded is the
 * smallest allocation size, before rounding, for which no zone exists.
 *
 * Also, if the allocation size is more than kalloc_kernmap_size then allocate
 * from the kernel map rather than kalloc_map.
 */
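
/*
 * Illustrative numbers, not part of the original comment: if the largest
 * configured zone is 16384 bytes, kalloc_max is the next power of two,
 * 32768, and kalloc_max_prerounded is 16385, the smallest request that can
 * no longer be satisfied by any zone (see kalloc_init() below).
 */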
#define KiB(x) (1024 * (x))

/*
 * The k_zone_cfg table defines the configuration of zones on various platforms.
 * The currently defined list of zones and their per-CPU caching behavior are as
 * follows:
 *
 *   N: zone present, no cpu-caching
 *   Y: zone present, with cpu-caching
 *
 * Size       macOS(64-bit)       embedded(32-bit)    embedded(64-bit)
 * --------   ----------------    ----------------    ----------------
 */
struct kalloc_zone_cfg {
    bool        kzc_caching;
    uint32_t    kzc_size;
    const char *kzc_name;
};

static SECURITY_READ_ONLY_LATE(struct kalloc_zone_cfg) k_zone_cfg[] = {
#define KZC_ENTRY(SIZE, caching) { \
    .kzc_caching = (caching), \
    .kzc_size    = (SIZE), \
    .kzc_name    = "kalloc." #SIZE \
}
#if !defined(XNU_TARGET_OS_OSX)

#if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4
    /* Zone config for embedded 64-bit platforms */
    KZC_ENTRY(128, true),
    KZC_ENTRY(160, true),
    KZC_ENTRY(192, true),
    KZC_ENTRY(224, true),
    KZC_ENTRY(256, true),
    KZC_ENTRY(288, true),
    KZC_ENTRY(368, true),
    KZC_ENTRY(400, true),
    KZC_ENTRY(512, true),
    KZC_ENTRY(576, false),
    KZC_ENTRY(768, false),
    KZC_ENTRY(1024, true),
    KZC_ENTRY(1152, false),
    KZC_ENTRY(1280, false),
    KZC_ENTRY(1664, false),
    KZC_ENTRY(2048, false),
    KZC_ENTRY(4096, false),
    KZC_ENTRY(6144, false),
    KZC_ENTRY(8192, false),
    KZC_ENTRY(16384, false),
    KZC_ENTRY(32768, false),
#elif KALLOC_MINSIZE == 8 && KALLOC_LOG2_MINALIGN == 3
    /* Zone config for embedded 32-bit platforms */
    KZC_ENTRY(112, true),
    KZC_ENTRY(128, true),
    KZC_ENTRY(192, true),
    KZC_ENTRY(256, true),
    KZC_ENTRY(288, true),
    KZC_ENTRY(384, true),
    KZC_ENTRY(440, true),
    KZC_ENTRY(512, true),
    KZC_ENTRY(576, false),
    KZC_ENTRY(768, false),
    KZC_ENTRY(1024, true),
    KZC_ENTRY(1152, false),
    KZC_ENTRY(1280, false),
    KZC_ENTRY(1536, false),
    KZC_ENTRY(2048, false),
    KZC_ENTRY(2128, false),
    KZC_ENTRY(3072, false),
    KZC_ENTRY(4096, false),
    KZC_ENTRY(6144, false),
    KZC_ENTRY(8192, false),
    /*
     * To limit internal fragmentation, only add the following zones if the
     * page size is greater than 4K.
     * Note that we use ARM_PGBYTES here (instead of one of the VM macros)
     * since it's guaranteed to be a compile time constant.
     */
#if ARM_PGBYTES > 4096
    KZC_ENTRY(16384, false),
    KZC_ENTRY(32768, false),
#endif /* ARM_PGBYTES > 4096 */

#else
#error missing or invalid zone size parameters for kalloc
#endif
#else /* !defined(XNU_TARGET_OS_OSX) */

    /* Zone config for macOS 64-bit platforms */
    KZC_ENTRY(128, true),
    KZC_ENTRY(160, true),
    KZC_ENTRY(192, true),
    KZC_ENTRY(224, true),
    KZC_ENTRY(256, true),
    KZC_ENTRY(288, true),
    KZC_ENTRY(368, true),
    KZC_ENTRY(400, true),
    KZC_ENTRY(512, true),
    KZC_ENTRY(576, true),
    KZC_ENTRY(768, true),
    KZC_ENTRY(1024, true),
    KZC_ENTRY(1152, false),
    KZC_ENTRY(1280, false),
    KZC_ENTRY(1664, false),
    KZC_ENTRY(2048, true),
    KZC_ENTRY(4096, true),
    KZC_ENTRY(6144, false),
    KZC_ENTRY(8192, true),
    KZC_ENTRY(12288, false),
    KZC_ENTRY(16384, false)

#endif /* !defined(XNU_TARGET_OS_OSX) */
#define MAX_K_ZONE(kzc) (uint32_t)(sizeof(kzc) / sizeof(kzc[0]))

/*
 * Many kalloc() allocations are for small structures containing a few
 * pointers and longs - the dlut[] direct lookup table, indexed by
 * size normalized to the minimum alignment, finds the right zone index
 * for them in one dereference.
 */
#define INDEX_ZDLUT(size)  (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN)
#define MAX_SIZE_ZDLUT     ((KALLOC_DLUT_SIZE - 1) * KALLOC_MINALIGN)
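
/*
 * Illustrative arithmetic, not from the original source, assuming
 * KALLOC_MINALIGN == 16: a 72-byte request yields
 * INDEX_ZDLUT(72) == (72 + 15) / 16 == 5, and dlut[5] holds the index of
 * the smallest configured zone whose element size is at least 80 bytes,
 * which also covers every request in the (64, 80] byte range.
 */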
static SECURITY_READ_ONLY_LATE(zone_t) k_zone_default[MAX_K_ZONE(k_zone_cfg)];
static SECURITY_READ_ONLY_LATE(zone_t) k_zone_data_buffers[MAX_K_ZONE(k_zone_cfg)];
static SECURITY_READ_ONLY_LATE(zone_t) k_zone_kext[MAX_K_ZONE(k_zone_cfg)];
static_assert(VM_MAX_TAG_ZONES >=
    MAX_K_ZONE(k_zone_cfg) + MAX_K_ZONE(k_zone_cfg) + MAX_K_ZONE(k_zone_cfg));

static_assert(VM_MAX_TAG_ZONES >= MAX_K_ZONE(k_zone_cfg));
const char * const kalloc_heap_names[] = {
    [KHEAP_ID_NONE]         = "",
    [KHEAP_ID_DEFAULT]      = "default.",
    [KHEAP_ID_DATA_BUFFERS] = "data.",
    [KHEAP_ID_KEXT]         = "kext.",
};
/*
 * Default kalloc heap configuration
 */
static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_default = {
    .cfg        = k_zone_cfg,
    .heap_id    = KHEAP_ID_DEFAULT,
    .k_zone     = k_zone_default,
    .max_k_zone = MAX_K_ZONE(k_zone_cfg)
};
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DEFAULT[1] = {
    {
        .kh_zones   = &kalloc_zones_default,
        .kh_name    = "default.",
        .kh_heap_id = KHEAP_ID_DEFAULT,
    }
};

KALLOC_HEAP_DEFINE(KHEAP_TEMP, "temp allocations", KHEAP_ID_DEFAULT);
/*
 * Bag of bytes heap configuration
 */
static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_data_buffers = {
    .cfg        = k_zone_cfg,
    .heap_id    = KHEAP_ID_DATA_BUFFERS,
    .k_zone     = k_zone_data_buffers,
    .max_k_zone = MAX_K_ZONE(k_zone_cfg)
};
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DATA_BUFFERS[1] = {
    {
        .kh_zones   = &kalloc_zones_data_buffers,
        .kh_name    = "data.",
        .kh_heap_id = KHEAP_ID_DATA_BUFFERS,
    }
};
/*
 * Kext heap configuration
 */
static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_kext = {
    .cfg        = k_zone_cfg,
    .heap_id    = KHEAP_ID_KEXT,
    .k_zone     = k_zone_kext,
    .max_k_zone = MAX_K_ZONE(k_zone_cfg)
};
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_KEXT[1] = {
    {
        .kh_zones   = &kalloc_zones_kext,
        .kh_name    = "kext.",
        .kh_heap_id = KHEAP_ID_KEXT,
    }
};

KALLOC_HEAP_DEFINE(KERN_OS_MALLOC, "kern_os_malloc", KHEAP_ID_KEXT);
/*
 * Initialize kalloc heap: Create zones, generate direct lookup table and
 * do a quick test on lookups
 */
static void
kalloc_zones_init(struct kheap_zones *zones)
{
    struct kalloc_zone_cfg *cfg = zones->cfg;
    zone_t *k_zone = zones->k_zone;
    vm_size_t size;
    /*
     * Allocate a zone for each size we are going to handle.
     */
    for (uint32_t i = 0; i < zones->max_k_zone &&
        (size = cfg[i].kzc_size) < kalloc_max; i++) {
        zone_create_flags_t flags = ZC_KASAN_NOREDZONE |
            ZC_KASAN_NOQUARANTINE | ZC_KALLOC_HEAP;
        if (cfg[i].kzc_caching) {
            flags |= ZC_CACHING;
        }

        k_zone[i] = zone_create_ext(cfg[i].kzc_name, size, flags,
            ZONE_ID_ANY, ^(zone_t z){
            z->kalloc_heap = zones->heap_id;
        });

        /*
         * Set the updated elem size back to the config
         */
        cfg[i].kzc_size = k_zone[i]->z_elem_size;
    }

    /*
     * Count all the "raw" views for zones in the heap.
     */
    zone_view_count += zones->max_k_zone;
    /*
     * Build the Direct LookUp Table for small allocations.
     * As k_zone_cfg is shared between the heaps the
     * Direct LookUp Table is also shared and doesn't need to
     * be rebuilt per heap.
     */
    int zindex = 0;
    size = 0;

    for (int i = 0; i <= KALLOC_DLUT_SIZE; i++, size += KALLOC_MINALIGN) {
        while ((vm_size_t)(cfg[zindex].kzc_size) < size) {
            zindex++;
        }

        if (i == KALLOC_DLUT_SIZE) {
            zones->k_zindex_start = zindex;
            break;
        }
        zones->dlut[i] = zindex;
    }

    printf("kalloc_init: k_zindex_start %d\n", zones->k_zindex_start);
    /*
     * Do a quick synthesis to see how well/badly we can
     * find-a-zone for a given size.
     * Useful when debugging/tweaking the array of zone sizes.
     * Cache misses probably more critical than compare-branches!
     */
    for (uint32_t i = 0; i < zones->max_k_zone; i++) {
        vm_size_t testsize = (vm_size_t)(cfg[i].kzc_size - 1);
        int compare = 0;

        if (testsize < MAX_SIZE_ZDLUT) {
            compare += 1;   /* 'if' (T) */

            long dindex = INDEX_ZDLUT(testsize);
            zindex = (int)zones->dlut[dindex];
        } else if (testsize < kalloc_max_prerounded) {
            compare += 2;   /* 'if' (F), 'if' (T) */

            zindex = zones->k_zindex_start;
            while ((vm_size_t)(cfg[zindex].kzc_size) < testsize) {
                zindex++;
                compare++;  /* 'while' (T) */
            }
            compare++;      /* 'while' (F) */
        } else {
            break;          /* not zone-backed */
        }

        zone_t z = k_zone[zindex];
        printf("kalloc_init: req size %4lu: %8s.%16s took %d compare%s\n",
            (unsigned long)testsize, kalloc_heap_names[zones->heap_id],
            z->z_name, compare, compare == 1 ? "" : "s");
    }
}
/*
 * Initialize the memory allocator.  This should be called only
 * once on a system wide basis (i.e. first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */
static void
kalloc_init(void)
{
    kern_return_t retval;
    vm_offset_t min;
    vm_size_t kalloc_map_size;
    vm_map_kernel_flags_t vmk_flags;
    /*
     * Scale the kalloc_map_size to physical memory size: stay below
     * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel).
     */
    kalloc_map_size = (vm_size_t)(sane_size >> 5);
#if !defined(__LP64__)
    if (kalloc_map_size > KALLOC_MAP_SIZE_MAX) {
        kalloc_map_size = KALLOC_MAP_SIZE_MAX;
    }
#endif /* !__LP64__ */
    if (kalloc_map_size < KALLOC_MAP_SIZE_MIN) {
        kalloc_map_size = KALLOC_MAP_SIZE_MIN;
    }
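
    /*
     * Illustrative numbers, not from the original source: with 8 GB of
     * physical memory, sane_size >> 5 is 256 MB; a 32-bit kernel clamps
     * that to KALLOC_MAP_SIZE_MAX (128 MB), and any configuration with
     * less than 512 MB of memory is raised to KALLOC_MAP_SIZE_MIN (16 MB).
     */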
    vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
    vmk_flags.vmkf_permanent = TRUE;

    retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
        FALSE, VM_FLAGS_ANYWHERE, vmk_flags,
        VM_KERN_MEMORY_KALLOC, &kalloc_map);

    if (retval != KERN_SUCCESS) {
        panic("kalloc_init: kmem_suballoc failed");
    }

    kalloc_map_min = min;
    kalloc_map_max = min + kalloc_map_size - 1;
    struct kheap_zones *khz_default = &kalloc_zones_default;
    kalloc_max = (khz_default->cfg[khz_default->max_k_zone - 1].kzc_size << 1);
    if (kalloc_max < KiB(16)) {
        kalloc_max = KiB(16);
    }
    assert(kalloc_max <= KiB(64)); /* assumption made in size arrays */

    kalloc_max_prerounded = kalloc_max / 2 + 1;
    /* allocations larger than 16 times kalloc_max go directly to kernel map */
    kalloc_kernmap_size = (kalloc_max * 16) + 1;
    kalloc_largest_allocated = kalloc_kernmap_size;
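
    /*
     * Illustrative numbers, not from the original source: with the macOS
     * config above (largest zone 16384 bytes), kalloc_max is 32768,
     * kalloc_max_prerounded is 16385, and kalloc_kernmap_size is 524289,
     * so allocations larger than 512 KiB go to kernel_map rather than
     * kalloc_map.
     */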
    /* Initialize kalloc default heap */
    kalloc_zones_init(&kalloc_zones_default);

    /* Initialize kalloc data buffers heap */
    if (ZSECURITY_OPTIONS_SUBMAP_USER_DATA & zsecurity_options) {
        kalloc_zones_init(&kalloc_zones_data_buffers);
    } else {
        *KHEAP_DATA_BUFFERS = *KHEAP_DEFAULT;
    }

    /* Initialize kalloc kext heap */
    if (ZSECURITY_OPTIONS_SEQUESTER_KEXT_KALLOC & zsecurity_options) {
        kalloc_zones_init(&kalloc_zones_kext);
    } else {
        *KHEAP_KEXT = *KHEAP_DEFAULT;
    }
}
STARTUP(ZALLOC, STARTUP_RANK_THIRD, kalloc_init);
#pragma mark accessors

static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)
{
    thread_t thr = current_thread();
    ledger_debit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes);
}

static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)
{
    thread_t thr = current_thread();
    ledger_credit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes);
}
static inline vm_map_t
kalloc_map_for_addr(vm_address_t addr)
{
    if (addr >= kalloc_map_min && addr < kalloc_map_max) {
        return kalloc_map;
    }
    return kernel_map;
}

static inline vm_map_t
kalloc_map_for_size(vm_size_t size)
{
    if (size < kalloc_kernmap_size) {
        return kalloc_map;
    }
    return kernel_map;
}
static zone_t
kalloc_heap_zone_for_size(kalloc_heap_t kheap, vm_size_t size)
{
    struct kheap_zones *khz = kheap->kh_zones;

    if (size < MAX_SIZE_ZDLUT) {
        uint32_t zindex = khz->dlut[INDEX_ZDLUT(size)];
        return khz->k_zone[zindex];
    }

    if (size < kalloc_max_prerounded) {
        uint32_t zindex = khz->k_zindex_start;
        while (khz->cfg[zindex].kzc_size < size) {
            zindex++;
        }
        assert(zindex < khz->max_k_zone);
        return khz->k_zone[zindex];
    }

    return ZONE_NULL;
}
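
/*
 * Summary of the lookup above: sizes below MAX_SIZE_ZDLUT resolve in a
 * single dlut[] dereference; sizes up to kalloc_max_prerounded fall back
 * to a short linear scan of the shared config starting at k_zindex_start;
 * anything larger is not zone-backed and returns ZONE_NULL, which
 * kalloc_ext() turns into a kalloc_large() call.
 */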
static vm_size_t
vm_map_lookup_kalloc_entry_locked(vm_map_t map, void *addr)
{
    vm_map_entry_t vm_entry = NULL;

    if (!vm_map_lookup_entry(map, (vm_map_offset_t)addr, &vm_entry)) {
        panic("address %p not allocated via kalloc, map %p",
            addr, map);
    }
    if (vm_entry->vme_start != (vm_map_offset_t)addr) {
        panic("address %p inside vm entry %p [%p:%p), map %p",
            addr, vm_entry, (void *)vm_entry->vme_start,
            (void *)vm_entry->vme_end, map);
    }
    if (!vm_entry->vme_atomic) {
        panic("address %p not managed by kalloc (entry %p, map %p)",
            addr, vm_entry, map);
    }
    return vm_entry->vme_end - vm_entry->vme_start;
}
#if KASAN_KALLOC
/*
 * KASAN kalloc stashes the original user-requested size away in the poisoned
 * area. Return that directly.
 */
static vm_size_t
kalloc_size(void *addr)
{
    (void)vm_map_lookup_kalloc_entry_locked; /* silence warning */
    return kasan_user_size((vm_offset_t)addr);
}
#else
static vm_size_t
kalloc_size(void *addr)
{
    vm_map_t map;
    vm_size_t size;

    size = zone_element_size(addr, NULL);
    if (size) {
        return size;
    }

    map = kalloc_map_for_addr((vm_offset_t)addr);
    vm_map_lock_read(map);
    size = vm_map_lookup_kalloc_entry_locked(map, addr);
    vm_map_unlock_read(map);
    return size;
}
#endif /* !KASAN_KALLOC */
vm_size_t
kalloc_bucket_size(vm_size_t size)
{
    zone_t z = kalloc_heap_zone_for_size(KHEAP_DEFAULT, size);
    vm_map_t map = kalloc_map_for_size(size);

    if (z) {
        return zone_elem_size(z);
    }
    return vm_map_round_page(size, VM_MAP_PAGE_MASK(map));
}
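
/*
 * Illustrative example, not from the original source: with the macOS
 * config above, kalloc_bucket_size(2000) reports the 2048-byte zone's
 * element size, while a 100 KB request is not zone-backed and reports
 * its page-rounded size in the map it would come from.
 */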
static void
kheap_temp_leak_panic(thread_t self)
{
#if DEBUG || DEVELOPMENT
    if (__improbable(kheap_temp_debug)) {
        struct kheap_temp_header *hdr = qe_dequeue_head(&self->t_temp_alloc_list,
            struct kheap_temp_header, kht_hdr_link);
        panic_plain("KHEAP_TEMP leak on thread %p (%d), allocated at:\n"
            "  %#016lx\n" "  %#016lx\n" "  %#016lx\n" "  %#016lx\n"
            "  %#016lx\n" "  %#016lx\n" "  %#016lx\n" "  %#016lx\n"
            "  %#016lx\n" "  %#016lx\n" "  %#016lx\n" "  %#016lx\n"
            "  %#016lx\n" "  %#016lx\n",
            self, self->t_temp_alloc_count,
            hdr->kht_hdr_pcs[0], hdr->kht_hdr_pcs[1],
            hdr->kht_hdr_pcs[2], hdr->kht_hdr_pcs[3],
            hdr->kht_hdr_pcs[4], hdr->kht_hdr_pcs[5],
            hdr->kht_hdr_pcs[6], hdr->kht_hdr_pcs[7],
            hdr->kht_hdr_pcs[8], hdr->kht_hdr_pcs[9],
            hdr->kht_hdr_pcs[10], hdr->kht_hdr_pcs[11],
            hdr->kht_hdr_pcs[12], hdr->kht_hdr_pcs[13]);
    }
    panic("KHEAP_TEMP leak on thread %p (%d) "
        "(boot with kheap_temp_debug=1 to debug)",
        self, self->t_temp_alloc_count);
#else /* !DEBUG && !DEVELOPMENT */
    panic("KHEAP_TEMP leak on thread %p (%d)",
        self, self->t_temp_alloc_count);
#endif /* !DEBUG && !DEVELOPMENT */
}

static void
kheap_temp_overuse_panic(thread_t self)
{
    panic("too many KHEAP_TEMP allocations in flight: %d",
        self->t_temp_alloc_count);
}
__attribute__((noinline))
static struct kalloc_result
kalloc_large(
    kalloc_heap_t         kheap,
    vm_size_t             req_size,
    vm_size_t             size,
    zalloc_flags_t        flags,
    vm_allocation_site_t  *site)
{
    int kma_flags = KMA_ATOMIC;
    vm_tag_t tag;
    vm_map_t alloc_map;
    vm_offset_t addr;

    if (flags & Z_NOFAIL) {
        panic("trying to kalloc(Z_NOFAIL) with a large size (%zd)",
            (size_t)size);
    }
    /* kmem_alloc could block so we return if noblock */
    if (flags & Z_NOWAIT) {
        return (struct kalloc_result){ };
    }

#if !defined(__x86_64__)
    /*
     * (73465472) on Intel we didn't use to pass this flag,
     * which in turn allowed kalloc_large() memory to be shared
     * with user directly.
     *
     * We're bound by this unfortunate ABI.
     */
    kma_flags |= KMA_KOBJECT;
#endif
    if (flags & Z_NOPAGEWAIT) {
        kma_flags |= KMA_NOPAGEWAIT;
    }
    if (flags & Z_ZERO) {
        kma_flags |= KMA_ZERO;
    }

#if KASAN_KALLOC
    /* large allocation - use guard pages instead of small redzones */
    size = round_page(req_size + 2 * PAGE_SIZE);
    assert(size >= MAX_SIZE_ZDLUT && size >= kalloc_max_prerounded);
#else
    size = round_page(size);
#endif

    alloc_map = kalloc_map_for_size(size);

    tag = zalloc_flags_get_tag(flags);
    if (tag == VM_KERN_MEMORY_NONE) {
        if (site) {
            tag = vm_tag_alloc(site);
        } else {
            tag = VM_KERN_MEMORY_KALLOC;
        }
    }

    if (kmem_alloc_flags(alloc_map, &addr, size, tag, kma_flags) != KERN_SUCCESS) {
        if (alloc_map != kernel_map) {
            if (kalloc_fallback_count++ == 0) {
                printf("%s: falling back to kernel_map\n", __func__);
            }
            if (kmem_alloc_flags(kernel_map, &addr, size, tag, kma_flags) != KERN_SUCCESS) {
                addr = 0;
            }
        } else {
            addr = 0;
        }
    }

    if (addr != 0) {
        kalloc_spin_lock();
        /*
         * Thread-safe version of the workaround for 4740071
         */
        if (size > kalloc_largest_allocated) {
            kalloc_largest_allocated = size;
        }

        kalloc_large_inuse++;
        assert(kalloc_large_total + size >= kalloc_large_total); /* no wrap around */
        kalloc_large_total += size;
        kalloc_large_sum += size;

        if (kalloc_large_total > kalloc_large_max) {
            kalloc_large_max = kalloc_large_total;
        }

        kalloc_unlock();

        KALLOC_ZINFO_SALLOC(size);
    }

#if KASAN_KALLOC
    /* fixup the return address to skip the redzone */
    addr = kasan_alloc(addr, size, req_size, PAGE_SIZE);
    /*
     * Initialize buffer with unique pattern only if memory
     * wasn't expected to be zeroed.
     */
    if (!(flags & Z_ZERO)) {
        kasan_leak_init(addr, req_size);
    }
#endif

    if (addr && kheap == KHEAP_TEMP) {
        thread_t self = current_thread();

        if (self->t_temp_alloc_count++ > UINT16_MAX) {
            kheap_temp_overuse_panic(self);
        }
#if DEBUG || DEVELOPMENT
        if (__improbable(kheap_temp_debug)) {
            struct kheap_temp_header *hdr = (void *)addr;
            enqueue_head(&self->t_temp_alloc_list,
                &hdr->kht_hdr_link);
            backtrace(hdr->kht_hdr_pcs, KHT_BT_COUNT, NULL);
            req_size -= sizeof(struct kheap_temp_header);
            addr += sizeof(struct kheap_temp_header);
        }
#endif /* DEBUG || DEVELOPMENT */
    }

    DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, req_size, void*, addr);
    return (struct kalloc_result){ .addr = (void *)addr, .size = req_size };
}
struct kalloc_result
kalloc_ext(
    kalloc_heap_t         kheap,
    vm_size_t             req_size,
    zalloc_flags_t        flags,
    vm_allocation_site_t  *site)
{
    vm_size_t size;
    void *addr;
    zone_t z;

#if DEBUG || DEVELOPMENT
    if (__improbable(kheap_temp_debug)) {
        if (kheap == KHEAP_TEMP) {
            req_size += sizeof(struct kheap_temp_header);
        }
    }
#endif /* DEBUG || DEVELOPMENT */

    /*
     * Kasan for kalloc heaps will put the redzones *inside*
     * the allocation, and hence augment its size.
     *
     * kalloc heaps do not use zone_t::z_kasan_redzone.
     */
#if KASAN_KALLOC
    size = kasan_alloc_resize(req_size);
#else
    size = req_size;
#endif

    z = kalloc_heap_zone_for_size(kheap, size);
    if (__improbable(z == ZONE_NULL)) {
        return kalloc_large(kheap, req_size, size, flags, site);
    }

    if (size > zone_elem_size(z)) {
        panic("%s: z %p (%s%s) but requested size %lu", __func__, z,
            kalloc_heap_names[kheap->kh_zones->heap_id], z->z_name,
            (unsigned long)size);
    }
    assert(size <= zone_elem_size(z));

    vm_tag_t tag = zalloc_flags_get_tag(flags);
    if (tag == VM_KERN_MEMORY_NONE && site) {
        tag = vm_tag_alloc(site);
    }
    if (tag != VM_KERN_MEMORY_NONE) {
        tag = vm_tag_will_update_zone(tag, z->tag_zone_index,
            flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
    }
    flags |= Z_VM_TAG(tag);

    addr = zalloc_ext(z, kheap->kh_stats ?: z->z_stats, flags);

#if KASAN_KALLOC
    addr = (void *)kasan_alloc((vm_offset_t)addr, zone_elem_size(z),
        req_size, KASAN_GUARD_SIZE);
#else
    req_size = zone_elem_size(z);
#endif

    if (addr && kheap == KHEAP_TEMP) {
        thread_t self = current_thread();

        if (self->t_temp_alloc_count++ > UINT16_MAX) {
            kheap_temp_overuse_panic(self);
        }
#if DEBUG || DEVELOPMENT
        if (__improbable(kheap_temp_debug)) {
            struct kheap_temp_header *hdr = (void *)addr;
            enqueue_head(&self->t_temp_alloc_list,
                &hdr->kht_hdr_link);
            backtrace(hdr->kht_hdr_pcs, KHT_BT_COUNT, NULL);
            req_size -= sizeof(struct kheap_temp_header);
            addr += sizeof(struct kheap_temp_header);
        }
#endif /* DEBUG || DEVELOPMENT */
    }

    DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, req_size, void*, addr);
    return (struct kalloc_result){ .addr = addr, .size = req_size };
}
void *
kalloc_external(vm_size_t size);
void *
kalloc_external(vm_size_t size)
{
    return kheap_alloc_tag_bt(KHEAP_KEXT, size, Z_WAITOK, VM_KERN_MEMORY_KALLOC);
}
__attribute__((noinline))
static void
kfree_large(vm_offset_t addr, vm_size_t size)
{
    vm_map_t map = kalloc_map_for_addr(addr);
    kern_return_t ret;
    vm_offset_t end;

    if (addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS ||
        os_add_overflow(addr, size, &end) ||
        end > VM_MAX_KERNEL_ADDRESS) {
        panic("kfree: address range (%p, %ld) doesn't belong to the kernel",
            (void *)addr, (uintptr_t)size);
    }

    if (size == 0) {
        vm_map_lock(map);
        size = vm_map_lookup_kalloc_entry_locked(map, (void *)addr);
        ret = vm_map_remove_locked(map,
            vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map)),
            vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(map)),
            VM_MAP_REMOVE_KUNWIRE);
        if (ret != KERN_SUCCESS) {
            panic("kfree: vm_map_remove_locked() failed for "
                "addr: %p, map: %p ret: %d", (void *)addr, map, ret);
        }
        vm_map_unlock(map);
    } else {
        size = round_page(size);

        if (size > kalloc_largest_allocated) {
            panic("kfree: size %lu > kalloc_largest_allocated %lu",
                (uintptr_t)size, (uintptr_t)kalloc_largest_allocated);
        }
        kmem_free(map, addr, size);
    }

    kalloc_spin_lock();

    assert(kalloc_large_total >= size);
    kalloc_large_total -= size;
    kalloc_large_inuse--;

    kalloc_unlock();

    DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, size, void*, addr);

    KALLOC_ZINFO_SFREE(size);
}
static void
kfree_heap_confusion_panic(kalloc_heap_t kheap, void *data, size_t size, zone_t z)
{
    if (z->kalloc_heap == KHEAP_ID_NONE) {
        panic("kfree: addr %p, size %zd found in regular zone '%s%s'",
            data, size, zone_heap_name(z), z->z_name);
    } else {
        panic("kfree: addr %p, size %zd found in heap %s* instead of %s*",
            data, size, zone_heap_name(z),
            kalloc_heap_names[kheap->kh_heap_id]);
    }
}

static void
kfree_size_confusion_panic(zone_t z, void *data, size_t size, size_t zsize)
{
    if (z) {
        panic("kfree: addr %p, size %zd found in zone '%s%s' "
            "with elem_size %zd",
            data, size, zone_heap_name(z), z->z_name, zsize);
    } else {
        panic("kfree: addr %p, size %zd not found in any zone",
            data, size);
    }
}

static void
kfree_size_invalid_panic(void *data, size_t size)
{
    panic("kfree: addr %p trying to free with nonsensical size %zd",
        data, size);
}

static void
krealloc_size_invalid_panic(void *data, size_t size)
{
    panic("krealloc: addr %p trying to free with nonsensical size %zd",
        data, size);
}

static void
kfree_temp_imbalance_panic(void *data, size_t size)
{
    panic("kfree: KHEAP_TEMP allocation imbalance freeing addr %p, size %zd",
        data, size);
}
/* used to implement kheap_free_addr() */
#define KFREE_UNKNOWN_SIZE  ((vm_size_t)~0)
#define KFREE_ABSURD_SIZE \
    ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_AND_KEXT_ADDRESS) / 2)
void
kfree_ext(kalloc_heap_t kheap, void *data, vm_size_t size)
{
    zone_stats_t zs = NULL;
    zone_t z;
    vm_size_t zsize;

    if (__improbable(data == NULL)) {
        return;
    }

    if (kheap == KHEAP_TEMP) {
        assert(size != KFREE_UNKNOWN_SIZE);
        if (current_thread()->t_temp_alloc_count-- == 0) {
            kfree_temp_imbalance_panic(data, size);
        }
#if DEBUG || DEVELOPMENT
        if (__improbable(kheap_temp_debug)) {
            size += sizeof(struct kheap_temp_header);
            data -= sizeof(struct kheap_temp_header);
            remqueue(&((struct kheap_temp_header *)data)->kht_hdr_link);
        }
#endif /* DEBUG || DEVELOPMENT */
    }

#if KASAN_KALLOC
    /*
     * Resize back to the real allocation size and hand off to the KASan
     * quarantine. `data` may then point to a different allocation.
     */
    vm_size_t user_size = size;
    if (size == KFREE_UNKNOWN_SIZE) {
        user_size = size = kalloc_size(data);
    }
    kasan_check_free((vm_address_t)data, size, KASAN_HEAP_KALLOC);
    data = (void *)kasan_dealloc((vm_address_t)data, &size);
    kasan_free(&data, &size, KASAN_HEAP_KALLOC, NULL, user_size, true);
#endif

    if (size >= kalloc_max_prerounded && size != KFREE_UNKNOWN_SIZE) {
        return kfree_large((vm_offset_t)data, size);
    }

    zsize = zone_element_size(data, &z);
    if (size == KFREE_UNKNOWN_SIZE) {
        if (zsize == 0) {
            return kfree_large((vm_offset_t)data, 0);
        }
        size = zsize;
    } else if (size > zsize) {
        kfree_size_confusion_panic(z, data, size, zsize);
    }

    if (kheap != KHEAP_ANY) {
        if (kheap->kh_heap_id != z->kalloc_heap) {
            kfree_heap_confusion_panic(kheap, data, size, z);
        }
        zs = kheap->kh_stats;
    } else if (z->kalloc_heap != KHEAP_ID_DEFAULT &&
        z->kalloc_heap != KHEAP_ID_KEXT) {
        kfree_heap_confusion_panic(kheap, data, size, z);
    }

    DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, zsize, void*, data);
    zfree_ext(z, zs ?: z->z_stats, data);
}
void
(kfree)(void *addr, vm_size_t size)
{
    if (size > KFREE_ABSURD_SIZE) {
        kfree_size_invalid_panic(addr, size);
    }
    kfree_ext(KHEAP_ANY, addr, size);
}

void
(kheap_free)(kalloc_heap_t kheap, void *addr, vm_size_t size)
{
    if (size > KFREE_ABSURD_SIZE) {
        kfree_size_invalid_panic(addr, size);
    }
    kfree_ext(kheap, addr, size);
}

void
(kheap_free_addr)(kalloc_heap_t kheap, void *addr)
{
    kfree_ext(kheap, addr, KFREE_UNKNOWN_SIZE);
}
static struct kalloc_result
_krealloc_ext(
    kalloc_heap_t         kheap,
    void                 *addr,
    vm_size_t             old_size,
    vm_size_t             new_size,
    zalloc_flags_t        flags,
    vm_allocation_site_t  *site)
{
    vm_size_t old_bucket_size, new_bucket_size, min_size;
    struct kalloc_result kr;

    if (new_size == 0) {
        kfree_ext(kheap, addr, old_size);
        return (struct kalloc_result){ };
    }

    if (addr == NULL) {
        return kalloc_ext(kheap, new_size, flags, site);
    }

    /*
     * Find out the size of the bucket in which the new sized allocation
     * would land. If it matches the bucket of the original allocation,
     * simply return the same address.
     */
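
    /*
     * Illustrative example, not from the original source: growing a
     * 40-byte allocation to 60 bytes stays within the same (64-byte)
     * bucket, so the original address is handed back; growing it to
     * 100 bytes moves to a larger bucket and triggers the
     * allocate-copy-free path below.
     */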
    new_bucket_size = kalloc_bucket_size(new_size);
    if (old_size == KFREE_UNKNOWN_SIZE) {
        old_size = old_bucket_size = kalloc_size(addr);
    } else {
        old_bucket_size = kalloc_bucket_size(old_size);
    }
    min_size = MIN(old_size, new_size);

    if (old_bucket_size == new_bucket_size) {
        kr.addr = addr;
        kr.size = new_bucket_size;
    } else {
        kr = kalloc_ext(kheap, new_size, flags & ~Z_ZERO, site);
        if (kr.addr == NULL) {
            return kr;
        }

        memcpy(kr.addr, addr, min_size);
        kfree_ext(kheap, addr, old_size);
    }
    if ((flags & Z_ZERO) && kr.size > min_size) {
        bzero(kr.addr + min_size, kr.size - min_size);
    }
    return kr;
}
struct kalloc_result
krealloc_ext(
    kalloc_heap_t         kheap,
    void                 *addr,
    vm_size_t             old_size,
    vm_size_t             new_size,
    zalloc_flags_t        flags,
    vm_allocation_site_t  *site)
{
    if (old_size > KFREE_ABSURD_SIZE) {
        krealloc_size_invalid_panic(addr, old_size);
    }
    return _krealloc_ext(kheap, addr, old_size, new_size, flags, site);
}

struct kalloc_result
kheap_realloc_addr(
    kalloc_heap_t         kheap,
    void                 *addr,
    vm_size_t             size,
    zalloc_flags_t        flags,
    vm_allocation_site_t  *site)
{
    return _krealloc_ext(kheap, addr, KFREE_UNKNOWN_SIZE, size, flags, site);
}
void
kheap_startup_init(kalloc_heap_t kheap)
{
    struct kheap_zones *zones;

    switch (kheap->kh_heap_id) {
    case KHEAP_ID_DEFAULT:
        zones = KHEAP_DEFAULT->kh_zones;
        break;
    case KHEAP_ID_DATA_BUFFERS:
        zones = KHEAP_DATA_BUFFERS->kh_zones;
        break;
    case KHEAP_ID_KEXT:
        zones = KHEAP_KEXT->kh_zones;
        break;
    default:
        panic("kalloc_heap_startup_init: invalid KHEAP_ID: %d",
            kheap->kh_heap_id);
    }

    kheap->kh_heap_id = zones->heap_id;
    kheap->kh_zones = zones;
    kheap->kh_stats = zalloc_percpu_permanent_type(struct zone_stats);
    kheap->kh_next = zones->views;
    zones->views = kheap;

    zone_view_count += 1;
}
#pragma mark OSMalloc
/*
 * This is a deprecated interface, here only for legacy reasons.
 * There is no internal variant of any of these symbols on purpose.
 */
#define OSMallocDeprecated
#include <libkern/OSMalloc.h>

static KALLOC_HEAP_DEFINE(OSMALLOC, "osmalloc", KHEAP_ID_KEXT);
static queue_head_t OSMalloc_tag_list = QUEUE_HEAD_INITIALIZER(OSMalloc_tag_list);
static LCK_GRP_DECLARE(OSMalloc_tag_lck_grp, "OSMalloc_tag");
static LCK_SPIN_DECLARE(OSMalloc_tag_lock, &OSMalloc_tag_lck_grp);

#define OSMalloc_tag_spin_lock()  lck_spin_lock(&OSMalloc_tag_lock)
#define OSMalloc_tag_unlock()     lck_spin_unlock(&OSMalloc_tag_lock)
extern typeof(OSMalloc_Tagalloc) OSMalloc_Tagalloc_external;
OSMallocTag
OSMalloc_Tagalloc_external(const char *str, uint32_t flags)
{
    OSMallocTag OSMTag;

    OSMTag = kheap_alloc(OSMALLOC, sizeof(*OSMTag), Z_WAITOK | Z_ZERO);

    if (flags & OSMT_PAGEABLE) {
        OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;
    }

    OSMTag->OSMT_refcnt = 1;

    strlcpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

    OSMalloc_tag_spin_lock();
    enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
    OSMalloc_tag_unlock();
    OSMTag->OSMT_state = OSMT_VALID;

    return OSMTag;
}
void
OSMalloc_Tagref(OSMallocTag tag)
{
    if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) {
        panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n",
            tag->OSMT_name, tag->OSMT_state);
    }

    os_atomic_inc(&tag->OSMT_refcnt, relaxed);
}

void
OSMalloc_Tagrele(OSMallocTag tag)
{
    if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) {
        panic("OSMalloc_Tagrele():'%s' has bad state 0x%08X\n",
            tag->OSMT_name, tag->OSMT_state);
    }

    if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) != 0) {
        return;
    }

    if (os_atomic_cmpxchg(&tag->OSMT_state,
        OSMT_VALID | OSMT_RELEASED, OSMT_VALID | OSMT_RELEASED, acq_rel)) {
        OSMalloc_tag_spin_lock();
        (void)remque((queue_entry_t)tag);
        OSMalloc_tag_unlock();
        kheap_free(OSMALLOC, tag, sizeof(*tag));
    } else {
        panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name);
    }
}
extern typeof(OSMalloc_Tagfree) OSMalloc_Tagfree_external;
void
OSMalloc_Tagfree_external(OSMallocTag tag)
{
    if (!os_atomic_cmpxchg(&tag->OSMT_state,
        OSMT_VALID, OSMT_VALID | OSMT_RELEASED, acq_rel)) {
        panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X\n",
            tag->OSMT_name, tag->OSMT_state);
    }

    if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) == 0) {
        OSMalloc_tag_spin_lock();
        (void)remque((queue_entry_t)tag);
        OSMalloc_tag_unlock();
        kheap_free(OSMALLOC, tag, sizeof(*tag));
    }
}
extern typeof(OSMalloc) OSMalloc_external;
void *
OSMalloc_external(
    uint32_t size, OSMallocTag tag)
{
    void *addr = NULL;
    kern_return_t kr;

    OSMalloc_Tagref(tag);
    if ((tag->OSMT_attr & OSMT_PAGEABLE) && (size & ~PAGE_MASK)) {
        if ((kr = kmem_alloc_pageable_external(kernel_map,
            (vm_offset_t *)&addr, size)) != KERN_SUCCESS) {
            addr = NULL;
        }
    } else {
        addr = kheap_alloc_tag_bt(OSMALLOC, size,
            Z_WAITOK, VM_KERN_MEMORY_KALLOC);
    }

    if (!addr) {
        OSMalloc_Tagrele(tag);
    }

    return addr;
}
extern typeof(OSMalloc_nowait) OSMalloc_nowait_external;
void *
OSMalloc_nowait_external(uint32_t size, OSMallocTag tag)
{
    void *addr = NULL;

    if (tag->OSMT_attr & OSMT_PAGEABLE) {
        return NULL;
    }

    OSMalloc_Tagref(tag);
    /* XXX: use non-blocking kalloc for now */
    addr = kheap_alloc_tag_bt(OSMALLOC, (vm_size_t)size,
        Z_NOWAIT, VM_KERN_MEMORY_KALLOC);
    if (addr == NULL) {
        OSMalloc_Tagrele(tag);
    }

    return addr;
}
extern typeof(OSMalloc_noblock) OSMalloc_noblock_external;
void *
OSMalloc_noblock_external(uint32_t size, OSMallocTag tag)
{
    void *addr = NULL;

    if (tag->OSMT_attr & OSMT_PAGEABLE) {
        return NULL;
    }

    OSMalloc_Tagref(tag);
    addr = kheap_alloc_tag_bt(OSMALLOC, (vm_size_t)size,
        Z_NOWAIT, VM_KERN_MEMORY_KALLOC);
    if (addr == NULL) {
        OSMalloc_Tagrele(tag);
    }

    return addr;
}
extern typeof(OSFree) OSFree_external;
void
OSFree_external(void *addr, uint32_t size, OSMallocTag tag)
{
    if ((tag->OSMT_attr & OSMT_PAGEABLE)
        && (size & ~PAGE_MASK)) {
        kmem_free(kernel_map, (vm_offset_t)addr, size);
    } else {
        kheap_free(OSMALLOC, addr, size);
    }

    OSMalloc_Tagrele(tag);
}
#pragma mark kern_os_malloc

void *
kern_os_malloc_external(size_t size);
void *
kern_os_malloc_external(size_t size)
{
    return kheap_alloc_tag_bt(KERN_OS_MALLOC, size, Z_WAITOK | Z_ZERO,
        VM_KERN_MEMORY_LIBKERN);
}
void
kern_os_free_external(void *addr);
void
kern_os_free_external(void *addr)
{
    kheap_free_addr(KERN_OS_MALLOC, addr);
}
void *
kern_os_realloc_external(void *addr, size_t nsize);
void *
kern_os_realloc_external(void *addr, size_t nsize)
{
    VM_ALLOC_SITE_STATIC(VM_TAG_BT, VM_KERN_MEMORY_LIBKERN);

    return kheap_realloc_addr(KERN_OS_MALLOC, addr, nsize,
        Z_WAITOK | Z_ZERO, &site).addr;
}
void
kern_os_zfree(zone_t zone, void *addr, vm_size_t size)
{
    if (zsecurity_options & ZSECURITY_OPTIONS_STRICT_IOKIT_FREE
        || zone_owns(zone, addr)) {
        zfree(zone, addr);
    } else {
        /*
         * Third party kexts might not know about the operator new
         * and be allocated from the KEXT heap
         */
        printf("kern_os_zfree: kheap_free called for object from zone %s\n",
            zone->z_name);
        kheap_free(KHEAP_KEXT, addr, size);
    }
}
void
kern_os_kfree(void *addr, vm_size_t size)
{
    if (zsecurity_options & ZSECURITY_OPTIONS_STRICT_IOKIT_FREE) {
        kheap_free(KHEAP_DEFAULT, addr, size);
    } else {
        /*
         * Third party kexts may not know about the newly added default
         * operator new/delete. If they call new for any IOKit object,
         * it will end up coming from the KEXT heap. If these objects
         * are freed by calling release() or free(), the internal
         * version of operator delete is called and the kernel ends
         * up freeing the object to the DEFAULT heap.
         */
        kheap_free(KHEAP_ANY, addr, size);
    }
}