]>
Commit | Line | Data |
---|---|---|
1c79356b | 1 | /* |
f427ee49 | 2 | * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved. |
1c79356b | 3 | * |
2d21ac55 | 4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ |
0a7de745 | 5 | * |
2d21ac55 A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
0a7de745 | 14 | * |
2d21ac55 A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
0a7de745 | 17 | * |
2d21ac55 A |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
8f6c56a5 A |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
2d21ac55 A |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
0a7de745 | 25 | * |
2d21ac55 | 26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
1c79356b A |
27 | */ |
28 | /* | |
29 | * @OSF_COPYRIGHT@ | |
30 | */ | |
0a7de745 | 31 | /* |
1c79356b A |
32 | * Mach Operating System |
33 | * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University | |
34 | * All Rights Reserved. | |
0a7de745 | 35 | * |
1c79356b A |
36 | * Permission to use, copy, modify and distribute this software and its |
37 | * documentation is hereby granted, provided that both the copyright | |
38 | * notice and this permission notice appear in all copies of the | |
39 | * software, derivative works or modified versions, and any portions | |
40 | * thereof, and that both notices appear in supporting documentation. | |
0a7de745 | 41 | * |
1c79356b A |
42 | * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" |
43 | * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR | |
44 | * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. | |
0a7de745 | 45 | * |
1c79356b | 46 | * Carnegie Mellon requests users of this software to return to |
0a7de745 | 47 | * |
1c79356b A |
48 | * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU |
49 | * School of Computer Science | |
50 | * Carnegie Mellon University | |
51 | * Pittsburgh PA 15213-3890 | |
0a7de745 | 52 | * |
1c79356b A |
53 | * any improvements or extensions that they make and grant Carnegie Mellon |
54 | * the rights to redistribute these changes. | |
55 | */ | |
56 | /* | |
57 | */ | |
58 | /* | |
59 | * File: kern/kalloc.c | |
60 | * Author: Avadis Tevanian, Jr. | |
61 | * Date: 1985 | |
62 | * | |
63 | * General kernel memory allocator. This allocator is designed | |
64 | * to be used by the kernel to manage dynamic memory fast. | |
65 | */ | |
66 | ||
1c79356b | 67 | #include <mach/boolean.h> |
a39ff7e2 | 68 | #include <mach/sdt.h> |
1c79356b A |
69 | #include <mach/machine/vm_types.h> |
70 | #include <mach/vm_param.h> | |
71 | #include <kern/misc_protos.h> | |
f427ee49 | 72 | #include <kern/zalloc_internal.h> |
1c79356b | 73 | #include <kern/kalloc.h> |
316670eb | 74 | #include <kern/ledger.h> |
f427ee49 | 75 | #include <kern/backtrace.h> |
1c79356b A |
76 | #include <vm/vm_kern.h> |
77 | #include <vm/vm_object.h> | |
78 | #include <vm/vm_map.h> | |
39037602 | 79 | #include <sys/kdebug.h> |
1c79356b | 80 | |
5ba3f43e | 81 | #include <san/kasan.h> |
f427ee49 | 82 | #include <libkern/section_keywords.h> |
5ba3f43e | 83 | |
f427ee49 | 84 | /* #define KALLOC_DEBUG 1 */ |
1c79356b | 85 | |
2d21ac55 A |
86 | #define KALLOC_MAP_SIZE_MIN (16 * 1024 * 1024) |
87 | #define KALLOC_MAP_SIZE_MAX (128 * 1024 * 1024) | |
f427ee49 A |
88 | |
89 | static SECURITY_READ_ONLY_LATE(vm_offset_t) kalloc_map_min; | |
90 | static SECURITY_READ_ONLY_LATE(vm_offset_t) kalloc_map_max; | |
91 | static SECURITY_READ_ONLY_LATE(vm_size_t) kalloc_max; | |
92 | SECURITY_READ_ONLY_LATE(vm_size_t) kalloc_max_prerounded; | |
93 | /* size of kallocs that can come from kernel map */ | |
94 | SECURITY_READ_ONLY_LATE(vm_size_t) kalloc_kernmap_size; | |
95 | SECURITY_READ_ONLY_LATE(vm_map_t) kalloc_map; | |
96 | #if DEBUG || DEVELOPMENT | |
97 | static TUNABLE(bool, kheap_temp_debug, "kheap_temp_debug", false); | |
98 | ||
99 | #define KHT_BT_COUNT 14 | |
100 | struct kheap_temp_header { | |
101 | queue_chain_t kht_hdr_link; | |
102 | uintptr_t kht_hdr_pcs[KHT_BT_COUNT]; | |
103 | }; | |
104 | #endif | |
1c79356b | 105 | |
3e170ce0 A |
106 | /* how many times we couldn't allocate out of kalloc_map and fell back to kernel_map */ |
107 | unsigned long kalloc_fallback_count; | |
108 | ||
cb323159 A |
109 | uint_t kalloc_large_inuse; |
110 | vm_size_t kalloc_large_total; | |
111 | vm_size_t kalloc_large_max; | |
112 | vm_size_t kalloc_largest_allocated = 0; | |
113 | uint64_t kalloc_large_sum; | |
6d2010ae | 114 | |
f427ee49 A |
115 | LCK_GRP_DECLARE(kalloc_lck_grp, "kalloc.large"); |
116 | LCK_SPIN_DECLARE(kalloc_lock, &kalloc_lck_grp); | |
1c79356b | 117 | |
f427ee49 A |
118 | #define kalloc_spin_lock() lck_spin_lock(&kalloc_lock) |
119 | #define kalloc_unlock() lck_spin_unlock(&kalloc_lock) | |
6d2010ae | 120 | |
f427ee49 | 121 | #pragma mark initialization |
6d2010ae | 122 | |
1c79356b | 123 | /* |
a39ff7e2 A |
124 | * All allocations of size less than kalloc_max are rounded to the next nearest |
125 | * sized zone. This allocator is built on top of the zone allocator. A zone | |
126 | * is created for each potential size that we are willing to get in small | |
127 | * blocks. | |
1c79356b | 128 | * |
a39ff7e2 | 129 | * We assume that kalloc_max is not greater than 64K; |
1c79356b | 130 | * |
a39ff7e2 A |
131 | * Note that kalloc_max is somewhat confusingly named. It represents the first |
132 | * power of two for which no zone exists. kalloc_max_prerounded is the | |
133 | * smallest allocation size, before rounding, for which no zone exists. | |
316670eb | 134 | * |
a39ff7e2 A |
135 | * Also if the allocation size is more than kalloc_kernmap_size then allocate |
136 | * from kernel map rather than kalloc_map. | |
316670eb A |
137 | */ |
138 | ||
a39ff7e2 | 139 | #define KiB(x) (1024 * (x)) |
316670eb | 140 | |
0a7de745 | 141 | /* |
f427ee49 | 142 | * The k_zone_cfg table defines the configuration of zones on various platforms. |
0a7de745 | 143 | * The currently defined list of zones and their per-CPU caching behavior are as |
f427ee49 A |
144 | * follows |
145 | * | |
146 | * X:zone not present | |
147 | * N:zone present no cpu-caching | |
148 | * Y:zone present with cpu-caching | |
0a7de745 A |
149 | * |
150 | * Size macOS(64-bit) embedded(32-bit) embedded(64-bit) | |
151 | *-------- ---------------- ---------------- ---------------- | |
152 | * | |
153 | * 8 X Y X | |
154 | * 16 Y Y Y | |
155 | * 24 X Y X | |
156 | * 32 Y Y Y | |
157 | * 40 X Y X | |
158 | * 48 Y Y Y | |
159 | * 64 Y Y Y | |
160 | * 72 X Y X | |
161 | * 80 Y X Y | |
162 | * 88 X Y X | |
163 | * 96 Y X Y | |
164 | * 112 X Y X | |
165 | * 128 Y Y Y | |
166 | * 160 Y X Y | |
167 | * 192 Y Y Y | |
168 | * 224 Y X Y | |
169 | * 256 Y Y Y | |
170 | * 288 Y Y Y | |
171 | * 368 Y X Y | |
172 | * 384 X Y X | |
173 | * 400 Y X Y | |
174 | * 440 X Y X | |
175 | * 512 Y Y Y | |
176 | * 576 Y N N | |
177 | * 768 Y N N | |
178 | * 1024 Y Y Y | |
179 | * 1152 N N N | |
180 | * 1280 N N N | |
181 | * 1536 X N X | |
182 | * 1664 N X N | |
183 | * 2048 Y N N | |
184 | * 2128 X N X | |
185 | * 3072 X N X | |
186 | * 4096 Y N N | |
187 | * 6144 N N N | |
188 | * 8192 Y N N | |
cb323159 | 189 | * 12288 N X X |
94ff46dc A |
190 | * 16384 N X N |
191 | * 32768 X X N | |
0a7de745 A |
192 | * |
193 | */ | |
/*
 * Configuration of one kalloc size-class zone.  The single shared table
 * below (k_zone_cfg) drives zone creation for every kalloc heap.
 */
struct kalloc_zone_cfg {
	bool kzc_caching;     /* create this zone with per-CPU caching (ZC_CACHING) */
	uint32_t kzc_size;    /* element size; updated to the zone's real elem size at init */
	const char *kzc_name; /* zone name: "kalloc.<size>" */
};
static SECURITY_READ_ONLY_LATE(struct kalloc_zone_cfg) k_zone_cfg[] = {
#define KZC_ENTRY(SIZE, caching) { \
	.kzc_caching = (caching), \
	.kzc_size = (SIZE), \
	.kzc_name = "kalloc." #SIZE \
}

#if !defined(XNU_TARGET_OS_OSX)

#if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4
	/* Zone config for embedded 64-bit platforms */
	KZC_ENTRY(16, true),
	KZC_ENTRY(32, true),
	KZC_ENTRY(48, true),
	KZC_ENTRY(64, true),
	KZC_ENTRY(80, true),
	KZC_ENTRY(96, true),
	KZC_ENTRY(128, true),
	KZC_ENTRY(160, true),
	KZC_ENTRY(192, true),
	KZC_ENTRY(224, true),
	KZC_ENTRY(256, true),
	KZC_ENTRY(288, true),
	KZC_ENTRY(368, true),
	KZC_ENTRY(400, true),
	KZC_ENTRY(512, true),
	KZC_ENTRY(576, false),
	KZC_ENTRY(768, false),
	KZC_ENTRY(1024, true),
	KZC_ENTRY(1152, false),
	KZC_ENTRY(1280, false),
	KZC_ENTRY(1664, false),
	KZC_ENTRY(2048, false),
	KZC_ENTRY(4096, false),
	KZC_ENTRY(6144, false),
	KZC_ENTRY(8192, false),
	KZC_ENTRY(16384, false),
	KZC_ENTRY(32768, false),

#elif KALLOC_MINSIZE == 8 && KALLOC_LOG2_MINALIGN == 3
	/* Zone config for embedded 32-bit platforms */
	KZC_ENTRY(8, true),
	KZC_ENTRY(16, true),
	KZC_ENTRY(24, true),
	KZC_ENTRY(32, true),
	KZC_ENTRY(40, true),
	KZC_ENTRY(48, true),
	KZC_ENTRY(64, true),
	KZC_ENTRY(72, true),
	KZC_ENTRY(88, true),
	KZC_ENTRY(112, true),
	KZC_ENTRY(128, true),
	KZC_ENTRY(192, true),
	KZC_ENTRY(256, true),
	KZC_ENTRY(288, true),
	KZC_ENTRY(384, true),
	KZC_ENTRY(440, true),
	KZC_ENTRY(512, true),
	KZC_ENTRY(576, false),
	KZC_ENTRY(768, false),
	KZC_ENTRY(1024, true),
	KZC_ENTRY(1152, false),
	KZC_ENTRY(1280, false),
	KZC_ENTRY(1536, false),
	KZC_ENTRY(2048, false),
	KZC_ENTRY(2128, false),
	KZC_ENTRY(3072, false),
	KZC_ENTRY(4096, false),
	KZC_ENTRY(6144, false),
	KZC_ENTRY(8192, false),
	/* To limit internal fragmentation, only add the following zones if the
	 * page size is greater than 4K.
	 * Note that we use ARM_PGBYTES here (instead of one of the VM macros)
	 * since it's guaranteed to be a compile time constant.
	 */
#if ARM_PGBYTES > 4096
	KZC_ENTRY(16384, false),
	KZC_ENTRY(32768, false),
#endif /* ARM_PGBYTES > 4096 */

#else
#error missing or invalid zone size parameters for kalloc
#endif

#else /* !defined(XNU_TARGET_OS_OSX) */

	/* Zone config for macOS 64-bit platforms */
	KZC_ENTRY(16, true),
	KZC_ENTRY(32, true),
	KZC_ENTRY(48, true),
	KZC_ENTRY(64, true),
	KZC_ENTRY(80, true),
	KZC_ENTRY(96, true),
	KZC_ENTRY(128, true),
	KZC_ENTRY(160, true),
	KZC_ENTRY(192, true),
	KZC_ENTRY(224, true),
	KZC_ENTRY(256, true),
	KZC_ENTRY(288, true),
	KZC_ENTRY(368, true),
	KZC_ENTRY(400, true),
	KZC_ENTRY(512, true),
	KZC_ENTRY(576, true),
	KZC_ENTRY(768, true),
	KZC_ENTRY(1024, true),
	KZC_ENTRY(1152, false),
	KZC_ENTRY(1280, false),
	KZC_ENTRY(1664, false),
	KZC_ENTRY(2048, true),
	KZC_ENTRY(4096, true),
	KZC_ENTRY(6144, false),
	KZC_ENTRY(8192, true),
	KZC_ENTRY(12288, false),
	KZC_ENTRY(16384, false)

#endif /* !defined(XNU_TARGET_OS_OSX) */

#undef KZC_ENTRY
};
318 | ||
f427ee49 | 319 | #define MAX_K_ZONE(kzc) (uint32_t)(sizeof(kzc) / sizeof(kzc[0])) |
316670eb A |
320 | |
321 | /* | |
322 | * Many kalloc() allocations are for small structures containing a few | |
f427ee49 | 323 | * pointers and longs - the dlut[] direct lookup table, indexed by |
316670eb A |
324 | * size normalized to the minimum alignment, finds the right zone index |
325 | * for them in one dereference. | |
326 | */ | |
327 | ||
f427ee49 A |
328 | #define INDEX_ZDLUT(size) (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN) |
329 | #define MAX_SIZE_ZDLUT ((KALLOC_DLUT_SIZE - 1) * KALLOC_MINALIGN) | |
330 | ||
331 | static SECURITY_READ_ONLY_LATE(zone_t) k_zone_default[MAX_K_ZONE(k_zone_cfg)]; | |
332 | static SECURITY_READ_ONLY_LATE(zone_t) k_zone_data_buffers[MAX_K_ZONE(k_zone_cfg)]; | |
333 | static SECURITY_READ_ONLY_LATE(zone_t) k_zone_kext[MAX_K_ZONE(k_zone_cfg)]; | |
334 | ||
335 | #if VM_MAX_TAG_ZONES | |
336 | #if __LP64__ | |
337 | static_assert(VM_MAX_TAG_ZONES >= | |
338 | MAX_K_ZONE(k_zone_cfg) + MAX_K_ZONE(k_zone_cfg) + MAX_K_ZONE(k_zone_cfg)); | |
339 | #else | |
340 | static_assert(VM_MAX_TAG_ZONES >= MAX_K_ZONE(k_zone_cfg)); | |
341 | #endif | |
342 | #endif | |
316670eb | 343 | |
f427ee49 A |
/* Per-heap name prefix, indexed by kheap id; used when naming zones/views. */
const char * const kalloc_heap_names[] = {
	[KHEAP_ID_NONE] = "",
	[KHEAP_ID_DEFAULT] = "default.",
	[KHEAP_ID_DATA_BUFFERS] = "data.",
	[KHEAP_ID_KEXT] = "kext.",
};
316670eb A |
350 | |
/*
 * Default kalloc heap configuration: zones backed by k_zone_default,
 * created from the shared k_zone_cfg size table.
 */
static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_default = {
	.cfg = k_zone_cfg,
	.heap_id = KHEAP_ID_DEFAULT,
	.k_zone = k_zone_default,
	.max_k_zone = MAX_K_ZONE(k_zone_cfg)
};
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DEFAULT[1] = {
	{
		.kh_zones = &kalloc_zones_default,
		.kh_name = "default.",
		.kh_heap_id = KHEAP_ID_DEFAULT,
	}
};

/*
 * KHEAP_TEMP: a view on the default heap for short-lived allocations;
 * outstanding KHEAP_TEMP allocations are tracked per-thread and leaks
 * panic (see kheap_temp_leak_panic below).
 */
KALLOC_HEAP_DEFINE(KHEAP_TEMP, "temp allocations", KHEAP_ID_DEFAULT);
1c79356b | 369 | |
316670eb | 370 | |
f427ee49 A |
/*
 * Bag of bytes heap configuration: pointer-free "data" allocations,
 * kept in their own zones (k_zone_data_buffers) when the user-data
 * submap security option is enabled (see kalloc_init).
 */
static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_data_buffers = {
	.cfg = k_zone_cfg,
	.heap_id = KHEAP_ID_DATA_BUFFERS,
	.k_zone = k_zone_data_buffers,
	.max_k_zone = MAX_K_ZONE(k_zone_cfg)
};
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_DATA_BUFFERS[1] = {
	{
		.kh_zones = &kalloc_zones_data_buffers,
		.kh_name = "data.",
		.kh_heap_id = KHEAP_ID_DATA_BUFFERS,
	}
};
91447636 | 387 | |
6d2010ae | 388 | |
f427ee49 A |
/*
 * Kext heap configuration: allocations made by kernel extensions,
 * sequestered into k_zone_kext when the kext-kalloc security option
 * is enabled (see kalloc_init).
 */
static SECURITY_READ_ONLY_LATE(struct kheap_zones) kalloc_zones_kext = {
	.cfg = k_zone_cfg,
	.heap_id = KHEAP_ID_KEXT,
	.k_zone = k_zone_kext,
	.max_k_zone = MAX_K_ZONE(k_zone_cfg)
};
SECURITY_READ_ONLY_LATE(struct kalloc_heap) KHEAP_KEXT[1] = {
	{
		.kh_zones = &kalloc_zones_kext,
		.kh_name = "kext.",
		.kh_heap_id = KHEAP_ID_KEXT,
	}
};

/* Legacy kext allocator entry point; a view on the kext heap. */
KALLOC_HEAP_DEFINE(KERN_OS_MALLOC, "kern_os_malloc", KHEAP_ID_KEXT);
6d2010ae | 407 | |
f427ee49 A |
/*
 * Initialize kalloc heap: Create zones, generate direct lookup table and
 * do a quick test on lookups
 *
 * Called during startup (after kalloc_max has been computed) once per
 * heap that gets its own zones.
 */
__startup_func
static void
kalloc_zones_init(struct kheap_zones *zones)
{
	struct kalloc_zone_cfg *cfg = zones->cfg;
	zone_t *k_zone = zones->k_zone;
	vm_size_t size;

	/*
	 * Allocate a zone for each size we are going to handle.
	 */
	for (uint32_t i = 0; i < zones->max_k_zone &&
	    (size = cfg[i].kzc_size) < kalloc_max; i++) {
		zone_create_flags_t flags = ZC_KASAN_NOREDZONE |
		    ZC_KASAN_NOQUARANTINE | ZC_KALLOC_HEAP;
		if (cfg[i].kzc_caching) {
			flags |= ZC_CACHING;
		}

		/* the block tags the new zone with its owning heap id */
		k_zone[i] = zone_create_ext(cfg[i].kzc_name, size, flags,
		    ZONE_ID_ANY, ^(zone_t z){
			z->kalloc_heap = zones->heap_id;
		});
		/*
		 * Set the updated elem size back to the config
		 */
		cfg[i].kzc_size = k_zone[i]->z_elem_size;
	}

	/*
	 * Count all the "raw" views for zones in the heap.
	 */
	zone_view_count += zones->max_k_zone;

	/*
	 * Build the Direct LookUp Table for small allocations
	 * As k_zone_cfg is shared between the heaps the
	 * Direct LookUp Table is also shared and doesn't need to
	 * be rebuilt per heap.
	 *
	 * Note: the loop deliberately runs one step past the table
	 * (i == KALLOC_DLUT_SIZE) solely to record k_zindex_start,
	 * the first zone index for sizes too big for the DLUT.
	 */
	size = 0;
	for (int i = 0; i <= KALLOC_DLUT_SIZE; i++, size += KALLOC_MINALIGN) {
		uint8_t zindex = 0;

		while ((vm_size_t)(cfg[zindex].kzc_size) < size) {
			zindex++;
		}

		if (i == KALLOC_DLUT_SIZE) {
			zones->k_zindex_start = zindex;
			break;
		}
		zones->dlut[i] = zindex;
	}

#ifdef KALLOC_DEBUG
	printf("kalloc_init: k_zindex_start %d\n", zones->k_zindex_start);

	/*
	 * Do a quick synthesis to see how well/badly we can
	 * find-a-zone for a given size.
	 * Useful when debugging/tweaking the array of zone sizes.
	 * Cache misses probably more critical than compare-branches!
	 */
	for (uint32_t i = 0; i < zones->max_k_zone; i++) {
		vm_size_t testsize = (vm_size_t)(cfg[i].kzc_size - 1);
		int compare = 0;
		uint8_t zindex;

		if (testsize < MAX_SIZE_ZDLUT) {
			compare += 1;   /* 'if' (T) */

			long dindex = INDEX_ZDLUT(testsize);
			zindex = (int)zones->dlut[dindex];
		} else if (testsize < kalloc_max_prerounded) {
			compare += 2;   /* 'if' (F), 'if' (T) */

			zindex = zones->k_zindex_start;
			while ((vm_size_t)(cfg[zindex].kzc_size) < testsize) {
				zindex++;
				compare++;      /* 'while' (T) */
			}
			compare++;      /* 'while' (F) */
		} else {
			break;  /* not zone-backed */
		}
		zone_t z = k_zone[zindex];
		printf("kalloc_init: req size %4lu: %8s.%16s took %d compare%s\n",
		    (unsigned long)testsize, kalloc_heap_names[zones->heap_id],
		    z->z_name, compare, compare == 1 ? "" : "s");
	}
#endif
}
91447636 | 505 | |
1c79356b A |
/*
 * Initialize the memory allocator. This should be called only
 * once on a system wide basis (i.e. first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */

__startup_func
static void
kalloc_init(void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t kalloc_map_size;
	vm_map_kernel_flags_t vmk_flags;

	/*
	 * Scale the kalloc_map_size to physical memory size: stay below
	 * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel).
	 */
	kalloc_map_size = (vm_size_t)(sane_size >> 5);
#if !__LP64__
	if (kalloc_map_size > KALLOC_MAP_SIZE_MAX) {
		kalloc_map_size = KALLOC_MAP_SIZE_MAX;
	}
#endif /* !__LP64__ */
	if (kalloc_map_size < KALLOC_MAP_SIZE_MIN) {
		kalloc_map_size = KALLOC_MAP_SIZE_MIN;
	}

	/* the kalloc submap is permanent: it is never deallocated */
	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_permanent = TRUE;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
	    FALSE, VM_FLAGS_ANYWHERE, vmk_flags,
	    VM_KERN_MEMORY_KALLOC, &kalloc_map);

	if (retval != KERN_SUCCESS) {
		panic("kalloc_init: kmem_suballoc failed");
	}

	/* record submap bounds for kalloc_map_for_addr() */
	kalloc_map_min = min;
	kalloc_map_max = min + kalloc_map_size - 1;

	/*
	 * kalloc_max is the first power of two past the largest configured
	 * zone size: requests at or beyond it are not zone-backed.
	 */
	struct kheap_zones *khz_default = &kalloc_zones_default;
	kalloc_max = (khz_default->cfg[khz_default->max_k_zone - 1].kzc_size << 1);
	if (kalloc_max < KiB(16)) {
		kalloc_max = KiB(16);
	}
	assert(kalloc_max <= KiB(64)); /* assumption made in size arrays */

	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* allocations larger than 16 times kalloc_max go directly to kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;
	kalloc_largest_allocated = kalloc_kernmap_size;

	/* Initialize kalloc default heap */
	kalloc_zones_init(&kalloc_zones_default);

	/* Initialize kalloc data buffers heap; without the security option,
	 * the data heap aliases the default heap. */
	if (ZSECURITY_OPTIONS_SUBMAP_USER_DATA & zsecurity_options) {
		kalloc_zones_init(&kalloc_zones_data_buffers);
	} else {
		*KHEAP_DATA_BUFFERS = *KHEAP_DEFAULT;
	}

	/* Initialize kalloc kext heap; without the security option,
	 * the kext heap aliases the default heap. */
	if (ZSECURITY_OPTIONS_SEQUESTER_KEXT_KALLOC & zsecurity_options) {
		kalloc_zones_init(&kalloc_zones_kext);
	} else {
		*KHEAP_KEXT = *KHEAP_DEFAULT;
	}
}
STARTUP(ZALLOC, STARTUP_RANK_THIRD, kalloc_init);
316670eb | 581 | |
316670eb | 582 | |
f427ee49 | 583 | #pragma mark accessors |
316670eb | 584 | |
f427ee49 A |
585 | static void |
586 | KALLOC_ZINFO_SALLOC(vm_size_t bytes) | |
587 | { | |
588 | thread_t thr = current_thread(); | |
589 | ledger_debit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes); | |
590 | } | |
316670eb | 591 | |
f427ee49 A |
592 | static void |
593 | KALLOC_ZINFO_SFREE(vm_size_t bytes) | |
594 | { | |
595 | thread_t thr = current_thread(); | |
596 | ledger_credit_thread(thr, thr->t_ledger, task_ledgers.tkm_shared, bytes); | |
597 | } | |
316670eb | 598 | |
f427ee49 A |
599 | static inline vm_map_t |
600 | kalloc_map_for_addr(vm_address_t addr) | |
601 | { | |
602 | if (addr >= kalloc_map_min && addr < kalloc_map_max) { | |
603 | return kalloc_map; | |
316670eb | 604 | } |
f427ee49 | 605 | return kernel_map; |
316670eb | 606 | } |
6d2010ae | 607 | |
f427ee49 A |
608 | static inline vm_map_t |
609 | kalloc_map_for_size(vm_size_t size) | |
316670eb | 610 | { |
f427ee49 A |
611 | if (size < kalloc_kernmap_size) { |
612 | return kalloc_map; | |
613 | } | |
614 | return kernel_map; | |
316670eb A |
615 | } |
616 | ||
f427ee49 A |
617 | zone_t |
618 | kalloc_heap_zone_for_size(kalloc_heap_t kheap, vm_size_t size) | |
316670eb | 619 | { |
f427ee49 | 620 | struct kheap_zones *khz = kheap->kh_zones; |
316670eb | 621 | |
f427ee49 A |
622 | if (size < MAX_SIZE_ZDLUT) { |
623 | uint32_t zindex = khz->dlut[INDEX_ZDLUT(size)]; | |
624 | return khz->k_zone[zindex]; | |
0a7de745 | 625 | } |
316670eb | 626 | |
f427ee49 A |
627 | if (size < kalloc_max_prerounded) { |
628 | uint32_t zindex = khz->k_zindex_start; | |
629 | while (khz->cfg[zindex].kzc_size < size) { | |
630 | zindex++; | |
631 | } | |
632 | assert(zindex < khz->max_k_zone); | |
633 | return khz->k_zone[zindex]; | |
634 | } | |
316670eb | 635 | |
f427ee49 | 636 | return ZONE_NULL; |
1c79356b A |
637 | } |
638 | ||
39037602 | 639 | static vm_size_t |
f427ee49 | 640 | vm_map_lookup_kalloc_entry_locked(vm_map_t map, void *addr) |
39037602 | 641 | { |
f427ee49 | 642 | vm_map_entry_t vm_entry = NULL; |
0a7de745 | 643 | |
f427ee49 A |
644 | if (!vm_map_lookup_entry(map, (vm_map_offset_t)addr, &vm_entry)) { |
645 | panic("address %p not allocated via kalloc, map %p", | |
646 | addr, map); | |
39037602 A |
647 | } |
648 | if (vm_entry->vme_start != (vm_map_offset_t)addr) { | |
f427ee49 A |
649 | panic("address %p inside vm entry %p [%p:%p), map %p", |
650 | addr, vm_entry, (void *)vm_entry->vme_start, | |
651 | (void *)vm_entry->vme_end, map); | |
39037602 A |
652 | } |
653 | if (!vm_entry->vme_atomic) { | |
f427ee49 A |
654 | panic("address %p not managed by kalloc (entry %p, map %p)", |
655 | addr, vm_entry, map); | |
39037602 | 656 | } |
0a7de745 | 657 | return vm_entry->vme_end - vm_entry->vme_start; |
39037602 A |
658 | } |
659 | ||
5ba3f43e A |
#if KASAN_KALLOC
/*
 * KASAN kalloc stashes the original user-requested size away in the poisoned
 * area. Return that directly.
 */
vm_size_t
kalloc_size(void *addr)
{
	(void)vm_map_lookup_kalloc_entry_locked; /* silence warning */
	return kasan_user_size((vm_offset_t)addr);
}
#else
/*
 * Return the actual size backing a kalloc allocation: the zone element
 * size for zone-backed allocations, or the size of the atomic VM entry
 * for large (map-backed) ones.
 */
vm_size_t
kalloc_size(void *addr)
{
	vm_map_t map;
	vm_size_t size;

	/* zone-backed: the zone knows the element size (0 if not in a zone) */
	size = zone_element_size(addr, NULL);
	if (size) {
		return size;
	}

	/* large allocation: measure the VM entry that backs it */
	map = kalloc_map_for_addr((vm_offset_t)addr);
	vm_map_lock_read(map);
	size = vm_map_lookup_kalloc_entry_locked(map, addr);
	vm_map_unlock_read(map);
	return size;
}
#endif
39037602 A |
690 | |
691 | vm_size_t | |
f427ee49 | 692 | kalloc_bucket_size(vm_size_t size) |
39037602 | 693 | { |
f427ee49 A |
694 | zone_t z = kalloc_heap_zone_for_size(KHEAP_DEFAULT, size); |
695 | vm_map_t map = kalloc_map_for_size(size); | |
0a7de745 | 696 | |
f427ee49 A |
697 | if (z) { |
698 | return zone_elem_size(z); | |
0a7de745 | 699 | } |
f427ee49 A |
700 | return vm_map_round_page(size, VM_MAP_PAGE_MASK(map)); |
701 | } | |
0a7de745 | 702 | |
f427ee49 | 703 | #pragma mark kalloc |
39037602 | 704 | |
f427ee49 A |
/*
 * Panic because a KHEAP_TEMP allocation was still outstanding when its
 * owning thread went away.  On DEBUG/DEVELOPMENT kernels booted with
 * kheap_temp_debug=1, each allocation carries a recorded backtrace
 * (struct kheap_temp_header) which is included in the panic string.
 */
void
kheap_temp_leak_panic(thread_t self)
{
#if DEBUG || DEVELOPMENT
	if (__improbable(kheap_temp_debug)) {
		/* pull one leaked allocation's header off the thread's list */
		struct kheap_temp_header *hdr = qe_dequeue_head(&self->t_temp_alloc_list,
		    struct kheap_temp_header, kht_hdr_link);

		panic_plain("KHEAP_TEMP leak on thread %p (%d), allocated at:\n"
		    " %#016lx\n" " %#016lx\n" " %#016lx\n" " %#016lx\n"
		    " %#016lx\n" " %#016lx\n" " %#016lx\n" " %#016lx\n"
		    " %#016lx\n" " %#016lx\n" " %#016lx\n" " %#016lx\n"
		    " %#016lx\n" " %#016lx\n",
		    self, self->t_temp_alloc_count,
		    hdr->kht_hdr_pcs[0], hdr->kht_hdr_pcs[1],
		    hdr->kht_hdr_pcs[2], hdr->kht_hdr_pcs[3],
		    hdr->kht_hdr_pcs[4], hdr->kht_hdr_pcs[5],
		    hdr->kht_hdr_pcs[6], hdr->kht_hdr_pcs[7],
		    hdr->kht_hdr_pcs[8], hdr->kht_hdr_pcs[9],
		    hdr->kht_hdr_pcs[10], hdr->kht_hdr_pcs[11],
		    hdr->kht_hdr_pcs[12], hdr->kht_hdr_pcs[13]);
	}
	panic("KHEAP_TEMP leak on thread %p (%d) "
	    "(boot with kheap_temp_debug=1 to debug)",
	    self, self->t_temp_alloc_count);
#else /* !DEBUG && !DEVELOPMENT */
	panic("KHEAP_TEMP leak on thread %p (%d)",
	    self, self->t_temp_alloc_count);
#endif /* !DEBUG && !DEVELOPMENT */
}
735 | ||
f427ee49 A |
/*
 * Panic when a single thread has more than UINT16_MAX KHEAP_TEMP
 * allocations in flight (see the counter check in kalloc paths).
 */
__abortlike
static void
kheap_temp_overuse_panic(thread_t self)
{
	panic("too many KHEAP_TEMP allocations in flight: %d",
	    self->t_temp_alloc_count);
}
39037602 | 743 | |
f427ee49 A |
/*
 * Slow path for allocations too large for any kalloc zone: allocate
 * pages directly from the kalloc submap (falling back to kernel_map),
 * update the large-allocation statistics and ledgers, and handle
 * KASAN redzones plus KHEAP_TEMP bookkeeping.
 *
 * req_size is the caller's original request, size the (possibly already
 * adjusted) allocation size.  Returns { NULL, 0 } on failure.
 */
__attribute__((noinline))
static struct kalloc_result
kalloc_large(
	kalloc_heap_t         kheap,
	vm_size_t             req_size,
	vm_size_t             size,
	zalloc_flags_t        flags,
	vm_allocation_site_t  *site)
{
	int kma_flags = KMA_ATOMIC;
	vm_tag_t tag;
	vm_map_t alloc_map;
	vm_offset_t addr;

	if (flags & Z_NOFAIL) {
		panic("trying to kalloc(Z_NOFAIL) with a large size (%zd)",
		    (size_t)size);
	}
	/* kmem_alloc could block so we return if noblock */
	if (flags & Z_NOWAIT) {
		return (struct kalloc_result){ };
	}

#ifndef __x86_64__
	/*
	 * (73465472) on Intel we didn't use to pass this flag,
	 * which in turn allowed kalloc_large() memory to be shared
	 * with user directly.
	 *
	 * We're bound by this unfortunate ABI.
	 */
	kma_flags |= KMA_KOBJECT;
#endif
	if (flags & Z_NOPAGEWAIT) {
		kma_flags |= KMA_NOPAGEWAIT;
	}
	if (flags & Z_ZERO) {
		kma_flags |= KMA_ZERO;
	}

#if KASAN_KALLOC
	/* large allocation - use guard pages instead of small redzones */
	size = round_page(req_size + 2 * PAGE_SIZE);
	assert(size >= MAX_SIZE_ZDLUT && size >= kalloc_max_prerounded);
#else
	size = round_page(size);
#endif

	alloc_map = kalloc_map_for_size(size);

	/* resolve the VM tag: explicit flag, allocation site, or generic */
	tag = zalloc_flags_get_tag(flags);
	if (tag == VM_KERN_MEMORY_NONE) {
		if (site) {
			tag = vm_tag_alloc(site);
		} else {
			tag = VM_KERN_MEMORY_KALLOC;
		}
	}

	if (kmem_alloc_flags(alloc_map, &addr, size, tag, kma_flags) != KERN_SUCCESS) {
		/* the kalloc submap is full: retry once in kernel_map */
		if (alloc_map != kernel_map) {
			if (kalloc_fallback_count++ == 0) {
				printf("%s: falling back to kernel_map\n", __func__);
			}
			if (kmem_alloc_flags(kernel_map, &addr, size, tag, kma_flags) != KERN_SUCCESS) {
				addr = 0;
			}
		} else {
			addr = 0;
		}
	}

	if (addr != 0) {
		kalloc_spin_lock();
		/*
		 * Thread-safe version of the workaround for 4740071
		 * (a double FREE())
		 */
		if (size > kalloc_largest_allocated) {
			kalloc_largest_allocated = size;
		}

		kalloc_large_inuse++;
		assert(kalloc_large_total + size >= kalloc_large_total); /* no wrap around */
		kalloc_large_total += size;
		kalloc_large_sum += size;

		if (kalloc_large_total > kalloc_large_max) {
			kalloc_large_max = kalloc_large_total;
		}

		kalloc_unlock();

		KALLOC_ZINFO_SALLOC(size);
	}
#if KASAN_KALLOC
	/* fixup the return address to skip the redzone */
	addr = kasan_alloc(addr, size, req_size, PAGE_SIZE);
	/*
	 * Initialize buffer with unique pattern only if memory
	 * wasn't expected to be zeroed.
	 */
	if (!(flags & Z_ZERO)) {
		kasan_leak_init(addr, req_size);
	}
#else
	/* report the full rounded size back to the caller */
	req_size = size;
#endif

	if (addr && kheap == KHEAP_TEMP) {
		thread_t self = current_thread();

		/* bound the number of in-flight KHEAP_TEMP allocations */
		if (self->t_temp_alloc_count++ > UINT16_MAX) {
			kheap_temp_overuse_panic(self);
		}
#if DEBUG || DEVELOPMENT
		if (__improbable(kheap_temp_debug)) {
			/* steal a header from the allocation to record a backtrace */
			struct kheap_temp_header *hdr = (void *)addr;
			enqueue_head(&self->t_temp_alloc_list,
			    &hdr->kht_hdr_link);
			backtrace(hdr->kht_hdr_pcs, KHT_BT_COUNT, NULL);
			req_size -= sizeof(struct kheap_temp_header);
			addr += sizeof(struct kheap_temp_header);
		}
#endif /* DEBUG || DEVELOPMENT */
	}

	DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, req_size, void*, addr);
	return (struct kalloc_result){ .addr = (void *)addr, .size = req_size };
}
6d2010ae | 874 | |
/*
 * kalloc_ext:
 * Core kalloc entry point: allocate req_size bytes from the given kalloc
 * heap, honoring zalloc_flags_t flags (Z_WAITOK/Z_NOWAIT/Z_ZERO/...).
 *
 * Returns a struct kalloc_result holding both the address and the size
 * actually granted (the zone element size, which may exceed req_size).
 * Requests too large for any heap zone are forwarded to kalloc_large().
 */
struct kalloc_result
kalloc_ext(
	kalloc_heap_t kheap,
	vm_size_t req_size,
	zalloc_flags_t flags,
	vm_allocation_site_t *site)
{
	vm_size_t size;
	void *addr;
	zone_t z;

#if DEBUG || DEVELOPMENT
	/* KHEAP_TEMP debugging prepends a tracking header to each allocation */
	if (__improbable(kheap_temp_debug)) {
		if (kheap == KHEAP_TEMP) {
			req_size += sizeof(struct kheap_temp_header);
		}
	}
#endif /* DEBUG || DEVELOPMENT */

	/*
	 * Kasan for kalloc heaps will put the redzones *inside*
	 * the allocation, and hence augment its size.
	 *
	 * kalloc heaps do not use zone_t::z_kasan_redzone.
	 */
#if KASAN_KALLOC
	size = kasan_alloc_resize(req_size);
#else
	size = req_size;
#endif
	/* ZONE_NULL means no zone covers this size: take the large path */
	z = kalloc_heap_zone_for_size(kheap, size);
	if (__improbable(z == ZONE_NULL)) {
		return kalloc_large(kheap, req_size, size, flags, site);
	}

#ifdef KALLOC_DEBUG
	if (size > zone_elem_size(z)) {
		panic("%s: z %p (%s%s) but requested size %lu", __func__, z,
		    kalloc_heap_names[kheap->kh_zones->heap_id], z->z_name,
		    (unsigned long)size);
	}
#endif
	assert(size <= zone_elem_size(z));

#if VM_MAX_TAG_ZONES
	/* Resolve a VM tag (explicit flag, else allocation site, else none)
	 * and fold it into flags for per-tag zone accounting. */
	if (z->tags) {
		vm_tag_t tag = zalloc_flags_get_tag(flags);
		if (tag == VM_KERN_MEMORY_NONE && site) {
			tag = vm_tag_alloc(site);
		}
		if (tag != VM_KERN_MEMORY_NONE) {
			tag = vm_tag_will_update_zone(tag, z->tag_zone_index,
			    flags & (Z_WAITOK | Z_NOWAIT | Z_NOPAGEWAIT));
		}
		flags |= Z_VM_TAG(tag);
	}
#endif
	/* Heap-private stats if the view has them, else the zone's own */
	addr = zalloc_ext(z, kheap->kh_stats ?: z->z_stats, flags);

#if KASAN_KALLOC
	addr = (void *)kasan_alloc((vm_offset_t)addr, zone_elem_size(z),
	    req_size, KASAN_GUARD_SIZE);
#else
	/* report the usable (element) size back to the caller */
	req_size = zone_elem_size(z);
#endif

	if (addr && kheap == KHEAP_TEMP) {
		thread_t self = current_thread();

		/* per-thread leak guard for the TEMP heap */
		if (self->t_temp_alloc_count++ > UINT16_MAX) {
			kheap_temp_overuse_panic(self);
		}
#if DEBUG || DEVELOPMENT
		if (__improbable(kheap_temp_debug)) {
			struct kheap_temp_header *hdr = (void *)addr;
			enqueue_head(&self->t_temp_alloc_list,
			    &hdr->kht_hdr_link);
			backtrace(hdr->kht_hdr_pcs, KHT_BT_COUNT, NULL);
			req_size -= sizeof(struct kheap_temp_header);
			addr += sizeof(struct kheap_temp_header);
		}
#endif /* DEBUG || DEVELOPMENT */
	}

	DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, req_size, void*, addr);
	return (struct kalloc_result){ .addr = addr, .size = req_size };
}
962 | ||
91447636 | 963 | void * |
f427ee49 | 964 | kalloc_external(vm_size_t size); |
91447636 | 965 | void * |
f427ee49 | 966 | kalloc_external(vm_size_t size) |
1c79356b | 967 | { |
f427ee49 | 968 | return kheap_alloc_tag_bt(KHEAP_KEXT, size, Z_WAITOK, VM_KERN_MEMORY_KALLOC); |
1c79356b A |
969 | } |
970 | ||
f427ee49 A |
971 | |
972 | #pragma mark kfree | |
973 | ||
/*
 * kfree_large:
 * Free an out-of-zone kalloc allocation that was obtained via kmem_alloc
 * on a kalloc map (or the kernel_map fallback).
 *
 * size == 0 means "unknown": the size is recovered from the map entry
 * under the map lock before removal.  Otherwise size is rounded to page
 * granularity and the range is handed back with kmem_free().
 * Panics on addresses outside the kernel range or implausible sizes.
 */
__attribute__((noinline))
static void
kfree_large(vm_offset_t addr, vm_size_t size)
{
	vm_map_t map = kalloc_map_for_addr(addr);
	kern_return_t ret;
	vm_offset_t end;

	/* sanity: [addr, addr+size) must lie inside kernel VA, no overflow */
	if (addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS ||
	    os_add_overflow(addr, size, &end) ||
	    end > VM_MAX_KERNEL_ADDRESS) {
		panic("kfree: address range (%p, %ld) doesn't belong to the kernel",
		    (void *)addr, (uintptr_t)size);
	}

	if (size == 0) {
		/* size unknown: look it up and remove under one lock hold */
		vm_map_lock(map);
		size = vm_map_lookup_kalloc_entry_locked(map, (void *)addr);
		ret = vm_map_remove_locked(map,
		    vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map)),
		    vm_map_round_page(addr + size, VM_MAP_PAGE_MASK(map)),
		    VM_MAP_REMOVE_KUNWIRE);
		if (ret != KERN_SUCCESS) {
			panic("kfree: vm_map_remove_locked() failed for "
			    "addr: %p, map: %p ret: %d", (void *)addr, map, ret);
		}
		vm_map_unlock(map);
	} else {
		size = round_page(size);

		/* a size larger than anything ever allocated is corruption */
		if (size > kalloc_largest_allocated) {
			panic("kfree: size %lu > kalloc_largest_allocated %lu",
			    (uintptr_t)size, (uintptr_t)kalloc_largest_allocated);
		}
		kmem_free(map, addr, size);
	}

	/* update global large-allocation accounting */
	kalloc_spin_lock();

	assert(kalloc_large_total >= size);
	kalloc_large_total -= size;
	kalloc_large_inuse--;

	kalloc_unlock();

#if !KASAN_KALLOC
	DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, size, void*, addr);
#endif

	KALLOC_ZINFO_SFREE(size);
	return;
}
1026 | ||
1027 | __abortlike | |
1028 | static void | |
1029 | kfree_heap_confusion_panic(kalloc_heap_t kheap, void *data, size_t size, zone_t z) | |
1030 | { | |
1031 | if (z->kalloc_heap == KHEAP_ID_NONE) { | |
1032 | panic("kfree: addr %p, size %zd found in regular zone '%s%s'", | |
1033 | data, size, zone_heap_name(z), z->z_name); | |
1034 | } else { | |
1035 | panic("kfree: addr %p, size %zd found in heap %s* instead of %s*", | |
1036 | data, size, zone_heap_name(z), | |
1037 | kalloc_heap_names[kheap->kh_heap_id]); | |
1038 | } | |
1039 | } | |
1040 | ||
1041 | __abortlike | |
1042 | static void | |
1043 | kfree_size_confusion_panic(zone_t z, void *data, size_t size, size_t zsize) | |
1044 | { | |
1045 | if (z) { | |
1046 | panic("kfree: addr %p, size %zd found in zone '%s%s' " | |
1047 | "with elem_size %zd", | |
1048 | data, size, zone_heap_name(z), z->z_name, zsize); | |
1049 | } else { | |
1050 | panic("kfree: addr %p, size %zd not found in any zone", | |
1051 | data, size); | |
1052 | } | |
1053 | } | |
1054 | ||
/*
 * Abort when kfree()/kheap_free() is handed a size beyond
 * KFREE_ABSURD_SIZE — no legitimate allocation can be that large.
 */
__abortlike
static void
kfree_size_invalid_panic(void *data, size_t size)
{
	panic("kfree: addr %p trying to free with nonsensical size %zd",
	    data, size);
}
1062 | ||
/*
 * Abort when krealloc_ext() is handed an old_size beyond
 * KFREE_ABSURD_SIZE — the "previous size" cannot be legitimate.
 */
__abortlike
static void
krealloc_size_invalid_panic(void *data, size_t size)
{
	panic("krealloc: addr %p trying to free with nonsensical size %zd",
	    data, size);
}
1070 | ||
/*
 * Abort when a KHEAP_TEMP free occurs with no matching outstanding
 * allocation on the current thread (t_temp_alloc_count underflow).
 */
__abortlike
static void
kfree_temp_imbalance_panic(void *data, size_t size)
{
	panic("kfree: KHEAP_TEMP allocation imbalance freeing addr %p, size %zd",
	    data, size);
}
1078 | ||
1079 | /* used to implement kheap_free_addr() */ | |
1080 | #define KFREE_UNKNOWN_SIZE ((vm_size_t)~0) | |
1081 | #define KFREE_ABSURD_SIZE \ | |
1082 | ((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_AND_KEXT_ADDRESS) / 2) | |
1083 | ||
/*
 * kfree_ext:
 * Core kfree path.  Frees `data` of `size` bytes back to heap `kheap`.
 *
 * size == KFREE_UNKNOWN_SIZE means the caller does not track the size
 * (kheap_free_addr): it is then recovered from the owning zone, or the
 * VM map for large allocations.  kheap == KHEAP_ANY skips the strict
 * heap-ownership check (accepting DEFAULT or KEXT heap elements).
 * Confusion between heaps or sizes panics rather than corrupting state.
 */
static void
kfree_ext(kalloc_heap_t kheap, void *data, vm_size_t size)
{
	zone_stats_t zs = NULL;
	zone_t z;
	vm_size_t zsize;

	/* free(NULL)-style no-op */
	if (__improbable(data == NULL)) {
		return;
	}

	if (kheap == KHEAP_TEMP) {
		assert(size != KFREE_UNKNOWN_SIZE);
		/* balance check against kalloc_ext's per-thread increment */
		if (current_thread()->t_temp_alloc_count-- == 0) {
			kfree_temp_imbalance_panic(data, size);
		}
#if DEBUG || DEVELOPMENT
		/* undo the debug header prepended at allocation time */
		if (__improbable(kheap_temp_debug)) {
			size += sizeof(struct kheap_temp_header);
			data -= sizeof(struct kheap_temp_header);
			remqueue(&((struct kheap_temp_header *)data)->kht_hdr_link);
		}
#endif /* DEBUG || DEVELOPMENT */
	}

#if KASAN_KALLOC
	/*
	 * Resize back to the real allocation size and hand off to the KASan
	 * quarantine. `data` may then point to a different allocation.
	 */
	vm_size_t user_size = size;
	if (size == KFREE_UNKNOWN_SIZE) {
		user_size = size = kalloc_size(data);
	}
	kasan_check_free((vm_address_t)data, size, KASAN_HEAP_KALLOC);
	data = (void *)kasan_dealloc((vm_address_t)data, &size);
	kasan_free(&data, &size, KASAN_HEAP_KALLOC, NULL, user_size, true);
	if (!data) {
		return;
	}
#endif

	/* known-size large allocations bypass the zone lookup entirely */
	if (size >= kalloc_max_prerounded && size != KFREE_UNKNOWN_SIZE) {
		return kfree_large((vm_offset_t)data, size);
	}

	zsize = zone_element_size(data, &z);
	if (size == KFREE_UNKNOWN_SIZE) {
		/* not in any zone: must be a large allocation of unknown size */
		if (zsize == 0) {
			return kfree_large((vm_offset_t)data, 0);
		}
		size = zsize;
	} else if (size > zsize) {
		kfree_size_confusion_panic(z, data, size, zsize);
	}

	if (kheap != KHEAP_ANY) {
		/* strict mode: the element must belong to exactly this heap */
		if (kheap->kh_heap_id != z->kalloc_heap) {
			kfree_heap_confusion_panic(kheap, data, size, z);
		}
		zs = kheap->kh_stats;
	} else if (z->kalloc_heap != KHEAP_ID_DEFAULT &&
	    z->kalloc_heap != KHEAP_ID_KEXT) {
		/* KHEAP_ANY still rejects non-kalloc zones */
		kfree_heap_confusion_panic(kheap, data, size, z);
	}

#if !KASAN_KALLOC
	DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, zsize, void*, data);
#endif
	zfree_ext(z, zs ?: z->z_stats, data);
}
a39ff7e2 | 1155 | |
f427ee49 A |
1156 | void |
1157 | (kfree)(void *addr, vm_size_t size) | |
1158 | { | |
1159 | if (size > KFREE_ABSURD_SIZE) { | |
1160 | kfree_size_invalid_panic(addr, size); | |
1c79356b | 1161 | } |
f427ee49 A |
1162 | kfree_ext(KHEAP_ANY, addr, size); |
1163 | } | |
1c79356b | 1164 | |
f427ee49 A |
1165 | void |
1166 | (kheap_free)(kalloc_heap_t kheap, void *addr, vm_size_t size) | |
1167 | { | |
1168 | if (size > KFREE_ABSURD_SIZE) { | |
1169 | kfree_size_invalid_panic(addr, size); | |
0a7de745 | 1170 | } |
f427ee49 | 1171 | kfree_ext(kheap, addr, size); |
1c79356b A |
1172 | } |
1173 | ||
f427ee49 A |
/*
 * kheap_free_addr: free an allocation whose size the caller does not
 * track.  KFREE_UNKNOWN_SIZE makes kfree_ext() recover the size from
 * the owning zone (or the VM map for large allocations).
 */
void
(kheap_free_addr)(kalloc_heap_t kheap, void *addr)
{
	kfree_ext(kheap, addr, KFREE_UNKNOWN_SIZE);
}
1178 | } | |
1179 | ||
/*
 * _krealloc_ext:
 * Shared realloc implementation.  Semantics:
 *   - new_size == 0  => free and return an empty result
 *   - addr == NULL   => plain allocation
 *   - same size bucket => return the same address (no copy)
 *   - otherwise allocate, copy the overlapping prefix, free the old block
 * Z_ZERO zeroes only the grown tail beyond the preserved bytes.
 * old_size == KFREE_UNKNOWN_SIZE is resolved via kalloc_size(addr).
 */
static struct kalloc_result
_krealloc_ext(
	kalloc_heap_t kheap,
	void *addr,
	vm_size_t old_size,
	vm_size_t new_size,
	zalloc_flags_t flags,
	vm_allocation_site_t *site)
{
	vm_size_t old_bucket_size, new_bucket_size, min_size;
	struct kalloc_result kr;

	if (new_size == 0) {
		kfree_ext(kheap, addr, old_size);
		return (struct kalloc_result){ };
	}

	if (addr == NULL) {
		return kalloc_ext(kheap, new_size, flags, site);
	}

	/*
	 * Find out the size of the bucket in which the new sized allocation
	 * would land. If it matches the bucket of the original allocation,
	 * simply return the same address.
	 */
	new_bucket_size = kalloc_bucket_size(new_size);
	if (old_size == KFREE_UNKNOWN_SIZE) {
		old_size = old_bucket_size = kalloc_size(addr);
	} else {
		old_bucket_size = kalloc_bucket_size(old_size);
	}
	min_size = MIN(old_size, new_size);

	if (old_bucket_size == new_bucket_size) {
		kr.addr = addr;
#if KASAN_KALLOC
		/* KASan tracks the user-requested size, not the bucket size */
		kr.size = new_size;
#else
		kr.size = new_bucket_size;
#endif
	} else {
		/* defer zeroing (~Z_ZERO): only the tail needs it, below */
		kr = kalloc_ext(kheap, new_size, flags & ~Z_ZERO, site);
		if (kr.addr == NULL) {
			return kr;
		}

		memcpy(kr.addr, addr, min_size);
		kfree_ext(kheap, addr, old_size);
	}
	if ((flags & Z_ZERO) && kr.size > min_size) {
		bzero(kr.addr + min_size, kr.size - min_size);
	}
	return kr;
}
1235 | ||
1236 | struct kalloc_result | |
1237 | krealloc_ext( | |
1238 | kalloc_heap_t kheap, | |
1239 | void *addr, | |
1240 | vm_size_t old_size, | |
1241 | vm_size_t new_size, | |
1242 | zalloc_flags_t flags, | |
1243 | vm_allocation_site_t *site) | |
1244 | { | |
1245 | if (old_size > KFREE_ABSURD_SIZE) { | |
1246 | krealloc_size_invalid_panic(addr, old_size); | |
1247 | } | |
1248 | return _krealloc_ext(kheap, addr, old_size, new_size, flags, site); | |
1249 | } | |
1250 | ||
/*
 * kheap_realloc_addr: realloc for callers that do not track the old
 * size — KFREE_UNKNOWN_SIZE makes _krealloc_ext() look it up.
 */
struct kalloc_result
kheap_realloc_addr(
	kalloc_heap_t kheap,
	void *addr,
	vm_size_t size,
	zalloc_flags_t flags,
	vm_allocation_site_t *site)
{
	return _krealloc_ext(kheap, addr, KFREE_UNKNOWN_SIZE, size, flags, site);
}
1261 | ||
1262 | __startup_func | |
91447636 | 1263 | void |
f427ee49 | 1264 | kheap_startup_init(kalloc_heap_t kheap) |
91447636 | 1265 | { |
f427ee49 A |
1266 | struct kheap_zones *zones; |
1267 | ||
1268 | switch (kheap->kh_heap_id) { | |
1269 | case KHEAP_ID_DEFAULT: | |
1270 | zones = KHEAP_DEFAULT->kh_zones; | |
1271 | break; | |
1272 | case KHEAP_ID_DATA_BUFFERS: | |
1273 | zones = KHEAP_DATA_BUFFERS->kh_zones; | |
1274 | break; | |
1275 | case KHEAP_ID_KEXT: | |
1276 | zones = KHEAP_KEXT->kh_zones; | |
1277 | break; | |
1278 | default: | |
1279 | panic("kalloc_heap_startup_init: invalid KHEAP_ID: %d", | |
1280 | kheap->kh_heap_id); | |
1281 | } | |
1282 | ||
1283 | kheap->kh_heap_id = zones->heap_id; | |
1284 | kheap->kh_zones = zones; | |
1285 | kheap->kh_stats = zalloc_percpu_permanent_type(struct zone_stats); | |
1286 | kheap->kh_next = zones->views; | |
1287 | zones->views = kheap; | |
6d2010ae | 1288 | |
f427ee49 | 1289 | zone_view_count += 1; |
91447636 A |
1290 | } |
1291 | ||
f427ee49 A |
1292 | #pragma mark OSMalloc |
1293 | /* | |
1294 | * This is a deprecated interface, here only for legacy reasons. | |
1295 | * There is no internal variant of any of these symbols on purpose. | |
1296 | */ | |
1297 | #define OSMallocDeprecated | |
1298 | #include <libkern/OSMalloc.h> | |
1299 | ||
1300 | static KALLOC_HEAP_DEFINE(OSMALLOC, "osmalloc", KHEAP_ID_KEXT); | |
1301 | static queue_head_t OSMalloc_tag_list = QUEUE_HEAD_INITIALIZER(OSMalloc_tag_list); | |
1302 | static LCK_GRP_DECLARE(OSMalloc_tag_lck_grp, "OSMalloc_tag"); | |
1303 | static LCK_SPIN_DECLARE(OSMalloc_tag_lock, &OSMalloc_tag_lck_grp); | |
1304 | ||
1305 | #define OSMalloc_tag_spin_lock() lck_spin_lock(&OSMalloc_tag_lock) | |
1306 | #define OSMalloc_tag_unlock() lck_spin_unlock(&OSMalloc_tag_lock) | |
1307 | ||
extern typeof(OSMalloc_Tagalloc) OSMalloc_Tagalloc_external;
/*
 * OSMalloc_Tagalloc (kext-facing): allocate a new OSMalloc tag with
 * refcount 1, record its name, and register it on the global tag list.
 * OSMT_PAGEABLE in flags marks allocations under this tag as pageable.
 *
 * NOTE(review): the kheap_alloc() result is dereferenced without a NULL
 * check; this relies on Z_WAITOK small allocations not failing — confirm.
 */
OSMallocTag
OSMalloc_Tagalloc_external(const char *str, uint32_t flags)
{
	OSMallocTag OSMTag;

	OSMTag = kheap_alloc(OSMALLOC, sizeof(*OSMTag), Z_WAITOK | Z_ZERO);

	if (flags & OSMT_PAGEABLE) {
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;
	}

	OSMTag->OSMT_refcnt = 1;

	strlcpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	OSMalloc_tag_spin_lock();
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	OSMalloc_tag_unlock();
	/* published only after the tag is fully initialized and enqueued */
	OSMTag->OSMT_state = OSMT_VALID;
	return OSMTag;
}
1330 | ||
f427ee49 A |
/*
 * OSMalloc_Tagref: take a reference on a tag for each live allocation
 * made under it.  Panics if the tag is not in the OSMT_VALID state.
 */
static void
OSMalloc_Tagref(OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) {
		panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n",
		    tag->OSMT_name, tag->OSMT_state);
	}

	os_atomic_inc(&tag->OSMT_refcnt, relaxed);
}
1341 | ||
f427ee49 A |
1342 | static void |
1343 | OSMalloc_Tagrele(OSMallocTag tag) | |
91447636 | 1344 | { |
0a7de745 | 1345 | if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID)) { |
f427ee49 A |
1346 | panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", |
1347 | tag->OSMT_name, tag->OSMT_state); | |
0a7de745 | 1348 | } |
91447636 | 1349 | |
f427ee49 A |
1350 | if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) != 0) { |
1351 | return; | |
1352 | } | |
1353 | ||
1354 | if (os_atomic_cmpxchg(&tag->OSMT_state, | |
1355 | OSMT_VALID | OSMT_RELEASED, OSMT_VALID | OSMT_RELEASED, acq_rel)) { | |
1356 | OSMalloc_tag_spin_lock(); | |
1357 | (void)remque((queue_entry_t)tag); | |
1358 | OSMalloc_tag_unlock(); | |
1359 | kheap_free(OSMALLOC, tag, sizeof(*tag)); | |
1360 | } else { | |
1361 | panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name); | |
91447636 A |
1362 | } |
1363 | } | |
1364 | ||
f427ee49 | 1365 | extern typeof(OSMalloc_Tagfree) OSMalloc_Tagfree_external; |
91447636 | 1366 | void |
f427ee49 | 1367 | OSMalloc_Tagfree_external(OSMallocTag tag) |
91447636 | 1368 | { |
f427ee49 A |
1369 | if (!os_atomic_cmpxchg(&tag->OSMT_state, |
1370 | OSMT_VALID, OSMT_VALID | OSMT_RELEASED, acq_rel)) { | |
1371 | panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X \n", | |
1372 | tag->OSMT_name, tag->OSMT_state); | |
0a7de745 | 1373 | } |
91447636 | 1374 | |
cb323159 | 1375 | if (os_atomic_dec(&tag->OSMT_refcnt, relaxed) == 0) { |
6d2010ae | 1376 | OSMalloc_tag_spin_lock(); |
91447636 | 1377 | (void)remque((queue_entry_t)tag); |
6d2010ae | 1378 | OSMalloc_tag_unlock(); |
f427ee49 | 1379 | kheap_free(OSMALLOC, tag, sizeof(*tag)); |
91447636 A |
1380 | } |
1381 | } | |
1382 | ||
f427ee49 | 1383 | extern typeof(OSMalloc) OSMalloc_external; |
91447636 | 1384 | void * |
f427ee49 A |
1385 | OSMalloc_external( |
1386 | uint32_t size, OSMallocTag tag) | |
91447636 | 1387 | { |
f427ee49 | 1388 | void *addr = NULL; |
0a7de745 | 1389 | kern_return_t kr; |
91447636 A |
1390 | |
1391 | OSMalloc_Tagref(tag); | |
f427ee49 A |
1392 | if ((tag->OSMT_attr & OSMT_PAGEABLE) && (size & ~PAGE_MASK)) { |
1393 | if ((kr = kmem_alloc_pageable_external(kernel_map, | |
1394 | (vm_offset_t *)&addr, size)) != KERN_SUCCESS) { | |
2d21ac55 | 1395 | addr = NULL; |
0a7de745 A |
1396 | } |
1397 | } else { | |
f427ee49 A |
1398 | addr = kheap_alloc_tag_bt(OSMALLOC, size, |
1399 | Z_WAITOK, VM_KERN_MEMORY_KALLOC); | |
0a7de745 | 1400 | } |
91447636 | 1401 | |
0a7de745 | 1402 | if (!addr) { |
2d21ac55 | 1403 | OSMalloc_Tagrele(tag); |
0a7de745 | 1404 | } |
2d21ac55 | 1405 | |
0a7de745 | 1406 | return addr; |
91447636 A |
1407 | } |
1408 | ||
f427ee49 | 1409 | extern typeof(OSMalloc_nowait) OSMalloc_nowait_external; |
91447636 | 1410 | void * |
f427ee49 | 1411 | OSMalloc_nowait_external(uint32_t size, OSMallocTag tag) |
91447636 | 1412 | { |
0a7de745 | 1413 | void *addr = NULL; |
91447636 | 1414 | |
0a7de745 A |
1415 | if (tag->OSMT_attr & OSMT_PAGEABLE) { |
1416 | return NULL; | |
1417 | } | |
91447636 A |
1418 | |
1419 | OSMalloc_Tagref(tag); | |
1420 | /* XXX: use non-blocking kalloc for now */ | |
f427ee49 A |
1421 | addr = kheap_alloc_tag_bt(OSMALLOC, (vm_size_t)size, |
1422 | Z_NOWAIT, VM_KERN_MEMORY_KALLOC); | |
0a7de745 | 1423 | if (addr == NULL) { |
91447636 | 1424 | OSMalloc_Tagrele(tag); |
0a7de745 | 1425 | } |
91447636 | 1426 | |
0a7de745 | 1427 | return addr; |
91447636 A |
1428 | } |
1429 | ||
f427ee49 | 1430 | extern typeof(OSMalloc_noblock) OSMalloc_noblock_external; |
91447636 | 1431 | void * |
f427ee49 | 1432 | OSMalloc_noblock_external(uint32_t size, OSMallocTag tag) |
91447636 | 1433 | { |
0a7de745 | 1434 | void *addr = NULL; |
91447636 | 1435 | |
0a7de745 A |
1436 | if (tag->OSMT_attr & OSMT_PAGEABLE) { |
1437 | return NULL; | |
1438 | } | |
91447636 A |
1439 | |
1440 | OSMalloc_Tagref(tag); | |
f427ee49 A |
1441 | addr = kheap_alloc_tag_bt(OSMALLOC, (vm_size_t)size, |
1442 | Z_NOWAIT, VM_KERN_MEMORY_KALLOC); | |
0a7de745 | 1443 | if (addr == NULL) { |
91447636 | 1444 | OSMalloc_Tagrele(tag); |
0a7de745 | 1445 | } |
91447636 | 1446 | |
0a7de745 | 1447 | return addr; |
91447636 A |
1448 | } |
1449 | ||
f427ee49 | 1450 | extern typeof(OSFree) OSFree_external; |
91447636 | 1451 | void |
f427ee49 | 1452 | OSFree_external(void *addr, uint32_t size, OSMallocTag tag) |
91447636 A |
1453 | { |
1454 | if ((tag->OSMT_attr & OSMT_PAGEABLE) | |
1455 | && (size & ~PAGE_MASK)) { | |
1456 | kmem_free(kernel_map, (vm_offset_t)addr, size); | |
0a7de745 | 1457 | } else { |
f427ee49 | 1458 | kheap_free(OSMALLOC, addr, size); |
0a7de745 | 1459 | } |
91447636 A |
1460 | |
1461 | OSMalloc_Tagrele(tag); | |
1462 | } | |
39037602 | 1463 | |
f427ee49 A |
1464 | #pragma mark kern_os_malloc |
1465 | ||
1466 | void * | |
1467 | kern_os_malloc_external(size_t size); | |
1468 | void * | |
1469 | kern_os_malloc_external(size_t size) | |
39037602 | 1470 | { |
f427ee49 A |
1471 | if (size == 0) { |
1472 | return NULL; | |
1473 | } | |
1474 | ||
1475 | return kheap_alloc_tag_bt(KERN_OS_MALLOC, size, Z_WAITOK | Z_ZERO, | |
1476 | VM_KERN_MEMORY_LIBKERN); | |
1477 | } | |
1478 | ||
/*
 * kern_os_free (libkern support): free a kern_os_malloc() allocation.
 * The size is not tracked by callers, so the size-lookup free path
 * (kheap_free_addr) is used.
 */
void
kern_os_free_external(void *addr);
void
kern_os_free_external(void *addr)
{
	kheap_free_addr(KERN_OS_MALLOC, addr);
}
1486 | ||
/*
 * kern_os_realloc (libkern support): realloc with untracked old size
 * (resolved internally), zero-filling any grown tail (Z_ZERO), tagged
 * VM_KERN_MEMORY_LIBKERN with backtrace capture (VM_TAG_BT).
 */
void *
kern_os_realloc_external(void *addr, size_t nsize);
void *
kern_os_realloc_external(void *addr, size_t nsize)
{
	/* static allocation site shared by all calls from this function */
	VM_ALLOC_SITE_STATIC(VM_TAG_BT, VM_KERN_MEMORY_LIBKERN);

	return kheap_realloc_addr(KERN_OS_MALLOC, addr, nsize,
	    Z_WAITOK | Z_ZERO, &site).addr;
}
1497 | ||
/*
 * kern_os_zfree: free `addr` back to `zone`, unless strict IOKit free
 * checking is off and the zone does not actually own the element — in
 * which case it is assumed to have come from the KEXT heap instead.
 */
void
kern_os_zfree(zone_t zone, void *addr, vm_size_t size)
{
	if (zsecurity_options & ZSECURITY_OPTIONS_STRICT_IOKIT_FREE
	    || zone_owns(zone, addr)) {
		zfree(zone, addr);
	} else {
		/*
		 * Third party kexts might not know about the operator new
		 * and be allocated from the KEXT heap
		 */
		printf("kern_os_zfree: kheap_free called for object from zone %s\n",
		    zone->z_name);
		kheap_free(KHEAP_KEXT, addr, size);
	}
}
1514 | ||
/*
 * kern_os_kfree: free from the DEFAULT heap under strict IOKit free
 * checking; otherwise accept any kalloc heap (KHEAP_ANY) to tolerate
 * third-party kext allocations that crossed heaps via operator new.
 */
void
kern_os_kfree(void *addr, vm_size_t size)
{
	if (zsecurity_options & ZSECURITY_OPTIONS_STRICT_IOKIT_FREE) {
		kheap_free(KHEAP_DEFAULT, addr, size);
	} else {
		/*
		 * Third party kexts may not know about newly added operator
		 * default new/delete. If they call new for any iokit object
		 * it will end up coming from the KEXT heap. If these objects
		 * are freed by calling release() or free(), the internal
		 * version of operator delete is called and the kernel ends
		 * up freeing the object to the DEFAULT heap.
		 */
		kheap_free(KHEAP_ANY, addr, size);
	}
}