/*
 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory quickly.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/sdt.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>
#include <sys/kdebug.h>

#include <san/kasan.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

#define KALLOC_MAP_SIZE_MIN	(16 * 1024 * 1024)
#define KALLOC_MAP_SIZE_MAX	(128 * 1024 * 1024)

vm_map_t	kalloc_map;
vm_size_t	kalloc_max;
vm_size_t	kalloc_max_prerounded;
vm_size_t	kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

/* how many times we couldn't allocate out of kalloc_map and fell back to kernel_map */
unsigned long	kalloc_fallback_count;

unsigned int	kalloc_large_inuse;
vm_size_t	kalloc_large_total;
vm_size_t	kalloc_large_max;
vm_size_t	kalloc_largest_allocated = 0;
uint64_t	kalloc_large_sum;

int		kalloc_fake_zone_index = -1; /* index of our fake zone in statistics arrays */

vm_offset_t	kalloc_map_min;
vm_offset_t	kalloc_map_max;

#ifdef	MUTEX_ZONE
/*
 * Diagnostic code to track mutexes separately rather than via the
 * power-of-two kalloc zones.
 */
zone_t		lck_mtx_zone;
#endif

static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)
{
	thread_t thr = current_thread();
	ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, bytes);
}

static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)
{
	thread_t thr = current_thread();
	ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, bytes);
}

/*
 * All allocations of size less than kalloc_max are rounded to the next nearest
 * sized zone.  This allocator is built on top of the zone allocator.  A zone
 * is created for each potential size that we are willing to get in small
 * blocks.
 *
 * We assume that kalloc_max is not greater than 64K.
 *
 * Note that kalloc_max is somewhat confusingly named.  It represents the first
 * power of two for which no zone exists.  kalloc_max_prerounded is the
 * smallest allocation size, before rounding, for which no zone exists.
 *
 * Also, if the allocation size is more than kalloc_kernmap_size, we allocate
 * from the kernel map rather than from kalloc_map.
 */
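
/*
 * Illustrative routing of a few request sizes (a sketch, assuming the 64-bit
 * zone table below and 4 KB pages, i.e. kalloc_max == 16K):
 *
 *	kalloc(300)  -> zone "kalloc.368"  (rounded up to the next zone size)
 *	kalloc(8192) -> zone "kalloc.8192" (exact fit, the largest zone used)
 *	kalloc(8193) -> kmem_alloc() from kalloc_map  (>= kalloc_max_prerounded)
 *	kalloc(512K) -> kmem_alloc() from kernel_map  (>= kalloc_kernmap_size)
 */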

#define KALLOC_MINALIGN	(1 << KALLOC_LOG2_MINALIGN)
#define KiB(x)	(1024 * (x))

static const struct kalloc_zone_config {
	int kzc_size;
	const char *kzc_name;
} k_zone_config[] = {
#define KZC_ENTRY(SIZE) { .kzc_size = (SIZE), .kzc_name = "kalloc." #SIZE }
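/*
 * For reference, KZC_ENTRY(16) expands to
 *	{ .kzc_size = (16), .kzc_name = "kalloc.16" }
 * so each entry names its backing zone after its element size.
 */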

#if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4
	/* 64-bit targets, generally */
	KZC_ENTRY(16),
	KZC_ENTRY(32),
	KZC_ENTRY(48),
	KZC_ENTRY(64),
	KZC_ENTRY(80),
	KZC_ENTRY(96),
	KZC_ENTRY(128),
	KZC_ENTRY(160),
	KZC_ENTRY(192),
	KZC_ENTRY(224),
	KZC_ENTRY(256),
	KZC_ENTRY(288),
	KZC_ENTRY(368),
	KZC_ENTRY(400),
	KZC_ENTRY(512),
	KZC_ENTRY(576),
	KZC_ENTRY(768),
	KZC_ENTRY(1024),
	KZC_ENTRY(1152),
	KZC_ENTRY(1280),
	KZC_ENTRY(1664),
	KZC_ENTRY(2048),
#elif KALLOC_MINSIZE == 8 && KALLOC_LOG2_MINALIGN == 3
	/* 32-bit targets, generally */
	KZC_ENTRY(8),
	KZC_ENTRY(16),
	KZC_ENTRY(24),
	KZC_ENTRY(32),
	KZC_ENTRY(40),
	KZC_ENTRY(48),
	KZC_ENTRY(64),
	KZC_ENTRY(72),
	KZC_ENTRY(88),
	KZC_ENTRY(112),
	KZC_ENTRY(128),
	KZC_ENTRY(192),
	KZC_ENTRY(256),
	KZC_ENTRY(288),
	KZC_ENTRY(384),
	KZC_ENTRY(440),
	KZC_ENTRY(512),
	KZC_ENTRY(576),
	KZC_ENTRY(768),
	KZC_ENTRY(1024),
	KZC_ENTRY(1152),
	KZC_ENTRY(1536),
	KZC_ENTRY(2048),
	KZC_ENTRY(2128),
	KZC_ENTRY(3072),
#else
#error missing or invalid zone size parameters for kalloc
#endif

	/* all configurations get these zones */
	KZC_ENTRY(4096),
	KZC_ENTRY(6144),
	KZC_ENTRY(8192),
	KZC_ENTRY(16384),
	KZC_ENTRY(32768),
#undef KZC_ENTRY
};

#define MAX_K_ZONE	(int)(sizeof(k_zone_config) / sizeof(k_zone_config[0]))

/*
 * Many kalloc() allocations are for small structures containing a few
 * pointers and longs - the k_zone_dlut[] direct lookup table, indexed by
 * size normalized to the minimum alignment, finds the right zone index
 * for them in one dereference.
 */

#define INDEX_ZDLUT(size)	\
	(((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN)
#define N_K_ZDLUT	(2048 / KALLOC_MINALIGN)
		/* covers sizes [0 .. 2048 - KALLOC_MINALIGN] */
#define MAX_SIZE_ZDLUT	((N_K_ZDLUT - 1) * KALLOC_MINALIGN)
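
/*
 * Worked example (64-bit config, KALLOC_MINALIGN == 16): N_K_ZDLUT == 128
 * and MAX_SIZE_ZDLUT == 127 * 16 == 2032.  For a 40-byte request,
 * INDEX_ZDLUT(40) == (40 + 15) / 16 == 3, and k_zone_dlut[3] holds the index
 * of the first zone whose elements cover 48 bytes, i.e. "kalloc.48" - a
 * single dereference instead of a linear search through k_zone_config[].
 */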

static int8_t k_zone_dlut[N_K_ZDLUT];	/* table of indices into k_zone[] */

/*
 * If there's no hit in the DLUT, then start searching from k_zindex_start.
 */
static int k_zindex_start;

static zone_t k_zone[MAX_K_ZONE];

/* #define KALLOC_DEBUG 1 */

/* forward declarations */

lck_grp_t kalloc_lck_grp;
lck_mtx_t kalloc_lock;

#define kalloc_spin_lock()	lck_mtx_lock_spin(&kalloc_lock)
#define kalloc_unlock()		lck_mtx_unlock(&kalloc_lock)


/* OSMalloc local data declarations */
static
queue_head_t OSMalloc_tag_list;

lck_grp_t *OSMalloc_tag_lck_grp;
lck_mtx_t OSMalloc_tag_lock;

#define OSMalloc_tag_spin_lock()	lck_mtx_lock_spin(&OSMalloc_tag_lock)
#define OSMalloc_tag_unlock()		lck_mtx_unlock(&OSMalloc_tag_lock)


/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 * Initialize the memory allocator.  This should be called only
 * once on a system-wide basis (i.e. the first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size, kalloc_map_size;
	vm_map_kernel_flags_t vmk_flags;

	/*
	 * Scale the kalloc_map_size to physical memory size: stay below
	 * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel).
	 */
	kalloc_map_size = (vm_size_t)(sane_size >> 5);
#if !__LP64__
	if (kalloc_map_size > KALLOC_MAP_SIZE_MAX)
		kalloc_map_size = KALLOC_MAP_SIZE_MAX;
#endif /* !__LP64__ */
	if (kalloc_map_size < KALLOC_MAP_SIZE_MIN)
		kalloc_map_size = KALLOC_MAP_SIZE_MIN;

	vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
	vmk_flags.vmkf_permanent = TRUE;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE,
			       (VM_FLAGS_ANYWHERE),
			       vmk_flags,
			       VM_KERN_MEMORY_KALLOC,
			       &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	kalloc_map_min = min;
	kalloc_map_max = min + kalloc_map_size - 1;

	/*
	 * Create zones up to at least 4 pages, because small page-multiples are
	 * common allocations.  Also ensure that zones up to size 16 KB exist.
	 * This is desirable because messages are allocated with kalloc(), and
	 * messages up through size 8192 are common.
	 */
	kalloc_max = PAGE_SIZE << 2;
	if (kalloc_max < KiB(16)) {
		kalloc_max = KiB(16);
	}
	assert(kalloc_max <= KiB(64)); /* assumption made in size arrays */

	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* allocations larger than 16 times kalloc_max go directly to kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;
	kalloc_largest_allocated = kalloc_kernmap_size;
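
	/*
	 * Worked example, assuming 4 KB pages: kalloc_max becomes 16 KB
	 * (PAGE_SIZE << 2 == 16384, already >= KiB(16)), kalloc_max_prerounded
	 * becomes 8193, and kalloc_kernmap_size becomes 256 KB + 1, so only
	 * requests of 256 KB + 1 bytes or more skip kalloc_map for kernel_map.
	 */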

	/*
	 * Allocate a zone for each size we are going to handle.
	 */
	for (int i = 0; i < MAX_K_ZONE && (size = k_zone_config[i].kzc_size) < kalloc_max; i++) {
		k_zone[i] = zinit(size, size, size, k_zone_config[i].kzc_name);

		/*
		 * Don't charge the caller for the allocation, as we aren't sure how
		 * the memory will be handled.
		 */
		zone_change(k_zone[i], Z_CALLERACCT, FALSE);
#if VM_MAX_TAG_ZONES
		if (zone_tagging_on) zone_change(k_zone[i], Z_TAGS_ENABLED, TRUE);
#endif
		zone_change(k_zone[i], Z_KASAN_QUARANTINE, FALSE);
	}

	/*
	 * Build the Direct LookUp Table for small allocations
	 */
	size = 0;
	for (int i = 0; i <= N_K_ZDLUT; i++, size += KALLOC_MINALIGN) {
		int zindex = 0;

		while ((vm_size_t)k_zone_config[zindex].kzc_size < size)
			zindex++;

		if (i == N_K_ZDLUT) {
			k_zindex_start = zindex;
			break;
		}
		k_zone_dlut[i] = (int8_t)zindex;
	}

#ifdef KALLOC_DEBUG
	printf("kalloc_init: k_zindex_start %d\n", k_zindex_start);

	/*
	 * Do a quick synthesis to see how well/badly we can
	 * find-a-zone for a given size.
	 * Useful when debugging/tweaking the array of zone sizes.
	 * Cache misses probably more critical than compare-branches!
	 */
	for (int i = 0; i < MAX_K_ZONE; i++) {
		vm_size_t testsize = (vm_size_t)k_zone_config[i].kzc_size - 1;
		int compare = 0;
		int zindex;

		if (testsize < MAX_SIZE_ZDLUT) {
			compare += 1;	/* 'if' (T) */

			long dindex = INDEX_ZDLUT(testsize);
			zindex = (int)k_zone_dlut[dindex];

		} else if (testsize < kalloc_max_prerounded) {

			compare += 2;	/* 'if' (F), 'if' (T) */

			zindex = k_zindex_start;
			while ((vm_size_t)k_zone_config[zindex].kzc_size < testsize) {
				zindex++;
				compare++;	/* 'while' (T) */
			}
			compare++;	/* 'while' (F) */
		} else
			break;	/* not zone-backed */

		zone_t z = k_zone[zindex];
		printf("kalloc_init: req size %4lu: %11s took %d compare%s\n",
		    (unsigned long)testsize, z->zone_name, compare,
		    compare == 1 ? "" : "s");
	}
#endif

	lck_grp_init(&kalloc_lck_grp, "kalloc.large", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&kalloc_lock, &kalloc_lck_grp, LCK_ATTR_NULL);
	OSMalloc_init();
#ifdef	MUTEX_ZONE
	lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024*256, 4096, "lck_mtx");
#endif
}

/*
 * Given an allocation size, return the kalloc zone it belongs to.
 * Direct LookUp Table variant.
 */
static __inline zone_t
get_zone_dlut(vm_size_t size)
{
	long dindex = INDEX_ZDLUT(size);
	int zindex = (int)k_zone_dlut[dindex];
	return (k_zone[zindex]);
}

/* As above, but do a linear search of k_zone_config[] for the first zone that fits. */
static __inline zone_t
get_zone_search(vm_size_t size, int zindex)
{
	assert(size < kalloc_max_prerounded);

	while ((vm_size_t)k_zone_config[zindex].kzc_size < size)
		zindex++;

	assert(zindex < MAX_K_ZONE &&
	    (vm_size_t)k_zone_config[zindex].kzc_size < kalloc_max);

	return (k_zone[zindex]);
}

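/*
 * The two helpers above combine into the size-dispatch pattern that
 * kalloc_canblock(), kalloc_bucket_size() and kfree() all follow:
 *
 *	if (size < MAX_SIZE_ZDLUT)
 *		z = get_zone_dlut(size);                   // one dereference
 *	else if (size < kalloc_max_prerounded)
 *		z = get_zone_search(size, k_zindex_start); // short linear scan
 *	else
 *		// not zone-backed: kmem_alloc()/kmem_free() path
 */
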
static vm_size_t
vm_map_lookup_kalloc_entry_locked(
	vm_map_t	map,
	void		*addr)
{
	boolean_t ret;
	vm_map_entry_t vm_entry = NULL;

	ret = vm_map_lookup_entry(map, (vm_map_offset_t)addr, &vm_entry);
	if (!ret) {
		panic("Attempting to lookup/free an address not allocated via kalloc! (vm_map_lookup_entry() failed map: %p, addr: %p)\n",
		    map, addr);
	}
	if (vm_entry->vme_start != (vm_map_offset_t)addr) {
		panic("Attempting to lookup/free the middle of a kalloc'ed element! (map: %p, addr: %p, entry: %p)\n",
		    map, addr, vm_entry);
	}
	if (!vm_entry->vme_atomic) {
		panic("Attempting to lookup/free an address not managed by kalloc! (map: %p, addr: %p, entry: %p)\n",
		    map, addr, vm_entry);
	}
	return (vm_entry->vme_end - vm_entry->vme_start);
}

#if KASAN_KALLOC
/*
 * KASAN kalloc stashes the original user-requested size away in the poisoned
 * area. Return that directly.
 */
vm_size_t
kalloc_size(void *addr)
{
	(void)vm_map_lookup_kalloc_entry_locked; /* silence warning */
	return kasan_user_size((vm_offset_t)addr);
}
#else
vm_size_t
kalloc_size(
	void *addr)
{
	vm_map_t map;
	vm_size_t size;

	size = zone_element_size(addr, NULL);
	if (size) {
		return size;
	}
	if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) {
		map = kalloc_map;
	} else {
		map = kernel_map;
	}
	vm_map_lock_read(map);
	size = vm_map_lookup_kalloc_entry_locked(map, addr);
	vm_map_unlock_read(map);
	return size;
}
#endif

vm_size_t
kalloc_bucket_size(
	vm_size_t size)
{
	zone_t z;
	vm_map_t map;

	if (size < MAX_SIZE_ZDLUT) {
		z = get_zone_dlut(size);
		return z->elem_size;
	}

	if (size < kalloc_max_prerounded) {
		z = get_zone_search(size, k_zindex_start);
		return z->elem_size;
	}

	if (size >= kalloc_kernmap_size)
		map = kernel_map;
	else
		map = kalloc_map;

	return vm_map_round_page(size, VM_MAP_PAGE_MASK(map));
}

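/*
 * Example kalloc_bucket_size() results, assuming the 64-bit zone table and
 * 4 KB pages: a 300-byte request reports 368 (the element size of
 * "kalloc.368"), while an 8193-byte request is past kalloc_max_prerounded
 * and reports the page-rounded 12288.
 */
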
#if KASAN_KALLOC
vm_size_t
kfree_addr(void *addr)
{
	vm_size_t origsz = kalloc_size(addr);
	kfree(addr, origsz);
	return origsz;
}
#else
vm_size_t
kfree_addr(
	void *addr)
{
	vm_map_t map;
	vm_size_t size = 0;
	kern_return_t ret;
	zone_t z;

	size = zone_element_size(addr, &z);
	if (size) {
		DTRACE_VM3(kfree, vm_size_t, -1, vm_size_t, z->elem_size, void*, addr);
		zfree(z, addr);
		return size;
	}

	if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) {
		map = kalloc_map;
	} else {
		map = kernel_map;
	}
	if ((vm_offset_t)addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		panic("kfree on an address not in the kernel & kext address range! addr: %p\n", addr);
	}

	vm_map_lock(map);
	size = vm_map_lookup_kalloc_entry_locked(map, addr);
	ret = vm_map_remove_locked(map,
	    vm_map_trunc_page((vm_map_offset_t)addr,
	        VM_MAP_PAGE_MASK(map)),
	    vm_map_round_page((vm_map_offset_t)addr + size,
	        VM_MAP_PAGE_MASK(map)),
	    VM_MAP_REMOVE_KUNWIRE);
	if (ret != KERN_SUCCESS) {
		panic("vm_map_remove_locked() failed for kalloc vm_entry! addr: %p, map: %p ret: %d\n",
		    addr, map, ret);
	}
	vm_map_unlock(map);
	DTRACE_VM3(kfree, vm_size_t, -1, vm_size_t, size, void*, addr);

	kalloc_spin_lock();
	kalloc_large_total -= size;
	kalloc_large_inuse--;
	kalloc_unlock();

	KALLOC_ZINFO_SFREE(size);
	return size;
}
#endif

void *
kalloc_canblock(
	vm_size_t		*psize,
	boolean_t		canblock,
	vm_allocation_site_t	*site)
{
	zone_t z;
	vm_size_t size;
	void *addr;
	vm_tag_t tag;

	tag = VM_KERN_MEMORY_KALLOC;
	size = *psize;

#if KASAN_KALLOC
	/* expand the allocation to accommodate redzones */
	vm_size_t req_size = size;
	size = kasan_alloc_resize(req_size);
#endif

	if (size < MAX_SIZE_ZDLUT)
		z = get_zone_dlut(size);
	else if (size < kalloc_max_prerounded)
		z = get_zone_search(size, k_zindex_start);
	else {
		/*
		 * If size is too large for a zone, then use kmem_alloc.
		 * (We use kmem_alloc instead of kmem_alloc_kobject so that
		 * krealloc can use kmem_realloc.)
		 */
		vm_map_t alloc_map;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return (NULL);
		}

#if KASAN_KALLOC
		/* large allocation - use guard pages instead of small redzones */
		size = round_page(req_size + 2 * PAGE_SIZE);
		assert(size >= MAX_SIZE_ZDLUT && size >= kalloc_max_prerounded);
#endif

		if (size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		if (site) tag = vm_tag_alloc(site);

		if (kmem_alloc_flags(alloc_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS) {
			if (alloc_map != kernel_map) {
				if (kalloc_fallback_count++ == 0) {
					printf("%s: falling back to kernel_map\n", __func__);
				}
				if (kmem_alloc_flags(kernel_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS)
					addr = NULL;
			}
			else
				addr = NULL;
		}

		if (addr != NULL) {
			kalloc_spin_lock();
			/*
			 * Thread-safe version of the workaround for 4740071
			 * (a double FREE())
			 */
			if (size > kalloc_largest_allocated)
				kalloc_largest_allocated = size;

			kalloc_large_inuse++;
			kalloc_large_total += size;
			kalloc_large_sum += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

			kalloc_unlock();

			KALLOC_ZINFO_SALLOC(size);
		}
#if KASAN_KALLOC
		/* fixup the return address to skip the redzone */
		addr = (void *)kasan_alloc((vm_offset_t)addr, size, req_size, PAGE_SIZE);
#else
		*psize = round_page(size);
#endif
		DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, *psize, void*, addr);
		return (addr);
	}
#ifdef KALLOC_DEBUG
	if (size > z->elem_size)
		panic("%s: z %p (%s) but requested size %lu", __func__,
		    z, z->zone_name, (unsigned long)size);
#endif

	assert(size <= z->elem_size);

#if VM_MAX_TAG_ZONES
	if (z->tags && site)
	{
		tag = vm_tag_alloc(site);
		if (!canblock && !vm_allocation_zone_totals[tag]) tag = VM_KERN_MEMORY_KALLOC;
	}
#endif

	addr = zalloc_canblock_tag(z, canblock, size, tag);

#if KASAN_KALLOC
	/* fixup the return address to skip the redzone */
	addr = (void *)kasan_alloc((vm_offset_t)addr, z->elem_size, req_size, KASAN_GUARD_SIZE);

	/* For KASan, the redzone lives in any additional space, so don't
	 * expand the allocation. */
#else
	*psize = z->elem_size;
#endif

	DTRACE_VM3(kalloc, vm_size_t, size, vm_size_t, *psize, void*, addr);
	return addr;
}

void *
kalloc_external(
	vm_size_t size);
void *
kalloc_external(
	vm_size_t size)
{
	return ( kalloc_tag_bt(size, VM_KERN_MEMORY_KALLOC) );
}
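
/*
 * kalloc_external() backs the kalloc() calls made from outside the core
 * kernel (where no vm_allocation_site_t is available); it charges such
 * allocations to the generic VM_KERN_MEMORY_KALLOC tag, with kalloc_tag_bt()
 * attributing them by backtrace when VM tagging is enabled.
 */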
718 | ||
b0d623f7 A |
719 | volatile SInt32 kfree_nop_count = 0; |
720 | ||
1c79356b A |
721 | void |
722 | kfree( | |
91447636 | 723 | void *data, |
1c79356b A |
724 | vm_size_t size) |
725 | { | |
316670eb A |
726 | zone_t z; |
727 | ||
5ba3f43e A |
728 | #if KASAN_KALLOC |
729 | /* | |
730 | * Resize back to the real allocation size and hand off to the KASan | |
731 | * quarantine. `data` may then point to a different allocation. | |
732 | */ | |
733 | vm_size_t user_size = size; | |
734 | kasan_check_free((vm_address_t)data, size, KASAN_HEAP_KALLOC); | |
735 | data = (void *)kasan_dealloc((vm_address_t)data, &size); | |
736 | kasan_free(&data, &size, KASAN_HEAP_KALLOC, NULL, user_size, true); | |
737 | if (!data) { | |
738 | return; | |
739 | } | |
740 | #endif | |
741 | ||
316670eb A |
742 | if (size < MAX_SIZE_ZDLUT) |
743 | z = get_zone_dlut(size); | |
744 | else if (size < kalloc_max_prerounded) | |
745 | z = get_zone_search(size, k_zindex_start); | |
746 | else { | |
747 | /* if size was too large for a zone, then use kmem_free */ | |
1c79356b | 748 | |
316670eb | 749 | vm_map_t alloc_map = kernel_map; |
1c79356b | 750 | |
b0d623f7 A |
751 | if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max)) |
752 | alloc_map = kalloc_map; | |
753 | if (size > kalloc_largest_allocated) { | |
0c530ab8 A |
754 | /* |
755 | * work around double FREEs of small MALLOCs | |
316670eb | 756 | * this used to end up being a nop |
0c530ab8 A |
757 | * since the pointer being freed from an |
758 | * alloc backed by the zalloc world could | |
759 | * never show up in the kalloc_map... however, | |
760 | * the kernel_map is a different issue... since it | |
761 | * was released back into the zalloc pool, a pointer | |
762 | * would have gotten written over the 'size' that | |
763 | * the MALLOC was retaining in the first 4 bytes of | |
764 | * the underlying allocation... that pointer ends up | |
765 | * looking like a really big size on the 2nd FREE and | |
766 | * pushes the kfree into the kernel_map... we | |
316670eb | 767 | * end up removing a ton of virtual space before we panic |
0c530ab8 A |
768 | * this check causes us to ignore the kfree for a size |
769 | * that must be 'bogus'... note that it might not be due | |
770 | * to the above scenario, but it would still be wrong and | |
771 | * cause serious damage. | |
772 | */ | |
b0d623f7 A |
773 | |
774 | OSAddAtomic(1, &kfree_nop_count); | |
0c530ab8 | 775 | return; |
b0d623f7 | 776 | } |
0c530ab8 | 777 | kmem_free(alloc_map, (vm_offset_t)data, size); |
6d2010ae A |
778 | kalloc_spin_lock(); |
779 | ||
1c79356b A |
780 | kalloc_large_total -= size; |
781 | kalloc_large_inuse--; | |
782 | ||
6d2010ae A |
783 | kalloc_unlock(); |
784 | ||
a39ff7e2 A |
785 | #if !KASAN_KALLOC |
786 | DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, size, void*, data); | |
787 | #endif | |
788 | ||
6d2010ae | 789 | KALLOC_ZINFO_SFREE(size); |
1c79356b A |
790 | return; |
791 | } | |
792 | ||
1c79356b | 793 | /* free to the appropriate zone */ |
316670eb A |
794 | #ifdef KALLOC_DEBUG |
795 | if (size > z->elem_size) | |
796 | panic("%s: z %p (%s) but requested size %lu", __func__, | |
797 | z, z->zone_name, (unsigned long)size); | |
798 | #endif | |
799 | assert(size <= z->elem_size); | |
a39ff7e2 | 800 | DTRACE_VM3(kfree, vm_size_t, size, vm_size_t, z->elem_size, void*, data); |
316670eb | 801 | zfree(z, data); |
1c79356b A |
802 | } |
803 | ||
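/*
 * Note the contract relied on above: the size passed to kfree() must map to
 * the same zone the allocation came from - there is no per-allocation header
 * to consult, so a wrong size can free into the wrong zone.  Callers that no
 * longer know the size can use kfree_addr(), which recovers it from the zone
 * or the VM map entry.
 */
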
#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	if (size < MAX_SIZE_ZDLUT)
		return (get_zone_dlut(size));
	if (size <= kalloc_max)
		return (get_zone_search(size, k_zindex_start));
	return (ZONE_NULL);
}
#endif

void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);

	OSMalloc_tag_lck_grp = lck_grp_alloc_init("OSMalloc_tag", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&OSMalloc_tag_lock, OSMalloc_tag_lck_grp, LCK_ATTR_NULL);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char	*str,
	uint32_t	flags)
{
	OSMallocTag OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strlcpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	OSMalloc_tag_spin_lock();
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	OSMalloc_tag_unlock();
	OSMTag->OSMT_state = OSMT_VALID;
	return (OSMTag);
}

void
OSMalloc_Tagref(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	(void)hw_atomic_add(&tag->OSMT_refcnt, 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			OSMalloc_tag_spin_lock();
			(void)remque((queue_entry_t)tag);
			OSMalloc_tag_unlock();
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name);
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		OSMalloc_tag_spin_lock();
		(void)remque((queue_entry_t)tag);
		OSMalloc_tag_unlock();
		kfree((void*)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t	size,
	OSMallocTag	tag)
{
	void *addr = NULL;
	kern_return_t kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		if ((kr = kmem_alloc_pageable_external(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			addr = NULL;
	} else
		addr = kalloc_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);

	if (!addr)
		OSMalloc_Tagrele(tag);

	return (addr);
}

void *
OSMalloc_nowait(
	uint32_t	size,
	OSMallocTag	tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return (NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return (addr);
}

void *
OSMalloc_noblock(
	uint32_t	size,
	OSMallocTag	tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return (NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return (addr);
}

void
OSFree(
	void		*addr,
	uint32_t	size,
	OSMallocTag	tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}
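
/*
 * Typical use of the tag API above from a kext (a sketch; the tag name
 * "com.example.driver" is made up):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
 *	void *buf = OSMalloc(1024, tag);
 *	if (buf != NULL)
 *		OSFree(buf, 1024, tag);
 *	OSMalloc_Tagfree(tag);
 */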

uint32_t
OSMalloc_size(
	void *addr)
{
	return (uint32_t)kalloc_size(addr);
}