/*
 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *  File:   kern/kalloc.c
 *  Author: Avadis Tevanian, Jr.
 *  Date:   1985
 *
 *  General kernel memory allocator.  This allocator is designed
 *  to be used by the kernel to manage dynamic memory quickly.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>
#include <sys/kdebug.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

#define KALLOC_MAP_SIZE_MIN  (16 * 1024 * 1024)
#define KALLOC_MAP_SIZE_MAX  (128 * 1024 * 1024)
vm_map_t kalloc_map;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;  /* size of kallocs that can come from kernel map */

/* how many times we couldn't allocate out of kalloc_map and fell back to kernel_map */
unsigned long kalloc_fallback_count;

unsigned int kalloc_large_inuse;
vm_size_t kalloc_large_total;
vm_size_t kalloc_large_max;
vm_size_t kalloc_largest_allocated = 0;
uint64_t kalloc_large_sum;

int kalloc_fake_zone_index = -1; /* index of our fake zone in statistics arrays */

vm_offset_t kalloc_map_min;
vm_offset_t kalloc_map_max;

#ifdef MUTEX_ZONE
/*
 * Diagnostic code to track mutexes separately, rather than via the
 * power-of-two kalloc zones.
 */
zone_t lck_mtx_zone;
#endif

static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)
{
    thread_t thr = current_thread();
    ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, bytes);
}

static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)
{
    thread_t thr = current_thread();
    ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, bytes);
}

/*
 * All allocations of size less than kalloc_max are rounded to the
 * next nearest sized zone.  This allocator is built on top of
 * the zone allocator.  A zone is created for each potential size
 * that we are willing to get in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K.
 *
 * Note that kalloc_max is somewhat confusingly named.  It represents
 * the first power of two for which no zone exists.  kalloc_max_prerounded
 * is the smallest allocation size, before rounding, for which no zone
 * exists.
 *
 * Allocations of kalloc_kernmap_size bytes or more are made from the
 * kernel map rather than from kalloc_map.
 */
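
/*
 * Illustrative example (assuming the KALLOC_MINSIZE == 16 configuration
 * below and 4 KiB pages, so kalloc_max is 16 KiB): kalloc(300) is below
 * kalloc_max_prerounded, so it is rounded up to the next zone size
 * (kalloc.512, since no zone exists between 288 and 512) and satisfied
 * by zalloc().  A 100 KiB request instead goes to kmem_alloc() in
 * kalloc_map, and a 512 KiB request, being at least kalloc_kernmap_size,
 * goes to the kernel map.
 */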

#if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4

#define K_ZONE_SIZES    \
    16,                 \
    32,                 \
    48,                 \
/* 3 */ 64,             \
    80,                 \
    96,                 \
/* 6 */ 128,            \
    160, 192,           \
    256,                \
/* 9 */ 288,            \
    512, 576,           \
    1024, 1152,         \
/* C */ 1280,           \
    2048,               \
    4096

#define K_ZONE_NAMES    \
    "kalloc.16",        \
    "kalloc.32",        \
    "kalloc.48",        \
/* 3 */ "kalloc.64",    \
    "kalloc.80",        \
    "kalloc.96",        \
/* 6 */ "kalloc.128",   \
    "kalloc.160",       \
    "kalloc.192",       \
    "kalloc.256",       \
/* 9 */ "kalloc.288",   \
    "kalloc.512",       \
    "kalloc.576",       \
    "kalloc.1024",      \
    "kalloc.1152",      \
/* C */ "kalloc.1280",  \
    "kalloc.2048",      \
    "kalloc.4096"

#elif KALLOC_MINSIZE == 8 && KALLOC_LOG2_MINALIGN == 3

/*
 * Tweaked for ARM (and x64) in 04/2011
 */

#define K_ZONE_SIZES                \
/* 3 */ 8,                          \
    16, 24,                         \
    32, 40, 48,                     \
/* 6 */ 64, 72, 88, 112,            \
    128, 192,                       \
    256, 288, 384, 440,             \
/* 9 */ 512, 576, 768,              \
    1024, 1152, 1536,               \
    2048, 2128, 3072,               \
    4096, 6144

#define K_ZONE_NAMES                \
/* 3 */ "kalloc.8",                 \
    "kalloc.16", "kalloc.24",       \
    "kalloc.32", "kalloc.40", "kalloc.48",                  \
/* 6 */ "kalloc.64", "kalloc.72", "kalloc.88", "kalloc.112", \
    "kalloc.128", "kalloc.192",     \
    "kalloc.256", "kalloc.288", "kalloc.384", "kalloc.440", \
/* 9 */ "kalloc.512", "kalloc.576", "kalloc.768",           \
    "kalloc.1024", "kalloc.1152", "kalloc.1536",            \
    "kalloc.2048", "kalloc.2128", "kalloc.3072",            \
    "kalloc.4096", "kalloc.6144"

#else
#error missing zone size parameters for kalloc
#endif

#define KALLOC_MINALIGN (1 << KALLOC_LOG2_MINALIGN)
#define KiB(x) (1024 * (x))

static const int k_zone_size[] = {
    K_ZONE_SIZES,
    KiB(8),
    KiB(16),
    KiB(32)
};

#define MAX_K_ZONE (sizeof (k_zone_size) / sizeof (k_zone_size[0]))

static const char *k_zone_name[MAX_K_ZONE] = {
    K_ZONE_NAMES,
    "kalloc.8192",
    "kalloc.16384",
    "kalloc.32768"
};


/*
 * Many kalloc() allocations are for small structures containing a few
 * pointers and longs - the k_zone_dlut[] direct lookup table, indexed by
 * size normalized to the minimum alignment, finds the right zone index
 * for them in one dereference.
 */

#define INDEX_ZDLUT(size)   \
    (((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN)
#define N_K_ZDLUT   (2048 / KALLOC_MINALIGN)
        /* covers sizes [0 .. 2048 - KALLOC_MINALIGN] */
#define MAX_SIZE_ZDLUT  ((N_K_ZDLUT - 1) * KALLOC_MINALIGN)
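
/*
 * Worked example (assuming KALLOC_MINALIGN == 16): INDEX_ZDLUT(40) is
 * (40 + 15) / 16 == 3, so a 40-byte request reads k_zone_dlut[3], which
 * holds the index of kalloc.48, the smallest zone that fits.  With this
 * alignment N_K_ZDLUT is 128 and MAX_SIZE_ZDLUT is 2032, so any size
 * below MAX_SIZE_ZDLUT is resolved with a single table lookup.
 */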

static int8_t k_zone_dlut[N_K_ZDLUT];   /* table of indices into k_zone[] */

/*
 * If there's no hit in the DLUT, then start searching from k_zindex_start.
 */
static int k_zindex_start;

static zone_t k_zone[MAX_K_ZONE];

/* #define KALLOC_DEBUG 1 */

/* forward declarations */

lck_grp_t kalloc_lck_grp;
lck_mtx_t kalloc_lock;

#define kalloc_spin_lock()  lck_mtx_lock_spin(&kalloc_lock)
#define kalloc_unlock()     lck_mtx_unlock(&kalloc_lock)


/* OSMalloc local data declarations */
static queue_head_t OSMalloc_tag_list;

lck_grp_t *OSMalloc_tag_lck_grp;
lck_mtx_t OSMalloc_tag_lock;

#define OSMalloc_tag_spin_lock()  lck_mtx_lock_spin(&OSMalloc_tag_lock)
#define OSMalloc_tag_unlock()     lck_mtx_unlock(&OSMalloc_tag_lock)


/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 * Initialize the memory allocator.  This should be called only
 * once on a system wide basis (i.e. first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */

void
kalloc_init(
    void)
{
    kern_return_t retval;
    vm_offset_t min;
    vm_size_t size, kalloc_map_size;
    int i;

    /*
     * Scale the kalloc_map_size to physical memory size: stay below
     * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel).
     */
    kalloc_map_size = (vm_size_t)(sane_size >> 5);
#if !__LP64__
    if (kalloc_map_size > KALLOC_MAP_SIZE_MAX)
        kalloc_map_size = KALLOC_MAP_SIZE_MAX;
#endif /* !__LP64__ */
    if (kalloc_map_size < KALLOC_MAP_SIZE_MIN)
        kalloc_map_size = KALLOC_MAP_SIZE_MIN;
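    /*
     * For example (illustrative numbers): with 4 GB of physical memory,
     * sane_size >> 5 yields a 128 MB submap; with 256 MB it yields 8 MB,
     * which the clamp above raises to KALLOC_MAP_SIZE_MIN (16 MB).
     */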

    retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
                           FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT | VM_MAKE_TAG(0),
                           &kalloc_map);

    if (retval != KERN_SUCCESS)
        panic("kalloc_init: kmem_suballoc failed");

    kalloc_map_min = min;
    kalloc_map_max = min + kalloc_map_size - 1;

    /*
     * Create zones up to at least 2 pages, because small page-multiples are
     * common allocations.  Also ensure that zones up to size 8192 bytes exist.
     * This is desirable because messages are allocated with kalloc(), and
     * messages up through size 8192 are common.
     */
    kalloc_max = PAGE_SIZE << 2;
    if (kalloc_max < KiB(16)) {
        kalloc_max = KiB(16);
    }
    assert(kalloc_max <= KiB(64)); /* assumption made in size arrays */

    kalloc_max_prerounded = kalloc_max / 2 + 1;
    /* allocations larger than 16 times kalloc_max go directly to kernel map */
    kalloc_kernmap_size = (kalloc_max * 16) + 1;
    kalloc_largest_allocated = kalloc_kernmap_size;
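    /*
     * With 4 KiB pages, for example, these work out to: kalloc_max = 16 KiB,
     * kalloc_max_prerounded = 8 KiB + 1 (the largest zone actually created
     * below is kalloc.8192), and kalloc_kernmap_size = 256 KiB + 1.
     */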

    /*
     * Allocate a zone for each size we are going to handle.  Don't charge the
     * caller for the allocation, as we aren't sure how the memory will be
     * handled.
     */
    for (i = 0; i < (int)MAX_K_ZONE && (size = k_zone_size[i]) < kalloc_max; i++) {
        k_zone[i] = zinit(size, size, size, k_zone_name[i]);
        zone_change(k_zone[i], Z_CALLERACCT, FALSE);
    }

    /*
     * Build the Direct LookUp Table for small allocations
     */
    for (i = 0, size = 0; i <= N_K_ZDLUT; i++, size += KALLOC_MINALIGN) {
        int zindex = 0;

        while ((vm_size_t)k_zone_size[zindex] < size)
            zindex++;

        if (i == N_K_ZDLUT) {
            k_zindex_start = zindex;
            break;
        }
        k_zone_dlut[i] = (int8_t)zindex;
    }
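    /*
     * Note that the loop above deliberately runs one slot past the end of
     * the table: the final iteration (i == N_K_ZDLUT) fills no DLUT entry,
     * but records in k_zindex_start the zone index where the linear search
     * in get_zone_search() should begin for sizes the DLUT does not cover.
     */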

#ifdef KALLOC_DEBUG
    printf("kalloc_init: k_zindex_start %d\n", k_zindex_start);

    /*
     * Do a quick synthesis to see how well/badly we can
     * find-a-zone for a given size.
     * Useful when debugging/tweaking the array of zone sizes.
     * Cache misses probably more critical than compare-branches!
     */
    for (i = 0; i < (int)MAX_K_ZONE; i++) {
        vm_size_t testsize = (vm_size_t)k_zone_size[i] - 1;
        int compare = 0;
        int zindex;

        if (testsize < MAX_SIZE_ZDLUT) {
            compare += 1;   /* 'if' (T) */

            long dindex = INDEX_ZDLUT(testsize);
            zindex = (int)k_zone_dlut[dindex];

        } else if (testsize < kalloc_max_prerounded) {

            compare += 2;   /* 'if' (F), 'if' (T) */

            zindex = k_zindex_start;
            while ((vm_size_t)k_zone_size[zindex] < testsize) {
                zindex++;
                compare++;  /* 'while' (T) */
            }
            compare++;      /* 'while' (F) */
        } else
            break;          /* not zone-backed */

        zone_t z = k_zone[zindex];
        printf("kalloc_init: req size %4lu: %11s took %d compare%s\n",
               (unsigned long)testsize, z->zone_name, compare,
               compare == 1 ? "" : "s");
    }
#endif

    lck_grp_init(&kalloc_lck_grp, "kalloc.large", LCK_GRP_ATTR_NULL);
    lck_mtx_init(&kalloc_lock, &kalloc_lck_grp, LCK_ATTR_NULL);
    OSMalloc_init();
#ifdef MUTEX_ZONE
    lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024*256, 4096, "lck_mtx");
#endif
}

/*
 * Given an allocation size, return the kalloc zone it belongs to.
 * Direct LookUp Table variant.
 */
static __inline zone_t
get_zone_dlut(vm_size_t size)
{
    long dindex = INDEX_ZDLUT(size);
    int zindex = (int)k_zone_dlut[dindex];
    return (k_zone[zindex]);
}

/* As above, but linearly search k_zone_size[] for the next zone that fits. */

static __inline zone_t
get_zone_search(vm_size_t size, int zindex)
{
    assert(size < kalloc_max_prerounded);

    while ((vm_size_t)k_zone_size[zindex] < size)
        zindex++;

    assert((unsigned)zindex < MAX_K_ZONE &&
           (vm_size_t)k_zone_size[zindex] < kalloc_max);

    return (k_zone[zindex]);
}

static vm_size_t
vm_map_lookup_kalloc_entry_locked(
    vm_map_t map,
    void *addr)
{
    boolean_t ret;
    vm_map_entry_t vm_entry = NULL;

    ret = vm_map_lookup_entry(map, (vm_map_offset_t)addr, &vm_entry);
    if (!ret) {
        panic("Attempting to lookup/free an address not allocated via kalloc! (vm_map_lookup_entry() failed map: %p, addr: %p)\n",
              map, addr);
    }
    if (vm_entry->vme_start != (vm_map_offset_t)addr) {
        panic("Attempting to lookup/free the middle of a kalloc'ed element! (map: %p, addr: %p, entry: %p)\n",
              map, addr, vm_entry);
    }
    if (!vm_entry->vme_atomic) {
        panic("Attempting to lookup/free an address not managed by kalloc! (map: %p, addr: %p, entry: %p)\n",
              map, addr, vm_entry);
    }
    return (vm_entry->vme_end - vm_entry->vme_start);
}
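
/*
 * kalloc_size: return the usable size of a live kalloc allocation.
 * Zone-backed elements are sized via zone_element_size(); larger
 * allocations are looked up as atomic VM map entries in kalloc_map
 * or kernel_map.
 */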
vm_size_t
kalloc_size(
    void *addr)
{
    vm_map_t map;
    vm_size_t size;

    size = zone_element_size(addr, NULL);
    if (size) {
        return size;
    }
    if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) {
        map = kalloc_map;
    } else {
        map = kernel_map;
    }
    vm_map_lock_read(map);
    size = vm_map_lookup_kalloc_entry_locked(map, addr);
    vm_map_unlock_read(map);
    return size;
}
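
/*
 * kalloc_bucket_size: given a requested size, return the size that
 * kalloc() would actually allocate: the element size of the backing
 * zone, or, for large requests, the request rounded up to the page
 * size of the map that would back it.
 */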
vm_size_t
kalloc_bucket_size(
    vm_size_t size)
{
    zone_t z;
    vm_map_t map;

    if (size < MAX_SIZE_ZDLUT) {
        z = get_zone_dlut(size);
        return z->elem_size;
    }

    if (size < kalloc_max_prerounded) {
        z = get_zone_search(size, k_zindex_start);
        return z->elem_size;
    }

    if (size >= kalloc_kernmap_size)
        map = kernel_map;
    else
        map = kalloc_map;

    return vm_map_round_page(size, VM_MAP_PAGE_MASK(map));
}
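
/*
 * kfree_addr: free a kalloc allocation given only its address,
 * recovering the size from the backing zone or VM map entry.
 * Returns the size that was freed.
 */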
vm_size_t
kfree_addr(
    void *addr)
{
    vm_map_t map;
    vm_size_t size = 0;
    kern_return_t ret;
    zone_t z;

    size = zone_element_size(addr, &z);
    if (size) {
        zfree(z, addr);
        return size;
    }

    if (((vm_offset_t)addr >= kalloc_map_min) && ((vm_offset_t)addr < kalloc_map_max)) {
        map = kalloc_map;
    } else {
        map = kernel_map;
    }
    if ((vm_offset_t)addr < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
        panic("kfree on an address not in the kernel & kext address range! addr: %p\n", addr);
    }

    vm_map_lock(map);
    size = vm_map_lookup_kalloc_entry_locked(map, addr);
    ret = vm_map_remove_locked(map,
                               vm_map_trunc_page((vm_map_offset_t)addr,
                                                 VM_MAP_PAGE_MASK(map)),
                               vm_map_round_page((vm_map_offset_t)addr + size,
                                                 VM_MAP_PAGE_MASK(map)),
                               VM_MAP_REMOVE_KUNWIRE);
    if (ret != KERN_SUCCESS) {
        panic("vm_map_remove_locked() failed for kalloc vm_entry! addr: %p, map: %p ret: %d\n",
              addr, map, ret);
    }
    vm_map_unlock(map);

    kalloc_spin_lock();
    kalloc_large_total -= size;
    kalloc_large_inuse--;
    kalloc_unlock();

    KALLOC_ZINFO_SFREE(size);
    return size;
}

void *
kalloc_canblock(
    vm_size_t *psize,
    boolean_t canblock,
    vm_allocation_site_t *site)
{
    zone_t z;
    vm_size_t size;

    size = *psize;

    if (size < MAX_SIZE_ZDLUT)
        z = get_zone_dlut(size);
    else if (size < kalloc_max_prerounded)
        z = get_zone_search(size, k_zindex_start);
    else {
        /*
         * If size is too large for a zone, then use kmem_alloc.
         * (We use kmem_alloc instead of kmem_alloc_kobject so that
         * krealloc can use kmem_realloc.)
         */
        vm_map_t alloc_map;
        void *addr;

        /* kmem_alloc could block so we return if noblock */
        if (!canblock) {
            return(NULL);
        }

        if (size >= kalloc_kernmap_size)
            alloc_map = kernel_map;
        else
            alloc_map = kalloc_map;

        vm_tag_t tag;
        tag = (site ? vm_tag_alloc(site) : VM_KERN_MEMORY_KALLOC);

        if (kmem_alloc_flags(alloc_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS) {
            if (alloc_map != kernel_map) {
                if (kalloc_fallback_count++ == 0) {
                    printf("%s: falling back to kernel_map\n", __func__);
                }
                if (kmem_alloc_flags(kernel_map, (vm_offset_t *)&addr, size, tag, KMA_ATOMIC) != KERN_SUCCESS)
                    addr = NULL;
            }
            else
                addr = NULL;
        }

        if (addr != NULL) {
            kalloc_spin_lock();
            /*
             * Thread-safe version of the workaround for 4740071
             * (a double FREE())
             */
            if (size > kalloc_largest_allocated)
                kalloc_largest_allocated = size;

            kalloc_large_inuse++;
            kalloc_large_total += size;
            kalloc_large_sum += size;

            if (kalloc_large_total > kalloc_large_max)
                kalloc_large_max = kalloc_large_total;

            kalloc_unlock();

            KALLOC_ZINFO_SALLOC(size);
        }
        *psize = round_page(size);
        return(addr);
    }
#ifdef KALLOC_DEBUG
    if (size > z->elem_size)
        panic("%s: z %p (%s) but requested size %lu", __func__,
              z, z->zone_name, (unsigned long)size);
#endif
    assert(size <= z->elem_size);
    *psize = z->elem_size;
    void *addr = zalloc_canblock(z, canblock);
    return addr;
}
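
/*
 * Usage sketch (illustrative; not part of this file): callers normally
 * reach kalloc_canblock() through the kalloc()/kalloc_noblock() macros
 * and pass the same size to kfree() that they requested, e.g.:
 *
 *     struct foo *f = kalloc(sizeof(*f));
 *     if (f != NULL) {
 *         ...
 *         kfree(f, sizeof(*f));
 *     }
 */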

void *
kalloc_external(
    vm_size_t size);
void *
kalloc_external(
    vm_size_t size)
{
    return( kalloc_tag_bt(size, VM_KERN_MEMORY_KALLOC) );
}

volatile SInt32 kfree_nop_count = 0;

void
kfree(
    void *data,
    vm_size_t size)
{
    zone_t z;

    if (size < MAX_SIZE_ZDLUT)
        z = get_zone_dlut(size);
    else if (size < kalloc_max_prerounded)
        z = get_zone_search(size, k_zindex_start);
    else {
        /* if size was too large for a zone, then use kmem_free */

        vm_map_t alloc_map = kernel_map;

        if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max))
            alloc_map = kalloc_map;
        if (size > kalloc_largest_allocated) {
            /*
             * Work around double FREEs of small MALLOCs: a double free
             * used to end up being a nop here, because a pointer freed
             * from a zalloc-backed allocation could never show up in the
             * kalloc_map.  The kernel_map is a different story: once the
             * element was released back into the zalloc pool, a pointer
             * would be written over the 'size' that MALLOC keeps in the
             * first 4 bytes of the underlying allocation.  That pointer
             * looks like a really big size on the second FREE and pushes
             * the kfree into the kernel_map path, where we would remove
             * a ton of virtual space before panicking.  This check makes
             * us ignore a kfree for a size that must be bogus.  Note that
             * the size might be wrong for a reason other than the above
             * scenario, but it would still be wrong and could cause
             * serious damage.
             */

            OSAddAtomic(1, &kfree_nop_count);
            return;
        }
        kmem_free(alloc_map, (vm_offset_t)data, size);
        kalloc_spin_lock();

        kalloc_large_total -= size;
        kalloc_large_inuse--;

        kalloc_unlock();

        KALLOC_ZINFO_SFREE(size);
        return;
    }

    /* free to the appropriate zone */
#ifdef KALLOC_DEBUG
    if (size > z->elem_size)
        panic("%s: z %p (%s) but requested size %lu", __func__,
              z, z->zone_name, (unsigned long)size);
#endif
    assert(size <= z->elem_size);
    zfree(z, data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
    vm_size_t size)
{
    if (size < MAX_SIZE_ZDLUT)
        return (get_zone_dlut(size));
    if (size <= kalloc_max)
        return (get_zone_search(size, k_zindex_start));
    return (ZONE_NULL);
}
#endif

void
kalloc_fake_zone_init(int zone_index)
{
    kalloc_fake_zone_index = zone_index;
}

void
kalloc_fake_zone_info(int *count,
    vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
    uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
    *count = kalloc_large_inuse;
    *cur_size = kalloc_large_total;
    *max_size = kalloc_large_max;

    if (kalloc_large_inuse) {
        *elem_size = kalloc_large_total / kalloc_large_inuse;
        *alloc_size = kalloc_large_total / kalloc_large_inuse;
    } else {
        *elem_size = 0;
        *alloc_size = 0;
    }
    *sum_size = kalloc_large_sum;
    *collectable = 0;
    *exhaustable = 0;
    *caller_acct = 0;
}


void
OSMalloc_init(
    void)
{
    queue_init(&OSMalloc_tag_list);

    OSMalloc_tag_lck_grp = lck_grp_alloc_init("OSMalloc_tag", LCK_GRP_ATTR_NULL);
    lck_mtx_init(&OSMalloc_tag_lock, OSMalloc_tag_lck_grp, LCK_ATTR_NULL);
}

OSMallocTag
OSMalloc_Tagalloc(
    const char *str,
    uint32_t flags)
{
    OSMallocTag OSMTag;

    OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

    bzero((void *)OSMTag, sizeof(*OSMTag));

    if (flags & OSMT_PAGEABLE)
        OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

    OSMTag->OSMT_refcnt = 1;

    strlcpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

    OSMalloc_tag_spin_lock();
    enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
    OSMalloc_tag_unlock();
    OSMTag->OSMT_state = OSMT_VALID;
    return(OSMTag);
}

void
OSMalloc_Tagref(
    OSMallocTag tag)
{
    if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
        panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

    (void)hw_atomic_add(&tag->OSMT_refcnt, 1);
}

void
OSMalloc_Tagrele(
    OSMallocTag tag)
{
    if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
        panic("OSMalloc_Tagrele():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

    if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
        if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
            OSMalloc_tag_spin_lock();
            (void)remque((queue_entry_t)tag);
            OSMalloc_tag_unlock();
            kfree((void*)tag, sizeof(*tag));
        } else
            panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name);
    }
}

void
OSMalloc_Tagfree(
    OSMallocTag tag)
{
    if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
        panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X \n", tag->OSMT_name, tag->OSMT_state);

    if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
        OSMalloc_tag_spin_lock();
        (void)remque((queue_entry_t)tag);
        OSMalloc_tag_unlock();
        kfree((void*)tag, sizeof(*tag));
    }
}
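
/*
 * Tag lifecycle summary (derived from the code above and below):
 * OSMalloc_Tagalloc() creates a tag with a refcount of 1; each successful
 * OSMalloc() takes a reference and each OSFree() drops one.
 * OSMalloc_Tagfree() marks the tag OSMT_RELEASED and drops the initial
 * reference, so the tag structure is actually freed by whichever of
 * OSMalloc_Tagfree() or OSMalloc_Tagrele() drops the refcount to zero
 * once all outstanding allocations have been freed.
 */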

void *
OSMalloc(
    uint32_t size,
    OSMallocTag tag)
{
    void *addr = NULL;
    kern_return_t kr;

    OSMalloc_Tagref(tag);
    if ((tag->OSMT_attr & OSMT_PAGEABLE)
        && (size & ~PAGE_MASK)) {
        if ((kr = kmem_alloc_pageable_external(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
            addr = NULL;
    } else
        addr = kalloc_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);

    if (!addr)
        OSMalloc_Tagrele(tag);

    return(addr);
}

void *
OSMalloc_nowait(
    uint32_t size,
    OSMallocTag tag)
{
    void *addr = NULL;

    if (tag->OSMT_attr & OSMT_PAGEABLE)
        return(NULL);

    OSMalloc_Tagref(tag);
    /* XXX: use non-blocking kalloc for now */
    addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);
    if (addr == NULL)
        OSMalloc_Tagrele(tag);

    return(addr);
}

void *
OSMalloc_noblock(
    uint32_t size,
    OSMallocTag tag)
{
    void *addr = NULL;

    if (tag->OSMT_attr & OSMT_PAGEABLE)
        return(NULL);

    OSMalloc_Tagref(tag);
    addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);
    if (addr == NULL)
        OSMalloc_Tagrele(tag);

    return(addr);
}

void
OSFree(
    void *addr,
    uint32_t size,
    OSMallocTag tag)
{
    if ((tag->OSMT_attr & OSMT_PAGEABLE)
        && (size & ~PAGE_MASK)) {
        kmem_free(kernel_map, (vm_offset_t)addr, size);
    } else
        kfree((void *)addr, size);

    OSMalloc_Tagrele(tag);
}

uint32_t
OSMalloc_size(
    void *addr)
{
    return (uint32_t)kalloc_size(addr);
}