/*
 * Copyright (c) 2000-2011 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory fast.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/ledger.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

#define KALLOC_MAP_SIZE_MIN	(16 * 1024 * 1024)
#define KALLOC_MAP_SIZE_MAX	(128 * 1024 * 1024)
vm_map_t kalloc_map;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

/* how many times we couldn't allocate out of kalloc_map and fell back to kernel_map */
unsigned long kalloc_fallback_count;

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;
vm_size_t    kalloc_largest_allocated = 0;
uint64_t     kalloc_large_sum;

int kalloc_fake_zone_index = -1; /* index of our fake zone in statistics arrays */

vm_offset_t kalloc_map_min;
vm_offset_t kalloc_map_max;

#ifdef MUTEX_ZONE
/*
 * Diagnostic code to track mutexes separately rather than via the 2^ zones
 */
zone_t lck_mtx_zone;
#endif

static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)
{
	thread_t thr = current_thread();
	task_t task;
	zinfo_usage_t zinfo;

	ledger_debit(thr->t_ledger, task_ledgers.tkm_shared, bytes);

	if (kalloc_fake_zone_index != -1 &&
	    (task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
		zinfo[kalloc_fake_zone_index].alloc += bytes;
}

static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)
{
	thread_t thr = current_thread();
	task_t task;
	zinfo_usage_t zinfo;

	ledger_credit(thr->t_ledger, task_ledgers.tkm_shared, bytes);

	if (kalloc_fake_zone_index != -1 &&
	    (task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
		zinfo[kalloc_fake_zone_index].free += bytes;
}

/*
 * All allocations of size less than kalloc_max are rounded to the
 * next nearest sized zone.  This allocator is built on top of
 * the zone allocator.  A zone is created for each potential size
 * that we are willing to get in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K.
 *
 * Note that kalloc_max is somewhat confusingly named.
 * It represents the first power of two for which no zone exists.
 * kalloc_max_prerounded is the smallest allocation size, before
 * rounding, for which no zone exists.
 *
 * Also, if the allocation size is more than kalloc_kernmap_size
 * then allocate from kernel map rather than kalloc_map.
 */

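/*
 * Worked example (a sketch, assuming 4 KB pages, so kalloc_max == 16384
 * as computed in kalloc_init() below): kalloc_max_prerounded is then 8193
 * and kalloc_kernmap_size is 262145.  A kalloc(2500) rounds up into the
 * "kalloc.4096" zone; a kalloc(9000) is >= kalloc_max_prerounded, so it
 * bypasses the zones and is served from kalloc_map; a kalloc(300000) is
 * >= kalloc_kernmap_size, so it is served directly from kernel_map.
 */
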
#if KALLOC_MINSIZE == 16 && KALLOC_LOG2_MINALIGN == 4

#define K_ZONE_SIZES	\
	16,		\
	32,		\
	48,		\
/* 3 */	64,		\
	80,		\
	96,		\
/* 6 */	128,		\
	160,		\
	256,		\
/* 9 */	288,		\
	512,		\
	1024,		\
/* C */	1280,		\
	2048,		\
	4096

#define K_ZONE_NAMES		\
	"kalloc.16",		\
	"kalloc.32",		\
	"kalloc.48",		\
/* 3 */	"kalloc.64",		\
	"kalloc.80",		\
	"kalloc.96",		\
/* 6 */	"kalloc.128",		\
	"kalloc.160",		\
	"kalloc.256",		\
/* 9 */	"kalloc.288",		\
	"kalloc.512",		\
	"kalloc.1024",		\
/* C */	"kalloc.1280",		\
	"kalloc.2048",		\
	"kalloc.4096"

#elif KALLOC_MINSIZE == 8 && KALLOC_LOG2_MINALIGN == 3

/*
 * Tweaked for ARM (and x64) in 04/2011
 */

#define K_ZONE_SIZES			\
/* 3 */	8,				\
	16,   24,			\
	32,   40,   48,			\
/* 6 */	64,   72,   88,   112,		\
	128,  192,			\
	256,  288,  384,  440,		\
/* 9 */	512,  768,			\
	1024, 1152, 1536,		\
	2048, 3072,			\
	4096, 6144

#define K_ZONE_NAMES			\
/* 3 */	"kalloc.8",			\
	"kalloc.16",   "kalloc.24",	\
	"kalloc.32",   "kalloc.40",   "kalloc.48",	\
/* 6 */	"kalloc.64",   "kalloc.72",   "kalloc.88",   "kalloc.112",	\
	"kalloc.128",  "kalloc.192",	\
	"kalloc.256",  "kalloc.288",  "kalloc.384",  "kalloc.440",	\
/* 9 */	"kalloc.512",  "kalloc.768",	\
	"kalloc.1024", "kalloc.1152", "kalloc.1536",	\
	"kalloc.2048", "kalloc.3072",	\
	"kalloc.4096", "kalloc.6144"

#else
#error missing zone size parameters for kalloc
#endif

#define KALLOC_MINALIGN (1 << KALLOC_LOG2_MINALIGN)
#define KiB(x) (1024 * (x))

static const int k_zone_size[] = {
	K_ZONE_SIZES,
	KiB(8),
	KiB(16),
	KiB(32)
};

#define MAX_K_ZONE (sizeof (k_zone_size) / sizeof (k_zone_size[0]))

static const char *k_zone_name[MAX_K_ZONE] = {
	K_ZONE_NAMES,
	"kalloc.8192",
	"kalloc.16384",
	"kalloc.32768"
};
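
/*
 * Note: k_zone_size[] (and the matching k_zone_name[] entries) must stay
 * sorted in ascending order; both the DLUT construction in kalloc_init()
 * and get_zone_search() below scan the array for the first zone that fits.
 */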

/*
 * Many kalloc() allocations are for small structures containing a few
 * pointers and longs - the k_zone_dlut[] direct lookup table, indexed by
 * size normalized to the minimum alignment, finds the right zone index
 * for them in one dereference.
 */

#define INDEX_ZDLUT(size)	\
	(((size) + KALLOC_MINALIGN - 1) / KALLOC_MINALIGN)
#define N_K_ZDLUT	(2048 / KALLOC_MINALIGN)
		/* covers sizes [0 .. 2048 - KALLOC_MINALIGN] */
#define MAX_SIZE_ZDLUT	((N_K_ZDLUT - 1) * KALLOC_MINALIGN)

static int8_t k_zone_dlut[N_K_ZDLUT];	/* table of indices into k_zone[] */
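
/*
 * Worked example (a sketch, assuming KALLOC_MINSIZE == 16, i.e.
 * KALLOC_MINALIGN == 16): N_K_ZDLUT is 128 and MAX_SIZE_ZDLUT is 2032.
 * For a 24-byte request, INDEX_ZDLUT(24) == (24 + 15) / 16 == 2, and
 * k_zone_dlut[2] holds the index of the first zone whose element size
 * is >= 32 bytes, i.e. "kalloc.32": one table lookup, no searching.
 */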

/*
 * If there's no hit in the DLUT, then start searching from k_zindex_start.
 */
static int k_zindex_start;

static zone_t k_zone[MAX_K_ZONE];

/* #define KALLOC_DEBUG 1 */

/* forward declarations */

lck_grp_t kalloc_lck_grp;
lck_mtx_t kalloc_lock;

#define kalloc_spin_lock()	lck_mtx_lock_spin(&kalloc_lock)
#define kalloc_unlock()		lck_mtx_unlock(&kalloc_lock)


/* OSMalloc local data declarations */
static
queue_head_t OSMalloc_tag_list;

lck_grp_t *OSMalloc_tag_lck_grp;
lck_mtx_t  OSMalloc_tag_lock;

#define OSMalloc_tag_spin_lock()	lck_mtx_lock_spin(&OSMalloc_tag_lock)
#define OSMalloc_tag_unlock()		lck_mtx_unlock(&OSMalloc_tag_lock)


/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 * Initialize the memory allocator.  This should be called only
 * once on a system wide basis (i.e. first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size, kalloc_map_size;
	register int i;

	/*
	 * Scale the kalloc_map_size to physical memory size: stay below
	 * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel).
	 */
	kalloc_map_size = (vm_size_t)(sane_size >> 5);
#if !__LP64__
	if (kalloc_map_size > KALLOC_MAP_SIZE_MAX)
		kalloc_map_size = KALLOC_MAP_SIZE_MAX;
#endif /* !__LP64__ */
	if (kalloc_map_size < KALLOC_MAP_SIZE_MIN)
		kalloc_map_size = KALLOC_MAP_SIZE_MIN;

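	/*
	 * Worked example (a sketch): with 8 GB of physical memory,
	 * sane_size >> 5 gives a 256 MB kalloc_map on a 64-bit kernel.
	 * A 32-bit kernel would clip that to KALLOC_MAP_SIZE_MAX (128 MB),
	 * and a 256 MB machine would be raised to KALLOC_MAP_SIZE_MIN (16 MB).
	 */
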
	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE,
			       VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT | VM_MAKE_TAG(0),
			       &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	kalloc_map_min = min;
	kalloc_map_max = min + kalloc_map_size - 1;

	/*
	 * Create zones up to at least 2 pages because small page-multiples are
	 * common allocations. Also ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated with kalloc(), and
	 * messages up through size 8192 are common.
	 */
	kalloc_max = PAGE_SIZE << 2;
	if (kalloc_max < KiB(16)) {
		kalloc_max = KiB(16);
	}
	assert(kalloc_max <= KiB(64)); /* assumption made in size arrays */

	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* allocations larger than 16 times kalloc_max go directly to kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;
	kalloc_largest_allocated = kalloc_kernmap_size;

	/*
	 * Allocate a zone for each size we are going to handle. Don't charge the
	 * caller for the allocation, as we aren't sure how the memory will be
	 * handled.
	 */
	for (i = 0; i < (int)MAX_K_ZONE && (size = k_zone_size[i]) < kalloc_max; i++) {
		k_zone[i] = zinit(size, size, size, k_zone_name[i]);
		zone_change(k_zone[i], Z_CALLERACCT, FALSE);
	}

	/*
	 * Build the Direct LookUp Table for small allocations
	 */
	for (i = 0, size = 0; i <= N_K_ZDLUT; i++, size += KALLOC_MINALIGN) {
		int zindex = 0;

		while ((vm_size_t)k_zone_size[zindex] < size)
			zindex++;

		if (i == N_K_ZDLUT) {
			k_zindex_start = zindex;
			break;
		}
		k_zone_dlut[i] = (int8_t)zindex;
	}

#ifdef KALLOC_DEBUG
	printf("kalloc_init: k_zindex_start %d\n", k_zindex_start);

	/*
	 * Do a quick synthesis to see how well/badly we can
	 * find-a-zone for a given size.
	 * Useful when debugging/tweaking the array of zone sizes.
	 * Cache misses are probably more critical than compare-branches!
	 */
	for (i = 0; i < (int)MAX_K_ZONE; i++) {
		vm_size_t testsize = (vm_size_t)k_zone_size[i] - 1;
		int compare = 0;
		int zindex;

		if (testsize < MAX_SIZE_ZDLUT) {
			compare += 1;	/* 'if' (T) */

			long dindex = INDEX_ZDLUT(testsize);
			zindex = (int)k_zone_dlut[dindex];

		} else if (testsize < kalloc_max_prerounded) {

			compare += 2;	/* 'if' (F), 'if' (T) */

			zindex = k_zindex_start;
			while ((vm_size_t)k_zone_size[zindex] < testsize) {
				zindex++;
				compare++;	/* 'while' (T) */
			}
			compare++;	/* 'while' (F) */
		} else
			break;	/* not zone-backed */

		zone_t z = k_zone[zindex];
		printf("kalloc_init: req size %4lu: %11s took %d compare%s\n",
		    (unsigned long)testsize, z->zone_name, compare,
		    compare == 1 ? "" : "s");
	}
#endif

	lck_grp_init(&kalloc_lck_grp, "kalloc.large", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&kalloc_lock, &kalloc_lck_grp, LCK_ATTR_NULL);
	OSMalloc_init();
#ifdef MUTEX_ZONE
	lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024*256, 4096, "lck_mtx");
#endif
}

/*
 * Given an allocation size, return the kalloc zone it belongs to.
 * Direct LookUp Table variant.
 */
static __inline zone_t
get_zone_dlut(vm_size_t size)
{
	long dindex = INDEX_ZDLUT(size);
	int zindex = (int)k_zone_dlut[dindex];
	return (k_zone[zindex]);
}

/* As above, but linear search k_zone_size[] for the next zone that fits. */

static __inline zone_t
get_zone_search(vm_size_t size, int zindex)
{
	assert(size < kalloc_max_prerounded);

	while ((vm_size_t)k_zone_size[zindex] < size)
		zindex++;

	assert((unsigned)zindex < MAX_K_ZONE &&
	    (vm_size_t)k_zone_size[zindex] < kalloc_max);

	return (k_zone[zindex]);
}

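/*
 * Worked example (a sketch, assuming the KALLOC_MINSIZE == 16 tables and
 * kalloc_max == 16384): the DLUT build loop leaves k_zindex_start at the
 * index of "kalloc.2048", the smallest zone that fits a 2048-byte request.
 * get_zone_search(3000, k_zindex_start) then walks forward one step and
 * returns "kalloc.4096".
 */
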
void *
kalloc_canblock(
		vm_size_t              size,
		boolean_t              canblock,
		vm_allocation_site_t * site)
{
	zone_t z;

	if (size < MAX_SIZE_ZDLUT)
		z = get_zone_dlut(size);
	else if (size < kalloc_max_prerounded)
		z = get_zone_search(size, k_zindex_start);
	else {
		/*
		 * If size is too large for a zone, then use kmem_alloc.
		 * (We use kmem_alloc instead of kmem_alloc_kobject so that
		 * krealloc can use kmem_realloc.)
		 */
		vm_map_t alloc_map;
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return (NULL);
		}

		if (size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		vm_tag_t tag;
		tag = (site ? vm_tag_alloc(site) : VM_KERN_MEMORY_KALLOC);

		if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size, tag) != KERN_SUCCESS) {
			if (alloc_map != kernel_map) {
				if (kalloc_fallback_count++ == 0) {
					printf("%s: falling back to kernel_map\n", __func__);
				}
				if (kmem_alloc(kernel_map, (vm_offset_t *)&addr, size, tag) != KERN_SUCCESS)
					addr = NULL;
			} else
				addr = NULL;
		}

		if (addr != NULL) {
			kalloc_spin_lock();
			/*
			 * Thread-safe version of the workaround for 4740071
			 * (a double FREE())
			 */
			if (size > kalloc_largest_allocated)
				kalloc_largest_allocated = size;

			kalloc_large_inuse++;
			kalloc_large_total += size;
			kalloc_large_sum += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

			kalloc_unlock();

			KALLOC_ZINFO_SALLOC(size);
		}
		return (addr);
	}
#ifdef KALLOC_DEBUG
	if (size > z->elem_size)
		panic("%s: z %p (%s) but requested size %lu", __func__,
		    z, z->zone_name, (unsigned long)size);
#endif
	assert(size <= z->elem_size);
	return (zalloc_canblock(z, canblock));
}
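
/*
 * Note: callers normally reach kalloc_canblock() through the kalloc()
 * and kalloc_noblock() macros in kern/kalloc.h, which supply the
 * vm_allocation_site_t used for tag accounting; a NULL site simply
 * falls back to the generic VM_KERN_MEMORY_KALLOC tag, as above.
 */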

void *
kalloc_external(
	vm_size_t size);
void *
kalloc_external(
	vm_size_t size)
{
	return( kalloc_tag_bt(size, VM_KERN_MEMORY_KALLOC) );
}

volatile SInt32 kfree_nop_count = 0;

void
kfree(
	void      *data,
	vm_size_t  size)
{
	zone_t z;

	if (size < MAX_SIZE_ZDLUT)
		z = get_zone_dlut(size);
	else if (size < kalloc_max_prerounded)
		z = get_zone_search(size, k_zindex_start);
	else {
		/* if size was too large for a zone, then use kmem_free */

		vm_map_t alloc_map = kernel_map;

		if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max))
			alloc_map = kalloc_map;
		if (size > kalloc_largest_allocated) {
			/*
			 * Work around double FREEs of small MALLOCs.
			 * This used to end up being a no-op, since a pointer
			 * being freed from an alloc backed by the zalloc
			 * world could never show up in the kalloc_map.
			 * However, the kernel_map is a different issue: once
			 * the allocation was released back into the zalloc
			 * pool, a pointer would have gotten written over the
			 * 'size' that the MALLOC was retaining in the first
			 * 4 bytes of the underlying allocation. That pointer
			 * ends up looking like a really big size on the 2nd
			 * FREE and pushes the kfree into the kernel_map; we
			 * end up removing a ton of virtual space before we
			 * panic. This check causes us to ignore the kfree
			 * for a size that must be 'bogus'. Note that it
			 * might not be due to the above scenario, but it
			 * would still be wrong and cause serious damage.
			 */

			OSAddAtomic(1, &kfree_nop_count);
			return;
		}
		kmem_free(alloc_map, (vm_offset_t)data, size);

		kalloc_spin_lock();

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		kalloc_unlock();

		KALLOC_ZINFO_SFREE(size);
		return;
	}

	/* free to the appropriate zone */
#ifdef KALLOC_DEBUG
	if (size > z->elem_size)
		panic("%s: z %p (%s) but requested size %lu", __func__,
		    z, z->zone_name, (unsigned long)size);
#endif
	assert(size <= z->elem_size);
	zfree(z, data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	if (size < MAX_SIZE_ZDLUT)
		return (get_zone_dlut(size));
	if (size <= kalloc_max)
		return (get_zone_search(size, k_zindex_start));
	return (ZONE_NULL);
}
#endif

void
kalloc_fake_zone_init(int zone_index)
{
	kalloc_fake_zone_index = zone_index;
}

void
kalloc_fake_zone_info(int *count,
		      vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
		      uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
	*count      = kalloc_large_inuse;
	*cur_size   = kalloc_large_total;
	*max_size   = kalloc_large_max;

	if (kalloc_large_inuse) {
		*elem_size  = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size  = 0;
		*alloc_size = 0;
	}
	*sum_size    = kalloc_large_sum;
	*collectable = 0;
	*exhaustable = 0;
	*caller_acct = 0;
}


void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);

	OSMalloc_tag_lck_grp = lck_grp_alloc_init("OSMalloc_tag", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&OSMalloc_tag_lock, OSMalloc_tag_lck_grp, LCK_ATTR_NULL);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char *str,
	uint32_t    flags)
{
	OSMallocTag OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strlcpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	OSMalloc_tag_spin_lock();
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	OSMalloc_tag_unlock();
	OSMTag->OSMT_state = OSMT_VALID;
	return (OSMTag);
}
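
/*
 * Typical OSMalloc tag lifecycle (a hypothetical sketch, not code from
 * this file; "com.example.driver" is an illustrative name):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
 *	void *buf = OSMalloc(1024, tag);
 *	...
 *	OSFree(buf, 1024, tag);
 *	OSMalloc_Tagfree(tag);
 */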

void
OSMalloc_Tagref(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	(void)hw_atomic_add(&tag->OSMT_refcnt, 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			OSMalloc_tag_spin_lock();
			(void)remque((queue_entry_t)tag);
			OSMalloc_tag_unlock();
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele():'%s' has refcnt 0\n", tag->OSMT_name);
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree():'%s' has bad state 0x%08X\n", tag->OSMT_name, tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		OSMalloc_tag_spin_lock();
		(void)remque((queue_entry_t)tag);
		OSMalloc_tag_unlock();
		kfree((void*)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t    size,
	OSMallocTag tag)
{
	void *addr = NULL;
	kern_return_t kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		if ((kr = kmem_alloc_pageable_external(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			addr = NULL;
	} else
		addr = kalloc_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);

	if (!addr)
		OSMalloc_Tagrele(tag);

	return (addr);
}

void *
OSMalloc_nowait(
	uint32_t    size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return (NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return (addr);
}

void *
OSMalloc_noblock(
	uint32_t    size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return (NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock_tag_bt((vm_size_t)size, VM_KERN_MEMORY_KALLOC);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return (addr);
}

void
OSFree(
	void       *addr,
	uint32_t    size,
	OSMallocTag tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}