/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:   kern/kalloc.c
 * Author: Avadis Tevanian, Jr.
 * Date:   1985
 *
 * General kernel memory allocator.  This allocator is designed
 * to be used by the kernel to manage dynamic memory fast.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

#define KALLOC_MAP_SIZE_MIN  (16 * 1024 * 1024)
#define KALLOC_MAP_SIZE_MAX  (128 * 1024 * 1024)
vm_map_t kalloc_map;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;  /* size of kallocs that can come from kernel map */

unsigned int kalloc_large_inuse;
vm_size_t kalloc_large_total;
vm_size_t kalloc_large_max;
vm_size_t kalloc_largest_allocated = 0;

/*
 * All allocations of size less than kalloc_max are rounded to the
 * next highest power of 2.  This allocator is built on top of
 * the zone allocator.  A zone is created for each potential size
 * that we are willing to get in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K;
 * thus 16 is a safe array size for k_zone and k_zone_name.
 *
 * Note that kalloc_max is somewhat confusingly named.
 * It represents the first power of two for which no zone exists.
 * kalloc_max_prerounded is the smallest allocation size, before
 * rounding, for which no zone exists.
 *
 * Also, if the allocation size is more than kalloc_kernmap_size
 * then allocate from the kernel map rather than from kalloc_map.
 */
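/*
 * A worked example (illustrative, assuming 4 KB pages, so kalloc_max
 * is 16 KB and kalloc_max_prerounded is 8193): a request for 3000
 * bytes rounds up to the 4096-byte zone ("kalloc.4096"), while a
 * request for 20000 bytes is at least kalloc_max_prerounded and is
 * served by kmem_alloc() instead.
 */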

int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
        "kalloc.1",     "kalloc.2",
        "kalloc.4",     "kalloc.8",
        "kalloc.16",    "kalloc.32",
        "kalloc.64",    "kalloc.128",
        "kalloc.256",   "kalloc.512",
        "kalloc.1024",  "kalloc.2048",
        "kalloc.4096",  "kalloc.8192",
        "kalloc.16384", "kalloc.32768"
};

/*
 * Max number of elements per zone.  zinit() rounds things up correctly.
 * Doing things this way permits each zone to have a different maximum size
 * based on need, rather than just guessing; it also
 * means it's patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
        1024,   /*     1 Byte */
        1024,   /*     2 Byte */
        1024,   /*     4 Byte */
        1024,   /*     8 Byte */
        1024,   /*    16 Byte */
        4096,   /*    32 Byte */
        4096,   /*    64 Byte */
        4096,   /*   128 Byte */
        4096,   /*   256 Byte */
        1024,   /*   512 Byte */
        1024,   /*  1024 Byte */
        1024,   /*  2048 Byte */
        1024,   /*  4096 Byte */
        4096,   /*  8192 Byte */
        64,     /* 16384 Byte */
        64,     /* 32768 Byte */
};
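/*
 * Reading this table together with the zinit() call below: zinit()'s
 * second argument is the zone's maximum memory in bytes, so each entry
 * here is a per-zone element budget.  For example, the 256-byte zone
 * is capped at 4096 elements, i.e. 4096 * 256 = 1 MB of zone memory.
 */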

/* forward declarations */
void * kalloc_canblock(
                vm_size_t size,
                boolean_t canblock);


/* OSMalloc local data declarations */
static
queue_head_t OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 * Initialize the memory allocator.  This should be called only
 * once on a system-wide basis (i.e. the first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */

void
kalloc_init(
        void)
{
        kern_return_t retval;
        vm_offset_t min;
        vm_size_t size, kalloc_map_size;
        register int i;

        /*
         * Scale the kalloc_map_size to physical memory size: stay below
         * 1/8th the total zone map size, or 128 MB.
         */
        kalloc_map_size = sane_size >> 5;
        if (kalloc_map_size > KALLOC_MAP_SIZE_MAX)
                kalloc_map_size = KALLOC_MAP_SIZE_MAX;
        if (kalloc_map_size < KALLOC_MAP_SIZE_MIN)
                kalloc_map_size = KALLOC_MAP_SIZE_MIN;

        retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
                               FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

        if (retval != KERN_SUCCESS)
                panic("kalloc_init: kmem_suballoc failed");

        /*
         * Ensure that zones up to size 8192 bytes exist.
         * This is desirable because messages are allocated
         * with kalloc, and messages up through size 8192 are common.
         */

        if (PAGE_SIZE < 16*1024)
                kalloc_max = 16*1024;
        else
                kalloc_max = PAGE_SIZE;
        kalloc_max_prerounded = kalloc_max / 2 + 1;
        /* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */
        kalloc_kernmap_size = (kalloc_max * 16) + 1;
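        /*
         * For example, with 4 KB pages this yields kalloc_max = 16384,
         * kalloc_max_prerounded = 8193, and kalloc_kernmap_size = 262145,
         * so allocations larger than 256 KB come from the kernel map.
         */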

        /*
         * Allocate a zone for each size we are going to handle.
         * We specify non-paged memory.
         */
        for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
                if (size < KALLOC_MINSIZE) {
                        k_zone[i] = NULL;
                        continue;
                }
                if (size == KALLOC_MINSIZE) {
                        first_k_zone = i;
                }
                k_zone[i] = zinit(size, k_zone_max[i] * size, size,
                                  k_zone_name[i]);
        }
        OSMalloc_init();
}

void *
kalloc_canblock(
                vm_size_t size,
                boolean_t canblock)
{
        register int zindex;
        register vm_size_t allocsize;
        vm_map_t alloc_map = VM_MAP_NULL;

        /*
         * If size is too large for a zone, then use kmem_alloc.
         * (We use kmem_alloc instead of kmem_alloc_wired so that
         * krealloc can use kmem_realloc.)
         */

        if (size >= kalloc_max_prerounded) {
                void *addr;

                /* kmem_alloc could block so we return if noblock */
                if (!canblock) {
                        return(NULL);
                }

                if (size >= kalloc_kernmap_size) {
                        alloc_map = kernel_map;

                        if (size > kalloc_largest_allocated)
                                kalloc_largest_allocated = size;
                } else
                        alloc_map = kalloc_map;

                if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
                        addr = NULL;

                if (addr) {
                        kalloc_large_inuse++;
                        kalloc_large_total += size;

                        if (kalloc_large_total > kalloc_large_max)
                                kalloc_large_max = kalloc_large_total;
                }
                return(addr);
        }

        /* compute the size of the block that we will actually allocate */

        allocsize = KALLOC_MINSIZE;
        zindex = first_k_zone;
        while (allocsize < size) {
                allocsize <<= 1;
                zindex++;
        }

        /* allocate from the appropriate zone */
        assert(allocsize < kalloc_max);
        return(zalloc_canblock(k_zone[zindex], canblock));
}

void *
kalloc(
        vm_size_t size)
{
        return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
        vm_size_t size)
{
        return( kalloc_canblock(size, FALSE) );
}
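
/*
 * A minimal usage sketch (hypothetical caller): the same size must be
 * passed to kfree() that was passed to kalloc(), since the allocator
 * keeps no per-block size header.
 *
 *      struct foo *fp = (struct foo *) kalloc(sizeof (struct foo));
 *      if (fp != NULL) {
 *              ...
 *              kfree(fp, sizeof (struct foo));
 *      }
 */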


void
krealloc(
        void **addrp,
        vm_size_t old_size,
        vm_size_t new_size,
        simple_lock_t lock)
{
        register int zindex;
        register vm_size_t allocsize;
        void *naddr;
        vm_map_t alloc_map = VM_MAP_NULL;

        /* can only be used for increasing allocation size */

        assert(new_size > old_size);

        /* if old_size is zero, then we are simply allocating */

        if (old_size == 0) {
                simple_unlock(lock);
                naddr = kalloc(new_size);
                simple_lock(lock);
                *addrp = naddr;
                return;
        }

        /* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

        if (old_size >= kalloc_max_prerounded) {
                if (old_size >= kalloc_kernmap_size)
                        alloc_map = kernel_map;
                else
                        alloc_map = kalloc_map;

                old_size = round_page(old_size);
                new_size = round_page(new_size);
                if (new_size > old_size) {
                        vm_offset_t oaddr = (vm_offset_t)*addrp;

                        /*
                         * kmem_realloc() can block, so drop the caller's
                         * lock around it, as the other paths here do.
                         */
                        simple_unlock(lock);
                        if (KERN_SUCCESS != kmem_realloc(alloc_map,
                            oaddr, old_size,
                            (vm_offset_t *)&naddr, new_size))
                                panic("krealloc: kmem_realloc");

                        simple_lock(lock);
                        *addrp = (void *) naddr;

                        /*
                         * kmem_realloc() doesn't free the old page range,
                         * so free the saved old address here; *addrp now
                         * points at the new block.
                         */
                        kmem_free(alloc_map, oaddr, old_size);

                        kalloc_large_total += (new_size - old_size);

                        if (kalloc_large_total > kalloc_large_max)
                                kalloc_large_max = kalloc_large_total;

                }
                return;
        }

        /* compute the size of the block that we actually allocated */

        allocsize = KALLOC_MINSIZE;
        zindex = first_k_zone;
        while (allocsize < old_size) {
                allocsize <<= 1;
                zindex++;
        }

        /* if new size fits in old block, then return */

        if (new_size <= allocsize) {
                return;
        }

        /* if new size does not fit in zone, kmem_alloc it, else zalloc it */

        simple_unlock(lock);
        if (new_size >= kalloc_max_prerounded) {
                if (new_size >= kalloc_kernmap_size)
                        alloc_map = kernel_map;
                else
                        alloc_map = kalloc_map;
                if (KERN_SUCCESS != kmem_alloc(alloc_map,
                    (vm_offset_t *)&naddr, new_size)) {
                        panic("krealloc: kmem_alloc");
                        simple_lock(lock);
                        *addrp = NULL;
                        return;
                }
                kalloc_large_inuse++;
                kalloc_large_total += new_size;

                if (kalloc_large_total > kalloc_large_max)
                        kalloc_large_max = kalloc_large_total;
        } else {
                register int new_zindex;

                allocsize <<= 1;
                new_zindex = zindex + 1;
                while (allocsize < new_size) {
                        allocsize <<= 1;
                        new_zindex++;
                }
                naddr = zalloc(k_zone[new_zindex]);
        }
        simple_lock(lock);

        /* copy existing data */

        bcopy((const char *)*addrp, (char *)naddr, old_size);

        /* free old block, and return */

        zfree(k_zone[zindex], *addrp);

        /* set up new address */

        *addrp = (void *) naddr;
}


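/*
 * kget() is the non-blocking, zone-only variant of kalloc(): zget()
 * never blocks, and a size that would have to fall through to
 * kmem_alloc() is treated as a caller error and panics below.
 */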
void *
kget(
        vm_size_t size)
{
        register int zindex;
        register vm_size_t allocsize;

        /* size must not be too large for a zone */

        if (size >= kalloc_max_prerounded) {
                /* This will never work, so we might as well panic */
                panic("kget");
        }

        /* compute the size of the block that we will actually allocate */

        allocsize = KALLOC_MINSIZE;
        zindex = first_k_zone;
        while (allocsize < size) {
                allocsize <<= 1;
                zindex++;
        }

        /* allocate from the appropriate zone */

        assert(allocsize < kalloc_max);
        return(zget(k_zone[zindex]));
}

void
kfree(
        void *data,
        vm_size_t size)
{
        register int zindex;
        register vm_size_t freesize;
        vm_map_t alloc_map = VM_MAP_NULL;

        /* if size was too large for a zone, then use kmem_free */

        if (size >= kalloc_max_prerounded) {
                if (size >= kalloc_kernmap_size) {
                        alloc_map = kernel_map;

                        if (size > kalloc_largest_allocated)
                                /*
                                 * Work around double FREEs of small MALLOCs.
                                 * This used to end up being a no-op, since a
                                 * pointer being freed from an alloc backed by
                                 * the zalloc world could never show up in the
                                 * kalloc_map.  The kernel_map is a different
                                 * issue: once a block is released back into
                                 * the zalloc pool, a pointer gets written over
                                 * the 'size' that the MALLOC was retaining in
                                 * the first 4 bytes of the underlying
                                 * allocation.  That pointer looks like a
                                 * really big size on the 2nd FREE and pushes
                                 * the kfree into the kernel_map; we end up
                                 * removing a ton of virtual space before we
                                 * panic.  This check causes us to ignore a
                                 * kfree for a size that must be 'bogus'.
                                 * Note that it might not be due to the above
                                 * scenario, but it would still be wrong and
                                 * cause serious damage.
                                 */
                                return;
                } else
                        alloc_map = kalloc_map;
                kmem_free(alloc_map, (vm_offset_t)data, size);

                kalloc_large_total -= size;
                kalloc_large_inuse--;

                return;
        }

        /* compute the size of the block that we actually allocated from */

        freesize = KALLOC_MINSIZE;
        zindex = first_k_zone;
        while (freesize < size) {
                freesize <<= 1;
                zindex++;
        }

        /* free to the appropriate zone */

        assert(freesize < kalloc_max);
        zfree(k_zone[zindex], data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
        vm_size_t size)
{
        register int zindex = 0;
        register vm_size_t allocsize;

        /* compute the size of the block that we will actually allocate */

        allocsize = size;
        if (size <= kalloc_max) {
                allocsize = KALLOC_MINSIZE;
                zindex = first_k_zone;
                while (allocsize < size) {
                        allocsize <<= 1;
                        zindex++;
                }
                return (k_zone[zindex]);
        }
        return (ZONE_NULL);
}
#endif


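/*
 * Report kalloc's large (kmem_alloc'd) allocations as a fake zone for
 * the zone information interfaces.  The element and alloc sizes below
 * are averages over the outstanding large allocations (and so assume
 * kalloc_large_inuse is nonzero when this is called).
 */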
void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
                      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
        *count = kalloc_large_inuse;
        *cur_size = kalloc_large_total;
        *max_size = kalloc_large_max;
        *elem_size = kalloc_large_total / kalloc_large_inuse;
        *alloc_size = kalloc_large_total / kalloc_large_inuse;
        *collectable = 0;
        *exhaustable = 0;
}


void
OSMalloc_init(
        void)
{
        queue_init(&OSMalloc_tag_list);
        simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
        const char *str,
        uint32_t flags)
{
        OSMallocTag OSMTag;

        OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

        bzero((void *)OSMTag, sizeof(*OSMTag));

        if (flags & OSMT_PAGEABLE)
                OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

        OSMTag->OSMT_refcnt = 1;

        strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

        simple_lock(&OSMalloc_tag_lock);
        enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
        simple_unlock(&OSMalloc_tag_lock);
        OSMTag->OSMT_state = OSMT_VALID;
        return(OSMTag);
}

void
OSMalloc_Tagref(
        OSMallocTag tag)
{
        if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
                panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

        (void)hw_atomic_add(&tag->OSMT_refcnt, 1);
}

void
OSMalloc_Tagrele(
        OSMallocTag tag)
{
        if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
                panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

        if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
                if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
                        simple_lock(&OSMalloc_tag_lock);
                        (void)remque((queue_entry_t)tag);
                        simple_unlock(&OSMalloc_tag_lock);
                        kfree((void*)tag, sizeof(*tag));
                } else
                        panic("OSMalloc_Tagrele(): refcnt 0\n");
        }
}

void
OSMalloc_Tagfree(
        OSMallocTag tag)
{
        if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
                panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

        if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
                simple_lock(&OSMalloc_tag_lock);
                (void)remque((queue_entry_t)tag);
                simple_unlock(&OSMalloc_tag_lock);
                kfree((void*)tag, sizeof(*tag));
        }
}

void *
OSMalloc(
        uint32_t size,
        OSMallocTag tag)
{
        void *addr = NULL;
        kern_return_t kr;

        OSMalloc_Tagref(tag);
        if ((tag->OSMT_attr & OSMT_PAGEABLE)
            && (size & ~PAGE_MASK)) {
                if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
                        addr = NULL;
        } else
                addr = kalloc((vm_size_t)size);

        if (!addr)
                OSMalloc_Tagrele(tag);

        return(addr);
}

void *
OSMalloc_nowait(
        uint32_t size,
        OSMallocTag tag)
{
        void *addr = NULL;

        if (tag->OSMT_attr & OSMT_PAGEABLE)
                return(NULL);

        OSMalloc_Tagref(tag);
        /* XXX: use non-blocking kalloc for now */
        addr = kalloc_noblock((vm_size_t)size);
        if (addr == NULL)
                OSMalloc_Tagrele(tag);

        return(addr);
}

void *
OSMalloc_noblock(
        uint32_t size,
        OSMallocTag tag)
{
        void *addr = NULL;

        if (tag->OSMT_attr & OSMT_PAGEABLE)
                return(NULL);

        OSMalloc_Tagref(tag);
        addr = kalloc_noblock((vm_size_t)size);
        if (addr == NULL)
                OSMalloc_Tagrele(tag);

        return(addr);
}

void
OSFree(
        void *addr,
        uint32_t size,
        OSMallocTag tag)
{
        if ((tag->OSMT_attr & OSMT_PAGEABLE)
            && (size & ~PAGE_MASK)) {
                kmem_free(kernel_map, (vm_offset_t)addr, size);
        } else
                kfree((void *)addr, size);

        OSMalloc_Tagrele(tag);
}
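
/*
 * A minimal OSMalloc usage sketch (hypothetical tag name; OSMT_DEFAULT
 * is the "no attributes" flag from libkern/OSMalloc.h):
 *
 *      OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
 *      void *buf = OSMalloc(1024, tag);
 *      if (buf != NULL)
 *              OSFree(buf, 1024, tag);
 *      OSMalloc_Tagfree(tag);          (drops the initial reference)
 */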