/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/kalloc.c
 * Author:	Avadis Tevanian, Jr.
 * Date:	1985
 *
 * General kernel memory allocator. This allocator is designed
 * to give the kernel fast management of dynamic memory.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;
vm_size_t    kalloc_largest_allocated = 0;
/*
 * All allocations of size less than kalloc_max are rounded to the
 * next highest power of 2. This allocator is built on top of
 * the zone allocator. A zone is created for each potential size
 * that we are willing to get in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K;
 * thus 16 is a safe array size for k_zone and k_zone_name.
 *
 * Note that kalloc_max is somewhat confusingly named:
 * it is the first power of two for which no zone exists, and
 * kalloc_max_prerounded is the smallest allocation size, before
 * rounding, for which no zone exists.
 * Allocations of more than kalloc_kernmap_size bytes are made
 * from the kernel map rather than from kalloc_map.
 */

int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};
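
/*
 * Illustrative sketch (editor's addition, not compiled): how a request
 * size maps onto the power-of-two zones described above. This is the
 * same loop that kalloc_canblock(), krealloc(), kget() and kfree() use
 * below; the function name is hypothetical.
 */
#if 0	/* example only */
static int
kalloc_zindex_example(vm_size_t size)
{
	vm_size_t allocsize = KALLOC_MINSIZE;	/* smallest zoned size */
	int zindex = first_k_zone;

	/* round the request up to the next power of two */
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}
	/* e.g. a 300-byte request rounds to 512 and is served by "kalloc.512" */
	return zindex;
}
#endif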

/*
 * Max number of elements per zone. zinit rounds things up correctly.
 * Doing things this way permits each zone to have a different maximum
 * size based on need, rather than just guessing; it also means it's
 * patchable in case you're wrong. For example, the 32-byte zone is
 * capped at 4096 elements, i.e. at most 128KB of backing memory.
 */
unsigned long k_zone_max[16] = {
	1024,	/*     1 byte  */
	1024,	/*     2 bytes */
	1024,	/*     4 bytes */
	1024,	/*     8 bytes */
	1024,	/*    16 bytes */
	4096,	/*    32 bytes */
	4096,	/*    64 bytes */
	4096,	/*   128 bytes */
	4096,	/*   256 bytes */
	1024,	/*   512 bytes */
	1024,	/*  1024 bytes */
	1024,	/*  2048 bytes */
	1024,	/*  4096 bytes */
	4096,	/*  8192 bytes */
	64,	/* 16384 bytes */
	64,	/* 32768 bytes */
};

/* forward declarations */
void *kalloc_canblock(
	vm_size_t	size,
	boolean_t	canblock);


/* OSMalloc local data declarations */
static queue_head_t OSMalloc_tag_list;

decl_simple_lock_data(static, OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 * Initialize the memory allocator. This should be called only
 * once, system-wide (i.e. the first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
	    FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
		    k_zone_name[i]);
	}
	OSMalloc_init();
}

void *
kalloc_canblock(
	vm_size_t size,
	boolean_t canblock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(0);
		}

		if (size >= kalloc_kernmap_size) {
			alloc_map = kernel_map;

			if (size > kalloc_largest_allocated)
				kalloc_largest_allocated = size;
		} else
			alloc_map = kalloc_map;

		if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}

void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}

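/*
 * Illustrative usage (editor's addition, not compiled): kalloc and kfree
 * are a size-paired interface, so callers must remember the requested
 * size and pass it back to kfree. The struct and function names below
 * are hypothetical.
 */
#if 0	/* example only */
struct my_record {
	int	id;
	char	name[32];
};

static void
kalloc_usage_example(void)
{
	struct my_record *rec;

	rec = (struct my_record *)kalloc(sizeof(*rec));
	if (rec == NULL)
		return;			/* kalloc can fail */
	/* ... use rec ... */
	kfree(rec, sizeof(*rec));	/* must pass the original size */
}
#endif
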
void
krealloc(
	void **addrp,
	vm_size_t old_size,
	vm_size_t new_size,
	simple_lock_t lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		if (old_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {
			void *oaddr = *addrp;

			if (KERN_SUCCESS != kmem_realloc(alloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);
			*addrp = (void *) naddr;

			/*
			 * kmem_realloc() doesn't free the old page range,
			 * so release it here. Note that it is the old
			 * address, not *addrp (which now holds the new
			 * block), that must be freed.
			 */
			kmem_free(alloc_map, (vm_offset_t)oaddr, old_size);

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (new_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;
		if (KERN_SUCCESS != kmem_alloc(alloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = NULL;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
}

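/*
 * Illustrative calling convention for krealloc (editor's addition, not
 * compiled): the caller passes the simple lock that guards the pointer,
 * holding it across the call; krealloc drops and retakes that lock around
 * the blocking allocation. All names below are hypothetical.
 */
#if 0	/* example only */
decl_simple_lock_data(static, my_buf_lock)
static void *my_buf;
static vm_size_t my_buf_size;

static void
grow_my_buf(vm_size_t new_size)
{
	simple_lock(&my_buf_lock);
	krealloc(&my_buf, my_buf_size, new_size, &my_buf_lock);
	if (my_buf != NULL)
		my_buf_size = new_size;
	simple_unlock(&my_buf_lock);
}
#endif
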
void *
kget(
	vm_size_t size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}

void
kfree(
	void *data,
	vm_size_t size)
{
	register int zindex;
	register vm_size_t freesize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		if (size >= kalloc_kernmap_size) {
			alloc_map = kernel_map;

			if (size > kalloc_largest_allocated)
				/*
				 * Work around double FREEs of small MALLOCs.
				 * This used to end up being a no-op, since a
				 * pointer freed from an allocation backed by
				 * the zalloc world could never show up in the
				 * kalloc_map. The kernel_map is a different
				 * story: once the block was released back into
				 * the zalloc pool, a pointer would get written
				 * over the 'size' that the MALLOC was keeping
				 * in the first 4 bytes of the underlying
				 * allocation. That pointer looks like a very
				 * large size on the second FREE and pushes the
				 * kfree into the kernel_map, where we would
				 * remove a ton of virtual space before
				 * panicking. This check makes us ignore a
				 * kfree with a size that must be 'bogus'. It
				 * might not always be due to the scenario
				 * above, but such a kfree would still be wrong
				 * and cause serious damage.
				 */
				return;
		} else
			alloc_map = kalloc_map;
		kmem_free(alloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif

void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count = kalloc_large_inuse;
	*cur_size = kalloc_large_total;
	*max_size = kalloc_large_max;
	/* guard against dividing by zero when no large allocations exist */
	if (kalloc_large_inuse != 0) {
		*elem_size = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size = 0;
		*alloc_size = 0;
	}
	*collectable = 0;
	*exhaustable = 0;
}

void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char *str,
	uint32_t flags)
{
	OSMallocTag OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}

void
OSMalloc_Tagref(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID | OSMT_RELEASED, OSMT_VALID | OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void *)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID | OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void *)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;
	kern_return_t kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	return(addr);
}

void *
OSMalloc_nowait(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void
OSFree(
	void *addr,
	uint32_t size,
	OSMallocTag tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}
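
/*
 * Illustrative OSMalloc tag lifecycle (editor's addition, not compiled):
 * a client allocates a named tag once, charges allocations against it,
 * and frees the tag when done. The tag name below is hypothetical.
 */
#if 0	/* example only */
static void
osmalloc_usage_example(void)
{
	OSMallocTag tag;
	void *buf;

	tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);

	buf = OSMalloc(1024, tag);
	if (buf != NULL) {
		/* ... use buf ... */
		OSFree(buf, 1024, tag);
	}

	OSMalloc_Tagfree(tag);
}
#endif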