/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory quickly.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;
vm_size_t    kalloc_largest_allocated = 0;
/*
 *	All allocations of size less than kalloc_max are rounded to the
 *	next highest power of 2.  This allocator is built on top of
 *	the zone allocator.  A zone is created for each potential size
 *	that we are willing to get in small blocks.
 *
 *	We assume that kalloc_max is not greater than 64K;
 *	thus 16 is a safe array size for k_zone and k_zone_name.
 *
 *	Note that kalloc_max is somewhat confusingly named.
 *	It represents the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 *	Also, if the allocation size is at least kalloc_kernmap_size,
 *	it is allocated from the kernel map rather than from kalloc_map.
 */
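/*
 * For illustration (assuming KALLOC_MINSIZE is 16, as on typical
 * configurations): a kalloc(100) request is rounded up to 128 and
 * served from the "kalloc.128" zone, while a kalloc(9000) request
 * meets or exceeds kalloc_max_prerounded (8193 when kalloc_max is
 * 16K) and is passed through to kmem_alloc() instead.
 */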

int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};

/*
 *  Max number of elements per zone.  zinit rounds things up correctly.
 *  Doing things this way permits each zone to have a different maximum size
 *  based on need, rather than just guessing; it also
 *  means it's patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,	/*      1 Byte  */
	1024,	/*      2 Byte  */
	1024,	/*      4 Byte  */
	1024,	/*      8 Byte  */
	1024,	/*     16 Byte  */
	4096,	/*     32 Byte  */
	4096,	/*     64 Byte  */
	4096,	/*    128 Byte  */
	4096,	/*    256 Byte  */
	1024,	/*    512 Byte  */
	1024,	/*   1024 Byte  */
	1024,	/*   2048 Byte  */
	1024,	/*   4096 Byte  */
	4096,	/*   8192 Byte  */
	64,	/*  16384 Byte  */
	64,	/*  32768 Byte  */
};
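/*
 * Illustrative arithmetic for the table above: the 32-byte zone
 * (k_zone_max[5] == 4096) is sized for 4096 * 32 bytes == 128K of
 * backing store, and the 16384-byte zone for 64 * 16K == 1M, if
 * those zones are created at all on the current page size; zinit()
 * may round these figures up.
 */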

/* forward declarations */
void * kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);


/* OSMalloc local data declarations */
static
queue_head_t    OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag	tag);
void OSMalloc_Tagrele(OSMallocTag	tag);

/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system-wide basis (i.e. the first processor to get
 *	here does the initialization).
 *
 *	This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 *	Ensure that zones up to size 8192 bytes exist.
	 *	This is desirable because messages are allocated
	 *	with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;

	/*
	 *	Allocate a zone for each size we are going to handle.
	 *	We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
	OSMalloc_init();
}
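/*
 * Worked example of the initialization above (assuming 4K pages and
 * KALLOC_MINSIZE == 16): kalloc_max becomes 16K, so zones are created
 * for sizes 16 through 8192 (indices first_k_zone == 4 through 13),
 * kalloc_max_prerounded becomes 8193, and kalloc_kernmap_size
 * becomes 256K + 1.
 */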

void *
kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(0);
		}

		if (size >= kalloc_kernmap_size) {
			alloc_map = kernel_map;

			if (size > kalloc_largest_allocated)
				kalloc_largest_allocated = size;
		} else
			alloc_map = kalloc_map;

		if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}

void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}
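/*
 * Typical usage (illustrative only; "struct foo" is hypothetical):
 *
 *	struct foo *f = (struct foo *) kalloc(sizeof (struct foo));
 *	if (f != NULL) {
 *		...
 *		kfree(f, sizeof (struct foo));
 *	}
 *
 * Note that the caller must supply the original size to kfree();
 * kalloc() keeps no per-block size header.
 */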


void
krealloc(
	void		**addrp,
	vm_size_t	old_size,
	vm_size_t	new_size,
	simple_lock_t	lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		if (old_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			if (KERN_SUCCESS != kmem_realloc(alloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);

			/*
			 * kmem_realloc() doesn't free the old page range,
			 * so free it here, before *addrp is overwritten
			 * with the new address.
			 */
			kmem_free(alloc_map, (vm_offset_t)*addrp, old_size);

			*addrp = (void *) naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (new_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;
		if (KERN_SUCCESS != kmem_alloc(alloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = NULL;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
}
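/*
 * Illustrative caller pattern (the table and table_lock names are
 * hypothetical): krealloc() expects to be entered with the caller's
 * simple lock held, and may drop and retake it around the allocation.
 *
 *	simple_lock(&table_lock);
 *	...
 *	krealloc((void **) &table, old_size, new_size, &table_lock);
 *	...
 *	simple_unlock(&table_lock);
 */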


void *
kget(
	vm_size_t size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}

void
kfree(
	void		*data,
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t freesize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		if (size >= kalloc_kernmap_size) {
			alloc_map = kernel_map;

			if (size > kalloc_largest_allocated)
				/*
				 * work around double FREEs of small MALLOCs
				 * this used to end up being a nop
				 * since the pointer being freed from an
				 * alloc backed by the zalloc world could
				 * never show up in the kalloc_map... however,
				 * the kernel_map is a different issue... since it
				 * was released back into the zalloc pool, a pointer
				 * would have gotten written over the 'size' that
				 * the MALLOC was retaining in the first 4 bytes of
				 * the underlying allocation... that pointer ends up
				 * looking like a really big size on the 2nd FREE and
				 * pushes the kfree into the kernel_map... we
				 * end up removing a ton of virtual space before we panic.
				 * this check causes us to ignore the kfree for a size
				 * that must be 'bogus'... note that it might not be due
				 * to the above scenario, but it would still be wrong and
				 * cause serious damage.
				 */
				return;
		} else
			alloc_map = kalloc_map;
		kmem_free(alloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif
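/*
 * For illustration: kalloc_zone(100) returns the "kalloc.128" zone,
 * whereas a size past the last populated zone (e.g. 64K when
 * kalloc_max is 16K) yields ZONE_NULL.
 */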


void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count    = kalloc_large_inuse;
	*cur_size = kalloc_large_total;
	*max_size = kalloc_large_max;
	/* avoid a divide by zero when no large allocations are outstanding */
	if (kalloc_large_inuse != 0) {
		*elem_size  = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size  = 0;
		*alloc_size = 0;
	}
	*collectable = 0;
	*exhaustable = 0;
}


void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char		*str,
	uint32_t		flags)
{
	OSMallocTag       OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}

void
OSMalloc_Tagref(
	OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag		tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void*)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t		size,
	OSMallocTag		tag)
{
	void		*addr=NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	/* drop the tag reference taken above if the allocation failed,
	   matching the behavior of the _nowait and _noblock variants */
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_nowait(
	uint32_t		size,
	OSMallocTag		tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t		size,
	OSMallocTag		tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void
OSFree(
	void		*addr,
	uint32_t	size,
	OSMallocTag	tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void*)addr, size);

	OSMalloc_Tagrele(tag);
}
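
/*
 * Typical OSMalloc tag usage (illustrative; the tag name below is
 * hypothetical):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
 *	void *p = OSMalloc(1024, tag);
 *	...
 *	OSFree(p, 1024, tag);
 *	OSMalloc_Tagfree(tag);
 */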