/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory fast.
 */
#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>
zone_t	kalloc_zone(vm_size_t);
vm_map_t	kalloc_map;		/* submap for small kallocs; set up in kalloc_init() */
vm_size_t	kalloc_map_size = 16 * 1024 * 1024;
vm_size_t	kalloc_max;		/* first power of two for which no zone exists */
vm_size_t	kalloc_max_prerounded;
vm_size_t	kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

unsigned int	kalloc_large_inuse;
vm_size_t	kalloc_large_total;
vm_size_t	kalloc_large_max;
vm_size_t	kalloc_largest_allocated = 0;
/*
 *	All allocations of size less than kalloc_max are rounded to the
 *	next highest power of 2.  This allocator is built on top of
 *	the zone allocator.  A zone is created for each potential size
 *	that we are willing to get in small blocks.
 *
 *	We assume that kalloc_max is not greater than 64K;
 *	thus 16 is a safe array size for k_zone and k_zone_name.
 *
 *	Note that kalloc_max is somewhat confusingly named.
 *	It represents the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 *	Also if the allocation size is more than kalloc_kernmap_size
 *	then allocate from kernel map rather than kalloc_map.
 */
int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",		"kalloc.2",
	"kalloc.4",		"kalloc.8",
	"kalloc.16",		"kalloc.32",
	"kalloc.64",		"kalloc.128",
	"kalloc.256",		"kalloc.512",
	"kalloc.1024",		"kalloc.2048",
	"kalloc.4096",		"kalloc.8192",
	"kalloc.16384",		"kalloc.32768"
};

/*
 *  Max number of elements per zone.  zinit rounds things up correctly.
 *  Doing things this way permits each zone to have a different maximum size
 *  based on need, rather than just guessing; it also
 *  means it's patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
      1024,		/*   1024 Byte  */
      1024,		/*   2048 Byte  */
      1024,		/*   4096 Byte  */
      4096,		/*   8192 Byte  */
};

/* forward declarations */
void *	kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);

/* OSMalloc local data declarations */
queue_head_t	OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag	tag);
void OSMalloc_Tagrele(OSMallocTag	tag);

/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system wide basis (i.e. first processor to get here
 *	does the initialization).
 *
 *	This initializes all of the zones.
 */
void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;
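
	/*
	 * Worked example (illustrative, not part of the original source),
	 * assuming a 4K PAGE_SIZE: kalloc_max = 16384, so
	 * kalloc_max_prerounded = 16384/2 + 1 = 8193 and
	 * kalloc_kernmap_size = 16384*16 + 1 = 262145 (just over 256K).
	 * Requests of 8193 bytes or more bypass the zones, and requests
	 * of 262145 bytes or more come from the kernel map.
	 */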

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;		/* no zone for sizes below the minimum */
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;	/* remember the smallest zone's index */
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
	OSMalloc_init();
}
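
/*
 * Illustrative note (not part of the original source): if, for example,
 * KALLOC_MINSIZE were 16 and kalloc_max were 16384, the loop above would
 * leave k_zone[0..3] empty, set first_k_zone = 4, and create the zones
 * "kalloc.16" through "kalloc.8192".
 */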

void *
kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(NULL);
		}

		if (size >= kalloc_kernmap_size) {
			alloc_map = kernel_map;

			if (size > kalloc_largest_allocated)
				kalloc_largest_allocated = size;
		} else
			alloc_map = kalloc_map;

		if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			return(NULL);

		kalloc_large_inuse++;
		kalloc_large_total += size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;

		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}

void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}

void
krealloc(
	void		**addrp,
	vm_size_t	old_size,
	vm_size_t	new_size)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		naddr = kalloc(new_size);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		if (old_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			if (KERN_SUCCESS != kmem_realloc(alloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
			}

			/*
			 * kmem_realloc() doesn't free old page range;
			 * free it here while *addrp still points at it,
			 * before switching to the new block.
			 */
			kmem_free(alloc_map, (vm_offset_t)*addrp, old_size);

			*addrp = (void *) naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	if (new_size >= kalloc_max_prerounded) {
		if (new_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;
		if (KERN_SUCCESS != kmem_alloc(alloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;	/* the new size needs at least the next zone up */
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
}

void *
kget(
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}

void
kfree(
	void		*data,
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t freesize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		if (size >= kalloc_kernmap_size) {
			alloc_map = kernel_map;

			if (size > kalloc_largest_allocated)
				/*
				 * work around double FREEs of small MALLOCs
				 * this used to end up being a nop
				 * since the pointer being freed from an
				 * alloc backed by the zalloc world could
				 * never show up in the kalloc_map... however,
				 * the kernel_map is a different issue... since it
				 * was released back into the zalloc pool, a pointer
				 * would have gotten written over the 'size' that
				 * the MALLOC was retaining in the first 4 bytes of
				 * the underlying allocation... that pointer ends up
				 * looking like a really big size on the 2nd FREE and
				 * pushes the kfree into the kernel_map... we
				 * end up removing a ton of virtual space before we panic
				 * this check causes us to ignore the kfree for a size
				 * that must be 'bogus'... note that it might not be due
				 * to the above scenario, but it would still be wrong and
				 * cause serious damage.
				 */
				return;
		} else
			alloc_map = kalloc_map;
		kmem_free(alloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}
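
/*
 * Worked example (illustrative, not part of the original source), using the
 * 4K-page values worked out in kalloc_init() above: kfree(ptr, 100) rounds
 * 100 up to 128 and returns the element to "kalloc.128", while
 * kfree(ptr, 20000) exceeds kalloc_max_prerounded (8193) and is handed to
 * kmem_free() on kalloc_map instead.
 */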

zone_t
kalloc_zone(
	vm_size_t	size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}

void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = kalloc_large_inuse;
	*cur_size   = kalloc_large_total;
	*max_size   = kalloc_large_max;
	*elem_size  = kalloc_large_total / kalloc_large_inuse;
	*alloc_size = kalloc_large_total / kalloc_large_inuse;
	*collectable = 0;
	*exhaustable = 0;
}

void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char		*str,
	uint32_t		flags)
{
	OSMallocTag	OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}
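
/*
 * Usage sketch (illustrative only, not part of the original source): how a
 * client typically pairs a tag with OSMalloc/OSFree.  The tag name string
 * and the function name are hypothetical.
 */
#if 0
static void
example_osmalloc_usage(void)
{
	OSMallocTag	tag;
	void		*buf;

	tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
	buf = OSMalloc(1024, tag);
	if (buf != NULL)
		OSFree(buf, 1024, tag);		/* size must match the allocation */
	OSMalloc_Tagfree(tag);
}
#endif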

void
OSMalloc_Tagref(
	OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag		tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void*)tag, sizeof(*tag));
	}
}
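
/*
 * Illustrative note (not part of the original source): a tag is created with
 * OSMT_refcnt = 1, each allocation made against it takes another reference
 * (OSMalloc_Tagref) that OSFree gives back (OSMalloc_Tagrele), and the tag
 * itself is only kfree()d once it has been marked OSMT_RELEASED and its
 * reference count drops to zero.
 */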

void *
OSMalloc(
	uint32_t		size,
	OSMallocTag		tag)
{
	void		*addr = NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	return(addr);
}
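
/*
 * Worked example (illustrative, not part of the original source), assuming
 * 4K pages (PAGE_MASK = 0xFFF): for a pageable tag, a 2048-byte request has
 * (size & ~PAGE_MASK) == 0 and therefore still comes from kalloc(), while an
 * 8192-byte request takes the kmem_alloc_pageable() path in kernel_map.
 */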

void *
OSMalloc_nowait(
	uint32_t		size,
	OSMallocTag		tag)
{
	void	*addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);	/* drop the reference if the allocation failed */

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t		size,
	OSMallocTag		tag)
{
	void	*addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);	/* drop the reference if the allocation failed */

	return(addr);
}

void
OSFree(
	void			*addr,
	uint32_t		size,
	OSMallocTag		tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void*)addr, size);

	OSMalloc_Tagrele(tag);
}