/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory quickly.
 */
#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>
zone_t kalloc_zone(vm_size_t);
vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;
/*
 *	All allocations of size less than kalloc_max are rounded to the
 *	next highest power of 2.  This allocator is built on top of
 *	the zone allocator.  A zone is created for each potential size
 *	that we are willing to get in small blocks.
 *
 *	We assume that kalloc_max is not greater than 64K;
 *	thus 16 is a safe array size for k_zone and k_zone_name.
 *
 *	Note that kalloc_max is somewhat confusingly named.
 *	It represents the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 */
int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};
/*
 *  Max number of elements per zone.  zinit() rounds things up correctly.
 *  Doing things this way permits each zone to have a different maximum size
 *  based on need, rather than just guessing; it also
 *  means it's patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,		/*     1 Byte  */
	1024,		/*     2 Byte  */
	1024,		/*     4 Byte  */
	1024,		/*     8 Byte  */
	1024,		/*    16 Byte  */
	4096,		/*    32 Byte  */
	4096,		/*    64 Byte  */
	4096,		/*   128 Byte  */
	4096,		/*   256 Byte  */
	1024,		/*   512 Byte  */
	1024,		/*  1024 Byte  */
	1024,		/*  2048 Byte  */
	1024,		/*  4096 Byte  */
	4096,		/*  8192 Byte  */
	64,		/* 16384 Byte  */
	64,		/* 32768 Byte  */
};
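
/*
 * Worked example (illustrative): kalloc_init() below passes
 * k_zone_max[i] * size to zinit() as each zone's maximum size in
 * bytes, so the entry of 4096 for the 8192-byte zone caps kalloc.8192
 * at 4096 * 8192 bytes = 32 MB.
 */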
/* forward declarations */
void *kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);
/* OSMalloc local data declarations */
static queue_head_t OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)
/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);
/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system-wide basis (i.e. the first processor to get
 *	here does the initialization).
 *
 *	This initializes all of the zones.
 */
void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
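
	/*
	 * Worked example (illustrative): with 4K pages, kalloc_max is
	 * 16384, so kalloc_max_prerounded is 8193.  A request of 8193
	 * bytes or more goes straight to kmem_alloc(), while 8192 bytes
	 * or less rounds up into one of the zones (at most kalloc.8192).
	 */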

	/*
	 *	Allocate a zone for each size we are going to handle.
	 *	We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
	OSMalloc_init();
}

void *
kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock)
{
	register int zindex;
	register vm_size_t allocsize;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */
	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block, so return failure if noblock */
		if (!canblock)
			return(0);

		if (kmem_alloc(kalloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr != 0) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */
	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}

void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}
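
/*
 * Hypothetical usage sketch (not part of the original file): callers
 * must remember the size they allocated, because kfree() needs it to
 * find the right zone, or the kmem range, to release.
 */
#if 0
static void
kalloc_usage_example(void)
{
	vm_size_t size = 96;		/* rounds up into kalloc.128 */
	void *buf = kalloc(size);

	if (buf != 0) {
		/* ... use the buffer ... */
		kfree(buf, size);	/* must pass the original size */
	}
}
#endif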

void
krealloc(
	void		**addrp,
	vm_size_t	old_size,
	vm_size_t	new_size,
	simple_lock_t	lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;

	/* can only be used for increasing allocation size */
	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */
	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */
	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {
			simple_unlock(lock);

			if (KERN_SUCCESS != kmem_realloc(kalloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
			}

			simple_lock(lock);

			/*
			 * kmem_realloc() doesn't free the old page range,
			 * so release it before publishing the new address.
			 */
			kmem_free(kalloc_map, (vm_offset_t)*addrp, old_size);
			*addrp = (void *) naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return;
	}

	/* compute the size of the block that we actually allocated */
	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */
	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */
	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (KERN_SUCCESS != kmem_alloc(kalloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */
	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */
	zfree(k_zone[zindex], *addrp);

	/* set up new address */
	*addrp = (void *) naddr;
}
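
/*
 * Hypothetical usage sketch (not part of the original file): krealloc()
 * only grows an allocation, and the caller's simple lock must be held
 * on entry.  krealloc() drops and retakes that lock around blocking
 * allocations, so *bufp may be observed changing by other holders.
 */
#if 0
static void
krealloc_usage_example(simple_lock_t lock, void **bufp)
{
	simple_lock(lock);
	krealloc(bufp, 128, 256, lock);	/* grow from 128 to 256 bytes */
	simple_unlock(lock);
}
#endif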

void *
kget(
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */
	if (size >= kalloc_max_prerounded) {
		/* this will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */
	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}

void
kfree(
	void		*data,
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t freesize;

	/* if size was too large for a zone, then use kmem_free */
	if (size >= kalloc_max_prerounded) {
		kmem_free(kalloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */
	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */
	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}

zone_t
kalloc_zone(
	vm_size_t	size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}

void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = kalloc_large_inuse;
	*cur_size   = kalloc_large_total;
	*max_size   = kalloc_large_max;

	/* avoid a divide by zero when no large allocations are outstanding */
	if (kalloc_large_inuse != 0) {
		*elem_size  = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size  = 0;
		*alloc_size = 0;
	}
	*collectable = 0;
	*exhaustable = 0;
}

void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char		*str,
	uint32_t		flags)
{
	OSMallocTag	OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}

void
OSMalloc_Tagref(
	OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID | OSMT_RELEASED,
		    OSMT_VALID | OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void *)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag		tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID | OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void *)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t		size,
	OSMallocTag		tag)
{
	void		*addr = NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	return(addr);
}

void *
OSMalloc_nowait(
	uint32_t		size,
	OSMallocTag		tag)
{
	void	*addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t		size,
	OSMallocTag		tag)
{
	void	*addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void
OSFree(
	void			*addr,
	uint32_t		size,
	OSMallocTag		tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}
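
/*
 * Hypothetical usage sketch (not part of the original file): the
 * expected OSMalloc tag lifecycle.  A tag names and reference-counts a
 * group of allocations; OSFree() must be given the same size and tag
 * that were passed to OSMalloc().
 */
#if 0
static void
OSMalloc_usage_example(void)
{
	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
	void *buf = OSMalloc(256, tag);

	if (buf != NULL) {
		/* ... use the buffer ... */
		OSFree(buf, 256, tag);
	}
	OSMalloc_Tagfree(tag);	/* tag memory is reclaimed when its refcnt drops to 0 */
}
#endif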