/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory fast.
 */
#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>
zone_t kalloc_zone(vm_size_t);

vm_map_t	kalloc_map;
vm_size_t	kalloc_map_size = 16 * 1024 * 1024;
vm_size_t	kalloc_max;
vm_size_t	kalloc_max_prerounded;

unsigned int	kalloc_large_inuse;
vm_size_t	kalloc_large_total;
vm_size_t	kalloc_large_max;
/*
 *	All allocations of size less than kalloc_max are rounded to the
 *	next highest power of 2.  This allocator is built on top of
 *	the zone allocator.  A zone is created for each potential size
 *	that we are willing to get in small blocks.
 *
 *	We assume that kalloc_max is not greater than 64K;
 *	thus 16 is a safe array size for k_zone and k_zone_name.
 *
 *	Note that kalloc_max is somewhat confusingly named.
 *	It represents the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 */
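/*
 * Illustrative sketch (not part of the original file): the rounding rule
 * described above, written out as a hypothetical helper.  A request is
 * served from the smallest power-of-2 zone that can hold it, so e.g. a
 * 100-byte request is satisfied from the 128-byte "kalloc.128" zone.
 * The allocation and free paths below open-code this same doubling loop.
 */
#if 0	/* example only */
static vm_size_t
kalloc_rounded_size(vm_size_t size)
{
	vm_size_t allocsize = KALLOC_MINSIZE;

	while (allocsize < size)
		allocsize <<= 1;
	return (allocsize);
}
#endif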
int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",		"kalloc.2",
	"kalloc.4",		"kalloc.8",
	"kalloc.16",		"kalloc.32",
	"kalloc.64",		"kalloc.128",
	"kalloc.256",		"kalloc.512",
	"kalloc.1024",		"kalloc.2048",
	"kalloc.4096",		"kalloc.8192",
	"kalloc.16384",		"kalloc.32768"
};
/*
 *  Max number of elements per zone.  zinit rounds things up correctly.
 *  Doing things this way permits each zone to have a different maximum size
 *  based on need, rather than just guessing; it also
 *  means it's patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
      1024,		/*  1024 Byte */
      1024,		/*  2048 Byte */
      1024,		/*  4096 Byte */
      4096,		/*  8192 Byte */
/* forward declarations */
void * kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);

/* OSMalloc local data declarations */
queue_head_t	OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);
/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system wide basis (i.e. first processor to get here
 *	does the initialization).
 *
 *	This initializes all of the zones.
 */

	kern_return_t retval;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");
	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
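	/*
	 * Worked example (illustrative): with a 4096-byte page size,
	 * kalloc_max is raised to 16384, so the power-of-2 zones cover
	 * requests up to 16 KB and kalloc_max_prerounded becomes 8193;
	 * any request of 8193 bytes or more bypasses the zones entirely.
	 */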
	/*
	 *	Allocate a zone for each size we are going to handle.
	 *	We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
	register vm_size_t allocsize;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {

		/* kmem_alloc could block so we return if noblock */
		if (!canblock)
			return(0);

		if (kmem_alloc(kalloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}
	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
	return( kalloc_canblock(size, TRUE) );

	return( kalloc_canblock(size, FALSE) );
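/*
 * Illustrative usage sketch (not part of the original file): kalloc() may
 * block waiting for memory, kalloc_noblock() may return NULL rather than
 * block, and kfree() requires the caller to pass back the allocation size
 * so the block can be routed to the right zone (or to kmem_free).
 */
#if 0	/* example only */
static void
kalloc_example_usage(void)
{
	void *buf;

	buf = kalloc(100);			/* rounds up to the kalloc.128 zone */
	if (buf != NULL)
		kfree(buf, 100);		/* caller passes the original size back */

	buf = kalloc_noblock(100);		/* NULL rather than blocking */
	if (buf != NULL)
		kfree(buf, 100);
}
#endif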
	register vm_size_t allocsize;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	naddr = kalloc(new_size);
	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			if (KERN_SUCCESS != kmem_realloc(kalloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
			}

			*addrp = (void *) naddr;

			/* kmem_realloc() doesn't free old page range. */
			kmem_free(kalloc_map, (vm_offset_t)*addrp, old_size);

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return;
	}
	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}
	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	if (new_size >= kalloc_max_prerounded) {
		if (KERN_SUCCESS != kmem_alloc(kalloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
		}

		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
	register vm_size_t freesize;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		kmem_free(kalloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;
		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
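	/*
	 * For example, kalloc_zone(100) returns the same zone that kalloc(100)
	 * would allocate from, i.e. the 128-byte "kalloc.128" zone; sizes
	 * above kalloc_max have no backing zone.
	 */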
void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = kalloc_large_inuse;
	*cur_size   = kalloc_large_total;
	*max_size   = kalloc_large_max;
	*elem_size  = kalloc_large_total / kalloc_large_inuse;
	*alloc_size = kalloc_large_total / kalloc_large_inuse;
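	/*
	 * Note: elem_size and alloc_size above are reported as the average
	 * size of the outstanding large (kmem-allocated) blocks, i.e.
	 * kalloc_large_total divided by kalloc_large_inuse.
	 */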
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
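	/*
	 * Tag lifecycle: a freshly allocated tag starts out OSMT_VALID with a
	 * reference count of 1.  OSMalloc_Tagref() and OSMalloc_Tagrele() bump
	 * and drop that count as allocations come and go, and the tag is only
	 * kfree'd once it has been marked released and the count drops to zero.
	 */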
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void*)tag, sizeof(*tag));
	}
	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);
	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);
	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void*)addr, size);

	OSMalloc_Tagrele(tag);
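/*
 * Illustrative usage of the tag interface above (not part of the original
 * file): a client allocates a named tag once, charges its allocations to
 * that tag, and drops the tag when it is done.  OSMT_DEFAULT is assumed
 * here to request ordinary (non-pageable) memory.
 */
#if 0	/* example only */
static void
osmalloc_example_usage(void)
{
	OSMallocTag	tag;
	void		*p;

	tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
	p = OSMalloc(256, tag);			/* wired, backed by kalloc() */
	if (p != NULL)
		OSFree(p, 256, tag);		/* size must match the allocation */
	OSMalloc_Tagfree(tag);
}
#endif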