/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * Author: Avadis Tevanian, Jr.
 *
 * General kernel memory allocator.  This allocator is designed
 * to be used by the kernel to manage dynamic memory fast.
 */
#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>
zone_t kalloc_zone(vm_size_t);

vm_map_t	kalloc_map;
vm_size_t	kalloc_map_size = 16 * 1024 * 1024;
vm_size_t	kalloc_max;
vm_size_t	kalloc_max_prerounded;
vm_size_t	kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

unsigned int	kalloc_large_inuse;
vm_size_t	kalloc_large_total;
vm_size_t	kalloc_large_max;
/*
 * All allocations of size less than kalloc_max are rounded to the
 * next highest power of 2.  This allocator is built on top of
 * the zone allocator.  A zone is created for each potential size
 * that we are willing to get in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K;
 * thus 16 is a safe array size for k_zone and k_zone_name.
 *
 * Note that kalloc_max is somewhat confusingly named.
 * It represents the first power of two for which no zone exists.
 * kalloc_max_prerounded is the smallest allocation size, before
 * rounding, for which no zone exists.
 * Also, if the allocation size is more than kalloc_kernmap_size
 * then allocate from the kernel map rather than kalloc_map.
 */
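/*
 * Illustrative sketch (not part of the original file): how a request
 * size maps to one of the power-of-two kalloc zones described above.
 * The helper below mirrors the lookup loop used by kalloc_canblock()
 * and friends; its name is hypothetical and it is not called anywhere
 * in this file.  It assumes KALLOC_MINSIZE is the smallest zone size
 * (16 bytes on typical configurations).
 */
static inline vm_size_t
kalloc_rounded_size_sketch(vm_size_t size)
{
	vm_size_t allocsize = KALLOC_MINSIZE;

	/* walk up the power-of-two sizes until the request fits */
	while (allocsize < size)
		allocsize <<= 1;

	/* e.g. a 100-byte request would be served from the "kalloc.128" zone */
	return (allocsize);
}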
int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",		"kalloc.2",
	"kalloc.4",		"kalloc.8",
	"kalloc.16",		"kalloc.32",
	"kalloc.64",		"kalloc.128",
	"kalloc.256",		"kalloc.512",
	"kalloc.1024",		"kalloc.2048",
	"kalloc.4096",		"kalloc.8192",
	"kalloc.16384",		"kalloc.32768"
};
/*
 * Max number of elements per zone.  zinit rounds things up correctly.
 * Doing things this way permits each zone to have a different maximum size
 * based on need, rather than just guessing; it also
 * means it's patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,		/*  1024 Byte  */
	1024,		/*  2048 Byte  */
	1024,		/*  4096 Byte  */
	4096,		/*  8192 Byte  */
};
/* forward declarations */
void *kalloc_canblock(vm_size_t size, boolean_t canblock);

/* OSMalloc local data declarations */
queue_head_t OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);
/*
 * Initialize the memory allocator.  This should be called only
 * once on a system wide basis (i.e. first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */
void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");
	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;
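	/*
	 * Worked example (illustrative, not part of the original file):
	 * with a 4096-byte PAGE_SIZE the assignments above give
	 * kalloc_max = 16384, so zones cover requests up through 8192
	 * bytes, kalloc_max_prerounded = 16384/2 + 1 = 8193 (the first
	 * request size that bypasses the zones), and kalloc_kernmap_size
	 * = 16384*16 + 1 = 262145, i.e. allocations larger than 256KB
	 * come from kernel_map instead of kalloc_map.
	 */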
	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
}
void *
kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */
	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock)
			return(NULL);

		if (size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			return(NULL);

		kalloc_large_inuse++;
		kalloc_large_total += size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;

		return(addr);
	}
	/* compute the size of the block that we will actually allocate */
	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}
void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}
void
krealloc(
	void		**addrp,
	vm_size_t	old_size,
	vm_size_t	new_size,
	simple_lock_t	lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* can only be used for increasing allocation size */
	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */
	if (old_size == 0) {
		naddr = kalloc(new_size);
		*addrp = naddr;
		return;
	}
	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */
	if (old_size >= kalloc_max_prerounded) {
		if (old_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {
			if (KERN_SUCCESS != kmem_realloc(alloc_map,
					(vm_offset_t)*addrp, old_size,
					(vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
			}

			*addrp = (void *) naddr;

			/* kmem_realloc() doesn't free old page range. */
			kmem_free(alloc_map, (vm_offset_t)*addrp, old_size);

			kalloc_large_total += (new_size - old_size);
			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return;
	}
	/* compute the size of the block that we actually allocated */
	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */
	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */
	if (new_size >= kalloc_max_prerounded) {
		if (new_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;
		if (KERN_SUCCESS != kmem_alloc(alloc_map,
				(vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}

	/* copy existing data */
	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */
	zfree(k_zone[zindex], *addrp);

	/* set up new address */
	*addrp = (void *) naddr;
}
void *
kget(
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */
	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */
	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}
void
kfree(
	void		*data,
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t freesize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* if size was too large for a zone, then use kmem_free */
	if (size >= kalloc_max_prerounded) {
		if (size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;
		kmem_free(alloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */
	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */
	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}
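/*
 * Illustrative usage sketch (not part of the original file): unlike
 * malloc()/free(), kfree() above takes the allocation size as an
 * argument, so a caller must remember the size it passed to kalloc()
 * in order to return the block to the proper zone (or to kmem_free()
 * for oversized blocks).  The helper name is hypothetical.
 */
static inline void
kalloc_kfree_usage_sketch(void)
{
	vm_size_t len = 200;
	void *buf;

	buf = kalloc(len);		/* request is rounded up to a zone size internally */
	if (buf != NULL) {
		bzero(buf, len);	/* caller initializes the memory itself */
		kfree(buf, len);	/* pass the original request size back */
	}
}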
zone_t
kalloc_zone(
	vm_size_t	size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = kalloc_large_inuse;
	*cur_size   = kalloc_large_total;
	*max_size   = kalloc_large_max;
	*elem_size  = kalloc_large_total / kalloc_large_inuse;
	*alloc_size = kalloc_large_total / kalloc_large_inuse;
	*collectable = 0;
	*exhaustable = 0;
}
void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}
OSMallocTag
OSMalloc_Tagalloc(
	const char	*str,
	uint32_t	flags)
{
	OSMallocTag	OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}
void
OSMalloc_Tagref(
	OSMallocTag	tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}
void
OSMalloc_Tagrele(
	OSMallocTag	tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}
void
OSMalloc_Tagfree(
	OSMallocTag	tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void*)tag, sizeof(*tag));
	}
}
void *
OSMalloc(
	uint32_t	size,
	OSMallocTag	tag)
{
	void		*addr = NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	return(addr);
}
void *
OSMalloc_nowait(
	uint32_t	size,
	OSMallocTag	tag)
{
	void	*addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}
void *
OSMalloc_noblock(
	uint32_t	size,
	OSMallocTag	tag)
{
	void	*addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}
void
OSFree(
	void		*addr,
	uint32_t	size,
	OSMallocTag	tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void*)addr, size);

	OSMalloc_Tagrele(tag);
}
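/*
 * Illustrative usage sketch (not part of the original file): the
 * OSMalloc interfaces above are tag-based, so a client first allocates
 * a tag describing itself, allocates and frees memory against that
 * tag, and finally drops the tag.  The helper name and tag string are
 * hypothetical; the calls follow the declarations in
 * <libkern/OSMalloc.h>.
 */
static inline void
OSMalloc_usage_sketch(void)
{
	OSMallocTag tag;
	void *buf;

	tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);

	buf = OSMalloc(1024, tag);	/* wired allocation; OSMT_PAGEABLE tags may page */
	if (buf != NULL)
		OSFree(buf, 1024, tag);	/* size must match the original request */

	OSMalloc_Tagfree(tag);		/* drop the client's tag reference */
}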