/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * HISTORY
 *
 * Revision 1.1.1.1  1998/09/22 21:05:34  wsanchez
 * Import of Mac OS X kernel (~semeria)
 *
 * Revision 1.1.1.1  1998/03/07 02:25:55  wsanchez
 * Import of OSF Mach kernel (~mburg)
 *
 * Revision 1.2.19.5  1995/02/24 15:20:29  alanl
 * Lock package cleanup.
 * Merge with DIPC2_SHARED.
 * [1995/01/05 15:11:02  alanl]
 *
 * Revision 1.2.28.2  1994/11/10 06:12:50  dwm
 * mk6 CR764 - s/spinlock/simple_lock/ (name change only)
 * [1994/11/10 05:28:35  dwm]
 *
 * Revision 1.2.28.1  1994/11/04 10:07:40  dwm
 * mk6 CR668 - 1.3b26 merge
 * * Revision 1.2.2.4  1993/11/08 15:04:18  gm
 * CR9710: Updated to new zinit() and zone_change() interfaces.
 * [1994/11/04 09:25:48  dwm]
 *
 * Revision 1.2.19.3  1994/09/23 02:20:52  ezf
 * change marker to not FREE
 * [1994/09/22 21:33:57  ezf]
 *
 * Revision 1.2.19.2  1994/06/14 18:36:36  bolinger
 * NMK17.2 merge: Replace simple_lock ops.
 * [1994/06/14 18:35:17  bolinger]
 *
 * Revision 1.2.19.1  1994/06/14 17:04:23  bolinger
 * Merge up to NMK17.2.
 * [1994/06/14 16:54:19  bolinger]
 *
 * Revision 1.2.23.3  1994/10/14 12:24:33  sjs
 * Removed krealloc_spinl routine: the newer locking scheme makes it
 *
 * Revision 1.2.23.2  1994/08/11 14:42:46  rwd
 * Changed zcollectable to use zchange.
 *
 * Revision 1.2.17.2  1994/07/08 01:58:45  alanl
 * Change comment to match function name.
 * [1994/07/08 01:47:59  alanl]
 *
 * Revision 1.2.17.1  1994/05/26 16:20:38  sjs
 * Added krealloc_spinl: same as krealloc but uses spin locks.
 *
 * Revision 1.2.23.1  1994/08/04 02:24:55  mmp
 * Added krealloc_spinl: same as krealloc but uses spin locks.
 *
 * Revision 1.2.13.1  1994/02/11 14:27:12  paire
 * Changed krealloc() to make it work on a MP system. Added a new parameter
 * which is the simple lock that should be held while modifying the memory
 * area already initialized.
 * Change from NMK16.1 [93/09/02 paire]
 *
 * Do not set debug for kalloc zones as default. It wastes
 * Change from NMK16.1 [93/08/16 bernadat]
 *
 * Revision 1.2.2.3  1993/07/28 17:15:44  bernard
 * CR9523 -- Prototypes.
 * [1993/07/27 20:14:12  bernard]
 *
 * Revision 1.2.2.2  1993/06/02 23:37:46  jeffc
 * Added to OSF/1 R1.3 from NMK15.0.
 * [1993/06/02 21:12:59  jeffc]
 *
 * Revision 1.2  1992/12/07 21:28:42  robert
 * integrate any changes below for 14.0 (branch from 13.16 base)
 *
 * Joseph Barrera (jsb) at Carnegie-Mellon University 11-Sep-92
 * Added krealloc. Added kalloc_max_prerounded for quicker choice between
 * zalloc and kmem_alloc. Renamed MINSIZE to KALLOC_MINSIZE.
 * [1992/12/06 19:47:16  robert]
 *
 * Revision 1.1  1992/09/30 02:09:23  robert
 *
 * Revision 2.9  91/05/14 16:43:17  mrt
 * Correcting copyright
 *
 * Revision 2.8  91/03/16 14:50:37  rpd
 * Updated for new kmem_alloc interface.
 *
 * Revision 2.7  91/02/05 17:27:22  mrt
 * Changed to new Mach copyright
 * [91/02/01 16:14:12  mrt]
 *
 * Revision 2.6  90/06/19 22:59:06  rpd
 * Made the big kalloc zones collectable.
 *
 * Revision 2.5  90/06/02 14:54:47  rpd
 * Added kalloc_max, kalloc_map_size.
 * [90/03/26 22:06:39  rpd]
 *
 * Revision 2.4  90/01/11 11:43:13  dbg
 *
 * Revision 2.3  89/09/08 11:25:51  dbg
 * MACH_KERNEL: remove non-MACH data types.
 *
 * Revision 2.2  89/08/31 16:18:59  rwd
 * [89/08/23 15:41:37  rwd]
 *
 * Revision 2.6  89/08/02 08:03:28  jsb
 * Make all kalloc zones 8 MB big. (No more kalloc panics!)
 * [89/08/01 14:10:17  jsb]
 *
 * Revision 2.4  89/04/05 13:03:10  rvb
 * Guarantee a zone max of at least 100 elements or 10 pages
 * which ever is greater. Afs (AllocDouble()) puts a great demand
 * on the 2048 zone and used to blow away.
 *
 * Revision 2.3  89/02/25 18:04:39  gm0w
 * Changes for cleanup.
 *
 * Revision 2.2  89/01/18 02:07:04  jsb
 * Give each kalloc zone a meaningful name (for panics);
 * create a zone for each power of 2 between MINSIZE
 * and PAGE_SIZE, instead of using (obsoleted) NQUEUES.
 * [89/01/17 10:16:33  jsb]
 *
 * 13-Feb-88  John Seamons (jks) at NeXT
 * Updated to use kmem routines instead of vmem routines.
 *
 * 21-Jun-85  Avadis Tevanian (avie) at Carnegie-Mellon University
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory fast.
 */
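/*
 * Caller-side sketch (not from the original file): how a kernel client is
 * typically expected to use this interface.  The struct and helper names
 * below are hypothetical; what is real is the contract implemented in this
 * file: kalloc() returns a vm_offset_t, and kfree() must be given the
 * original request size, since no size header is stored with the block.
 */
#if 0
struct example_buf {
	vm_size_t	len;
	char		data[1];
};

static struct example_buf *
example_alloc(vm_size_t len)
{
	struct example_buf *b;

	/* callers cast the vm_offset_t result to a pointer */
	b = (struct example_buf *) kalloc(sizeof (struct example_buf) + len);
	if (b != 0)
		b->len = len;
	return (b);
}

static void
example_free(struct example_buf *b)
{
	/* the caller, not the allocator, remembers the size */
	kfree((vm_offset_t) b, sizeof (struct example_buf) + b->len);
}
#endif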
#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
zone_t kalloc_zone(vm_size_t);

vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;
/*
 *	All allocations of size less than kalloc_max are rounded to the
 *	next highest power of 2.  This allocator is built on top of
 *	the zone allocator.  A zone is created for each potential size
 *	that we are willing to get in small blocks.
 *
 *	We assume that kalloc_max is not greater than 64K;
 *	thus 16 is a safe array size for k_zone and k_zone_name.
 *
 *	Note that kalloc_max is somewhat confusingly named.
 *	It represents the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 */
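/*
 * Illustrative sketch (not part of the original allocator): the power-of-2
 * rounding described above.  KALLOC_MINSIZE comes from kern/kalloc.h; the
 * helper name is hypothetical.  The real allocator inlines this walk and
 * advances a zone index alongside allocsize (see kalloc_canblock() below).
 */
#if 0
static vm_size_t
example_rounded_size(vm_size_t size)
{
	vm_size_t allocsize = KALLOC_MINSIZE;

	/* walk up the powers of two until the request fits */
	while (allocsize < size)
		allocsize <<= 1;

	/* e.g. a 24-byte request rounds to 32, a 3000-byte request to 4096 */
	return (allocsize);
}
#endif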
int first_k_zone = -1;
struct zone *k_zone[16];
static char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};
/*
 *  Max number of elements per zone.  zinit rounds things up correctly.
 *  Doing things this way permits each zone to have a different maximum size
 *  based on need, rather than just guessing; it also
 *  means it's patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
      1024,		/*      1 Byte	*/
      1024,		/*      2 Byte	*/
      1024,		/*      4 Byte	*/
      1024,		/*      8 Byte	*/
      1024,		/*     16 Byte	*/
      4096,		/*     32 Byte	*/
      4096,		/*     64 Byte	*/
      4096,		/*    128 Byte	*/
      4096,		/*    256 Byte	*/
      1024,		/*    512 Byte	*/
      1024,		/*   1024 Byte	*/
      1024,		/*   2048 Byte	*/
      1024,		/*   4096 Byte	*/
      4096,		/*   8192 Byte	*/
      64,		/*  16384 Byte	*/
      64,		/*  32768 Byte	*/
};
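/*
 * Worked example (an observation, not original code): kalloc_init() below
 * hands zinit() k_zone_max[i] * size as the byte cap for zone i, so with the
 * table above the "kalloc.8192" zone is capped at 4096 * 8192 bytes (32 MB)
 * and the "kalloc.1024" zone at 1024 * 1024 bytes (1 MB).
 */
#if 0
static const vm_size_t example_kalloc_8192_cap = 4096 * 8192;	/* 32 MB */
#endif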
/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system wide basis (i.e. first processor to get here
 *	does the initialization).
 *
 *	This initializes all of the zones.
 */
void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, TRUE, &kalloc_map);
	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;

	/*
	 *	Allocate a zone for each size we are going to handle.
	 *	We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
}
vm_offset_t
kalloc_canblock(
		vm_size_t size,
		boolean_t canblock)
{
	register int zindex;
	register vm_size_t allocsize;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		vm_offset_t addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(0);
		}
		if (kmem_alloc(kalloc_map, &addr, size) != KERN_SUCCESS)
			return(0);

		kalloc_large_inuse++;
		kalloc_large_total += size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;

		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}
vm_offset_t
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

vm_offset_t
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}
void
krealloc(
	vm_offset_t	*addrp,
	vm_size_t	old_size,
	vm_size_t	new_size,
	simple_lock_t	lock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_offset_t naddr;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page_32(old_size);
		new_size = round_page_32(new_size);
		if (new_size > old_size) {

			/* drop the caller's lock while kmem_realloc may block */
			simple_unlock(lock);
			if (kmem_realloc(kalloc_map, *addrp, old_size, &naddr,
					 new_size) != KERN_SUCCESS) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}
			simple_lock(lock);

			/* kmem_realloc() doesn't free old page range. */
			kmem_free(kalloc_map, *addrp, old_size);

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

			*addrp = naddr;
		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (kmem_alloc(kalloc_map, &naddr, new_size) != KERN_SUCCESS) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = 0;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = naddr;
}
vm_offset_t
kget(
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}
void
kfree(
	vm_offset_t	data,
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t freesize;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		kmem_free(kalloc_map, data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}
zone_t
kalloc_zone(
	vm_size_t	size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count      = kalloc_large_inuse;
	*cur_size   = kalloc_large_total;
	*max_size   = kalloc_large_max;
	*elem_size  = kalloc_large_total / kalloc_large_inuse;
	*alloc_size = kalloc_large_total / kalloc_large_inuse;
	*collectable = 0;
	*exhaustable = 0;
}