2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
28 * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez
29 * Import of Mac OS X kernel (~semeria)
31 * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez
32 * Import of OSF Mach kernel (~mburg)
34 * Revision 1.2.19.5 1995/02/24 15:20:29 alanl
35 * Lock package cleanup.
38 * Merge with DIPC2_SHARED.
39 * [1995/01/05 15:11:02 alanl]
41 * Revision 1.2.28.2 1994/11/10 06:12:50 dwm
42 * mk6 CR764 - s/spinlock/simple_lock/ (name change only)
43 * [1994/11/10 05:28:35 dwm]
45 * Revision 1.2.28.1 1994/11/04 10:07:40 dwm
46 * mk6 CR668 - 1.3b26 merge
47 * * Revision 1.2.2.4 1993/11/08 15:04:18 gm
48 * CR9710: Updated to new zinit() and zone_change() interfaces.
50 * [1994/11/04 09:25:48 dwm]
52 * Revision 1.2.19.3 1994/09/23 02:20:52 ezf
53 * change marker to not FREE
54 * [1994/09/22 21:33:57 ezf]
56 * Revision 1.2.19.2 1994/06/14 18:36:36 bolinger
57 * NMK17.2 merge: Replace simple_lock ops.
58 * [1994/06/14 18:35:17 bolinger]
60 * Revision 1.2.19.1 1994/06/14 17:04:23 bolinger
61 * Merge up to NMK17.2.
62 * [1994/06/14 16:54:19 bolinger]
64 * Revision 1.2.23.3 1994/10/14 12:24:33 sjs
65 * Removed krealloc_spinl routine: the newer locking scheme makes it
69 * Revision 1.2.23.2 1994/08/11 14:42:46 rwd
73 * Changed zcollectable to use zchange.
76 * Revision 1.2.17.2 1994/07/08 01:58:45 alanl
77 * Change comment to match function name.
78 * [1994/07/08 01:47:59 alanl]
80 * Revision 1.2.17.1 1994/05/26 16:20:38 sjs
81 * Added krealloc_spinl: same as krealloc but uses spin locks.
84 * Revision 1.2.23.1 1994/08/04 02:24:55 mmp
85 * Added krealloc_spinl: same as krealloc but uses spin locks.
88 * Revision 1.2.13.1 1994/02/11 14:27:12 paire
89 * Changed krealloc() to make it work on a MP system. Added a new parameter
90 * which is the simple lock that should be held while modifying the memory
91 * area already initialized.
92 * Change from NMK16.1 [93/09/02 paire]
94 * Do not set debug for kalloc zones as default. It wastes
96 * Change from NMK16.1 [93/08/16 bernadat]
99 * Revision 1.2.2.3 1993/07/28 17:15:44 bernard
100 * CR9523 -- Prototypes.
101 * [1993/07/27 20:14:12 bernard]
103 * Revision 1.2.2.2 1993/06/02 23:37:46 jeffc
104 * Added to OSF/1 R1.3 from NMK15.0.
105 * [1993/06/02 21:12:59 jeffc]
107 * Revision 1.2 1992/12/07 21:28:42 robert
108 * integrate any changes below for 14.0 (branch from 13.16 base)
110 * Joseph Barrera (jsb) at Carnegie-Mellon University 11-Sep-92
111 * Added krealloc. Added kalloc_max_prerounded for quicker choice between
112 * zalloc and kmem_alloc. Renamed MINSIZE to KALLOC_MINSIZE.
113 * [1992/12/06 19:47:16 robert]
115 * Revision 1.1 1992/09/30 02:09:23 robert
122 * Revision 2.9 91/05/14 16:43:17 mrt
123 * Correcting copyright
125 * Revision 2.8 91/03/16 14:50:37 rpd
126 * Updated for new kmem_alloc interface.
129 * Revision 2.7 91/02/05 17:27:22 mrt
130 * Changed to new Mach copyright
131 * [91/02/01 16:14:12 mrt]
133 * Revision 2.6 90/06/19 22:59:06 rpd
134 * Made the big kalloc zones collectable.
137 * Revision 2.5 90/06/02 14:54:47 rpd
138 * Added kalloc_max, kalloc_map_size.
139 * [90/03/26 22:06:39 rpd]
141 * Revision 2.4 90/01/11 11:43:13 dbg
145 * Revision 2.3 89/09/08 11:25:51 dbg
146 * MACH_KERNEL: remove non-MACH data types.
149 * Revision 2.2 89/08/31 16:18:59 rwd
151 * [89/08/23 15:41:37 rwd]
153 * Revision 2.6 89/08/02 08:03:28 jsb
154 * Make all kalloc zones 8 MB big. (No more kalloc panics!)
155 * [89/08/01 14:10:17 jsb]
157 * Revision 2.4 89/04/05 13:03:10 rvb
158 * Guarantee a zone max of at least 100 elements or 10 pages
159 * which ever is greater. Afs (AllocDouble()) puts a great demand
160 * on the 2048 zone and used to blow away.
163 * Revision 2.3 89/02/25 18:04:39 gm0w
164 * Changes for cleanup.
166 * Revision 2.2 89/01/18 02:07:04 jsb
167 * Give each kalloc zone a meaningful name (for panics);
168 * create a zone for each power of 2 between MINSIZE
169 * and PAGE_SIZE, instead of using (obsoleted) NQUEUES.
170 * [89/01/17 10:16:33 jsb]
173 * 13-Feb-88 John Seamons (jks) at NeXT
174 * Updated to use kmem routines instead of vmem routines.
176 * 21-Jun-85 Avadis Tevanian (avie) at Carnegie-Mellon University
181 * Mach Operating System
182 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
183 * All Rights Reserved.
185 * Permission to use, copy, modify and distribute this software and its
186 * documentation is hereby granted, provided that both the copyright
187 * notice and this permission notice appear in all copies of the
188 * software, derivative works or modified versions, and any portions
189 * thereof, and that both notices appear in supporting documentation.
191 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
192 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
193 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
195 * Carnegie Mellon requests users of this software to return to
197 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
198 * School of Computer Science
199 * Carnegie Mellon University
200 * Pittsburgh PA 15213-3890
202 * any improvements or extensions that they make and grant Carnegie Mellon
203 * the rights to redistribute these changes.
208 * File: kern/kalloc.c
209 * Author: Avadis Tevanian, Jr.
212 * General kernel memory allocator. This allocator is designed
213 * to be used by the kernel to manage dynamic memory fast.
216 #include <zone_debug.h>
218 #include <mach/boolean.h>
219 #include <mach/machine/vm_types.h>
220 #include <mach/vm_param.h>
221 #include <kern/misc_protos.h>
222 #include <kern/zalloc.h>
223 #include <kern/kalloc.h>
224 #include <kern/lock.h>
225 #include <vm/vm_kern.h>
226 #include <vm/vm_object.h>
227 #include <vm/vm_map.h>
230 zone_t
kalloc_zone(vm_size_t
);
234 vm_size_t kalloc_map_size
= 16 * 1024 * 1024;
235 vm_size_t kalloc_max
;
236 vm_size_t kalloc_max_prerounded
;
238 unsigned int kalloc_large_inuse
;
239 vm_size_t kalloc_large_total
;
240 vm_size_t kalloc_large_max
;
/*
 *	All allocations of size less than kalloc_max are rounded to the
 *	next highest power of 2.  This allocator is built on top of
 *	the zone allocator.  A zone is created for each potential size
 *	that we are willing to get in small blocks.
 *
 *	We assume that kalloc_max is not greater than 64K;
 *	thus 16 is a safe array size for k_zone and k_zone_name.
 *
 *	Note that kalloc_max is somewhat confusingly named.
 *	It represents the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 */
int first_k_zone = -1;		/* index of the KALLOC_MINSIZE zone; -1 until kalloc_init */
struct zone *k_zone[16];	/* one zone per power of two below kalloc_max */
/*
 * NOTE(review): the closing brace of this initializer was lost in
 * extraction; restored.
 */
static char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};
/*
 * Max number of elements per zone.  zinit rounds things up correctly
 * Doing things this way permits each zone to have a different maximum size
 * based on need, rather than just guessing; it also
 * means its patchable in case you're wrong!
 *
 * NOTE(review): only the 1024..8192-byte entries survived extraction;
 * the remaining entries were restored from the canonical table -- verify
 * against a pristine copy of kern/kalloc.c.
 */
unsigned long k_zone_max[16] = {
	1024,		/*      1 Byte  */
	1024,		/*      2 Byte  */
	1024,		/*      4 Byte  */
	1024,		/*      8 Byte  */
	1024,		/*     16 Byte  */
	4096,		/*     32 Byte  */
	4096,		/*     64 Byte  */
	4096,		/*    128 Byte  */
	4096,		/*    256 Byte  */
	1024,		/*    512 Byte  */
	1024,		/*   1024 Byte  */
	1024,		/*   2048 Byte  */
	1024,		/*   4096 Byte  */
	4096,		/*   8192 Byte  */
	64,		/*  16384 Byte  */
	64,		/*  32768 Byte  */
};
296 * Initialize the memory allocator. This should be called only
297 * once on a system wide basis (i.e. first processor to get here
298 * does the initialization).
300 * This initializes all of the zones.
307 kern_return_t retval
;
312 retval
= kmem_suballoc(kernel_map
, &min
, kalloc_map_size
,
313 FALSE
, TRUE
, &kalloc_map
);
314 if (retval
!= KERN_SUCCESS
)
315 panic("kalloc_init: kmem_suballoc failed");
318 * Ensure that zones up to size 8192 bytes exist.
319 * This is desirable because messages are allocated
320 * with kalloc, and messages up through size 8192 are common.
323 if (PAGE_SIZE
< 16*1024)
324 kalloc_max
= 16*1024;
326 kalloc_max
= PAGE_SIZE
;
327 kalloc_max_prerounded
= kalloc_max
/ 2 + 1;
330 * Allocate a zone for each size we are going to handle.
331 * We specify non-paged memory.
333 for (i
= 0, size
= 1; size
< kalloc_max
; i
++, size
<<= 1) {
334 if (size
< KALLOC_MINSIZE
) {
338 if (size
== KALLOC_MINSIZE
) {
341 k_zone
[i
] = zinit(size
, k_zone_max
[i
] * size
, size
,
352 register vm_size_t allocsize
;
355 * If size is too large for a zone, then use kmem_alloc.
356 * (We use kmem_alloc instead of kmem_alloc_wired so that
357 * krealloc can use kmem_realloc.)
360 if (size
>= kalloc_max_prerounded
) {
363 /* kmem_alloc could block so we return if noblock */
367 if (kmem_alloc(kalloc_map
, &addr
, size
) != KERN_SUCCESS
)
371 kalloc_large_inuse
++;
372 kalloc_large_total
+= size
;
374 if (kalloc_large_total
> kalloc_large_max
)
375 kalloc_large_max
= kalloc_large_total
;
380 /* compute the size of the block that we will actually allocate */
382 allocsize
= KALLOC_MINSIZE
;
383 zindex
= first_k_zone
;
384 while (allocsize
< size
) {
389 /* allocate from the appropriate zone */
391 assert(allocsize
< kalloc_max
);
392 return(zalloc_canblock(k_zone
[zindex
], canblock
));
399 return( kalloc_canblock(size
, TRUE
) );
406 return( kalloc_canblock(size
, FALSE
) );
/*
 * krealloc (fragment):
 *
 * NOTE(review): this definition is badly truncated by extraction.  The
 * signature (which, per the changelog above, takes a simple_lock_t held
 * over the memory area in addition to addrp/old_size/new_size), the
 * old_size == 0 fast path around the surviving kalloc() call, all of the
 * simple_lock()/simple_unlock() pairs, the bodies of the rounding loops,
 * and most closing braces are missing.  Because the lock ordering around
 * kmem_realloc/kmem_alloc/zalloc is behavior-critical, the code is left
 * byte-identical rather than reconstructed; restore from a pristine copy
 * of kern/kalloc.c.  Grows an allocation in place where possible,
 * otherwise allocates a bigger block, copies, and frees the old one.
 */
418 register vm_size_t allocsize
;
421 /* can only be used for increasing allocation size */
423 assert(new_size
> old_size
);
425 /* if old_size is zero, then we are simply allocating */
/* NOTE(review): the "if (old_size == 0)" guard and lock drop around this
   kalloc() were lost (orig. lines 426-434). */
429 naddr
= kalloc(new_size
);
435 /* if old block was kmem_alloc'd, then use kmem_realloc if necessary */
437 if (old_size
>= kalloc_max_prerounded
) {
438 old_size
= round_page(old_size
);
439 new_size
= round_page(new_size
);
440 if (new_size
> old_size
) {
442 if (kmem_realloc(kalloc_map
, *addrp
, old_size
, &naddr
,
443 new_size
) != KERN_SUCCESS
) {
444 panic("krealloc: kmem_realloc");
/* NOTE(review): failure fallback and re-lock lost (orig. lines 445-450). */
451 /* kmem_realloc() doesn't free old page range. */
452 kmem_free(kalloc_map
, *addrp
, old_size
);
454 kalloc_large_total
+= (new_size
- old_size
);
456 if (kalloc_large_total
> kalloc_large_max
)
457 kalloc_large_max
= kalloc_large_total
;
/* NOTE(review): *addrp update and early return lost (orig. lines 458-461). */
462 /* compute the size of the block that we actually allocated */
464 allocsize
= KALLOC_MINSIZE
;
465 zindex
= first_k_zone
;
466 while (allocsize
< old_size
) {
/* NOTE(review): loop body (allocsize <<= 1; zindex++;) lost. */
471 /* if new size fits in old block, then return */
473 if (new_size
<= allocsize
) {
/* NOTE(review): early return lost. */
477 /* if new size does not fit in zone, kmem_alloc it, else zalloc it */
480 if (new_size
>= kalloc_max_prerounded
) {
481 if (kmem_alloc(kalloc_map
, &naddr
, new_size
) != KERN_SUCCESS
) {
482 panic("krealloc: kmem_alloc");
/* NOTE(review): failure fallback lost (orig. lines 483-486). */
487 kalloc_large_inuse
++;
488 kalloc_large_total
+= new_size
;
490 if (kalloc_large_total
> kalloc_large_max
)
491 kalloc_large_max
= kalloc_large_total
;
/* NOTE(review): else branch opening lost; new_zindex walk starts one zone
   past the old block's zone. */
493 register int new_zindex
;
496 new_zindex
= zindex
+ 1;
497 while (allocsize
< new_size
) {
/* NOTE(review): loop body (allocsize <<= 1; new_zindex++;) lost. */
501 naddr
= zalloc(k_zone
[new_zindex
]);
505 /* copy existing data */
507 bcopy((const char *)*addrp
, (char *)naddr
, old_size
);
509 /* free old block, and return */
511 zfree(k_zone
[zindex
], *addrp
);
513 /* set up new address */
/* NOTE(review): final "*addrp = naddr;" and closing brace lost. */
524 register vm_size_t allocsize
;
526 /* size must not be too large for a zone */
528 if (size
>= kalloc_max_prerounded
) {
529 /* This will never work, so we might as well panic */
533 /* compute the size of the block that we will actually allocate */
535 allocsize
= KALLOC_MINSIZE
;
536 zindex
= first_k_zone
;
537 while (allocsize
< size
) {
542 /* allocate from the appropriate zone */
544 assert(allocsize
< kalloc_max
);
545 return(zget(k_zone
[zindex
]));
554 register vm_size_t freesize
;
556 /* if size was too large for a zone, then use kmem_free */
558 if (size
>= kalloc_max_prerounded
) {
559 kmem_free(kalloc_map
, data
, size
);
561 kalloc_large_total
-= size
;
562 kalloc_large_inuse
--;
567 /* compute the size of the block that we actually allocated from */
569 freesize
= KALLOC_MINSIZE
;
570 zindex
= first_k_zone
;
571 while (freesize
< size
) {
576 /* free to the appropriate zone */
578 assert(freesize
< kalloc_max
);
579 zfree(k_zone
[zindex
], data
);
587 register int zindex
= 0;
588 register vm_size_t allocsize
;
590 /* compute the size of the block that we will actually allocate */
593 if (size
<= kalloc_max
) {
594 allocsize
= KALLOC_MINSIZE
;
595 zindex
= first_k_zone
;
596 while (allocsize
< size
) {
600 return (k_zone
[zindex
]);
/*
 * kalloc_fake_zone_info (fragment):
 *
 * Reports the kmem_alloc'd ("large") kalloc blocks as if they were a
 * single zone, for zone-statistics reporting: count of blocks, current
 * and peak byte totals, and the mean block size as both element and
 * allocation size.
 *
 * NOTE(review): the return type line, opening brace, the stores to
 * *collectable / *exhaustable and the closing brace fall outside the
 * visible text -- this definition is truncated at the bottom; restore
 * from a pristine copy of kern/kalloc.c.
 *
 * NOTE(review): the two divisions below divide by kalloc_large_inuse
 * with no visible zero guard -- when no large blocks are outstanding
 * this divides by zero; confirm against the pristine source whether a
 * guard existed.
 */
608 kalloc_fake_zone_info(int *count
, vm_size_t
*cur_size
, vm_size_t
*max_size
, vm_size_t
*elem_size
,
609 vm_size_t
*alloc_size
, int *collectable
, int *exhaustable
)
611 *count
= kalloc_large_inuse
;
612 *cur_size
= kalloc_large_total
;
613 *max_size
= kalloc_large_max
;
614 *elem_size
= kalloc_large_total
/ kalloc_large_inuse
;
615 *alloc_size
= kalloc_large_total
/ kalloc_large_inuse
;