]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/kalloc.c
xnu-517.tar.gz
[apple/xnu.git] / osfmk / kern / kalloc.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * HISTORY
30 *
31 * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez
32 * Import of Mac OS X kernel (~semeria)
33 *
34 * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez
35 * Import of OSF Mach kernel (~mburg)
36 *
37 * Revision 1.2.19.5 1995/02/24 15:20:29 alanl
38 * Lock package cleanup.
39 * [95/02/15 alanl]
40 *
41 * Merge with DIPC2_SHARED.
42 * [1995/01/05 15:11:02 alanl]
43 *
44 * Revision 1.2.28.2 1994/11/10 06:12:50 dwm
45 * mk6 CR764 - s/spinlock/simple_lock/ (name change only)
46 * [1994/11/10 05:28:35 dwm]
47 *
48 * Revision 1.2.28.1 1994/11/04 10:07:40 dwm
49 * mk6 CR668 - 1.3b26 merge
50 * * Revision 1.2.2.4 1993/11/08 15:04:18 gm
51 * CR9710: Updated to new zinit() and zone_change() interfaces.
52 * * End1.3merge
53 * [1994/11/04 09:25:48 dwm]
54 *
55 * Revision 1.2.19.3 1994/09/23 02:20:52 ezf
56 * change marker to not FREE
57 * [1994/09/22 21:33:57 ezf]
58 *
59 * Revision 1.2.19.2 1994/06/14 18:36:36 bolinger
60 * NMK17.2 merge: Replace simple_lock ops.
61 * [1994/06/14 18:35:17 bolinger]
62 *
63 * Revision 1.2.19.1 1994/06/14 17:04:23 bolinger
64 * Merge up to NMK17.2.
65 * [1994/06/14 16:54:19 bolinger]
66 *
67 * Revision 1.2.23.3 1994/10/14 12:24:33 sjs
68 * Removed krealloc_spinl routine: the newer locking scheme makes it
69 * obsolete.
70 * [94/10/13 sjs]
71 *
72 * Revision 1.2.23.2 1994/08/11 14:42:46 rwd
73 * Post merge cleanup
74 * [94/08/09 rwd]
75 *
76 * Changed zcollectable to use zchange.
77 * [94/08/04 rwd]
78 *
79 * Revision 1.2.17.2 1994/07/08 01:58:45 alanl
80 * Change comment to match function name.
81 * [1994/07/08 01:47:59 alanl]
82 *
83 * Revision 1.2.17.1 1994/05/26 16:20:38 sjs
84 * Added krealloc_spinl: same as krealloc but uses spin locks.
85 * [94/05/25 sjs]
86 *
87 * Revision 1.2.23.1 1994/08/04 02:24:55 mmp
88 * Added krealloc_spinl: same as krealloc but uses spin locks.
89 * [94/05/25 sjs]
90 *
91 * Revision 1.2.13.1 1994/02/11 14:27:12 paire
92 * Changed krealloc() to make it work on a MP system. Added a new parameter
93 * which is the simple lock that should be held while modifying the memory
94 * area already initialized.
95 * Change from NMK16.1 [93/09/02 paire]
96 *
97 * Do not set debug for kalloc zones as default. It wastes
98 * to much space.
99 * Change from NMK16.1 [93/08/16 bernadat]
100 * [94/02/07 paire]
101 *
102 * Revision 1.2.2.3 1993/07/28 17:15:44 bernard
103 * CR9523 -- Prototypes.
104 * [1993/07/27 20:14:12 bernard]
105 *
106 * Revision 1.2.2.2 1993/06/02 23:37:46 jeffc
107 * Added to OSF/1 R1.3 from NMK15.0.
108 * [1993/06/02 21:12:59 jeffc]
109 *
110 * Revision 1.2 1992/12/07 21:28:42 robert
111 * integrate any changes below for 14.0 (branch from 13.16 base)
112 *
113 * Joseph Barrera (jsb) at Carnegie-Mellon University 11-Sep-92
114 * Added krealloc. Added kalloc_max_prerounded for quicker choice between
115 * zalloc and kmem_alloc. Renamed MINSIZE to KALLOC_MINSIZE.
116 * [1992/12/06 19:47:16 robert]
117 *
118 * Revision 1.1 1992/09/30 02:09:23 robert
119 * Initial revision
120 *
121 * $EndLog$
122 */
123 /* CMU_HIST */
124 /*
125 * Revision 2.9 91/05/14 16:43:17 mrt
126 * Correcting copyright
127 *
128 * Revision 2.8 91/03/16 14:50:37 rpd
129 * Updated for new kmem_alloc interface.
130 * [91/03/03 rpd]
131 *
132 * Revision 2.7 91/02/05 17:27:22 mrt
133 * Changed to new Mach copyright
134 * [91/02/01 16:14:12 mrt]
135 *
136 * Revision 2.6 90/06/19 22:59:06 rpd
137 * Made the big kalloc zones collectable.
138 * [90/06/05 rpd]
139 *
140 * Revision 2.5 90/06/02 14:54:47 rpd
141 * Added kalloc_max, kalloc_map_size.
142 * [90/03/26 22:06:39 rpd]
143 *
144 * Revision 2.4 90/01/11 11:43:13 dbg
145 * De-lint.
146 * [89/12/06 dbg]
147 *
148 * Revision 2.3 89/09/08 11:25:51 dbg
149 * MACH_KERNEL: remove non-MACH data types.
150 * [89/07/11 dbg]
151 *
152 * Revision 2.2 89/08/31 16:18:59 rwd
153 * First Checkin
154 * [89/08/23 15:41:37 rwd]
155 *
156 * Revision 2.6 89/08/02 08:03:28 jsb
157 * Make all kalloc zones 8 MB big. (No more kalloc panics!)
158 * [89/08/01 14:10:17 jsb]
159 *
160 * Revision 2.4 89/04/05 13:03:10 rvb
161 * Guarantee a zone max of at least 100 elements or 10 pages
162 * which ever is greater. Afs (AllocDouble()) puts a great demand
163 * on the 2048 zone and used to blow away.
164 * [89/03/09 rvb]
165 *
166 * Revision 2.3 89/02/25 18:04:39 gm0w
167 * Changes for cleanup.
168 *
169 * Revision 2.2 89/01/18 02:07:04 jsb
170 * Give each kalloc zone a meaningful name (for panics);
171 * create a zone for each power of 2 between MINSIZE
172 * and PAGE_SIZE, instead of using (obsoleted) NQUEUES.
173 * [89/01/17 10:16:33 jsb]
174 *
175 *
176 * 13-Feb-88 John Seamons (jks) at NeXT
177 * Updated to use kmem routines instead of vmem routines.
178 *
179 * 21-Jun-85 Avadis Tevanian (avie) at Carnegie-Mellon University
180 * Created.
181 */
182 /* CMU_ENDHIST */
183 /*
184 * Mach Operating System
185 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
186 * All Rights Reserved.
187 *
188 * Permission to use, copy, modify and distribute this software and its
189 * documentation is hereby granted, provided that both the copyright
190 * notice and this permission notice appear in all copies of the
191 * software, derivative works or modified versions, and any portions
192 * thereof, and that both notices appear in supporting documentation.
193 *
194 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
195 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
196 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
197 *
198 * Carnegie Mellon requests users of this software to return to
199 *
200 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
201 * School of Computer Science
202 * Carnegie Mellon University
203 * Pittsburgh PA 15213-3890
204 *
205 * any improvements or extensions that they make and grant Carnegie Mellon
206 * the rights to redistribute these changes.
207 */
208 /*
209 */
210 /*
211 * File: kern/kalloc.c
212 * Author: Avadis Tevanian, Jr.
213 * Date: 1985
214 *
215 * General kernel memory allocator. This allocator is designed
216 * to be used by the kernel to manage dynamic memory fast.
217 */
218
219 #include <zone_debug.h>
220
221 #include <mach/boolean.h>
222 #include <mach/machine/vm_types.h>
223 #include <mach/vm_param.h>
224 #include <kern/misc_protos.h>
225 #include <kern/zalloc.h>
226 #include <kern/kalloc.h>
227 #include <kern/lock.h>
228 #include <vm/vm_kern.h>
229 #include <vm/vm_object.h>
230 #include <vm/vm_map.h>
231
#ifdef MACH_BSD
/* Forward declaration; the MACH_BSD-only definition appears at the end of this file. */
zone_t kalloc_zone(vm_size_t);
#endif

/* Submap of kernel_map from which over-sized ("large") requests are
 * satisfied via kmem_alloc; created in kalloc_init(). */
vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;	/* size of the kalloc_map submap */
vm_size_t kalloc_max;				/* see block comment below */
vm_size_t kalloc_max_prerounded;		/* see block comment below */

/* Statistics for allocations routed to kmem_alloc rather than a zone. */
unsigned int kalloc_large_inuse;	/* outstanding large allocations */
vm_size_t kalloc_large_total;		/* bytes currently allocated large */
vm_size_t kalloc_large_max;		/* high-water mark of kalloc_large_total */

/*
 * All allocations of size less than kalloc_max are rounded to the
 * next highest power of 2. This allocator is built on top of
 * the zone allocator.  A zone is created for each potential size
 * that we are willing to get in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K;
 * thus 16 is a safe array size for k_zone and k_zone_name.
 *
 * Note that kalloc_max is somewhat confusingly named.
 * It represents the first power of two for which no zone exists.
 * kalloc_max_prerounded is the smallest allocation size, before
 * rounding, for which no zone exists.
 */

int first_k_zone = -1;		/* index of the smallest zone actually created */
struct zone *k_zone[16];	/* zone per power of two, indexed by exponent */
static char *k_zone_name[16] = {
	"kalloc.1", "kalloc.2",
	"kalloc.4", "kalloc.8",
	"kalloc.16", "kalloc.32",
	"kalloc.64", "kalloc.128",
	"kalloc.256", "kalloc.512",
	"kalloc.1024", "kalloc.2048",
	"kalloc.4096", "kalloc.8192",
	"kalloc.16384", "kalloc.32768"
};

/*
 * Max number of elements per zone.  zinit rounds things up correctly
 * Doing things this way permits each zone to have a different maximum size
 * based on need, rather than just guessing; it also
 * means its patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,	/*      1 Byte  */
	1024,	/*      2 Byte  */
	1024,	/*      4 Byte  */
	1024,	/*      8 Byte  */
	1024,	/*     16 Byte  */
	4096,	/*     32 Byte  */
	4096,	/*     64 Byte  */
	4096,	/*    128 Byte  */
	4096,	/*    256 Byte  */
	1024,	/*    512 Byte  */
	1024,	/*   1024 Byte  */
	1024,	/*   2048 Byte  */
	1024,	/*   4096 Byte  */
	4096,	/*   8192 Byte  */
	64,	/*  16384 Byte  */
	64,	/*  32768 Byte  */
};
297
298 /*
299 * Initialize the memory allocator. This should be called only
300 * once on a system wide basis (i.e. first processor to get here
301 * does the initialization).
302 *
303 * This initializes all of the zones.
304 */
305
306 void
307 kalloc_init(
308 void)
309 {
310 kern_return_t retval;
311 vm_offset_t min;
312 vm_size_t size;
313 register int i;
314
315 retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
316 FALSE, TRUE, &kalloc_map);
317 if (retval != KERN_SUCCESS)
318 panic("kalloc_init: kmem_suballoc failed");
319
320 /*
321 * Ensure that zones up to size 8192 bytes exist.
322 * This is desirable because messages are allocated
323 * with kalloc, and messages up through size 8192 are common.
324 */
325
326 if (PAGE_SIZE < 16*1024)
327 kalloc_max = 16*1024;
328 else
329 kalloc_max = PAGE_SIZE;
330 kalloc_max_prerounded = kalloc_max / 2 + 1;
331
332 /*
333 * Allocate a zone for each size we are going to handle.
334 * We specify non-paged memory.
335 */
336 for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
337 if (size < KALLOC_MINSIZE) {
338 k_zone[i] = 0;
339 continue;
340 }
341 if (size == KALLOC_MINSIZE) {
342 first_k_zone = i;
343 }
344 k_zone[i] = zinit(size, k_zone_max[i] * size, size,
345 k_zone_name[i]);
346 }
347 }
348
349 vm_offset_t
350 kalloc_canblock(
351 vm_size_t size,
352 boolean_t canblock)
353 {
354 register int zindex;
355 register vm_size_t allocsize;
356
357 /*
358 * If size is too large for a zone, then use kmem_alloc.
359 * (We use kmem_alloc instead of kmem_alloc_wired so that
360 * krealloc can use kmem_realloc.)
361 */
362
363 if (size >= kalloc_max_prerounded) {
364 vm_offset_t addr;
365
366 /* kmem_alloc could block so we return if noblock */
367 if (!canblock) {
368 return(0);
369 }
370 if (kmem_alloc(kalloc_map, &addr, size) != KERN_SUCCESS)
371 addr = 0;
372
373 if (addr) {
374 kalloc_large_inuse++;
375 kalloc_large_total += size;
376
377 if (kalloc_large_total > kalloc_large_max)
378 kalloc_large_max = kalloc_large_total;
379 }
380 return(addr);
381 }
382
383 /* compute the size of the block that we will actually allocate */
384
385 allocsize = KALLOC_MINSIZE;
386 zindex = first_k_zone;
387 while (allocsize < size) {
388 allocsize <<= 1;
389 zindex++;
390 }
391
392 /* allocate from the appropriate zone */
393
394 assert(allocsize < kalloc_max);
395 return(zalloc_canblock(k_zone[zindex], canblock));
396 }
397
398 vm_offset_t
399 kalloc(
400 vm_size_t size)
401 {
402 return( kalloc_canblock(size, TRUE) );
403 }
404
405 vm_offset_t
406 kalloc_noblock(
407 vm_size_t size)
408 {
409 return( kalloc_canblock(size, FALSE) );
410 }
411
412
413 void
414 krealloc(
415 vm_offset_t *addrp,
416 vm_size_t old_size,
417 vm_size_t new_size,
418 simple_lock_t lock)
419 {
420 register int zindex;
421 register vm_size_t allocsize;
422 vm_offset_t naddr;
423
424 /* can only be used for increasing allocation size */
425
426 assert(new_size > old_size);
427
428 /* if old_size is zero, then we are simply allocating */
429
430 if (old_size == 0) {
431 simple_unlock(lock);
432 naddr = kalloc(new_size);
433 simple_lock(lock);
434 *addrp = naddr;
435 return;
436 }
437
438 /* if old block was kmem_alloc'd, then use kmem_realloc if necessary */
439
440 if (old_size >= kalloc_max_prerounded) {
441 old_size = round_page_32(old_size);
442 new_size = round_page_32(new_size);
443 if (new_size > old_size) {
444
445 if (kmem_realloc(kalloc_map, *addrp, old_size, &naddr,
446 new_size) != KERN_SUCCESS) {
447 panic("krealloc: kmem_realloc");
448 naddr = 0;
449 }
450
451 simple_lock(lock);
452 *addrp = naddr;
453
454 /* kmem_realloc() doesn't free old page range. */
455 kmem_free(kalloc_map, *addrp, old_size);
456
457 kalloc_large_total += (new_size - old_size);
458
459 if (kalloc_large_total > kalloc_large_max)
460 kalloc_large_max = kalloc_large_total;
461 }
462 return;
463 }
464
465 /* compute the size of the block that we actually allocated */
466
467 allocsize = KALLOC_MINSIZE;
468 zindex = first_k_zone;
469 while (allocsize < old_size) {
470 allocsize <<= 1;
471 zindex++;
472 }
473
474 /* if new size fits in old block, then return */
475
476 if (new_size <= allocsize) {
477 return;
478 }
479
480 /* if new size does not fit in zone, kmem_alloc it, else zalloc it */
481
482 simple_unlock(lock);
483 if (new_size >= kalloc_max_prerounded) {
484 if (kmem_alloc(kalloc_map, &naddr, new_size) != KERN_SUCCESS) {
485 panic("krealloc: kmem_alloc");
486 simple_lock(lock);
487 *addrp = 0;
488 return;
489 }
490 kalloc_large_inuse++;
491 kalloc_large_total += new_size;
492
493 if (kalloc_large_total > kalloc_large_max)
494 kalloc_large_max = kalloc_large_total;
495 } else {
496 register int new_zindex;
497
498 allocsize <<= 1;
499 new_zindex = zindex + 1;
500 while (allocsize < new_size) {
501 allocsize <<= 1;
502 new_zindex++;
503 }
504 naddr = zalloc(k_zone[new_zindex]);
505 }
506 simple_lock(lock);
507
508 /* copy existing data */
509
510 bcopy((const char *)*addrp, (char *)naddr, old_size);
511
512 /* free old block, and return */
513
514 zfree(k_zone[zindex], *addrp);
515
516 /* set up new address */
517
518 *addrp = naddr;
519 }
520
521
522 vm_offset_t
523 kget(
524 vm_size_t size)
525 {
526 register int zindex;
527 register vm_size_t allocsize;
528
529 /* size must not be too large for a zone */
530
531 if (size >= kalloc_max_prerounded) {
532 /* This will never work, so we might as well panic */
533 panic("kget");
534 }
535
536 /* compute the size of the block that we will actually allocate */
537
538 allocsize = KALLOC_MINSIZE;
539 zindex = first_k_zone;
540 while (allocsize < size) {
541 allocsize <<= 1;
542 zindex++;
543 }
544
545 /* allocate from the appropriate zone */
546
547 assert(allocsize < kalloc_max);
548 return(zget(k_zone[zindex]));
549 }
550
551 void
552 kfree(
553 vm_offset_t data,
554 vm_size_t size)
555 {
556 register int zindex;
557 register vm_size_t freesize;
558
559 /* if size was too large for a zone, then use kmem_free */
560
561 if (size >= kalloc_max_prerounded) {
562 kmem_free(kalloc_map, data, size);
563
564 kalloc_large_total -= size;
565 kalloc_large_inuse--;
566
567 return;
568 }
569
570 /* compute the size of the block that we actually allocated from */
571
572 freesize = KALLOC_MINSIZE;
573 zindex = first_k_zone;
574 while (freesize < size) {
575 freesize <<= 1;
576 zindex++;
577 }
578
579 /* free to the appropriate zone */
580
581 assert(freesize < kalloc_max);
582 zfree(k_zone[zindex], data);
583 }
584
#ifdef MACH_BSD
/*
 * kalloc_zone: report which zone backs an allocation of 'size' bytes,
 * or ZONE_NULL if such a request is served by kmem_alloc instead.
 *
 * BUG FIX: the zone-path bound is kalloc_max_prerounded, not
 * kalloc_max (matching kalloc_canblock/kget/kfree).  kalloc_init only
 * creates zones for sizes strictly below kalloc_max, so the old
 * "size <= kalloc_max" test indexed a k_zone[] slot that was never
 * zinit'd for sizes in (kalloc_max/2, kalloc_max].
 */
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	if (size < kalloc_max_prerounded) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif
608
609
610
611 kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
612 vm_size_t *alloc_size, int *collectable, int *exhaustable)
613 {
614 *count = kalloc_large_inuse;
615 *cur_size = kalloc_large_total;
616 *max_size = kalloc_large_max;
617 *elem_size = kalloc_large_total / kalloc_large_inuse;
618 *alloc_size = kalloc_large_total / kalloc_large_inuse;
619 *collectable = 0;
620 *exhaustable = 0;
621 }
622