/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * HISTORY
 *
 * Revision 1.1.1.1 1998/09/22 21:05:34 wsanchez
 * Import of Mac OS X kernel (~semeria)
 *
 * Revision 1.1.1.1 1998/03/07 02:25:55 wsanchez
 * Import of OSF Mach kernel (~mburg)
 *
 * Revision 1.2.19.5 1995/02/24 15:20:29 alanl
 * Lock package cleanup.
 * [95/02/15 alanl]
 *
 * Merge with DIPC2_SHARED.
 * [1995/01/05 15:11:02 alanl]
 *
 * Revision 1.2.28.2 1994/11/10 06:12:50 dwm
 * mk6 CR764 - s/spinlock/simple_lock/ (name change only)
 * [1994/11/10 05:28:35 dwm]
 *
 * Revision 1.2.28.1 1994/11/04 10:07:40 dwm
 * mk6 CR668 - 1.3b26 merge
 * * Revision 1.2.2.4 1993/11/08 15:04:18 gm
 * CR9710: Updated to new zinit() and zone_change() interfaces.
 * * End1.3merge
 * [1994/11/04 09:25:48 dwm]
 *
 * Revision 1.2.19.3 1994/09/23 02:20:52 ezf
 * change marker to not FREE
 * [1994/09/22 21:33:57 ezf]
 *
 * Revision 1.2.19.2 1994/06/14 18:36:36 bolinger
 * NMK17.2 merge: Replace simple_lock ops.
 * [1994/06/14 18:35:17 bolinger]
 *
 * Revision 1.2.19.1 1994/06/14 17:04:23 bolinger
 * Merge up to NMK17.2.
 * [1994/06/14 16:54:19 bolinger]
 *
 * Revision 1.2.23.3 1994/10/14 12:24:33 sjs
 * Removed krealloc_spinl routine: the newer locking scheme makes it
 * obsolete.
 * [94/10/13 sjs]
 *
 * Revision 1.2.23.2 1994/08/11 14:42:46 rwd
 * Post merge cleanup
 * [94/08/09 rwd]
 *
 * Changed zcollectable to use zchange.
 * [94/08/04 rwd]
 *
 * Revision 1.2.17.2 1994/07/08 01:58:45 alanl
 * Change comment to match function name.
 * [1994/07/08 01:47:59 alanl]
 *
 * Revision 1.2.17.1 1994/05/26 16:20:38 sjs
 * Added krealloc_spinl: same as krealloc but uses spin locks.
 * [94/05/25 sjs]
 *
 * Revision 1.2.23.1 1994/08/04 02:24:55 mmp
 * Added krealloc_spinl: same as krealloc but uses spin locks.
 * [94/05/25 sjs]
 *
 * Revision 1.2.13.1 1994/02/11 14:27:12 paire
 * Changed krealloc() to make it work on a MP system. Added a new parameter
 * which is the simple lock that should be held while modifying the memory
 * area already initialized.
 * Change from NMK16.1 [93/09/02 paire]
 *
 * Do not set debug for kalloc zones as default. It wastes
 * too much space.
 * Change from NMK16.1 [93/08/16 bernadat]
 * [94/02/07 paire]
 *
 * Revision 1.2.2.3 1993/07/28 17:15:44 bernard
 * CR9523 -- Prototypes.
 * [1993/07/27 20:14:12 bernard]
 *
 * Revision 1.2.2.2 1993/06/02 23:37:46 jeffc
 * Added to OSF/1 R1.3 from NMK15.0.
 * [1993/06/02 21:12:59 jeffc]
 *
 * Revision 1.2 1992/12/07 21:28:42 robert
 * integrate any changes below for 14.0 (branch from 13.16 base)
 *
 * Joseph Barrera (jsb) at Carnegie-Mellon University 11-Sep-92
 * Added krealloc. Added kalloc_max_prerounded for quicker choice between
 * zalloc and kmem_alloc. Renamed MINSIZE to KALLOC_MINSIZE.
 * [1992/12/06 19:47:16 robert]
 *
 * Revision 1.1 1992/09/30 02:09:23 robert
 * Initial revision
 *
 * $EndLog$
 */
/* CMU_HIST */
/*
 * Revision 2.9 91/05/14 16:43:17 mrt
 * Correcting copyright
 *
 * Revision 2.8 91/03/16 14:50:37 rpd
 * Updated for new kmem_alloc interface.
 * [91/03/03 rpd]
 *
 * Revision 2.7 91/02/05 17:27:22 mrt
 * Changed to new Mach copyright
 * [91/02/01 16:14:12 mrt]
 *
 * Revision 2.6 90/06/19 22:59:06 rpd
 * Made the big kalloc zones collectable.
 * [90/06/05 rpd]
 *
 * Revision 2.5 90/06/02 14:54:47 rpd
 * Added kalloc_max, kalloc_map_size.
 * [90/03/26 22:06:39 rpd]
 *
 * Revision 2.4 90/01/11 11:43:13 dbg
 * De-lint.
 * [89/12/06 dbg]
 *
 * Revision 2.3 89/09/08 11:25:51 dbg
 * MACH_KERNEL: remove non-MACH data types.
 * [89/07/11 dbg]
 *
 * Revision 2.2 89/08/31 16:18:59 rwd
 * First Checkin
 * [89/08/23 15:41:37 rwd]
 *
 * Revision 2.6 89/08/02 08:03:28 jsb
 * Make all kalloc zones 8 MB big. (No more kalloc panics!)
 * [89/08/01 14:10:17 jsb]
 *
 * Revision 2.4 89/04/05 13:03:10 rvb
 * Guarantee a zone max of at least 100 elements or 10 pages,
 * whichever is greater. Afs (AllocDouble()) puts a great demand
 * on the 2048 zone and used to blow away.
 * [89/03/09 rvb]
 *
 * Revision 2.3 89/02/25 18:04:39 gm0w
 * Changes for cleanup.
 *
 * Revision 2.2 89/01/18 02:07:04 jsb
 * Give each kalloc zone a meaningful name (for panics);
 * create a zone for each power of 2 between MINSIZE
 * and PAGE_SIZE, instead of using (obsoleted) NQUEUES.
 * [89/01/17 10:16:33 jsb]
 *
 *
 * 13-Feb-88 John Seamons (jks) at NeXT
 * Updated to use kmem routines instead of vmem routines.
 *
 * 21-Jun-85 Avadis Tevanian (avie) at Carnegie-Mellon University
 * Created.
 */
/* CMU_ENDHIST */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory quickly.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;

unsigned int kalloc_large_inuse;
vm_size_t kalloc_large_total;
vm_size_t kalloc_large_max;

/*
 *	All allocations of size less than kalloc_max are rounded to the
 *	next highest power of 2.  This allocator is built on top of
 *	the zone allocator.  A zone is created for each potential size
 *	that we are willing to get in small blocks.
 *
 *	We assume that kalloc_max is not greater than 64K;
 *	thus 16 is a safe array size for k_zone and k_zone_name.
 *
 *	Note that kalloc_max is somewhat confusingly named.
 *	It represents the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 */
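
/*
 * Worked example (illustrative only; assumes 4K pages and a
 * KALLOC_MINSIZE of 16, neither of which is asserted by this file):
 * kalloc_init() sets kalloc_max to 16K and kalloc_max_prerounded to
 * 8K+1, and creates zones kalloc.16 through kalloc.8192.  A kalloc(300)
 * request is rounded up to the next power of two and served from the
 * kalloc.512 zone, while a kalloc(9000) request is at least
 * kalloc_max_prerounded and therefore bypasses the zones and goes to
 * kmem_alloc() in kalloc_map instead.
 */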

int first_k_zone = -1;
struct zone *k_zone[16];
static char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};

/*
 * Max number of elements per zone.  zinit() rounds things up correctly.
 * Doing things this way permits each zone to have a different maximum size
 * based on need, rather than just guessing; it also
 * means it's patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,	/*     1 Byte */
	1024,	/*     2 Byte */
	1024,	/*     4 Byte */
	1024,	/*     8 Byte */
	1024,	/*    16 Byte */
	4096,	/*    32 Byte */
	4096,	/*    64 Byte */
	4096,	/*   128 Byte */
	4096,	/*   256 Byte */
	1024,	/*   512 Byte */
	1024,	/*  1024 Byte */
	1024,	/*  2048 Byte */
	1024,	/*  4096 Byte */
	4096,	/*  8192 Byte */
	64,	/* 16384 Byte */
	64,	/* 32768 Byte */
};
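
/*
 * For example (illustrative): with the table above, kalloc_init()
 * passes zinit() a maximum of 4096 * 32 bytes (128KB) of backing memory
 * for the kalloc.32 zone and 1024 * 4096 bytes (4MB) for the
 * kalloc.4096 zone; zinit() rounds those figures up as needed.
 */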

/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system wide basis (i.e. first processor to get here
 *	does the initialization).
 *
 *	This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, TRUE, &kalloc_map);
	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
}

vm_offset_t
kalloc_canblock(
	vm_size_t size,
	boolean_t canblock)
{
	register int zindex;
	register vm_size_t allocsize;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		vm_offset_t addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(0);
		}
		if (kmem_alloc(kalloc_map, &addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}

vm_offset_t
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

vm_offset_t
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}
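
/*
 * Illustrative usage (hypothetical caller; not part of this file).
 * Callers must remember the size they requested, since kfree() takes
 * it as an argument and the allocator keeps no per-block header:
 *
 *	vm_offset_t buf;
 *
 *	buf = kalloc(sizeof (struct my_record));    -- may block
 *	if (buf == 0)
 *		return KERN_RESOURCE_SHORTAGE;
 *	...
 *	kfree(buf, sizeof (struct my_record));
 *
 * kalloc_noblock() behaves the same way but returns 0 instead of
 * blocking when memory is not immediately available.
 */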


void
krealloc(
	vm_offset_t *addrp,
	vm_size_t old_size,
	vm_size_t new_size,
	simple_lock_t lock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_offset_t naddr;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			/*
			 * kmem_realloc() can block, so drop the caller's
			 * simple lock around the call, as the other
			 * allocating paths in this function do.
			 */
			simple_unlock(lock);
			if (kmem_realloc(kalloc_map, *addrp, old_size, &naddr,
					 new_size) != KERN_SUCCESS) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}
			simple_lock(lock);

			/*
			 * kmem_realloc() doesn't free the old page range,
			 * so release it before installing the new address.
			 */
			kmem_free(kalloc_map, *addrp, old_size);
			*addrp = naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (kmem_alloc(kalloc_map, &naddr, new_size) != KERN_SUCCESS) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = 0;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = naddr;
}
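
/*
 * Illustrative usage of krealloc() (hypothetical names; not from this
 * file).  The caller's simple lock must be held on entry; krealloc()
 * drops and retakes it around any blocking allocation:
 *
 *	simple_lock(&table_lock);
 *	krealloc(&table_addr, old_bytes, new_bytes, &table_lock);
 *	-- table_addr now refers to a block of at least new_bytes,
 *	-- with the first old_bytes of data preserved.
 *	simple_unlock(&table_lock);
 */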


vm_offset_t
kget(
	vm_size_t size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}
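
/*
 * Illustrative usage (hypothetical): kget() goes through zget(), which
 * does not block, so it returns 0 rather than waiting when the zone
 * has no free element immediately available:
 *
 *	elt = kget(64);
 *	if (elt == 0)
 *		-- no memory right now; caller must retry or give up
 */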

void
kfree(
	vm_offset_t data,
	vm_size_t size)
{
	register int zindex;
	register vm_size_t freesize;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		kmem_free(kalloc_map, data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif
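
/*
 * For example (illustrative): kalloc_zone(300) returns the kalloc.512
 * zone, i.e. the same zone a kalloc(300) request would be served from,
 * while any size greater than kalloc_max yields ZONE_NULL.
 */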



void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count = kalloc_large_inuse;
	*cur_size = kalloc_large_total;
	*max_size = kalloc_large_max;

	/* avoid dividing by zero when no large allocations are outstanding */
	if (kalloc_large_inuse != 0) {
		*elem_size = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size = 0;
		*alloc_size = 0;
	}
	*collectable = 0;
	*exhaustable = 0;
}
