/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to let the kernel manage dynamic memory quickly.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;

unsigned int kalloc_large_inuse;
vm_size_t kalloc_large_total;
vm_size_t kalloc_large_max;

/*
 *	All allocations of size less than kalloc_max are rounded to the
 *	next highest power of 2.  This allocator is built on top of
 *	the zone allocator.  A zone is created for each potential size
 *	that we are willing to hand out as a small block.
 *
 *	We assume that kalloc_max is not greater than 64K;
 *	thus 16 is a safe array size for k_zone and k_zone_name.
 *
 *	Note that kalloc_max is somewhat confusingly named.  It represents
 *	the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 */
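
/*
 * Illustrative sketch (not compiled in): how a request size maps to a
 * zone under the rounding rule above.  The hypothetical helper below
 * mirrors the index computation used by kalloc_canblock(); e.g. a
 * 100-byte request rounds up to 128 and is served from "kalloc.128".
 */
#if 0
static zone_t
kalloc_zone_for_size(vm_size_t size)	/* example only, not part of the allocator */
{
	int		zindex = first_k_zone;
	vm_size_t	allocsize = KALLOC_MINSIZE;

	while (allocsize < size) {	/* round up to the next power of 2 */
		allocsize <<= 1;
		zindex++;
	}
	return (k_zone[zindex]);
}
#endif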

int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};

/*
 * Max number of elements per zone.  zinit() rounds things up correctly.
 * Doing things this way permits each zone to have a different maximum
 * size based on need, rather than just guessing; it also means it's
 * patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,	/*     1 Byte */
	1024,	/*     2 Byte */
	1024,	/*     4 Byte */
	1024,	/*     8 Byte */
	1024,	/*    16 Byte */
	4096,	/*    32 Byte */
	4096,	/*    64 Byte */
	4096,	/*   128 Byte */
	4096,	/*   256 Byte */
	1024,	/*   512 Byte */
	1024,	/*  1024 Byte */
	1024,	/*  2048 Byte */
	1024,	/*  4096 Byte */
	4096,	/*  8192 Byte */
	64,	/* 16384 Byte */
	64,	/* 32768 Byte */
};
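
/*
 * Worked example of the budget each entry implies (an illustration,
 * not part of the original source): kalloc_init() below creates the
 * 32-byte zone as zinit(32, 4096 * 32, 32, "kalloc.32"), i.e. it may
 * grow to at most 4096 elements, or 128KB.  The 16384-byte zone
 * (created only when PAGE_SIZE pushes kalloc_max past 16K) would be
 * capped at 64 * 16KB = 1MB.
 */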

/* forward declarations */
void * kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);


/* OSMalloc local data declarations */
static
queue_head_t	OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system-wide basis (i.e., the first processor to get
 *	here does the initialization).
 *
 *	This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
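
	/*
	 * Worked example (illustrative, assuming KALLOC_MINSIZE == 16):
	 * with PAGE_SIZE == 4K, kalloc_max is 16K, so the loop below
	 * creates zones for sizes 16 through 8192, and any request of
	 * 8K+1 bytes or more (kalloc_max_prerounded) bypasses the
	 * zones and goes to kmem_alloc() in kalloc_canblock().
	 */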

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
	OSMalloc_init();
}

void *
kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock)
{
	register int zindex;
	register vm_size_t allocsize;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block, so we return if noblock */
		if (!canblock) {
			return(0);
		}
		if (kmem_alloc(kalloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}

void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}
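
/*
 * Usage sketch (illustrative only): kalloc() keeps no per-allocation
 * size header, so the caller must pass the original request size back
 * to kfree().  "struct foo" is a hypothetical client type.
 */
#if 0
	struct foo *fp;

	fp = (struct foo *) kalloc(sizeof (struct foo));
	if (fp != NULL) {
		/* ... use the allocation ... */
		kfree((void *) fp, sizeof (struct foo));
	}
#endif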


void
krealloc(
	void		**addrp,
	vm_size_t	old_size,
	vm_size_t	new_size,
	simple_lock_t	lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			if (KERN_SUCCESS != kmem_realloc(kalloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);

			/*
			 * kmem_realloc() doesn't free the old page range;
			 * release it before *addrp is overwritten below.
			 */
			kmem_free(kalloc_map, (vm_offset_t)*addrp, old_size);
			*addrp = (void *) naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (KERN_SUCCESS != kmem_alloc(kalloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = NULL;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
}
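
/*
 * Usage sketch (illustrative only): the caller holds `lock' across the
 * call; krealloc() drops and retakes it around any blocking allocation.
 * `table', `table_lock', and the byte counts are hypothetical.
 */
#if 0
	simple_lock(&table_lock);
	krealloc((void **)&table, old_bytes, new_bytes, &table_lock);
	/* table now points at a block of at least new_bytes bytes */
	simple_unlock(&table_lock);
#endif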


void *
kget(
	vm_size_t size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}

void
kfree(
	void		*data,
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t freesize;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		kmem_free(kalloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif


void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	vm_size_t avg_size;

	/* guard against a divide-by-zero when no large allocations are outstanding */
	avg_size = kalloc_large_inuse ?
	    (kalloc_large_total / kalloc_large_inuse) : 0;

	*count       = kalloc_large_inuse;
	*cur_size    = kalloc_large_total;
	*max_size    = kalloc_large_max;
	*elem_size   = avg_size;
	*alloc_size  = avg_size;
	*collectable = 0;
	*exhaustable = 0;
}


void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char	*str,
	uint32_t	flags)
{
	OSMallocTag	OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}

void
OSMalloc_Tagref(
	OSMallocTag	tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag	tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag	tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void*)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t	size,
	OSMallocTag	tag)
{
	void		*addr=NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	/* drop the tag reference taken above if the allocation failed,
	 * as the non-blocking variants below do */
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_nowait(
	uint32_t	size,
	OSMallocTag	tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t	size,
	OSMallocTag	tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void
OSFree(
	void		*addr,
	uint32_t	size,
	OSMallocTag	tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void*)addr, size);

	OSMalloc_Tagrele(tag);
}
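
/*
 * Usage sketch (illustrative only) of the OSMalloc tag lifecycle
 * defined above: a client allocates a tag once, uses it for its
 * allocations, and frees the tag when done.  The name string and
 * sizes are hypothetical.
 */
#if 0
	OSMallocTag	tag;
	void		*buf;

	tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
	buf = OSMalloc(1024, tag);	/* wired, zone-backed allocation */
	if (buf != NULL)
		OSFree(buf, 1024, tag);
	OSMalloc_Tagfree(tag);		/* tag is freed once its refcnt drops */
#endif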