/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to let the kernel manage dynamic memory quickly.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;

/*
 *	All allocations of size less than kalloc_max are rounded to the
 *	next highest power of 2.  This allocator is built on top of
 *	the zone allocator.  A zone is created for each potential size
 *	that we are willing to get in small blocks.
 *
 *	We assume that kalloc_max is not greater than 64K;
 *	thus 16 is a safe array size for k_zone and k_zone_name.
 *
 *	Note that kalloc_max is somewhat confusingly named.  It represents
 *	the first power of two for which no zone exists.
 *	kalloc_max_prerounded is the smallest allocation size, before
 *	rounding, for which no zone exists.
 *
 *	Requests of kalloc_kernmap_size bytes or more are allocated from
 *	the kernel map rather than from kalloc_map.
 */

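/*
 * Illustrative mapping from request size to zone (a sketch, assuming
 * KALLOC_MINSIZE is 16 and 4K hardware pages; not part of the build):
 *
 *	kalloc(1)    -> rounds to 16   -> "kalloc.16"
 *	kalloc(100)  -> rounds to 128  -> "kalloc.128"
 *	kalloc(8192) -> exact fit      -> "kalloc.8192"
 *	kalloc(9000) -> >= kalloc_max_prerounded, so it bypasses the
 *	                zones and is served by kmem_alloc() instead.
 */
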
int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};

/*
 *  Max number of elements per zone.  zinit() rounds things up correctly.
 *  Doing things this way permits each zone to have a different maximum
 *  size based on need, rather than just guessing; it also means it's
 *  patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,	/*      1 Byte  */
	1024,	/*      2 Byte  */
	1024,	/*      4 Byte  */
	1024,	/*      8 Byte  */
	1024,	/*     16 Byte  */
	4096,	/*     32 Byte  */
	4096,	/*     64 Byte  */
	4096,	/*    128 Byte  */
	4096,	/*    256 Byte  */
	1024,	/*    512 Byte  */
	1024,	/*   1024 Byte  */
	1024,	/*   2048 Byte  */
	1024,	/*   4096 Byte  */
	4096,	/*   8192 Byte  */
	64,	/*  16384 Byte  */
	64,	/*  32768 Byte  */
};

/* forward declarations */
void * kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);


/* OSMalloc local data declarations */
static
queue_head_t    OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag	tag);
void OSMalloc_Tagrele(OSMallocTag	tag);

/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system-wide basis (i.e., the first processor to get
 *	here does the initialization).
 *
 *	This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* allocations larger than 16 times kalloc_max (256k) go to the kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
	OSMalloc_init();
}

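/*
 * For concreteness (a sketch, assuming 4K hardware pages): after
 * kalloc_init() runs,
 *
 *	kalloc_max            = 16384	(16K, since PAGE_SIZE < 16K)
 *	kalloc_max_prerounded = 8193	(16384 / 2 + 1)
 *	kalloc_kernmap_size   = 262145	(16384 * 16 + 1)
 *
 * so zones "kalloc.16" through "kalloc.8192" are created, and any
 * request of 8193 bytes or more falls through to kmem_alloc().
 */
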
void *
kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(0);
		}

		if (size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}

void *
kalloc(
       vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	       vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}

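/*
 * Hypothetical caller sketch (not part of this file): try the
 * non-blocking path first and fall back to the blocking one.
 * Note that kfree() needs the original size, so callers must
 * remember it.
 *
 *	void *buf = kalloc_noblock(len);
 *	if (buf == NULL)
 *		buf = kalloc(len);	(may block)
 *	...
 *	kfree(buf, len);
 */
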

void
krealloc(
	void **addrp,
	vm_size_t old_size,
	vm_size_t new_size,
	simple_lock_t lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		if (old_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			if (KERN_SUCCESS != kmem_realloc(alloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);

			/*
			 * kmem_realloc() doesn't free the old page range;
			 * release it before *addrp is overwritten with
			 * the new address.
			 */
			kmem_free(alloc_map, (vm_offset_t)*addrp, old_size);
			*addrp = (void *) naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (new_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;
		if (KERN_SUCCESS != kmem_alloc(alloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = NULL;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
}

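/*
 * Locking note: krealloc() assumes the caller holds `lock' on entry.
 * It drops the lock around any blocking allocation and retakes it
 * before updating *addrp, so the caller must expect *addrp to change.
 * Hypothetical usage sketch (the names are made up):
 *
 *	simple_lock(&mylock);
 *	krealloc(&myptr, oldsz, newsz, &mylock);
 *	simple_unlock(&mylock);
 */
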

void *
kget(
     vm_size_t size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}

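/*
 * Note: kget() never blocks; zget() takes an element from the zone's
 * free list and returns a null pointer when none is available, so it
 * suits callers that cannot afford to block.
 */
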
void
kfree(
	void		*data,
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t freesize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		if (size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;
		kmem_free(alloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}

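/*
 * Note: kfree() trusts the caller-supplied size to locate the zone
 * (or map) the block came from; passing a size that rounds to a
 * different power of two than the original request frees into the
 * wrong zone and corrupts the allocator.
 */
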
#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	/* match kalloc(): only sizes below kalloc_max_prerounded have zones */
	if (size < kalloc_max_prerounded) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif


void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count = kalloc_large_inuse;
	*cur_size = kalloc_large_total;
	*max_size = kalloc_large_max;
	/* guard against dividing by zero when no large allocations exist */
	*elem_size = kalloc_large_inuse ?
	    kalloc_large_total / kalloc_large_inuse : 0;
	*alloc_size = *elem_size;
	*collectable = 0;
	*exhaustable = 0;
}


void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char		*str,
	uint32_t		flags)
{
	OSMallocTag       OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	/* leave room for the NUL terminator; the struct was just zeroed */
	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME - 1);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}

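/*
 * Typical tag lifecycle (a sketch; the tag name is made up):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver",
 *	    OSMT_DEFAULT);
 *	void *p = OSMalloc(1024, tag);
 *	...
 *	OSFree(p, 1024, tag);
 *	OSMalloc_Tagfree(tag);
 */
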
void
OSMalloc_Tagref(
	 OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}

void
OSMalloc_Tagrele(
	 OSMallocTag		tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	 OSMallocTag		tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void*)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t		size,
	OSMallocTag		tag)
{
	void		*addr=NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	return(addr);
}

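/*
 * Note: a pageable tag only routes through kmem_alloc_pageable() when
 * the request is at least one page (size & ~PAGE_MASK is nonzero);
 * sub-page pageable requests still come from kalloc()'s wired zones.
 */
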
void *
OSMalloc_nowait(
	uint32_t		size,
	OSMallocTag		tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t		size,
	OSMallocTag		tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void
OSFree(
	void		*addr,
	uint32_t	size,
	OSMallocTag	tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void*)addr, size);

	OSMalloc_Tagrele(tag);
}