/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory fast.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;

/*
 * All allocations of size less than kalloc_max are rounded to the
 * next highest power of 2.  This allocator is built on top of
 * the zone allocator.  A zone is created for each potential size
 * that we are willing to get in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K;
 * thus 16 is a safe array size for k_zone and k_zone_name.
 *
 * Note that kalloc_max is somewhat confusingly named.
 * It represents the first power of two for which no zone exists.
 * kalloc_max_prerounded is the smallest allocation size, before
 * rounding, for which no zone exists.
 */
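
/*
 * Illustrative example (assumes 4K pages, so kalloc_max == 16384 and
 * kalloc_max_prerounded == 8193, and assumes KALLOC_MINSIZE == 16):
 * a request for 8192 bytes is served from the "kalloc.8192" zone,
 * while a request for 8193 bytes falls through to kmem_alloc on
 * kalloc_map.
 */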

int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",     "kalloc.2",
	"kalloc.4",     "kalloc.8",
	"kalloc.16",    "kalloc.32",
	"kalloc.64",    "kalloc.128",
	"kalloc.256",   "kalloc.512",
	"kalloc.1024",  "kalloc.2048",
	"kalloc.4096",  "kalloc.8192",
	"kalloc.16384", "kalloc.32768"
};

/*
 * Max number of elements per zone.  zinit rounds things up correctly.
 * Doing things this way permits each zone to have a different maximum
 * size based on need, rather than just guessing; it also
 * means it's patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,	/*     1 Byte */
	1024,	/*     2 Byte */
	1024,	/*     4 Byte */
	1024,	/*     8 Byte */
	1024,	/*    16 Byte */
	4096,	/*    32 Byte */
	4096,	/*    64 Byte */
	4096,	/*   128 Byte */
	4096,	/*   256 Byte */
	1024,	/*   512 Byte */
	1024,	/*  1024 Byte */
	1024,	/*  2048 Byte */
	1024,	/*  4096 Byte */
	4096,	/*  8192 Byte */
	64,	/* 16384 Byte */
	64,	/* 32768 Byte */
};

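/*
 * Worked example (illustrative): each entry above caps its zone at
 * k_zone_max[i] * size bytes when handed to zinit() in kalloc_init().
 * For the 128-byte zone that is 4096 * 128 = 512KB; for the
 * 32768-byte zone, 64 * 32768 = 2MB.
 */
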
/* forward declarations */
void * kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);


/* OSMalloc local data declarations */
static
queue_head_t OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag	tag);
void OSMalloc_Tagrele(OSMallocTag	tag);

/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system-wide basis (i.e. the first processor to get
 *	here does the initialization).
 *
 *	This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
	OSMalloc_init();
}
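
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096, kalloc_init()
 * sets kalloc_max = 16*1024 = 16384 and kalloc_max_prerounded =
 * 16384/2 + 1 = 8193.  The loop above then creates zones for sizes 16
 * through 8192 (sizes below KALLOC_MINSIZE get no zone), matching the
 * definition of kalloc_max as the first power of two with no zone.
 */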

void *
kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock)
{
	register int zindex;
	register vm_size_t allocsize;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(0);
		}
		if (kmem_alloc(kalloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}
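
/*
 * Illustrative trace of the rounding loop above for kalloc(100),
 * assuming KALLOC_MINSIZE == 16 (so first_k_zone == 4):
 *
 *	allocsize = 16,  zindex = 4   (16  < 100, continue)
 *	allocsize = 32,  zindex = 5   (32  < 100, continue)
 *	allocsize = 64,  zindex = 6   (64  < 100, continue)
 *	allocsize = 128, zindex = 7   (128 >= 100, stop)
 *
 * so the request is served from k_zone[7], "kalloc.128".
 */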

void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}
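
/*
 * Typical usage (illustrative sketch; struct foo is hypothetical).
 * Note that kfree() must be passed the size that was originally
 * requested, since the allocator keeps no per-block header:
 *
 *	struct foo *fp = (struct foo *) kalloc(sizeof(struct foo));
 *	if (fp == NULL)
 *		return (KERN_RESOURCE_SHORTAGE);
 *	...
 *	kfree(fp, sizeof(struct foo));
 */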

void
krealloc(
	void **addrp,
	vm_size_t old_size,
	vm_size_t new_size,
	simple_lock_t lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			/* kmem_realloc() can block, so drop the caller's
			   lock around it, as the other paths here do */
			simple_unlock(lock);
			if (KERN_SUCCESS != kmem_realloc(kalloc_map,
					    (vm_offset_t)*addrp, old_size,
					    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);

			/* kmem_realloc() doesn't free the old page range;
			   free it while *addrp still holds the old address,
			   then publish the new one */
			kmem_free(kalloc_map, (vm_offset_t)*addrp, old_size);
			*addrp = (void *) naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (KERN_SUCCESS != kmem_alloc(kalloc_map,
					       (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = NULL;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
}
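
/*
 * Illustrative usage sketch (table and table_lock are hypothetical):
 * growing a buffer guarded by a simple lock.  krealloc() is entered
 * with the lock held; it drops and retakes the lock around blocking
 * allocations and updates the pointer in place:
 *
 *	simple_lock(&table_lock);
 *	krealloc((void **)&table, old_bytes, new_bytes, &table_lock);
 *	simple_unlock(&table_lock);
 */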


void *
kget(
	vm_size_t size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}

void
kfree(
	void *data,
	vm_size_t size)
{
	register int zindex;
	register vm_size_t freesize;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		kmem_free(kalloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif


void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count    = kalloc_large_inuse;
	*cur_size = kalloc_large_total;
	*max_size = kalloc_large_max;
	/* report the average large allocation size; avoid dividing
	   by zero when no large allocations are outstanding */
	if (kalloc_large_inuse) {
		*elem_size  = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size  = 0;
		*alloc_size = 0;
	}
	*collectable = 0;
	*exhaustable = 0;
}


void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char *str,
	uint32_t flags)
{
	OSMallocTag OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}
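
/*
 * Typical tag lifecycle (illustrative sketch; the tag name is
 * hypothetical):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
 *	void *buf = OSMalloc(1024, tag);
 *	...
 *	OSFree(buf, 1024, tag);
 *	OSMalloc_Tagfree(tag);
 */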

void
OSMalloc_Tagref(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void*)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;
	kern_return_t kr;

	OSMalloc_Tagref(tag);
	/* pageable tags use pageable kernel memory, but only for
	   requests of at least one page (size & ~PAGE_MASK != 0) */
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	/* drop the tag reference if the allocation failed, as the
	   non-blocking variants below do */
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_nowait(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void
OSFree(
	void *addr,
	uint32_t size,
	OSMallocTag tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}