/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory fast.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;

unsigned int kalloc_large_inuse;
vm_size_t kalloc_large_total;
vm_size_t kalloc_large_max;

/*
 * All allocations of size less than kalloc_max are rounded to the
 * next highest power of 2.  This allocator is built on top of
 * the zone allocator.  A zone is created for each potential size
 * that we are willing to get in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K;
 * thus 16 is a safe array size for k_zone and k_zone_name.
 *
 * Note that kalloc_max is somewhat confusingly named.
 * It represents the first power of two for which no zone exists.
 * kalloc_max_prerounded is the smallest allocation size, before
 * rounding, for which no zone exists.
 */
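
/*
 * Worked example (illustrative, assuming KALLOC_MINSIZE is 16, so
 * first_k_zone == 4): a 100-byte request is rounded up through
 * 16 -> 32 -> 64 -> 128 and is served from k_zone[7], "kalloc.128".
 * The same doubling loop appears in kalloc_canblock(), krealloc(),
 * kget() and kfree() below.
 */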

int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};

/*
 * Max number of elements per zone.  zinit rounds things up correctly.
 * Doing things this way permits each zone to have a different maximum
 * size based on need, rather than just guessing; it also means it's
 * patchable in case you're wrong!
 * (A worked example follows the table.)
 */
unsigned long k_zone_max[16] = {
	1024,	/*     1 Byte */
	1024,	/*     2 Byte */
	1024,	/*     4 Byte */
	1024,	/*     8 Byte */
	1024,	/*    16 Byte */
	4096,	/*    32 Byte */
	4096,	/*    64 Byte */
	4096,	/*   128 Byte */
	4096,	/*   256 Byte */
	1024,	/*   512 Byte */
	1024,	/*  1024 Byte */
	1024,	/*  2048 Byte */
	1024,	/*  4096 Byte */
	4096,	/*  8192 Byte */
	64,	/* 16384 Byte */
	64,	/* 32768 Byte */
};
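
/*
 * Worked example (illustrative): kalloc_init() below sizes each zone
 * as k_zone_max[i] * size, so "kalloc.128" is capped at
 * 4096 elements * 128 bytes = 512KB, and "kalloc.8192" at
 * 4096 * 8192 bytes = 32MB.  zinit() rounds these figures up as needed.
 */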

/* forward declarations */
void * kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);


/* OSMalloc local data declarations */
static
queue_head_t OSMalloc_tag_list;

decl_simple_lock_data(static,OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 *	Initialize the memory allocator.  This should be called only
 *	once on a system wide basis (i.e. first processor to get here
 *	does the initialization).
 *
 *	This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
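
	/*
	 * With kalloc_max = 16K this sets kalloc_max_prerounded to 8193:
	 * requests of up to 8192 bytes are served from a zone, and
	 * anything larger falls through to kmem_alloc().
	 */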
	kalloc_max_prerounded = kalloc_max / 2 + 1;

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
	OSMalloc_init();
}

void *
kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock)
{
	register int zindex;
	register vm_size_t allocsize;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(0);
		}
		if (kmem_alloc(kalloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}

void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}
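
/*
 * Example usage (illustrative; "struct foo" is hypothetical):
 *
 *	struct foo *fp = (struct foo *) kalloc(sizeof (struct foo));
 *	...
 *	kfree(fp, sizeof (struct foo));
 *
 * Callers must remember the allocation size: kfree() uses it to
 * recompute which zone (or kmem range) the block came from.
 */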


void
krealloc(
	void		**addrp,
	vm_size_t	old_size,
	vm_size_t	new_size,
	simple_lock_t	lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			/* drop the lock around the (possibly blocking) call */
			simple_unlock(lock);
			if (KERN_SUCCESS != kmem_realloc(kalloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);

			/* kmem_realloc() doesn't free the old page range. */
			kmem_free(kalloc_map, (vm_offset_t)*addrp, old_size);
			*addrp = (void *) naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (KERN_SUCCESS != kmem_alloc(kalloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = NULL;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
}
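
/*
 * Example usage (illustrative; "table" and "table_lock" are
 * hypothetical): growing a structure guarded by a caller-supplied
 * simple lock.  krealloc() drops and retakes the lock around any
 * blocking allocation, so the caller must tolerate both the data
 * moving and the lock being briefly released.
 *
 *	simple_lock(&table_lock);
 *	krealloc((void **)&table, old_size, new_size, &table_lock);
 *	simple_unlock(&table_lock);
 */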


void *
kget(
	vm_size_t size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}

void
kfree(
	void		*data,
	vm_size_t	size)
{
	register int zindex;
	register vm_size_t freesize;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		kmem_free(kalloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif


void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count = kalloc_large_inuse;
	*cur_size = kalloc_large_total;
	*max_size = kalloc_large_max;
	/* avoid a division by zero when there are no large allocations */
	if (kalloc_large_inuse != 0) {
		*elem_size = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size = 0;
		*alloc_size = 0;
	}
	*collectable = 0;
	*exhaustable = 0;
}


void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char	*str,
	uint32_t	flags)
{
	OSMallocTag OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}

void
OSMalloc_Tagref(
	OSMallocTag	tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag	tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void*)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag	tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void*)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t	size,
	OSMallocTag	tag)
{
	void		*addr=NULL;
	kern_return_t	kr;

	OSMalloc_Tagref(tag);
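	/*
	 * "size & ~PAGE_MASK" is non-zero only for requests of at least
	 * one full page, so only page-sized or larger pageable requests
	 * go to kmem_alloc_pageable(); everything else uses kalloc().
	 */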
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	return(addr);
}

void *
OSMalloc_nowait(
	uint32_t	size,
	OSMallocTag	tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t	size,
	OSMallocTag	tag)
{
	void	*addr=NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void
OSFree(
	void		*addr,
	uint32_t	size,
	OSMallocTag	tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void*)addr, size);

	OSMalloc_Tagrele(tag);
}
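
/*
 * Example usage of the OSMalloc tag API (illustrative; the tag name
 * is hypothetical):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
 *	void *buf = OSMalloc(1024, tag);
 *	...
 *	OSFree(buf, 1024, tag);
 *	OSMalloc_Tagfree(tag);
 *
 * Tags attribute allocations to their owner; a tag is reference
 * counted and is only destroyed once every allocation made against
 * it has been freed.
 */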