/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory fast.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

vm_map_t kalloc_map;
vm_size_t kalloc_map_size = 16 * 1024 * 1024;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;

/*
 * All allocations of size less than kalloc_max are rounded to the
 * next highest power of 2.  This allocator is built on top of
 * the zone allocator.  A zone is created for each potential size
 * that we are willing to get in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K;
 * thus 16 is a safe array size for k_zone and k_zone_name.
 *
 * Note that kalloc_max is somewhat confusingly named.
 * It represents the first power of two for which no zone exists.
 * kalloc_max_prerounded is the smallest allocation size, before
 * rounding, for which no zone exists.
 */
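
/*
 * Worked example (illustrative): a 200-byte request is rounded up to
 * 256 and served from the "kalloc.256" zone.  On a machine with 4KB
 * pages, kalloc_init() below sets kalloc_max to 16384, so requests of
 * 8193 bytes or more (kalloc_max_prerounded == 8193) bypass the zones
 * and are satisfied from kalloc_map via kmem_alloc() instead.
 */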

int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",     "kalloc.2",
	"kalloc.4",     "kalloc.8",
	"kalloc.16",    "kalloc.32",
	"kalloc.64",    "kalloc.128",
	"kalloc.256",   "kalloc.512",
	"kalloc.1024",  "kalloc.2048",
	"kalloc.4096",  "kalloc.8192",
	"kalloc.16384", "kalloc.32768"
};

/*
 * Max number of elements per zone.  zinit() rounds things up correctly.
 * Doing things this way permits each zone to have a different maximum
 * size based on need, rather than just guessing; it also means it's
 * patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,	/*     1 Byte  */
	1024,	/*     2 Byte  */
	1024,	/*     4 Byte  */
	1024,	/*     8 Byte  */
	1024,	/*    16 Byte  */
	4096,	/*    32 Byte  */
	4096,	/*    64 Byte  */
	4096,	/*   128 Byte  */
	4096,	/*   256 Byte  */
	1024,	/*   512 Byte  */
	1024,	/*  1024 Byte  */
	1024,	/*  2048 Byte  */
	1024,	/*  4096 Byte  */
	4096,	/*  8192 Byte  */
	64,	/* 16384 Byte  */
	64,	/* 32768 Byte  */
};
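
/*
 * Example of the resulting caps (illustrative): kalloc_init() below
 * passes k_zone_max[i] * size as the zone's maximum, so the 32-byte
 * zone is capped at 4096 * 32 = 128KB, while the 16384-byte zone is
 * capped at 64 * 16384 = 1MB.  zinit() rounds these figures as needed.
 */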

/* forward declarations */
void	*kalloc_canblock(
		vm_size_t	size,
		boolean_t	canblock);


/* OSMalloc local data declarations */
static
queue_head_t OSMalloc_tag_list;

decl_simple_lock_data(static, OSMalloc_tag_lock)

/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 * Initialize the memory allocator.  This should be called only
 * once on a system wide basis (i.e. first processor to get here
 * does the initialization).
 *
 * This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size;
	register int i;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE, &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = 0;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
	}
	OSMalloc_init();
}
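
/*
 * Sketch of the state after kalloc_init() on a 4KB-page system,
 * assuming KALLOC_MINSIZE is 16 (its value comes from kalloc.h):
 *
 *	k_zone[0..3]          = 0	(1..8 bytes: below KALLOC_MINSIZE)
 *	first_k_zone          = 4	("kalloc.16")
 *	k_zone[4..13]         = "kalloc.16" .. "kalloc.8192"
 *	kalloc_max            = 16384
 *	kalloc_max_prerounded = 8193
 */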

void *
kalloc_canblock(
	vm_size_t size,
	boolean_t canblock)
{
	register int zindex;
	register vm_size_t allocsize;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_wired so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block so we return if noblock */
		if (!canblock) {
			return(0);
		}
		if (kmem_alloc(kalloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
			addr = 0;

		if (addr) {
			kalloc_large_inuse++;
			kalloc_large_total += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}
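
/*
 * Example of the sizing loop above (illustrative): for size = 600 with
 * KALLOC_MINSIZE == 16, allocsize doubles 16 -> 32 -> 64 -> 128 -> 256
 * -> 512 -> 1024, advancing zindex in step, so the request is served
 * from the 1024-byte zone with 424 bytes of slack.
 */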

void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}
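
/*
 * Typical usage (illustrative sketch, with a hypothetical struct foo):
 *
 *	struct foo *p = (struct foo *) kalloc(sizeof(struct foo));
 *	...
 *	kfree(p, sizeof(struct foo));
 *
 * kalloc_noblock() is the variant for contexts that cannot block: it
 * returns NULL rather than waiting, so callers must check the result.
 */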


void
krealloc(
	void **addrp,
	vm_size_t old_size,
	vm_size_t new_size,
	simple_lock_t lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;

	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			if (KERN_SUCCESS != kmem_realloc(kalloc_map,
			    (vm_offset_t)*addrp, old_size,
			    (vm_offset_t *)&naddr, new_size)) {
				panic("krealloc: kmem_realloc");
				naddr = 0;
			}

			simple_lock(lock);

			/*
			 * kmem_realloc() doesn't free the old page range,
			 * so release it before overwriting the caller's
			 * pointer with the new allocation.
			 */
			kmem_free(kalloc_map, (vm_offset_t)*addrp, old_size);
			*addrp = (void *) naddr;

			kalloc_large_total += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (KERN_SUCCESS != kmem_alloc(kalloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = NULL;
			return;
		}
		kalloc_large_inuse++;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
}
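
/*
 * Locking contract (as implied above): the caller holds *lock on
 * entry, and krealloc() drops it around any allocation that may block,
 * re-taking it before updating *addrp.  Callers must therefore assume
 * the lock was released and re-acquired across the call.
 */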


void *
kget(
	vm_size_t size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}
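
/*
 * kget() is the non-blocking analogue of kalloc() for zone-sized
 * requests only: zget() returns NULL when the zone has no free
 * elements, and sizes at or above kalloc_max_prerounded panic rather
 * than fall back to kmem_alloc().
 */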

void
kfree(
	void *data,
	vm_size_t size)
{
	register int zindex;
	register vm_size_t freesize;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		kmem_free(kalloc_map, (vm_offset_t)data, size);

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}
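
/*
 * Note: kfree() recomputes the zone purely from the size argument, so
 * the caller must pass the same size it originally gave to kalloc();
 * e.g. memory obtained with kalloc(600) must be released with
 * kfree(p, 600) (or any size that rounds to the same 1024-byte zone).
 */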

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif


void
kalloc_fake_zone_info(int *count, vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size,
		      vm_size_t *alloc_size, int *collectable, int *exhaustable)
{
	*count       = kalloc_large_inuse;
	*cur_size    = kalloc_large_total;
	*max_size    = kalloc_large_max;
	*elem_size   = kalloc_large_total / kalloc_large_inuse;
	*alloc_size  = kalloc_large_total / kalloc_large_inuse;
	*collectable = 0;
	*exhaustable = 0;
}
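
/*
 * This reports the out-of-zone (kmem_alloc'd) allocations as a single
 * pseudo-zone for zone statistics reporting.  Note that the element
 * and alloc sizes are averages, and the divisions assume
 * kalloc_large_inuse is non-zero.
 */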


void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);
	simple_lock_init(&OSMalloc_tag_lock, 0);
}

OSMallocTag
OSMalloc_Tagalloc(
	const char *str,
	uint32_t flags)
{
	OSMallocTag OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	simple_lock(&OSMalloc_tag_lock);
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	simple_unlock(&OSMalloc_tag_lock);
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}
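
/*
 * Typical tag lifecycle (illustrative sketch; the tag name is a
 * made-up example):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver",
 *					    OSMT_DEFAULT);
 *	void *buf = OSMalloc(1024, tag);
 *	...
 *	OSFree(buf, 1024, tag);
 *	OSMalloc_Tagfree(tag);
 *
 * The tag is reference-counted; it is not destroyed until the last
 * outstanding allocation against it has been freed.
 */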

void
OSMalloc_Tagref(
	OSMallocTag tag)
{
	if ((tag->OSMT_state & OSMT_VALID_MASK) != OSMT_VALID)
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add((uint32_t *)(&tag->OSMT_refcnt), 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag tag)
{
	if ((tag->OSMT_state & OSMT_VALID_MASK) != OSMT_VALID)
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			simple_lock(&OSMalloc_tag_lock);
			(void)remque((queue_entry_t)tag);
			simple_unlock(&OSMalloc_tag_lock);
			kfree((void *)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub((uint32_t *)(&tag->OSMT_refcnt), 1) == 0) {
		simple_lock(&OSMalloc_tag_lock);
		(void)remque((queue_entry_t)tag);
		simple_unlock(&OSMalloc_tag_lock);
		kfree((void *)tag, sizeof(*tag));
	}
}

void *
OSMalloc(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;
	kern_return_t kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			panic("OSMalloc(): kmem_alloc_pageable() failed 0x%08X\n", kr);
	} else
		addr = kalloc((vm_size_t)size);

	return(addr);
}
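
/*
 * The (size & ~PAGE_MASK) test above selects the pageable path only
 * for requests of at least one page.  Worked example (illustrative),
 * with 4KB pages (PAGE_MASK == 0xFFF): a 512-byte request has no bits
 * above the page mask and goes to kalloc() even for a pageable tag,
 * while an 8192-byte request does and is served pageable from
 * kernel_map.
 */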

void *
OSMalloc_nowait(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void
OSFree(
	void *addr,
	uint32_t size,
	OSMallocTag tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}
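
/*
 * As with kfree(), the size and tag passed to OSFree() must match the
 * original OSMalloc() call: the size selects between the kmem_free()
 * and kfree() paths, and the tag reference taken at allocation time is
 * dropped here.
 */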