/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *	Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *	School of Computer Science
 *	Carnegie Mellon University
 *	Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/kalloc.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	General kernel memory allocator.  This allocator is designed
 *	to be used by the kernel to manage dynamic memory quickly.
 */

#include <zone_debug.h>

#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/lock.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_map.h>
#include <libkern/OSMalloc.h>

#ifdef MACH_BSD
zone_t kalloc_zone(vm_size_t);
#endif

#define KALLOC_MAP_SIZE_MIN	(16 * 1024 * 1024)
#define KALLOC_MAP_SIZE_MAX	(128 * 1024 * 1024)
vm_map_t kalloc_map;
vm_size_t kalloc_max;
vm_size_t kalloc_max_prerounded;
vm_size_t kalloc_kernmap_size;	/* size of kallocs that can come from kernel map */

unsigned int kalloc_large_inuse;
vm_size_t    kalloc_large_total;
vm_size_t    kalloc_large_max;
vm_size_t    kalloc_largest_allocated = 0;
uint64_t     kalloc_large_sum;

int	kalloc_fake_zone_index = -1;	/* index of our fake zone in statistics arrays */

vm_offset_t	kalloc_map_min;
vm_offset_t	kalloc_map_max;

#ifdef	MUTEX_ZONE
/*
 * Diagnostic code to track mutexes separately, rather than
 * through the usual power-of-2 kalloc zones.
 */
zone_t lck_mtx_zone;
#endif

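/*
 * Accounting hooks for the large-allocation path: they charge (or credit)
 * "bytes" to the current thread's allocation counters and, when the kalloc
 * fake zone has been registered, to the owning task's per-zone statistics
 * as well.
 */
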
static void
KALLOC_ZINFO_SALLOC(vm_size_t bytes)
{
	thread_t thr = current_thread();
	task_t task;
	zinfo_usage_t zinfo;

	thr->tkm_shared.alloc += bytes;
	if (kalloc_fake_zone_index != -1 &&
	    (task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
		zinfo[kalloc_fake_zone_index].alloc += bytes;
}

static void
KALLOC_ZINFO_SFREE(vm_size_t bytes)
{
	thread_t thr = current_thread();
	task_t task;
	zinfo_usage_t zinfo;

	thr->tkm_shared.free += bytes;
	if (kalloc_fake_zone_index != -1 &&
	    (task = thr->task) != NULL && (zinfo = task->tkm_zinfo) != NULL)
		zinfo[kalloc_fake_zone_index].free += bytes;
}

/*
 * All allocations of size less than kalloc_max are rounded to the
 * next highest power of 2.  This allocator is built on top of
 * the zone allocator.  A zone is created for each potential size
 * that we are willing to hand out in small blocks.
 *
 * We assume that kalloc_max is not greater than 64K;
 * thus 16 is a safe array size for k_zone and k_zone_name.
 *
 * Note that kalloc_max is somewhat confusingly named.
 * It represents the first power of two for which no zone exists.
 * kalloc_max_prerounded is the smallest allocation size, before
 * rounding, for which no zone exists.
 *
 * Allocations of kalloc_kernmap_size bytes or more are drawn from
 * the kernel map rather than from kalloc_map.
 */

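/*
 * Example of the rounding policy: kalloc(100) is rounded up to 128
 * bytes and served from k_zone[7] ("kalloc.128"), since zone index i
 * holds blocks of 2^i bytes.  Requests of kalloc_max_prerounded bytes
 * or more bypass the zones entirely.
 */
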
int first_k_zone = -1;
struct zone *k_zone[16];
static const char *k_zone_name[16] = {
	"kalloc.1",	"kalloc.2",
	"kalloc.4",	"kalloc.8",
	"kalloc.16",	"kalloc.32",
	"kalloc.64",	"kalloc.128",
	"kalloc.256",	"kalloc.512",
	"kalloc.1024",	"kalloc.2048",
	"kalloc.4096",	"kalloc.8192",
	"kalloc.16384",	"kalloc.32768"
};

/*
 * Max number of elements per zone.  zinit rounds things up correctly.
 * Doing things this way permits each zone to have a different maximum
 * size based on need, rather than just guessing; it also means it's
 * patchable in case you're wrong!
 */
unsigned long k_zone_max[16] = {
	1024,	/*     1 byte  */
	1024,	/*     2 bytes */
	1024,	/*     4 bytes */
	1024,	/*     8 bytes */
	1024,	/*    16 bytes */
	4096,	/*    32 bytes */
	4096,	/*    64 bytes */
	4096,	/*   128 bytes */
	4096,	/*   256 bytes */
	1024,	/*   512 bytes */
	1024,	/*  1024 bytes */
	1024,	/*  2048 bytes */
	1024,	/*  4096 bytes */
	4096,	/*  8192 bytes */
	64,	/* 16384 bytes */
	64,	/* 32768 bytes */
};

/* forward declarations */
void *kalloc_canblock(
	vm_size_t	size,
	boolean_t	canblock);


lck_grp_t *kalloc_lck_grp;
lck_mtx_t  kalloc_lock;

#define kalloc_spin_lock()	lck_mtx_lock_spin(&kalloc_lock)
#define kalloc_unlock()		lck_mtx_unlock(&kalloc_lock)


/* OSMalloc local data declarations */
static
queue_head_t OSMalloc_tag_list;

lck_grp_t *OSMalloc_tag_lck_grp;
lck_mtx_t  OSMalloc_tag_lock;

#define OSMalloc_tag_spin_lock()	lck_mtx_lock_spin(&OSMalloc_tag_lock)
#define OSMalloc_tag_unlock()		lck_mtx_unlock(&OSMalloc_tag_lock)


/* OSMalloc forward declarations */
void OSMalloc_init(void);
void OSMalloc_Tagref(OSMallocTag tag);
void OSMalloc_Tagrele(OSMallocTag tag);

/*
 * Initialize the memory allocator.  This should be called only
 * once on a system-wide basis (i.e., the first processor to get
 * here does the initialization).
 *
 * This initializes all of the zones.
 */

void
kalloc_init(
	void)
{
	kern_return_t retval;
	vm_offset_t min;
	vm_size_t size, kalloc_map_size;
	register int i;

	/*
	 * Scale the kalloc_map_size to physical memory size: stay below
	 * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel).
	 */
	kalloc_map_size = (vm_size_t)(sane_size >> 5);
#if !__LP64__
	if (kalloc_map_size > KALLOC_MAP_SIZE_MAX)
		kalloc_map_size = KALLOC_MAP_SIZE_MAX;
#endif /* !__LP64__ */
	if (kalloc_map_size < KALLOC_MAP_SIZE_MIN)
		kalloc_map_size = KALLOC_MAP_SIZE_MIN;

	retval = kmem_suballoc(kernel_map, &min, kalloc_map_size,
			       FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT,
			       &kalloc_map);

	if (retval != KERN_SUCCESS)
		panic("kalloc_init: kmem_suballoc failed");

	kalloc_map_min = min;
	kalloc_map_max = min + kalloc_map_size - 1;

	/*
	 * Ensure that zones up to size 8192 bytes exist.
	 * This is desirable because messages are allocated
	 * with kalloc, and messages up through size 8192 are common.
	 */

	if (PAGE_SIZE < 16*1024)
		kalloc_max = 16*1024;
	else
		kalloc_max = PAGE_SIZE;
	kalloc_max_prerounded = kalloc_max / 2 + 1;
	/* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */
	kalloc_kernmap_size = (kalloc_max * 16) + 1;
	kalloc_largest_allocated = kalloc_kernmap_size;
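
	/*
	 * Note on kalloc_max_prerounded: any request larger than
	 * kalloc_max / 2 rounds up to kalloc_max, the first power of two
	 * with no backing zone, so kalloc_max / 2 + 1 is the smallest
	 * pre-rounding size that must fall through to kmem_alloc.
	 */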

	/*
	 * Allocate a zone for each size we are going to handle.
	 * We specify non-paged memory.  Don't charge the caller
	 * for the allocation, as we aren't sure how the memory
	 * will be handled.
	 */
	for (i = 0, size = 1; size < kalloc_max; i++, size <<= 1) {
		if (size < KALLOC_MINSIZE) {
			k_zone[i] = NULL;
			continue;
		}
		if (size == KALLOC_MINSIZE) {
			first_k_zone = i;
		}
		k_zone[i] = zinit(size, k_zone_max[i] * size, size,
				  k_zone_name[i]);
		zone_change(k_zone[i], Z_CALLERACCT, FALSE);
	}
	kalloc_lck_grp = lck_grp_alloc_init("kalloc.large", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&kalloc_lock, kalloc_lck_grp, LCK_ATTR_NULL);
	OSMalloc_init();
#ifdef	MUTEX_ZONE
	lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024*256, 4096, "lck_mtx");
#endif
}

void *
kalloc_canblock(
	vm_size_t size,
	boolean_t canblock)
{
	register int zindex;
	register vm_size_t allocsize;
	vm_map_t alloc_map = VM_MAP_NULL;

	/*
	 * If size is too large for a zone, then use kmem_alloc.
	 * (We use kmem_alloc instead of kmem_alloc_kobject so that
	 * krealloc can use kmem_realloc.)
	 */

	if (size >= kalloc_max_prerounded) {
		void *addr;

		/* kmem_alloc could block, so return NULL if the caller can't block */
		if (!canblock) {
			return(NULL);
		}

		if (size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

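		/*
		 * Try the chosen map first; if the kalloc_map submap is
		 * exhausted, fall back to the kernel map before giving up.
		 */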
		if (kmem_alloc(alloc_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS) {
			if (alloc_map != kernel_map) {
				if (kmem_alloc(kernel_map, (vm_offset_t *)&addr, size) != KERN_SUCCESS)
					addr = NULL;
			}
			else
				addr = NULL;
		}

		if (addr != NULL) {
			kalloc_spin_lock();
			/*
			 * Thread-safe version of the workaround for 4740071
			 * (a double FREE())
			 */
			if (size > kalloc_largest_allocated)
				kalloc_largest_allocated = size;

			kalloc_large_inuse++;
			kalloc_large_total += size;
			kalloc_large_sum += size;

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

			kalloc_unlock();

			KALLOC_ZINFO_SALLOC(size);
		}
		return(addr);
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */
	assert(allocsize < kalloc_max);
	return(zalloc_canblock(k_zone[zindex], canblock));
}

void *
kalloc(
	vm_size_t size)
{
	return( kalloc_canblock(size, TRUE) );
}

void *
kalloc_noblock(
	vm_size_t size)
{
	return( kalloc_canblock(size, FALSE) );
}

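/*
 * Illustrative usage (a sketch, not code from this file): callers pair
 * each kalloc() with a kfree() of the same size, e.g.
 *
 *	struct foo *p = (struct foo *) kalloc(sizeof (struct foo));
 *	if (p != NULL) {
 *		... use p ...
 *		kfree(p, sizeof (struct foo));
 *	}
 *
 * kalloc does not record the allocation size; the caller must supply
 * it again at kfree time ("struct foo" here is hypothetical).
 */
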
void
krealloc(
	void **addrp,
	vm_size_t old_size,
	vm_size_t new_size,
	simple_lock_t lock)
{
	register int zindex;
	register vm_size_t allocsize;
	void *naddr;
	vm_map_t alloc_map = VM_MAP_NULL;

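	/*
	 * The caller passes in a simple lock that it holds across the
	 * call; krealloc drops it around blocking operations (kalloc,
	 * kmem_alloc, kmem_realloc) and reacquires it before updating
	 * *addrp, so the pointer swap itself happens under the lock.
	 */
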
	/* can only be used for increasing allocation size */

	assert(new_size > old_size);

	/* if old_size is zero, then we are simply allocating */

	if (old_size == 0) {
		simple_unlock(lock);
		naddr = kalloc(new_size);
		simple_lock(lock);
		*addrp = naddr;
		return;
	}

	/* if old block was kmem_alloc'd, then use kmem_realloc if necessary */

	if (old_size >= kalloc_max_prerounded) {
		vm_offset_t oaddr = (vm_offset_t)*addrp;

		if (old_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;

		old_size = round_page(old_size);
		new_size = round_page(new_size);
		if (new_size > old_size) {

			/* kmem_realloc can block, so drop the caller's lock */
			simple_unlock(lock);
			if (KERN_SUCCESS != kmem_realloc(alloc_map,
			    oaddr, old_size,
			    (vm_offset_t *)&naddr, new_size))
				panic("krealloc: kmem_realloc");

			simple_lock(lock);
			*addrp = (void *) naddr;

			/*
			 * kmem_realloc() doesn't free the old page range,
			 * so release the original block (not the new one).
			 */
			kmem_free(alloc_map, oaddr, old_size);

			kalloc_large_total += (new_size - old_size);
			kalloc_large_sum += (new_size - old_size);

			if (kalloc_large_total > kalloc_large_max)
				kalloc_large_max = kalloc_large_total;

		}
		return;
	}

	/* compute the size of the block that we actually allocated */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < old_size) {
		allocsize <<= 1;
		zindex++;
	}

	/* if new size fits in old block, then return */

	if (new_size <= allocsize) {
		return;
	}

	/* if new size does not fit in zone, kmem_alloc it, else zalloc it */

	simple_unlock(lock);
	if (new_size >= kalloc_max_prerounded) {
		if (new_size >= kalloc_kernmap_size)
			alloc_map = kernel_map;
		else
			alloc_map = kalloc_map;
		if (KERN_SUCCESS != kmem_alloc(alloc_map,
		    (vm_offset_t *)&naddr, new_size)) {
			panic("krealloc: kmem_alloc");
			simple_lock(lock);
			*addrp = NULL;
			return;
		}
		kalloc_spin_lock();

		kalloc_large_inuse++;
		kalloc_large_sum += new_size;
		kalloc_large_total += new_size;

		if (kalloc_large_total > kalloc_large_max)
			kalloc_large_max = kalloc_large_total;

		kalloc_unlock();

		KALLOC_ZINFO_SALLOC(new_size);
	} else {
		register int new_zindex;

		allocsize <<= 1;
		new_zindex = zindex + 1;
		while (allocsize < new_size) {
			allocsize <<= 1;
			new_zindex++;
		}
		naddr = zalloc(k_zone[new_zindex]);
	}
	simple_lock(lock);

	/* copy existing data */

	bcopy((const char *)*addrp, (char *)naddr, old_size);

	/* free old block, and return */

	zfree(k_zone[zindex], *addrp);

	/* set up new address */

	*addrp = (void *) naddr;
}

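/*
 * kget: a non-blocking kalloc.  It only services sizes that map to a
 * zone (zget does not block), so oversize requests panic rather than
 * silently failing.
 */
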
void *
kget(
	vm_size_t size)
{
	register int zindex;
	register vm_size_t allocsize;

	/* size must not be too large for a zone */

	if (size >= kalloc_max_prerounded) {
		/* This will never work, so we might as well panic */
		panic("kget");
	}

	/* compute the size of the block that we will actually allocate */

	allocsize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (allocsize < size) {
		allocsize <<= 1;
		zindex++;
	}

	/* allocate from the appropriate zone */

	assert(allocsize < kalloc_max);
	return(zget(k_zone[zindex]));
}

volatile SInt32 kfree_nop_count = 0;

void
kfree(
	void *data,
	vm_size_t size)
{
	register int zindex;
	register vm_size_t freesize;
	vm_map_t alloc_map = kernel_map;

	/* if size was too large for a zone, then use kmem_free */

	if (size >= kalloc_max_prerounded) {
		if ((((vm_offset_t) data) >= kalloc_map_min) && (((vm_offset_t) data) <= kalloc_map_max))
			alloc_map = kalloc_map;
		if (size > kalloc_largest_allocated) {
			/*
			 * Work around double FREEs of small MALLOCs.
			 * This used to end up being a nop, since a pointer
			 * being freed from an alloc backed by the zalloc
			 * world could never show up in the kalloc_map.
			 * The kernel_map is a different issue: once a block
			 * was released back into the zalloc pool, a pointer
			 * would get written over the 'size' that MALLOC was
			 * keeping in the first 4 bytes of the underlying
			 * allocation.  That pointer looks like a very large
			 * size on the second FREE and pushes the kfree into
			 * the kernel_map, where we would remove a ton of
			 * virtual space before panicking.  This check makes
			 * us ignore a kfree whose size must be 'bogus'.
			 * Note that the size might be wrong for reasons
			 * other than this scenario, but it would still be
			 * wrong and cause serious damage.
			 */

			OSAddAtomic(1, &kfree_nop_count);
			return;
		}
		kmem_free(alloc_map, (vm_offset_t)data, size);

		kalloc_spin_lock();

		kalloc_large_total -= size;
		kalloc_large_inuse--;

		kalloc_unlock();

		KALLOC_ZINFO_SFREE(size);
		return;
	}

	/* compute the size of the block that we actually allocated from */

	freesize = KALLOC_MINSIZE;
	zindex = first_k_zone;
	while (freesize < size) {
		freesize <<= 1;
		zindex++;
	}

	/* free to the appropriate zone */

	assert(freesize < kalloc_max);
	zfree(k_zone[zindex], data);
}

#ifdef MACH_BSD
zone_t
kalloc_zone(
	vm_size_t size)
{
	register int zindex = 0;
	register vm_size_t allocsize;

	/* compute the size of the block that we will actually allocate */

	allocsize = size;
	if (size <= kalloc_max) {
		allocsize = KALLOC_MINSIZE;
		zindex = first_k_zone;
		while (allocsize < size) {
			allocsize <<= 1;
			zindex++;
		}
		return (k_zone[zindex]);
	}
	return (ZONE_NULL);
}
#endif

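/*
 * The "kalloc.large" pseudo-zone: out-of-zone allocations are reported
 * to the zone statistics machinery as if they belonged to a real zone,
 * using the slot registered in kalloc_fake_zone_index.
 */
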
void
kalloc_fake_zone_init(int zone_index)
{
	kalloc_fake_zone_index = zone_index;
}

void
kalloc_fake_zone_info(int *count,
		      vm_size_t *cur_size, vm_size_t *max_size, vm_size_t *elem_size, vm_size_t *alloc_size,
		      uint64_t *sum_size, int *collectable, int *exhaustable, int *caller_acct)
{
	*count = kalloc_large_inuse;
	*cur_size = kalloc_large_total;
	*max_size = kalloc_large_max;

	if (kalloc_large_inuse) {
		*elem_size = kalloc_large_total / kalloc_large_inuse;
		*alloc_size = kalloc_large_total / kalloc_large_inuse;
	} else {
		*elem_size = 0;
		*alloc_size = 0;
	}
	*sum_size = kalloc_large_sum;
	*collectable = 0;
	*exhaustable = 0;
	*caller_acct = 0;
}

void
OSMalloc_init(
	void)
{
	queue_init(&OSMalloc_tag_list);

	OSMalloc_tag_lck_grp = lck_grp_alloc_init("OSMalloc_tag", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&OSMalloc_tag_lock, OSMalloc_tag_lck_grp, LCK_ATTR_NULL);
}

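/*
 * OSMalloc tags: each client allocates a tag, which is reference
 * counted.  Every outstanding OSMalloc'd block holds a reference on
 * its tag, so the tag (and its entry on OSMalloc_tag_list) persists
 * until OSMalloc_Tagfree() has been called and the last block has
 * been OSFree'd.
 */
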
OSMallocTag
OSMalloc_Tagalloc(
	const char *str,
	uint32_t flags)
{
	OSMallocTag OSMTag;

	OSMTag = (OSMallocTag)kalloc(sizeof(*OSMTag));

	bzero((void *)OSMTag, sizeof(*OSMTag));

	if (flags & OSMT_PAGEABLE)
		OSMTag->OSMT_attr = OSMT_ATTR_PAGEABLE;

	OSMTag->OSMT_refcnt = 1;

	strncpy(OSMTag->OSMT_name, str, OSMT_MAX_NAME);

	OSMalloc_tag_spin_lock();
	enqueue_tail(&OSMalloc_tag_list, (queue_entry_t)OSMTag);
	OSMalloc_tag_unlock();
	OSMTag->OSMT_state = OSMT_VALID;
	return(OSMTag);
}

void
OSMalloc_Tagref(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagref(): bad state 0x%08X\n", tag->OSMT_state);

	(void)hw_atomic_add(&tag->OSMT_refcnt, 1);
}

void
OSMalloc_Tagrele(
	OSMallocTag tag)
{
	if (!((tag->OSMT_state & OSMT_VALID_MASK) == OSMT_VALID))
		panic("OSMalloc_Tagrele(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		if (hw_compare_and_store(OSMT_VALID|OSMT_RELEASED, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state)) {
			OSMalloc_tag_spin_lock();
			(void)remque((queue_entry_t)tag);
			OSMalloc_tag_unlock();
			kfree((void *)tag, sizeof(*tag));
		} else
			panic("OSMalloc_Tagrele(): refcnt 0\n");
	}
}

void
OSMalloc_Tagfree(
	OSMallocTag tag)
{
	if (!hw_compare_and_store(OSMT_VALID, OSMT_VALID|OSMT_RELEASED, &tag->OSMT_state))
		panic("OSMalloc_Tagfree(): bad state 0x%08X\n", tag->OSMT_state);

	if (hw_atomic_sub(&tag->OSMT_refcnt, 1) == 0) {
		OSMalloc_tag_spin_lock();
		(void)remque((queue_entry_t)tag);
		OSMalloc_tag_unlock();
		kfree((void *)tag, sizeof(*tag));
	}
}

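/*
 * OSMalloc: for pageable tags, requests of a page or more
 * ((size & ~PAGE_MASK) != 0) get pageable kernel memory; everything
 * else goes through kalloc.  The tag reference taken here is dropped
 * again if the allocation fails.
 */
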
void *
OSMalloc(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;
	kern_return_t kr;

	OSMalloc_Tagref(tag);
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {

		if ((kr = kmem_alloc_pageable(kernel_map, (vm_offset_t *)&addr, size)) != KERN_SUCCESS)
			addr = NULL;
	} else
		addr = kalloc((vm_size_t)size);

	if (!addr)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_nowait(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	/* XXX: use non-blocking kalloc for now */
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void *
OSMalloc_noblock(
	uint32_t size,
	OSMallocTag tag)
{
	void *addr = NULL;

	if (tag->OSMT_attr & OSMT_PAGEABLE)
		return(NULL);

	OSMalloc_Tagref(tag);
	addr = kalloc_noblock((vm_size_t)size);
	if (addr == NULL)
		OSMalloc_Tagrele(tag);

	return(addr);
}

void
OSFree(
	void *addr,
	uint32_t size,
	OSMallocTag tag)
{
	if ((tag->OSMT_attr & OSMT_PAGEABLE)
	    && (size & ~PAGE_MASK)) {
		kmem_free(kernel_map, (vm_offset_t)addr, size);
	} else
		kfree((void *)addr, size);

	OSMalloc_Tagrele(tag);
}
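
/*
 * Illustrative OSMalloc lifecycle from a kext (a sketch; the tag name
 * and size are made up):
 *
 *	OSMallocTag tag = OSMalloc_Tagalloc("com.example.driver", OSMT_DEFAULT);
 *	void *buf = OSMalloc(1024, tag);
 *	if (buf != NULL) {
 *		... use buf ...
 *		OSFree(buf, 1024, tag);
 *	}
 *	OSMalloc_Tagfree(tag);
 *
 * The tag itself is freed only after OSMalloc_Tagfree() and the last
 * OSFree() have both dropped their references.
 */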
819 | } |