/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: kern/gzalloc.c
 * Author: Derek Kumar
 *
 * "Guard mode" zone allocator, used to trap use-after-free errors,
 * overruns, underruns, mismatched allocations/frees, uninitialized
 * zone element use, timing dependent races etc.
 *
 * The allocator is configured by these boot-args:
 * gzalloc_size=<size>: target all zones with elements of <size> bytes
 * gzalloc_min=<size>: target zones with elements >= size
 * gzalloc_max=<size>: target zones with elements <= size
 * gzalloc_min/max can be specified in conjunction to target a range of
 * sizes
 * gzalloc_fc_size=<size>: number of zone elements (effectively page
 * multiple sized) to retain in the free VA cache. This cache is evicted
 * (backing pages and VA released) in a least-recently-freed fashion.
 * Larger free VA caches allow for a longer window of opportunity to trap
 * delayed use-after-free operations, but use more memory.
 * -gzalloc_wp: Write protect, rather than unmap, freed allocations
 * lingering in the free VA cache. Useful to disambiguate between
 * read-after-frees/read overruns and writes. Also permits direct inspection
 * of the freed element in the cache via the kernel debugger. As each
 * element has a "header" (trailer in underflow detection mode), the zone
 * of origin of the element can be easily determined in this mode.
 * -gzalloc_uf_mode: Underflow detection mode, where the guard page
 * adjoining each element is placed *before* the element page rather than
 * after. The element is also located at the top of the page, rather than
 * abutting the bottom as with the standard overflow detection mode.
 * -gzalloc_noconsistency: disable consistency checks that flag mismatched
 * frees, corruptions of the header/trailer signatures etc.
 * -nogzalloc_mode: Disables the guard mode allocator. The DEBUG kernel
 * enables the guard allocator for zones sized 1K (if present) by
 * default; this option can disable that behaviour.
 * gzname=<name>: target a zone by name. Can be coupled with size-based
 * targeting. Naming conventions match those of the zlog boot-arg, i.e.
 * "a period in the logname will match a space in the zone name".
 * -gzalloc_no_dfree_check: Eliminate double free checks.
 * gzalloc_zscale=<value>: specify size multiplier for the dedicated gzalloc submap.
 * Illustrative combinations are sketched in the comment below.
 */
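
/*
 * Example boot-arg combinations (sizes and names below are illustrative only):
 *   gzalloc_size=1024 -gzalloc_wp
 *       guard every zone whose element size is exactly 1024 bytes and
 *       write-protect, rather than unmap, freed elements in the VA cache.
 *   gzalloc_min=512 gzalloc_max=2048 gzalloc_fc_size=4096
 *       guard zones with elements between 512 and 2048 bytes and retain a
 *       larger free VA cache to widen the use-after-free detection window.
 *   gzname=pmap -gzalloc_uf_mode
 *       guard the "pmap" zone by name and place the guard page before each
 *       element to catch underruns instead of overruns.
 */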

#include <zone_debug.h>

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <mach/vm_map.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <pexpert/pexpert.h>

#include <machine/machparam.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

extern boolean_t vm_kernel_ready, kmem_ready;
boolean_t gzalloc_mode = FALSE;
uint32_t pdzalloc_count, pdzfree_count;

#define GZALLOC_MIN_DEFAULT (1024)
#define GZDEADZONE ((zone_t) 0xDEAD201E)
#define GZALLOC_SIGNATURE (0xABADCAFE)
#define GZALLOC_RESERVE_SIZE_DEFAULT (2 * 1024 * 1024)
#define GZFC_DEFAULT_SIZE (1536)

char gzalloc_fill_pattern = 0x67; /* 'g' */

uint32_t gzalloc_min = ~0U;
uint32_t gzalloc_max = 0;
uint32_t gzalloc_size = 0;
uint64_t gzalloc_allocated, gzalloc_freed, gzalloc_early_alloc, gzalloc_early_free, gzalloc_wasted;
boolean_t gzalloc_uf_mode = FALSE, gzalloc_consistency_checks = TRUE, gzalloc_dfree_check = TRUE;
vm_prot_t gzalloc_prot = VM_PROT_NONE;
uint32_t gzalloc_guard = KMA_GUARD_LAST;
uint32_t gzfc_size = GZFC_DEFAULT_SIZE;
uint32_t gzalloc_zonemap_scale = 6;

vm_map_t gzalloc_map;
vm_offset_t gzalloc_map_min, gzalloc_map_max;
vm_offset_t gzalloc_reserve;
vm_size_t gzalloc_reserve_size;

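/* Metadata recorded alongside each guarded element: the zone of origin,
 * the element size at allocation time, and a signature used to detect
 * header corruption and mismatched frees. In the default (overflow)
 * mode this header immediately precedes the element; in underflow
 * detection mode it trails the element instead.
 */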
typedef struct gzalloc_header {
	zone_t gzone;
	uint32_t gzsize;
	uint32_t gzsig;
} gzhdr_t;

#define GZHEADER_SIZE (sizeof(gzhdr_t))

extern zone_t vm_page_zone;

static zone_t gztrackzone = NULL;
static char gznamedzone[MAX_ZONE_NAME] = "";

void
gzalloc_reconfigure(__unused zone_t z)
{
	/* Nothing for now */
}

boolean_t
gzalloc_enabled(void)
{
	return gzalloc_mode;
}

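/* A zone is subject to guard-mode allocation when gzalloc is enabled,
 * the zone's element size falls within [gzalloc_min, gzalloc_max] or the
 * zone was explicitly targeted by name, and the zone has not opted out
 * via the gzalloc_exempt flag.
 */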
static inline boolean_t
gzalloc_tracked(zone_t z)
{
	return gzalloc_mode &&
	       (((z->elem_size >= gzalloc_min) && (z->elem_size <= gzalloc_max)) || (z == gztrackzone)) &&
	       (z->gzalloc_exempt == 0);
}

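/* Per-zone setup: record whether this zone is the named target and, for
 * tracked zones, allocate the free element cache array either from the
 * early gzalloc_reserve (before kmem is ready) or from the kernel map.
 */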
void
gzalloc_zone_init(zone_t z)
{
	if (gzalloc_mode) {
		bzero(&z->gz, sizeof(z->gz));

		if (track_this_zone(z->zone_name, gznamedzone)) {
			gztrackzone = z;
		}

		if (gzfc_size &&
		    gzalloc_tracked(z)) {
			vm_size_t gzfcsz = round_page(sizeof(*z->gz.gzfc) * gzfc_size);

			/* If the VM/kmem systems aren't yet configured, carve
			 * out the free element cache structure directly from the
			 * gzalloc_reserve supplied by the pmap layer.
			 */
			if (!kmem_ready) {
				if (gzalloc_reserve_size < gzfcsz) {
					panic("gzalloc reserve exhausted");
				}

				z->gz.gzfc = (vm_offset_t *)gzalloc_reserve;
				gzalloc_reserve += gzfcsz;
				gzalloc_reserve_size -= gzfcsz;
			} else {
				kern_return_t kr;

				if ((kr = kernel_memory_allocate(kernel_map, (vm_offset_t *)&z->gz.gzfc, gzfcsz, 0, KMA_KOBJECT, VM_KERN_MEMORY_OSFMK)) != KERN_SUCCESS) {
					panic("zinit/gzalloc: kernel_memory_allocate failed (%d) for 0x%lx bytes", kr, (unsigned long) gzfcsz);
				}
			}
			bzero((void *)z->gz.gzfc, gzfcsz);
		}
	}
}

/* Called by zdestroy() to dump the free cache elements so the zone count can drop to zero. */
void
gzalloc_empty_free_cache(zone_t zone)
{
	if (__improbable(gzalloc_tracked(zone))) {
		kern_return_t kr;
		int freed_elements = 0;
		vm_offset_t free_addr = 0;
		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t gzfcsz = round_page(sizeof(*zone->gz.gzfc) * gzfc_size);
		vm_offset_t gzfc_copy;

		kr = kmem_alloc(kernel_map, &gzfc_copy, gzfcsz, VM_KERN_MEMORY_OSFMK);
		if (kr != KERN_SUCCESS) {
			panic("gzalloc_empty_free_cache: kmem_alloc: 0x%x", kr);
		}

		/* Reset gzalloc_data. */
		lock_zone(zone);
		memcpy((void *)gzfc_copy, (void *)zone->gz.gzfc, gzfcsz);
		bzero((void *)zone->gz.gzfc, gzfcsz);
		zone->gz.gzfc_index = 0;
		unlock_zone(zone);

		/* Free up all the cached elements. */
		for (uint32_t index = 0; index < gzfc_size; index++) {
			free_addr = ((vm_offset_t *)gzfc_copy)[index];
			if (free_addr && free_addr >= gzalloc_map_min && free_addr < gzalloc_map_max) {
				kr = vm_map_remove(
					gzalloc_map,
					free_addr,
					free_addr + rounded_size + (1 * PAGE_SIZE),
					VM_MAP_REMOVE_KUNWIRE);
				if (kr != KERN_SUCCESS) {
					panic("gzalloc_empty_free_cache: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);
				}
				OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
				OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted);

				freed_elements++;
			}
		}
		/*
		 * TODO: Consider freeing up zone->gz.gzfc as well if it didn't come from the gzalloc_reserve pool.
		 * For now we're reusing this buffer across zdestroy's. We would have to allocate it again on a
		 * subsequent zinit() as well.
		 */

		/* Decrement zone counters. */
		lock_zone(zone);
		zone->count -= freed_elements;
		zone->cur_size -= (freed_elements * rounded_size);
		unlock_zone(zone);

		kmem_free(kernel_map, gzfc_copy, gzfcsz);
	}
}

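/* Parse the gzalloc boot-args, enable guard mode if any targeting option
 * was supplied (the DEBUG kernel enables it by default), and steal an
 * early reserve from the pmap layer for allocations made before kmem is
 * ready.
 */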
void
gzalloc_configure(void)
{
	char temp_buf[16];

	if (PE_parse_boot_argn("-gzalloc_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_mode = TRUE;
		gzalloc_min = GZALLOC_MIN_DEFAULT;
		gzalloc_max = ~0U;
	}

	if (PE_parse_boot_argn("gzalloc_min", &gzalloc_min, sizeof(gzalloc_min))) {
		gzalloc_mode = TRUE;
		gzalloc_max = ~0U;
	}

	if (PE_parse_boot_argn("gzalloc_max", &gzalloc_max, sizeof(gzalloc_max))) {
		gzalloc_mode = TRUE;
		if (gzalloc_min == ~0U) {
			gzalloc_min = 0;
		}
	}

	if (PE_parse_boot_argn("gzalloc_size", &gzalloc_size, sizeof(gzalloc_size))) {
		gzalloc_min = gzalloc_max = gzalloc_size;
		gzalloc_mode = TRUE;
	}

	(void)PE_parse_boot_argn("gzalloc_fc_size", &gzfc_size, sizeof(gzfc_size));

	if (PE_parse_boot_argn("-gzalloc_wp", temp_buf, sizeof(temp_buf))) {
		gzalloc_prot = VM_PROT_READ;
	}

	if (PE_parse_boot_argn("-gzalloc_uf_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_uf_mode = TRUE;
		gzalloc_guard = KMA_GUARD_FIRST;
	}

	if (PE_parse_boot_argn("-gzalloc_no_dfree_check", temp_buf, sizeof(temp_buf))) {
		gzalloc_dfree_check = FALSE;
	}

	(void) PE_parse_boot_argn("gzalloc_zscale", &gzalloc_zonemap_scale, sizeof(gzalloc_zonemap_scale));

	if (PE_parse_boot_argn("-gzalloc_noconsistency", temp_buf, sizeof(temp_buf))) {
		gzalloc_consistency_checks = FALSE;
	}

	if (PE_parse_boot_argn("gzname", gznamedzone, sizeof(gznamedzone))) {
		gzalloc_mode = TRUE;
	}
#if DEBUG
	if (gzalloc_mode == FALSE) {
		gzalloc_min = 1024;
		gzalloc_max = 1024;
		strlcpy(gznamedzone, "pmap", sizeof(gznamedzone));
		gzalloc_prot = VM_PROT_READ;
		gzalloc_mode = TRUE;
	}
#endif
	if (PE_parse_boot_argn("-nogzalloc_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_mode = FALSE;
	}

	if (gzalloc_mode) {
		gzalloc_reserve_size = GZALLOC_RESERVE_SIZE_DEFAULT;
		gzalloc_reserve = (vm_offset_t) pmap_steal_memory(gzalloc_reserve_size);
	}
}

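/* Carve out the dedicated gzalloc submap, sized as a multiple
 * (gzalloc_zonemap_scale) of the zone map, from which guarded
 * allocations and their guard pages are satisfied.
 */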
void
gzalloc_init(vm_size_t max_zonemap_size)
{
	kern_return_t retval;

	if (gzalloc_mode) {
		vm_map_kernel_flags_t vmk_flags;

		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_permanent = TRUE;
		retval = kmem_suballoc(kernel_map, &gzalloc_map_min, (max_zonemap_size * gzalloc_zonemap_scale),
		    FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE,
		    &gzalloc_map);

		if (retval != KERN_SUCCESS) {
			panic("zone_init: kmem_suballoc(gzalloc_map, 0x%lx, %u) failed", max_zonemap_size, gzalloc_zonemap_scale);
		}
		gzalloc_map_max = gzalloc_map_min + (max_zonemap_size * gzalloc_zonemap_scale);
	}
}

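/* Allocation path for tracked zones: each element gets its own
 * page-multiple range (element plus header) with an adjoining guard page
 * (except for early boot allocations from the reserve), is filled with a
 * pattern to expose use of uninitialized data, and is tagged with a
 * gzhdr_t recording its origin.
 */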
vm_offset_t
gzalloc_alloc(zone_t zone, boolean_t canblock)
{
	vm_offset_t addr = 0;

	if (__improbable(gzalloc_tracked(zone))) {
		if (get_preemption_level() != 0) {
			if (canblock == TRUE) {
				pdzalloc_count++;
			} else {
				return 0;
			}
		}

		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t residue = rounded_size - zone->elem_size;
		vm_offset_t gzaddr = 0;
		gzhdr_t *gzh, *gzhcopy = NULL;

		if (!kmem_ready || (vm_page_zone == ZONE_NULL)) {
			/* Early allocations are supplied directly from the
			 * reserve.
			 */
			if (gzalloc_reserve_size < (rounded_size + PAGE_SIZE)) {
				panic("gzalloc reserve exhausted");
			}
			gzaddr = gzalloc_reserve;
			/* No guard page for these early allocations, just
			 * waste an additional page.
			 */
			gzalloc_reserve += rounded_size + PAGE_SIZE;
			gzalloc_reserve_size -= rounded_size + PAGE_SIZE;
			OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_alloc);
		} else {
			kern_return_t kr = kernel_memory_allocate(gzalloc_map,
			    &gzaddr, rounded_size + (1 * PAGE_SIZE),
			    0, KMA_KOBJECT | KMA_ATOMIC | gzalloc_guard,
			    VM_KERN_MEMORY_OSFMK);
			if (kr != KERN_SUCCESS) {
				panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d", (uint64_t)rounded_size, kr);
			}
		}

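		/* Lay out the element within the allocated range. In the default
		 * overflow mode the element abuts the trailing guard page, with
		 * its header immediately in front of it; in underflow mode the
		 * guard page precedes the range, the element starts at the top
		 * of its page, and the header follows the element.
		 */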
		if (gzalloc_uf_mode) {
			gzaddr += PAGE_SIZE;
			/* The "header" becomes a "footer" in underflow
			 * mode.
			 */
			gzh = (gzhdr_t *) (gzaddr + zone->elem_size);
			addr = gzaddr;
			gzhcopy = (gzhdr_t *) (gzaddr + rounded_size - sizeof(gzhdr_t));
		} else {
			gzh = (gzhdr_t *) (gzaddr + residue - GZHEADER_SIZE);
			addr = (gzaddr + residue);
		}

		/* Fill with a pattern on allocation to trap uninitialized
		 * data use. Since the element size may be "rounded up"
		 * by higher layers such as the kalloc layer, this may
		 * also identify overruns between the originally requested
		 * size and the rounded size via visual inspection.
		 * TBD: plumb through the originally requested size,
		 * prior to rounding by kalloc/IOMalloc etc.
		 * We also add a signature and the zone of origin in a header
		 * prefixed to the allocation.
		 */
		memset((void *)gzaddr, gzalloc_fill_pattern, rounded_size);

		gzh->gzone = (kmem_ready && vm_page_zone) ? zone : GZDEADZONE;
		gzh->gzsize = (uint32_t) zone->elem_size;
		gzh->gzsig = GZALLOC_SIGNATURE;

		/* In underflow detection mode, stash away a copy of the
		 * metadata at the edge of the allocated range, for
		 * retrieval by gzalloc_element_size()
		 */
		if (gzhcopy) {
			*gzhcopy = *gzh;
		}

		lock_zone(zone);
		assert(zone->zone_valid);
		zone->count++;
		zone->sum_count++;
		zone->cur_size += rounded_size;
		unlock_zone(zone);

		OSAddAtomic64((SInt32) rounded_size, &gzalloc_allocated);
		OSAddAtomic64((SInt32) (rounded_size - zone->elem_size), &gzalloc_wasted);
	}
	return addr;
}

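/* Free path for tracked zones: validate the header and fill pattern,
 * optionally check the free VA cache for double frees, then place the
 * element in the protected free VA cache (write-protected or unmapped)
 * and release the least-recently-freed entry it displaces; with the
 * cache disabled the element is released immediately.
 */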
boolean_t
gzalloc_free(zone_t zone, void *addr)
{
	boolean_t gzfreed = FALSE;
	kern_return_t kr;

	if (__improbable(gzalloc_tracked(zone))) {
		gzhdr_t *gzh;
		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t residue = rounded_size - zone->elem_size;
		vm_offset_t saddr;
		vm_offset_t free_addr = 0;

		if (gzalloc_uf_mode) {
			gzh = (gzhdr_t *)((vm_offset_t)addr + zone->elem_size);
			saddr = (vm_offset_t) addr - PAGE_SIZE;
		} else {
			gzh = (gzhdr_t *)((vm_offset_t)addr - GZHEADER_SIZE);
			saddr = ((vm_offset_t)addr) - residue;
		}

		if ((saddr & PAGE_MASK) != 0) {
			panic("gzalloc_free: invalid address supplied: %p (adjusted: 0x%lx) for zone with element sized 0x%lx\n", addr, saddr, zone->elem_size);
		}

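		/* Scan the zone's free VA cache for the adjusted start address;
		 * a match means the element is still quarantined and this is a
		 * double free.
		 */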
		if (gzfc_size) {
			if (gzalloc_dfree_check) {
				uint32_t gd;

				lock_zone(zone);
				assert(zone->zone_valid);
				for (gd = 0; gd < gzfc_size; gd++) {
					if (zone->gz.gzfc[gd] == saddr) {
						panic("gzalloc: double free detected, freed address: 0x%lx, current free cache index: %d, freed index: %d", saddr, zone->gz.gzfc_index, gd);
					}
				}
				unlock_zone(zone);
			}
		}

		if (gzalloc_consistency_checks) {
			if (gzh->gzsig != GZALLOC_SIGNATURE) {
				panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", addr, GZALLOC_SIGNATURE, gzh->gzsig);
			}

			if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE)) {
				panic("%s: Mismatched zone or under/overflow, current zone: %p, recorded zone: %p, address: %p", __FUNCTION__, zone, gzh->gzone, (void *)addr);
			}
			/* Partially redundant given the zone check, but may flag header corruption */
			if (gzh->gzsize != zone->elem_size) {
				panic("Mismatched zfree or under/overflow for zone %p, recorded size: 0x%x, element size: 0x%x, address: %p\n", zone, gzh->gzsize, (uint32_t) zone->elem_size, (void *)addr);
			}

			char *gzc, *checkstart, *checkend;
			if (gzalloc_uf_mode) {
				checkstart = (char *) ((uintptr_t) gzh + sizeof(gzh));
				checkend = (char *) ((((vm_offset_t)addr) & ~PAGE_MASK) + PAGE_SIZE);
			} else {
				checkstart = (char *) trunc_page_64(addr);
				checkend = (char *)gzh;
			}

			for (gzc = checkstart; gzc < checkend; gzc++) {
				if (*gzc != gzalloc_fill_pattern) {
					panic("GZALLOC: detected over/underflow, byte at %p, element %p, contents 0x%x from 0x%lx byte sized zone (%s) doesn't match fill pattern (%c)", gzc, addr, *gzc, zone->elem_size, zone->zone_name, gzalloc_fill_pattern);
				}
			}
		}

		if (!kmem_ready || gzh->gzone == GZDEADZONE) {
			/* For now, just leak frees of early allocations
			 * performed before kmem is fully configured.
			 * They don't seem to get freed currently;
			 * consider ml_static_mfree in the future.
			 */
			OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_free);
			return TRUE;
		}

		if (get_preemption_level() != 0) {
			pdzfree_count++;
		}

		if (gzfc_size) {
			/* Either write protect or unmap the newly freed
			 * allocation
			 */
			kr = vm_map_protect(
				gzalloc_map,
				saddr,
				saddr + rounded_size + (1 * PAGE_SIZE),
				gzalloc_prot,
				FALSE);
			if (kr != KERN_SUCCESS) {
				panic("%s: vm_map_protect: %p, 0x%x", __FUNCTION__, (void *)saddr, kr);
			}
		} else {
			free_addr = saddr;
		}

		lock_zone(zone);
		assert(zone->zone_valid);

		/* Insert newly freed element into the protected free element
		 * cache, and rotate out the LRU element.
		 */
		if (gzfc_size) {
			if (zone->gz.gzfc_index >= gzfc_size) {
				zone->gz.gzfc_index = 0;
			}
			free_addr = zone->gz.gzfc[zone->gz.gzfc_index];
			zone->gz.gzfc[zone->gz.gzfc_index++] = saddr;
		}

		if (free_addr) {
			zone->count--;
			zone->cur_size -= rounded_size;
		}

		unlock_zone(zone);

		if (free_addr) {
			// TODO: consider using physical reads to check for
			// corruption while on the protected freelist
			// (i.e. physical corruption)
			kr = vm_map_remove(
				gzalloc_map,
				free_addr,
				free_addr + rounded_size + (1 * PAGE_SIZE),
				VM_MAP_REMOVE_KUNWIRE);
			if (kr != KERN_SUCCESS) {
				panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);
			}
			// TODO: sysctl-ize for quick reference
			OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
			OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted);
		}

		gzfreed = TRUE;
	}
	return gzfreed;
}

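/* Given an address presumed to lie in the gzalloc submap, locate the
 * gzalloc header for the containing element and report the element size
 * and owning zone. Returns FALSE if the address is not gzalloc-managed.
 */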
boolean_t
gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz)
{
	uintptr_t a = (uintptr_t)gzaddr;
	if (__improbable(gzalloc_mode && (a >= gzalloc_map_min) && (a < gzalloc_map_max))) {
		gzhdr_t *gzh;
		boolean_t vmef;
		vm_map_entry_t gzvme = NULL;
		vm_map_lock_read(gzalloc_map);
		vmef = vm_map_lookup_entry(gzalloc_map, (vm_map_offset_t)a, &gzvme);
		vm_map_unlock(gzalloc_map);
		if (vmef == FALSE) {
			panic("GZALLOC: unable to locate map entry for %p\n", (void *)a);
		}
		assertf(gzvme->vme_atomic != 0, "GZALLOC: VM map entry inconsistency, vme: %p, start: %llu end: %llu", gzvme, gzvme->vme_start, gzvme->vme_end);

		/* Locate the gzalloc metadata adjoining the element */
		if (gzalloc_uf_mode == TRUE) {
			/* In underflow detection mode, locate the map entry describing
			 * the element, and then locate the copy of the gzalloc
			 * header at the trailing edge of the range.
			 */
			gzh = (gzhdr_t *)(gzvme->vme_end - GZHEADER_SIZE);
		} else {
			/* In overflow detection mode, scan forward from
			 * the base of the map entry to locate the
			 * gzalloc header.
			 */
			uint32_t *p = (uint32_t*) gzvme->vme_start;
			while (p < (uint32_t *) gzvme->vme_end) {
				if (*p == GZALLOC_SIGNATURE) {
					break;
				} else {
					p++;
				}
			}
			if (p >= (uint32_t *) gzvme->vme_end) {
				panic("GZALLOC signature missing addr %p, zone %p", gzaddr, z);
			}
			p++;
			uintptr_t q = (uintptr_t) p;
			gzh = (gzhdr_t *) (q - sizeof(gzhdr_t));
		}

		if (gzh->gzsig != GZALLOC_SIGNATURE) {
			panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", (void *)a, GZALLOC_SIGNATURE, gzh->gzsig);
		}

		*gzsz = gzh->gzone->elem_size;
		if (__improbable((gzalloc_tracked(gzh->gzone)) == FALSE)) {
			panic("GZALLOC: zone mismatch (%p)\n", gzh->gzone);
		}

		if (z) {
			*z = gzh->gzone;
		}
		return TRUE;
	} else {
		return FALSE;
	}
}