/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: kern/gzalloc.c
 *
 * "Guard mode" zone allocator, used to trap use-after-free errors,
 * overruns, underruns, mismatched allocations/frees, uninitialized
 * zone element use, timing dependent races etc.
 *
 * The allocator is configured by these boot-args:
 * gzalloc_size=<size>: target all zones with elements of <size> bytes
 * gzalloc_min=<size>: target zones with elements >= size
 * gzalloc_max=<size>: target zones with elements <= size
 * gzalloc_min/max can be specified in conjunction to target a range of
 * sizes
 * gzalloc_fc_size=<size>: number of zone elements (effectively page
 * multiple sized) to retain in the free VA cache. This cache is evicted
 * (backing pages and VA released) in a least-recently-freed fashion.
 * Larger free VA caches allow for a longer window of opportunity to trap
 * delayed use-after-free operations, but use more memory.
 * -gzalloc_wp: Write protect, rather than unmap, freed allocations
 * lingering in the free VA cache. Useful to disambiguate between
 * read-after-frees/read overruns and writes. Also permits direct inspection
 * of the freed element in the cache via the kernel debugger. As each
 * element has a "header" (trailer in underflow detection mode), the zone
 * of origin of the element can be easily determined in this mode.
 * -gzalloc_uf_mode: Underflow detection mode, where the guard page
 * adjoining each element is placed *before* the element page rather than
 * after. The element is also located at the top of the page, rather than
 * abutting the bottom as with the standard overflow detection mode.
 * -gzalloc_noconsistency: disable consistency checks that flag mismatched
 * frees, corruptions of the header/trailer signatures etc.
 * -nogzalloc_mode: Disables the guard mode allocator. The DEBUG kernel
 * enables the guard allocator for zones sized 1K (if present) by
 * default; this option can disable that behaviour.
 * gzname=<name>: target a zone by name. Can be coupled with size-based
 * targeting. Naming conventions match those of the zlog boot-arg, i.e.
 * "a period in the logname will match a space in the zone name"
 * -gzalloc_no_dfree_check: eliminate double free checks
 * gzalloc_zscale=<value>: specify size multiplier for the dedicated gzalloc submap
 */
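
/*
 * Example (illustrative only; the sizes below are arbitrary values, not
 * recommendations): booting with
 *   gzalloc_size=1024 gzalloc_fc_size=4096 -gzalloc_wp
 * would target zones with 1024-byte elements, keep the most recent 4096
 * freed elements write-protected in the free VA cache, and trap
 * writes-after-free while still allowing reads for debugger inspection.
 */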
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <mach/vm_map.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/zalloc_internal.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <pexpert/pexpert.h>

#include <machine/machparam.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>
boolean_t gzalloc_mode = FALSE;
uint32_t pdzalloc_count, pdzfree_count;

#define GZALLOC_MIN_DEFAULT (1024)
#define GZDEADZONE ((zone_t) 0xDEAD201E)
#define GZALLOC_SIGNATURE (0xABADCAFE)
#define GZALLOC_RESERVE_SIZE_DEFAULT (2 * 1024 * 1024)
#define GZFC_DEFAULT_SIZE (1536)

char gzalloc_fill_pattern = 0x67; /* 'g' */

uint32_t gzalloc_min = ~0U;
uint32_t gzalloc_max = 0;
uint32_t gzalloc_size = 0;
uint64_t gzalloc_allocated, gzalloc_freed, gzalloc_early_alloc, gzalloc_early_free, gzalloc_wasted;
boolean_t gzalloc_uf_mode = FALSE, gzalloc_consistency_checks = TRUE, gzalloc_dfree_check = TRUE;
vm_prot_t gzalloc_prot = VM_PROT_NONE;
uint32_t gzalloc_guard = KMA_GUARD_LAST;
uint32_t gzfc_size = GZFC_DEFAULT_SIZE;
uint32_t gzalloc_zonemap_scale = 6;

vm_map_t gzalloc_map;
vm_offset_t gzalloc_map_min, gzalloc_map_max;
vm_offset_t gzalloc_reserve;
vm_size_t gzalloc_reserve_size;
typedef struct gzalloc_header {
	zone_t gzone;
	uint32_t gzsize;
	uint32_t gzsig;
} gzhdr_t;

#define GZHEADER_SIZE (sizeof(gzhdr_t))
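
/*
 * Layout sketch (overflow detection mode, the default; illustrative only):
 *
 *   gzaddr                                        addr = gzaddr + residue
 *   |                                             |
 *   v                                             v
 *   [ fill pattern .......... | gzhdr_t header | element ][ guard page ]
 *   |<------------------- rounded_size ----------------->|
 *
 * In -gzalloc_uf_mode the guard page precedes the range instead, the element
 * starts at the base of the first data page, and the header follows the
 * element as a "footer", with a copy stashed at the trailing edge of the
 * range for gzalloc_element_size().
 */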
extern zone_t vm_page_zone;

static zone_t gztrackzone = NULL;
static char gznamedzone[MAX_ZONE_NAME] = "";
boolean_t
gzalloc_enabled(void)
{
	return gzalloc_mode;
}
void
gzalloc_zone_init(zone_t z)
{
	if (gzalloc_mode == 0) {
		return;
	}

	bzero(&z->gz, sizeof(z->gz));

	if (track_this_zone(z->z_name, gznamedzone)) {
		gztrackzone = z;
	}

	if (!z->gzalloc_exempt) {
		z->gzalloc_tracked = (z == gztrackzone) ||
		    ((zone_elem_size(z) >= gzalloc_min) && (zone_elem_size(z) <= gzalloc_max));
	}

	if (gzfc_size && z->gzalloc_tracked) {
		vm_size_t gzfcsz = round_page(sizeof(*z->gz.gzfc) * gzfc_size);
		kern_return_t kr;

		/* If the VM/kmem system aren't yet configured, carve
		 * out the free element cache structure directly from the
		 * gzalloc_reserve supplied by the pmap layer.
		 */
		if (__improbable(startup_phase < STARTUP_SUB_KMEM)) {
			if (gzalloc_reserve_size < gzfcsz) {
				panic("gzalloc reserve exhausted");
			}

			z->gz.gzfc = (vm_offset_t *)gzalloc_reserve;
			gzalloc_reserve += gzfcsz;
			gzalloc_reserve_size -= gzfcsz;
			bzero(z->gz.gzfc, gzfcsz);
		} else {
			kr = kernel_memory_allocate(kernel_map,
			    (vm_offset_t *)&z->gz.gzfc, gzfcsz, 0,
			    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_OSFMK);
			if (kr != KERN_SUCCESS) {
				panic("%s: kernel_memory_allocate failed (%d) for 0x%lx bytes",
				    __func__, kr, (unsigned long)gzfcsz);
			}
		}
	}
}
/* Called by zdestroy() to dump the free cache elements so the zone count can drop to zero. */
void
gzalloc_empty_free_cache(zone_t zone)
{
	kern_return_t kr;
	int freed_elements = 0;
	vm_offset_t free_addr = 0;
	vm_offset_t rounded_size = round_page(zone_elem_size(zone) + GZHEADER_SIZE);
	vm_offset_t gzfcsz = round_page(sizeof(*zone->gz.gzfc) * gzfc_size);
	vm_offset_t gzfc_copy;

	assert(zone->gzalloc_tracked); // the caller is responsible for checking

	kr = kmem_alloc(kernel_map, &gzfc_copy, gzfcsz, VM_KERN_MEMORY_OSFMK);
	if (kr != KERN_SUCCESS) {
		panic("gzalloc_empty_free_cache: kmem_alloc: 0x%x", kr);
	}

	/* Reset gzalloc_data. */
	memcpy((void *)gzfc_copy, (void *)zone->gz.gzfc, gzfcsz);
	bzero((void *)zone->gz.gzfc, gzfcsz);
	zone->gz.gzfc_index = 0;

	/* Free up all the cached elements. */
	for (uint32_t index = 0; index < gzfc_size; index++) {
		free_addr = ((vm_offset_t *)gzfc_copy)[index];
		if (free_addr && free_addr >= gzalloc_map_min && free_addr < gzalloc_map_max) {
			kr = vm_map_remove(gzalloc_map, free_addr,
			    free_addr + rounded_size + (1 * PAGE_SIZE),
			    VM_MAP_REMOVE_KUNWIRE);
			if (kr != KERN_SUCCESS) {
				panic("gzalloc_empty_free_cache: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);
			}
			OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
			OSAddAtomic64(-((SInt32) (rounded_size - zone_elem_size(zone))), &gzalloc_wasted);

			freed_elements++;
		}
	}
	/*
	 * TODO: Consider freeing up zone->gz.gzfc as well if it didn't come from the gzalloc_reserve pool.
	 * For now we're reusing this buffer across zdestroy's. We would have to allocate it again on a
	 * subsequent zinit() as well.
	 */

	/* Decrement zone counters. */
	zone->countfree += freed_elements;
	zone->page_count -= freed_elements;

	kmem_free(kernel_map, gzfc_copy, gzfcsz);
}
__startup_func
static void
gzalloc_configure(void)
{
	char temp_buf[16];

	if (PE_parse_boot_argn("-gzalloc_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_mode = TRUE;
		gzalloc_min = GZALLOC_MIN_DEFAULT;
		gzalloc_max = ~0U;
	}

	if (PE_parse_boot_argn("gzalloc_min", &gzalloc_min, sizeof(gzalloc_min))) {
		gzalloc_mode = TRUE;
		gzalloc_max = ~0U;
	}

	if (PE_parse_boot_argn("gzalloc_max", &gzalloc_max, sizeof(gzalloc_max))) {
		gzalloc_mode = TRUE;
		if (gzalloc_min == ~0U) {
			gzalloc_min = 0;
		}
	}

	if (PE_parse_boot_argn("gzalloc_size", &gzalloc_size, sizeof(gzalloc_size))) {
		gzalloc_min = gzalloc_max = gzalloc_size;
		gzalloc_mode = TRUE;
	}

	(void)PE_parse_boot_argn("gzalloc_fc_size", &gzfc_size, sizeof(gzfc_size));

	if (PE_parse_boot_argn("-gzalloc_wp", temp_buf, sizeof(temp_buf))) {
		gzalloc_prot = VM_PROT_READ;
	}

	if (PE_parse_boot_argn("-gzalloc_uf_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_uf_mode = TRUE;
		gzalloc_guard = KMA_GUARD_FIRST;
	}

	if (PE_parse_boot_argn("-gzalloc_no_dfree_check", temp_buf, sizeof(temp_buf))) {
		gzalloc_dfree_check = FALSE;
	}

	(void) PE_parse_boot_argn("gzalloc_zscale", &gzalloc_zonemap_scale, sizeof(gzalloc_zonemap_scale));

	if (PE_parse_boot_argn("-gzalloc_noconsistency", temp_buf, sizeof(temp_buf))) {
		gzalloc_consistency_checks = FALSE;
	}

	if (PE_parse_boot_argn("gzname", gznamedzone, sizeof(gznamedzone))) {
		gzalloc_mode = TRUE;
	}

#if DEBUG
	if (gzalloc_mode == FALSE) {
		gzalloc_min = 1024;
		gzalloc_max = 1024;
		strlcpy(gznamedzone, "pmap", sizeof(gznamedzone));
		gzalloc_prot = VM_PROT_READ;
		gzalloc_mode = TRUE;
	}
#endif

	if (PE_parse_boot_argn("-nogzalloc_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_mode = FALSE;
	}

	if (gzalloc_mode) {
		gzalloc_reserve_size = GZALLOC_RESERVE_SIZE_DEFAULT;
		gzalloc_reserve = (vm_offset_t) pmap_steal_memory(gzalloc_reserve_size);
	}
}
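
/*
 * gzalloc_configure() is registered at the PMAP_STEAL startup phase so that
 * pmap_steal_memory() is still available to carve out gzalloc_reserve, which
 * backs allocations made before the VM/kmem subsystems come up.
 */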
STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, gzalloc_configure);
void
gzalloc_init(vm_size_t max_zonemap_size)
{
	kern_return_t retval;

	if (gzalloc_mode) {
		vm_map_kernel_flags_t vmk_flags;

		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_permanent = TRUE;
		retval = kmem_suballoc(kernel_map, &gzalloc_map_min, (max_zonemap_size * gzalloc_zonemap_scale),
		    FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE,
		    &gzalloc_map);

		if (retval != KERN_SUCCESS) {
			panic("zone_init: kmem_suballoc(gzalloc_map, 0x%lx, %u) failed",
			    max_zonemap_size, gzalloc_zonemap_scale);
		}
		gzalloc_map_max = gzalloc_map_min + (max_zonemap_size * gzalloc_zonemap_scale);
	}
}
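
/*
 * Note: with the default gzalloc_zonemap_scale of 6, the dedicated gzalloc
 * submap is six times the zone map size, which absorbs the overhead of
 * rounding every tracked element up to a page multiple plus a guard page.
 * The scale is tunable via the gzalloc_zscale boot-arg parsed above.
 */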
void *
gzalloc_alloc(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
{
	vm_offset_t addr = 0;

	assert(zone->gzalloc_tracked); // the caller is responsible for checking

	if (get_preemption_level() != 0) {
		if (flags & Z_NOWAIT) {
			return NULL;
		}
		pdzalloc_count++;
	}

	bool kmem_ready = (startup_phase >= STARTUP_SUB_KMEM);
	vm_offset_t rounded_size = round_page(zone_elem_size(zone) + GZHEADER_SIZE);
	vm_offset_t residue = rounded_size - zone_elem_size(zone);
	vm_offset_t gzaddr = 0;
	gzhdr_t *gzh, *gzhcopy = NULL;

	if (!kmem_ready || (vm_page_zone == ZONE_NULL)) {
		/* Early allocations are supplied directly from the
		 * reserve.
		 */
		if (gzalloc_reserve_size < (rounded_size + PAGE_SIZE)) {
			panic("gzalloc reserve exhausted");
		}
		gzaddr = gzalloc_reserve;
		/* No guard page for these early allocations, just
		 * waste an additional page.
		 */
		gzalloc_reserve += rounded_size + PAGE_SIZE;
		gzalloc_reserve_size -= rounded_size + PAGE_SIZE;
		OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_alloc);
	} else {
		kern_return_t kr = kernel_memory_allocate(gzalloc_map,
		    &gzaddr, rounded_size + (1 * PAGE_SIZE),
		    0, KMA_KOBJECT | KMA_ATOMIC | gzalloc_guard,
		    VM_KERN_MEMORY_OSFMK);
		if (kr != KERN_SUCCESS) {
			panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d",
			    (uint64_t)rounded_size, kr);
		}
	}

	if (gzalloc_uf_mode) {
		addr = gzaddr;
		/* The "header" becomes a "footer" in underflow
		 * detection mode.
		 */
		gzh = (gzhdr_t *) (gzaddr + zone_elem_size(zone));
		gzhcopy = (gzhdr_t *) (gzaddr + rounded_size - sizeof(gzhdr_t));
	} else {
		gzh = (gzhdr_t *) (gzaddr + residue - GZHEADER_SIZE);
		addr = (gzaddr + residue);
	}

	if (zone->zfree_clear_mem) {
		bzero((void *)gzaddr, rounded_size);
	} else {
		/* Fill with a pattern on allocation to trap uninitialized
		 * data use. Since the element size may be "rounded up"
		 * by higher layers such as the kalloc layer, this may
		 * also identify overruns between the originally requested
		 * size and the rounded size via visual inspection.
		 * TBD: plumb through the originally requested size,
		 * prior to rounding by kalloc/IOMalloc etc.
		 * We also add a signature and the zone of origin in a header
		 * prefixed to the allocation.
		 */
		memset((void *)gzaddr, gzalloc_fill_pattern, rounded_size);
	}

	gzh->gzone = (kmem_ready && vm_page_zone) ? zone : GZDEADZONE;
	gzh->gzsize = (uint32_t)zone_elem_size(zone);
	gzh->gzsig = GZALLOC_SIGNATURE;

	/* In underflow detection mode, stash away a copy of the
	 * metadata at the edge of the allocated range, for
	 * retrieval by gzalloc_element_size()
	 */
	if (gzhcopy) {
		*gzhcopy = *gzh;
	}

	assert(zone->z_self == zone);
	zone->page_count += 1;
	zpercpu_get(zstats)->zs_mem_allocated += rounded_size;
#if ZALLOC_DETAILED_STATS
	zpercpu_get(zstats)->zs_mem_wasted += rounded_size - zone_elem_size(zone);
#endif /* ZALLOC_DETAILED_STATS */

	OSAddAtomic64((SInt32) rounded_size, &gzalloc_allocated);
	OSAddAtomic64((SInt32) (rounded_size - zone_elem_size(zone)), &gzalloc_wasted);

	return (void *)addr;
}
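
/*
 * Worked example for the arithmetic above (illustrative assumptions: 4K
 * pages and an LP64 kernel, so GZHEADER_SIZE == 16): for a 1024-byte
 * element, rounded_size = round_page(1024 + 16) = 4096 and residue = 3072,
 * so the caller gets gzaddr + 3072, the header sits at gzaddr + 3056, and
 * the element's last byte abuts the trailing guard page.
 */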
void
gzalloc_free(zone_t zone, zone_stats_t zstats, void *addr)
{
	kern_return_t kr;

	assert(zone->gzalloc_tracked); // the caller is responsible for checking

	gzhdr_t *gzh;
	vm_offset_t rounded_size = round_page(zone_elem_size(zone) + GZHEADER_SIZE);
	vm_offset_t residue = rounded_size - zone_elem_size(zone);
	vm_offset_t saddr;
	vm_offset_t free_addr = 0;

	if (gzalloc_uf_mode) {
		gzh = (gzhdr_t *)((vm_offset_t)addr + zone_elem_size(zone));
		saddr = (vm_offset_t) addr - PAGE_SIZE;
	} else {
		gzh = (gzhdr_t *)((vm_offset_t)addr - GZHEADER_SIZE);
		saddr = ((vm_offset_t)addr) - residue;
	}

	if ((saddr & PAGE_MASK) != 0) {
		panic("%s: invalid address supplied: "
		    "%p (adjusted: 0x%lx) for zone with element sized 0x%lx\n",
		    __func__, addr, saddr, zone_elem_size(zone));
	}

	if (gzfc_size && gzalloc_dfree_check) {
		assert(zone->z_self == zone);
		for (uint32_t gd = 0; gd < gzfc_size; gd++) {
			if (zone->gz.gzfc[gd] != saddr) {
				continue;
			}
			panic("%s: double free detected, freed address: 0x%lx, "
			    "current free cache index: %d, freed index: %d",
			    __func__, saddr, zone->gz.gzfc_index, gd);
		}
	}

	if (gzalloc_consistency_checks) {
		if (gzh->gzsig != GZALLOC_SIGNATURE) {
			panic("GZALLOC signature mismatch for element %p, "
			    "expected 0x%x, found 0x%x",
			    addr, GZALLOC_SIGNATURE, gzh->gzsig);
		}

		if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE)) {
			panic("%s: Mismatched zone or under/overflow, "
			    "current zone: %p, recorded zone: %p, address: %p",
			    __func__, zone, gzh->gzone, (void *)addr);
		}
		/* Partially redundant given the zone check, but may flag header corruption */
		if (gzh->gzsize != zone_elem_size(zone)) {
			panic("Mismatched zfree or under/overflow for zone %p, "
			    "recorded size: 0x%x, element size: 0x%x, address: %p",
			    zone, gzh->gzsize, (uint32_t)zone_elem_size(zone), (void *)addr);
		}

		char *gzc, *checkstart, *checkend;
		if (gzalloc_uf_mode) {
			checkstart = (char *) ((uintptr_t) gzh + sizeof(gzh));
			checkend = (char *) ((((vm_offset_t)addr) & ~PAGE_MASK) + PAGE_SIZE);
		} else {
			checkstart = (char *) trunc_page_64(addr);
			checkend = (char *)gzh;
		}

		for (gzc = checkstart; gzc < checkend; gzc++) {
			if (*gzc == gzalloc_fill_pattern) {
				continue;
			}
			panic("%s: detected over/underflow, byte at %p, element %p, "
			    "contents 0x%x from 0x%lx byte sized zone (%s%s) "
			    "doesn't match fill pattern (%c)",
			    __func__, gzc, addr, *gzc, zone_elem_size(zone),
			    zone_heap_name(zone), zone->z_name, gzalloc_fill_pattern);
		}
	}

	if ((startup_phase < STARTUP_SUB_KMEM) || gzh->gzone == GZDEADZONE) {
		/* For now, just leak frees of early allocations
		 * performed before kmem is fully configured.
		 * They don't seem to get freed currently;
		 * consider ml_static_mfree in the future.
		 */
		OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_free);
		return;
	}

	if (get_preemption_level() != 0) {
		pdzfree_count++;
	}

	if (gzfc_size) {
		/* Either write protect or unmap the newly freed
		 * allocations.
		 */
		kr = vm_map_protect(gzalloc_map, saddr,
		    saddr + rounded_size + (1 * PAGE_SIZE),
		    gzalloc_prot, FALSE);
		if (kr != KERN_SUCCESS) {
			panic("%s: vm_map_protect: %p, 0x%x", __func__, (void *)saddr, kr);
		}
	} else {
		free_addr = saddr;
	}

	assert(zone->z_self == zone);

	/* Insert newly freed element into the protected free element
	 * cache, and rotate out the LRU element.
	 */
	if (gzfc_size) {
		if (zone->gz.gzfc_index >= gzfc_size) {
			zone->gz.gzfc_index = 0;
		}
		free_addr = zone->gz.gzfc[zone->gz.gzfc_index];
		zone->gz.gzfc[zone->gz.gzfc_index++] = saddr;
	}

	if (free_addr) {
		zone->page_count -= 1;
	}

	zpercpu_get(zstats)->zs_mem_freed += rounded_size;

	if (free_addr) {
		// TODO: consider using physical reads to check for
		// corruption while on the protected freelist
		// (i.e. physical corruption)
		kr = vm_map_remove(gzalloc_map, free_addr,
		    free_addr + rounded_size + (1 * PAGE_SIZE),
		    VM_MAP_REMOVE_KUNWIRE);
		if (kr != KERN_SUCCESS) {
			panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);
		}
		// TODO: sysctl-ize for quick reference
		OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
		OSAddAtomic64(-((SInt32) (rounded_size - zone_elem_size(zone))),
		    &gzalloc_wasted);
	}
}
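
/*
 * Note on the detection window (assuming the default gzfc_size of 1536):
 * a freed element's VA remains unmapped (or write-protected under
 * -gzalloc_wp) until roughly 1536 subsequent frees to the same zone rotate
 * it out of the cache; after the VA is released, a delayed use-after-free
 * of that element may no longer be caught.
 */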
boolean_t
gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz)
{
	uintptr_t a = (uintptr_t)gzaddr;
	if (__improbable(gzalloc_mode && (a >= gzalloc_map_min) && (a < gzalloc_map_max))) {
		gzhdr_t *gzh;
		boolean_t vmef;
		vm_map_entry_t gzvme = NULL;
		vm_map_lock_read(gzalloc_map);
		vmef = vm_map_lookup_entry(gzalloc_map, (vm_map_offset_t)a, &gzvme);
		vm_map_unlock(gzalloc_map);
		if (vmef == FALSE) {
			panic("GZALLOC: unable to locate map entry for %p\n", (void *)a);
		}
		assertf(gzvme->vme_atomic != 0, "GZALLOC: VM map entry inconsistency, "
		    "vme: %p, start: %llu end: %llu", gzvme, gzvme->vme_start, gzvme->vme_end);

		/* Locate the gzalloc metadata adjoining the element */
		if (gzalloc_uf_mode == TRUE) {
			/* In underflow detection mode, locate the map entry describing
			 * the element, and then locate the copy of the gzalloc
			 * header at the trailing edge of the range.
			 */
			gzh = (gzhdr_t *)(gzvme->vme_end - GZHEADER_SIZE);
		} else {
			/* In overflow detection mode, scan forward from
			 * the base of the map entry to locate the
			 * gzalloc header.
			 */
			uint32_t *p = (uint32_t*) gzvme->vme_start;
			while (p < (uint32_t *) gzvme->vme_end) {
				if (*p == GZALLOC_SIGNATURE) {
					break;
				}
				p++;
			}
			if (p >= (uint32_t *) gzvme->vme_end) {
				panic("GZALLOC signature missing addr %p, zone %p", gzaddr, z);
			}
			p++;
			uintptr_t q = (uintptr_t) p;
			gzh = (gzhdr_t *) (q - sizeof(gzhdr_t));
		}

		if (gzh->gzsig != GZALLOC_SIGNATURE) {
			panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x",
			    (void *)a, GZALLOC_SIGNATURE, gzh->gzsig);
		}

		*gzsz = zone_elem_size(gzh->gzone);
		if (__improbable(!gzh->gzone->gzalloc_tracked)) {
			panic("GZALLOC: zone mismatch (%p)\n", gzh->gzone);
		}

		if (z) {
			*z = gzh->gzone;
		}
		return TRUE;
	}
	return FALSE;
}