/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: kern/gzalloc.c
 *
 * "Guard mode" zone allocator, used to trap use-after-free errors,
 * overruns, underruns, mismatched allocations/frees, uninitialized
 * zone element use, timing dependent races etc.
 *
 * The allocator is configured by these boot-args:
 * gzalloc_size=<size>: target all zones with elements of <size> bytes
 * gzalloc_min=<size>: target zones with elements >= size
 * gzalloc_max=<size>: target zones with elements <= size
 * gzalloc_min/max can be specified in conjunction to target a range of
 * sizes
 * gzalloc_fc_size=<size>: number of zone elements (effectively page
 * multiple sized) to retain in the free VA cache. This cache is evicted
 * (backing pages and VA released) in a least-recently-freed fashion.
 * Larger free VA caches allow for a longer window of opportunity to trap
 * delayed use-after-free operations, but use more memory.
 * -gzalloc_wp: Write protect, rather than unmap, freed allocations
 * lingering in the free VA cache. Useful to disambiguate between
 * read-after-frees/read overruns and writes. Also permits direct inspection
 * of the freed element in the cache via the kernel debugger. As each
 * element has a "header" (trailer in underflow detection mode), the zone
 * of origin of the element can be easily determined in this mode.
 * -gzalloc_uf_mode: Underflow detection mode, where the guard page
 * adjoining each element is placed *before* the element page rather than
 * after. The element is also located at the top of the page, rather than
 * abutting the bottom as with the standard overflow detection mode.
 * -gzalloc_noconsistency: disable consistency checks that flag mismatched
 * frees, corruptions of the header/trailer signatures etc.
 * -nogzalloc_mode: Disables the guard mode allocator. The DEBUG kernel
 * enables the guard allocator for zones sized 1K (if present) by
 * default; this option can disable that behaviour.
 * gzname=<name>: target a zone by name. Can be coupled with size-based
 * targeting. Naming conventions match those of the zlog boot-arg, i.e.
 * "a period in the logname will match a space in the zone name"
 * -gzalloc_no_dfree_check: eliminate double free checks
 * gzalloc_zscale=<value>: specify the size multiplier for the dedicated
 * gzalloc submap
 */
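
/*
 * Illustrative examples only: the values and zone name below are arbitrary,
 * not recommendations, and the layout sketch is an approximation of the
 * allocation logic in gzalloc_alloc() below.
 *
 * Example boot-arg combinations:
 *   gzalloc_size=1024                  guard zones whose elements are exactly 1024 bytes
 *   gzalloc_min=512 gzalloc_max=2048   guard zones with element sizes in [512, 2048]
 *   gzname=kalloc.768 -gzalloc_wp      guard the named zone; write-protect rather than
 *                                      unmap elements lingering in the free VA cache
 *
 * Approximate per-element layout (each allocation is a page-multiple mapping
 * plus one guard page):
 *   default (overflow detection):  [ fill ... | gzhdr_t | element ][ guard page ]
 *   -gzalloc_uf_mode (underflow):  [ guard page ][ element | gzhdr_t | fill ... | gzhdr_t copy ]
 */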
#include <zone_debug.h>

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <mach/vm_map.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <pexpert/pexpert.h>

#include <machine/machparam.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>
extern boolean_t vm_kernel_ready, kmem_ready;
boolean_t gzalloc_mode = FALSE;
uint32_t pdzalloc_count, pdzfree_count;

#define GZALLOC_MIN_DEFAULT (1024)
#define GZDEADZONE ((zone_t) 0xDEAD201E)
#define GZALLOC_SIGNATURE (0xABADCAFE)
#define GZALLOC_RESERVE_SIZE_DEFAULT (2 * 1024 * 1024)
#define GZFC_DEFAULT_SIZE (1536)
char gzalloc_fill_pattern = 0x67; /* 'g' */

uint32_t gzalloc_min = ~0U;
uint32_t gzalloc_max = 0;
uint32_t gzalloc_size = 0;
uint64_t gzalloc_allocated, gzalloc_freed, gzalloc_early_alloc, gzalloc_early_free, gzalloc_wasted;
boolean_t gzalloc_uf_mode = FALSE, gzalloc_consistency_checks = TRUE, gzalloc_dfree_check = TRUE;
vm_prot_t gzalloc_prot = VM_PROT_NONE;
uint32_t gzalloc_guard = KMA_GUARD_LAST;
uint32_t gzfc_size = GZFC_DEFAULT_SIZE;
uint32_t gzalloc_zonemap_scale = 6;

vm_map_t gzalloc_map;
vm_offset_t gzalloc_map_min, gzalloc_map_max;
vm_offset_t gzalloc_reserve;
vm_size_t gzalloc_reserve_size;
typedef struct gzalloc_header {
	zone_t gzone;
	uint32_t gzsize;
	uint32_t gzsig;
} gzhdr_t;

#define GZHEADER_SIZE (sizeof(gzhdr_t))
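
/*
 * Illustrative arithmetic (assuming 4 KiB pages): for a zone with 1024-byte
 * elements, the rounded size used by gzalloc_alloc()/gzalloc_free() is
 * round_page(1024 + GZHEADER_SIZE) == 4096, so each live element consumes
 * one data page plus one guard page of VA in the gzalloc submap.
 */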
extern zone_t vm_page_zone;

static zone_t gztrackzone = NULL;
static char gznamedzone[MAX_ZONE_NAME] = "";
void
gzalloc_reconfigure(__unused zone_t z)
{
	/* Nothing for now */
}

boolean_t
gzalloc_enabled(void)
{
	return gzalloc_mode;
}
static inline boolean_t
gzalloc_tracked(zone_t z)
{
	return gzalloc_mode &&
	       (((z->elem_size >= gzalloc_min) && (z->elem_size <= gzalloc_max)) || (z == gztrackzone)) &&
	       (z->gzalloc_exempt == 0);
}
void
gzalloc_zone_init(zone_t z)
{
	if (gzalloc_mode) {
		bzero(&z->gz, sizeof(z->gz));

		if (track_this_zone(z->zone_name, gznamedzone)) {
			gztrackzone = z;
		}

		if (gzfc_size &&
		    gzalloc_tracked(z)) {
			vm_size_t gzfcsz = round_page(sizeof(*z->gz.gzfc) * gzfc_size);
			kern_return_t kr;

			/* If the VM/kmem system aren't yet configured, carve
			 * out the free element cache structure directly from the
			 * gzalloc_reserve supplied by the pmap layer.
			 */
			if (!kmem_ready) {
				if (gzalloc_reserve_size < gzfcsz) {
					panic("gzalloc reserve exhausted");
				}

				z->gz.gzfc = (vm_offset_t *)gzalloc_reserve;
				gzalloc_reserve += gzfcsz;
				gzalloc_reserve_size -= gzfcsz;
			} else {
				if ((kr = kernel_memory_allocate(kernel_map, (vm_offset_t *)&z->gz.gzfc, gzfcsz, 0, KMA_KOBJECT, VM_KERN_MEMORY_OSFMK)) != KERN_SUCCESS) {
					panic("zinit/gzalloc: kernel_memory_allocate failed (%d) for 0x%lx bytes", kr, (unsigned long)gzfcsz);
				}
			}
			bzero((void *)z->gz.gzfc, gzfcsz);
		}
	}
}
/* Called by zdestroy() to dump the free cache elements so the zone count can drop to zero. */
void
gzalloc_empty_free_cache(zone_t zone)
{
	if (__improbable(gzalloc_tracked(zone))) {
		kern_return_t kr;
		int freed_elements = 0;
		vm_offset_t free_addr = 0;
		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t gzfcsz = round_page(sizeof(*zone->gz.gzfc) * gzfc_size);
		vm_offset_t gzfc_copy;

		/* Snapshot the free cache into a temporary buffer so it can be
		 * walked without holding the zone lock.
		 */
		kr = kmem_alloc(kernel_map, &gzfc_copy, gzfcsz, VM_KERN_MEMORY_OSFMK);
		if (kr != KERN_SUCCESS) {
			panic("gzalloc_empty_free_cache: kmem_alloc: 0x%x", kr);
		}

		/* Reset gzalloc_data. */
		lock_zone(zone);
		memcpy((void *)gzfc_copy, (void *)zone->gz.gzfc, gzfcsz);
		bzero((void *)zone->gz.gzfc, gzfcsz);
		zone->gz.gzfc_index = 0;
		unlock_zone(zone);

		/* Free up all the cached elements. */
		for (uint32_t index = 0; index < gzfc_size; index++) {
			free_addr = ((vm_offset_t *)gzfc_copy)[index];
			if (free_addr && free_addr >= gzalloc_map_min && free_addr < gzalloc_map_max) {
				kr = vm_map_remove(gzalloc_map, free_addr,
				    free_addr + rounded_size + (1 * PAGE_SIZE),
				    VM_MAP_REMOVE_KUNWIRE);
				if (kr != KERN_SUCCESS) {
					panic("gzalloc_empty_free_cache: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);
				}
				OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
				OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted);

				freed_elements++;
			}
		}
		/*
		 * TODO: Consider freeing up zone->gz.gzfc as well if it didn't come from the gzalloc_reserve pool.
		 * For now we're reusing this buffer across zdestroy's. We would have to allocate it again on a
		 * subsequent zinit() as well.
		 */

		/* Decrement zone counters. */
		lock_zone(zone);
		zone->count -= freed_elements;
		zone->cur_size -= (freed_elements * rounded_size);
		unlock_zone(zone);

		kmem_free(kernel_map, gzfc_copy, gzfcsz);
	}
}
void
gzalloc_configure(void)
{
	char temp_buf[16];

	if (PE_parse_boot_argn("-gzalloc_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_mode = TRUE;
		gzalloc_min = GZALLOC_MIN_DEFAULT;
		gzalloc_max = ~0U;
	}

	if (PE_parse_boot_argn("gzalloc_min", &gzalloc_min, sizeof(gzalloc_min))) {
		gzalloc_mode = TRUE;
		gzalloc_max = ~0U;
	}

	if (PE_parse_boot_argn("gzalloc_max", &gzalloc_max, sizeof(gzalloc_max))) {
		gzalloc_mode = TRUE;
		if (gzalloc_min == ~0U) {
			gzalloc_min = 0;
		}
	}

	if (PE_parse_boot_argn("gzalloc_size", &gzalloc_size, sizeof(gzalloc_size))) {
		gzalloc_min = gzalloc_max = gzalloc_size;
		gzalloc_mode = TRUE;
	}

	(void)PE_parse_boot_argn("gzalloc_fc_size", &gzfc_size, sizeof(gzfc_size));

	if (PE_parse_boot_argn("-gzalloc_wp", temp_buf, sizeof(temp_buf))) {
		gzalloc_prot = VM_PROT_READ;
	}

	if (PE_parse_boot_argn("-gzalloc_uf_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_uf_mode = TRUE;
		gzalloc_guard = KMA_GUARD_FIRST;
	}

	if (PE_parse_boot_argn("-gzalloc_no_dfree_check", temp_buf, sizeof(temp_buf))) {
		gzalloc_dfree_check = FALSE;
	}

	(void) PE_parse_boot_argn("gzalloc_zscale", &gzalloc_zonemap_scale, sizeof(gzalloc_zonemap_scale));

	if (PE_parse_boot_argn("-gzalloc_noconsistency", temp_buf, sizeof(temp_buf))) {
		gzalloc_consistency_checks = FALSE;
	}

	if (PE_parse_boot_argn("gzname", gznamedzone, sizeof(gznamedzone))) {
		gzalloc_mode = TRUE;
	}

#if DEBUG
	/* The DEBUG kernel guards 1K-element zones by default (see the
	 * boot-arg description above).
	 */
	if (gzalloc_mode == FALSE) {
		gzalloc_min = 1024;
		gzalloc_max = 1024;
		strlcpy(gznamedzone, "pmap", sizeof(gznamedzone));
		gzalloc_prot = VM_PROT_READ;
		gzalloc_mode = TRUE;
	}
#endif

	if (PE_parse_boot_argn("-nogzalloc_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_mode = FALSE;
	}

	if (gzalloc_mode) {
		gzalloc_reserve_size = GZALLOC_RESERVE_SIZE_DEFAULT;
		gzalloc_reserve = (vm_offset_t) pmap_steal_memory(gzalloc_reserve_size);
	}
}
void
gzalloc_init(vm_size_t max_zonemap_size)
{
	kern_return_t retval;

	if (gzalloc_mode) {
		vm_map_kernel_flags_t vmk_flags;

		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_permanent = TRUE;
		retval = kmem_suballoc(kernel_map, &gzalloc_map_min, (max_zonemap_size * gzalloc_zonemap_scale),
		    FALSE, VM_FLAGS_ANYWHERE, vmk_flags, VM_KERN_MEMORY_ZONE,
		    &gzalloc_map);

		if (retval != KERN_SUCCESS) {
			panic("zone_init: kmem_suballoc(gzalloc_map, 0x%lx, %u) failed", max_zonemap_size, gzalloc_zonemap_scale);
		}
		gzalloc_map_max = gzalloc_map_min + (max_zonemap_size * gzalloc_zonemap_scale);
	}
}
vm_offset_t
gzalloc_alloc(zone_t zone, boolean_t canblock)
{
	vm_offset_t addr = 0;

	if (__improbable(gzalloc_tracked(zone))) {
		if (get_preemption_level() != 0) {
			if (canblock == TRUE) {
				pdzalloc_count++;
			} else {
				return 0;
			}
		}

		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t residue = rounded_size - zone->elem_size;
		vm_offset_t gzaddr = 0;
		gzhdr_t *gzh, *gzhcopy = NULL;

		if (!kmem_ready || (vm_page_zone == ZONE_NULL)) {
			/* Early allocations are supplied directly from the
			 * reserve.
			 */
			if (gzalloc_reserve_size < (rounded_size + PAGE_SIZE)) {
				panic("gzalloc reserve exhausted");
			}
			gzaddr = gzalloc_reserve;
			/* No guard page for these early allocations, just
			 * waste an additional page.
			 */
			gzalloc_reserve += rounded_size + PAGE_SIZE;
			gzalloc_reserve_size -= rounded_size + PAGE_SIZE;
			OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_alloc);
		} else {
			kern_return_t kr = kernel_memory_allocate(gzalloc_map,
			    &gzaddr, rounded_size + (1 * PAGE_SIZE),
			    0, KMA_KOBJECT | KMA_ATOMIC | gzalloc_guard,
			    VM_KERN_MEMORY_OSFMK);
			if (kr != KERN_SUCCESS) {
				panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d", (uint64_t)rounded_size, kr);
			}
		}

		if (gzalloc_uf_mode) {
			gzaddr += PAGE_SIZE;
			/* The "header" becomes a "footer" in underflow
			 * detection mode.
			 */
			gzh = (gzhdr_t *) (gzaddr + zone->elem_size);
			addr = gzaddr;
			gzhcopy = (gzhdr_t *) (gzaddr + rounded_size - sizeof(gzhdr_t));
		} else {
			gzh = (gzhdr_t *) (gzaddr + residue - GZHEADER_SIZE);
			addr = (gzaddr + residue);
		}

		/* Fill with a pattern on allocation to trap uninitialized
		 * data use. Since the element size may be "rounded up"
		 * by higher layers such as the kalloc layer, this may
		 * also identify overruns between the originally requested
		 * size and the rounded size via visual inspection.
		 * TBD: plumb through the originally requested size,
		 * prior to rounding by kalloc/IOMalloc etc.
		 * We also add a signature and the zone of origin in a header
		 * prefixed to the allocation.
		 */
		memset((void *)gzaddr, gzalloc_fill_pattern, rounded_size);

		gzh->gzone = (kmem_ready && vm_page_zone) ? zone : GZDEADZONE;
		gzh->gzsize = (uint32_t) zone->elem_size;
		gzh->gzsig = GZALLOC_SIGNATURE;

		/* In underflow detection mode, stash away a copy of the
		 * metadata at the edge of the allocated range, for
		 * retrieval by gzalloc_element_size()
		 */
		if (gzhcopy) {
			*gzhcopy = *gzh;
		}

		lock_zone(zone);
		assert(zone->zone_valid);
		zone->count++;
		zone->sum_count++;
		zone->cur_size += rounded_size;
		unlock_zone(zone);

		OSAddAtomic64((SInt32) rounded_size, &gzalloc_allocated);
		OSAddAtomic64((SInt32) (rounded_size - zone->elem_size), &gzalloc_wasted);
	}
	return addr;
}
boolean_t
gzalloc_free(zone_t zone, void *addr)
{
	boolean_t gzfreed = FALSE;
	kern_return_t kr;

	if (__improbable(gzalloc_tracked(zone))) {
		gzhdr_t *gzh;
		vm_offset_t rounded_size = round_page(zone->elem_size + GZHEADER_SIZE);
		vm_offset_t residue = rounded_size - zone->elem_size;
		vm_offset_t saddr;
		vm_offset_t free_addr = 0;

		if (gzalloc_uf_mode) {
			gzh = (gzhdr_t *)((vm_offset_t)addr + zone->elem_size);
			saddr = (vm_offset_t) addr - PAGE_SIZE;
		} else {
			gzh = (gzhdr_t *)((vm_offset_t)addr - GZHEADER_SIZE);
			saddr = ((vm_offset_t)addr) - residue;
		}

		if ((saddr & PAGE_MASK) != 0) {
			panic("gzalloc_free: invalid address supplied: %p (adjusted: 0x%lx) for zone with element sized 0x%lx\n", addr, saddr, zone->elem_size);
		}

		if (gzalloc_dfree_check) {
			uint32_t gd;

			lock_zone(zone);
			assert(zone->zone_valid);
			for (gd = 0; gd < gzfc_size; gd++) {
				if (zone->gz.gzfc[gd] == saddr) {
					panic("gzalloc: double free detected, freed address: 0x%lx, current free cache index: %d, freed index: %d", saddr, zone->gz.gzfc_index, gd);
				}
			}
			unlock_zone(zone);
		}

		if (gzalloc_consistency_checks) {
			if (gzh->gzsig != GZALLOC_SIGNATURE) {
				panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", addr, GZALLOC_SIGNATURE, gzh->gzsig);
			}

			if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE)) {
				panic("%s: Mismatched zone or under/overflow, current zone: %p, recorded zone: %p, address: %p", __FUNCTION__, zone, gzh->gzone, (void *)addr);
			}
			/* Partially redundant given the zone check, but may flag header corruption */
			if (gzh->gzsize != zone->elem_size) {
				panic("Mismatched zfree or under/overflow for zone %p, recorded size: 0x%x, element size: 0x%x, address: %p\n", zone, gzh->gzsize, (uint32_t) zone->elem_size, (void *)addr);
			}

			/* Verify the fill pattern between the element and the page boundary is intact. */
			char *gzc, *checkstart, *checkend;
			if (gzalloc_uf_mode) {
				checkstart = (char *) ((uintptr_t) gzh + sizeof(gzh));
				checkend = (char *) ((((vm_offset_t)addr) & ~PAGE_MASK) + PAGE_SIZE);
			} else {
				checkstart = (char *) trunc_page_64(addr);
				checkend = (char *)gzh;
			}

			for (gzc = checkstart; gzc < checkend; gzc++) {
				if (*gzc != gzalloc_fill_pattern) {
					panic("GZALLOC: detected over/underflow, byte at %p, element %p, contents 0x%x from 0x%lx byte sized zone (%s) doesn't match fill pattern (%c)", gzc, addr, *gzc, zone->elem_size, zone->zone_name, gzalloc_fill_pattern);
				}
			}
		}

		if (!kmem_ready || gzh->gzone == GZDEADZONE) {
			/* For now, just leak frees of early allocations
			 * performed before kmem is fully configured.
			 * They don't seem to get freed currently;
			 * consider ml_static_mfree in the future.
			 */
			OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_free);
			return TRUE;
		}

		if (get_preemption_level() != 0) {
			pdzfree_count++;
		}

		if (gzfc_size) {
			/* Either write protect or unmap the newly freed
			 * allocations.
			 */
			kr = vm_map_protect(gzalloc_map, saddr,
			    saddr + rounded_size + (1 * PAGE_SIZE),
			    gzalloc_prot, FALSE);
			if (kr != KERN_SUCCESS) {
				panic("%s: vm_map_protect: %p, 0x%x", __FUNCTION__, (void *)saddr, kr);
			}
		} else {
			free_addr = saddr;
		}

		lock_zone(zone);
		assert(zone->zone_valid);

		/* Insert newly freed element into the protected free element
		 * cache, and rotate out the LRU element.
		 */
		if (gzfc_size) {
			if (zone->gz.gzfc_index >= gzfc_size) {
				zone->gz.gzfc_index = 0;
			}
			free_addr = zone->gz.gzfc[zone->gz.gzfc_index];
			zone->gz.gzfc[zone->gz.gzfc_index++] = saddr;
		}

		if (free_addr) {
			zone->count--;
			zone->cur_size -= rounded_size;
		}

		unlock_zone(zone);

		if (free_addr) {
			// TODO: consider using physical reads to check for
			// corruption while on the protected freelist
			// (i.e. physical corruption)
			kr = vm_map_remove(gzalloc_map, free_addr,
			    free_addr + rounded_size + (1 * PAGE_SIZE),
			    VM_MAP_REMOVE_KUNWIRE);
			if (kr != KERN_SUCCESS) {
				panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);
			}
			// TODO: sysctl-ize for quick reference
			OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
			OSAddAtomic64(-((SInt32) (rounded_size - zone->elem_size)), &gzalloc_wasted);
		}

		gzfreed = TRUE;
	}
	return gzfreed;
}
boolean_t
gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz)
{
	uintptr_t a = (uintptr_t)gzaddr;
	if (__improbable(gzalloc_mode && (a >= gzalloc_map_min) && (a < gzalloc_map_max))) {
		gzhdr_t *gzh;
		boolean_t vmef;
		vm_map_entry_t gzvme = NULL;

		vm_map_lock_read(gzalloc_map);
		vmef = vm_map_lookup_entry(gzalloc_map, (vm_map_offset_t)a, &gzvme);
		vm_map_unlock(gzalloc_map);
		if (vmef == FALSE) {
			panic("GZALLOC: unable to locate map entry for %p\n", (void *)a);
		}
		assertf(gzvme->vme_atomic != 0, "GZALLOC: VM map entry inconsistency, vme: %p, start: %llu end: %llu", gzvme, gzvme->vme_start, gzvme->vme_end);

		/* Locate the gzalloc metadata adjoining the element */
		if (gzalloc_uf_mode == TRUE) {
			/* In underflow detection mode, locate the map entry describing
			 * the element, and then locate the copy of the gzalloc
			 * header at the trailing edge of the range.
			 */
			gzh = (gzhdr_t *)(gzvme->vme_end - GZHEADER_SIZE);
		} else {
			/* In overflow detection mode, scan forward from
			 * the base of the map entry to locate the
			 * gzalloc header signature.
			 */
			uint32_t *p = (uint32_t *) gzvme->vme_start;
			while (p < (uint32_t *) gzvme->vme_end) {
				if (*p == GZALLOC_SIGNATURE) {
					break;
				} else {
					p++;
				}
			}
			if (p >= (uint32_t *) gzvme->vme_end) {
				panic("GZALLOC signature missing addr %p, zone %p", gzaddr, z);
			}
			p++;
			uintptr_t q = (uintptr_t) p;
			gzh = (gzhdr_t *) (q - sizeof(gzhdr_t));
		}

		if (gzh->gzsig != GZALLOC_SIGNATURE) {
			panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x", (void *)a, GZALLOC_SIGNATURE, gzh->gzsig);
		}

		*gzsz = gzh->gzone->elem_size;
		if (__improbable((gzalloc_tracked(gzh->gzone)) == FALSE)) {
			panic("GZALLOC: zone mismatch (%p)\n", gzh->gzone);
		}

		if (z) {
			*z = gzh->gzone;
		}
		return TRUE;
	}
	return FALSE;
}