/*
 * Copyright 2008 Apple Inc. All rights reserved.
 */

#include <mach/mach.h>
#include <err.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/param.h>
#include <mach-o/ldsyms.h>
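/*
 * Exercise the user-visible Mach VM interfaces against the current task:
 * vm_allocate/vm_deallocate, vm_protect, vm_read, vm_read_list,
 * vm_read_overwrite, vm_copy, vm_write, the vm_region family, and
 * mach_make_memory_entry/vm_map.
 */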
int machvm_tests( void * the_argp )
{
	int pagesize = getpagesize();
	int regionsizes[] = { 1, 3, 7, 13, 77, 1223 }; /* sizes must be in increasing order */
	char *regionbuffers[] = { NULL, NULL, NULL, NULL, NULL, NULL };
	int i;
	kern_return_t kret;
	/* Use vm_allocate to grab some memory */
	for (i=0; i < sizeof(regionsizes)/sizeof(regionsizes[0]); i++) {
		vm_address_t addr = 0;

		kret = vm_allocate(mach_task_self(), &addr, regionsizes[i]*pagesize, VM_FLAGS_ANYWHERE);
		if (kret != KERN_SUCCESS) {
			warnx("vm_allocate of %d pages failed: %d", regionsizes[i], kret);
			goto fail;
		}

		regionbuffers[i] = (char *)addr;
	}
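	/*
	 * regionbuffers[] tracks every allocation that is still live, so the
	 * cleanup path at the end of the function can release whatever remains.
	 */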
	/* deallocate one range without having touched it, scribble on another, then deallocate that one */
	kret = vm_deallocate(mach_task_self(), (vm_address_t)regionbuffers[4], regionsizes[4]*pagesize);
	if (kret != KERN_SUCCESS) {
		warnx("vm_deallocate of %d pages failed: %d", regionsizes[4], kret);
		goto fail;
	}
	regionbuffers[4] = NULL;

	memset(regionbuffers[3], 0x4f, pagesize*MIN(3, regionsizes[3]));

	kret = vm_deallocate(mach_task_self(), (vm_address_t)regionbuffers[3], regionsizes[3]*pagesize);
	if (kret != KERN_SUCCESS) {
		warnx("vm_deallocate of %d pages failed: %d", regionsizes[3], kret);
		goto fail;
	}
	regionbuffers[3] = NULL;
	// populate the largest buffer with a byte pattern that matches the page offset, then fix it to readonly
	for (i=0; i < regionsizes[5]; i++) {
		memset(regionbuffers[5] + i*pagesize, (unsigned char)i, pagesize);
	}

	kret = vm_protect(mach_task_self(), (vm_offset_t)regionbuffers[5], regionsizes[5]*pagesize, FALSE, VM_PROT_READ);
	if (kret != KERN_SUCCESS) {
		warnx("vm_protect of %d pages failed: %d", regionsizes[5], kret);
		goto fail;
	}
	// read the last few pages of the largest buffer and verify its contents
	{
		vm_offset_t newdata;
		mach_msg_type_number_t newcount;

		kret = vm_read(mach_task_self(), (vm_address_t)regionbuffers[5] + (regionsizes[5]-5)*pagesize, 5*pagesize,
				&newdata, &newcount);
		if (kret != KERN_SUCCESS) {
			warnx("vm_read of %d pages failed: %d", 5, kret);
			goto fail;
		}

		if (0 != memcmp((char *)newdata, regionbuffers[5] + (regionsizes[5]-5)*pagesize,
				5*pagesize)) {
			warnx("vm_read comparison of %d pages failed", 5);
			vm_deallocate(mach_task_self(), newdata, 5*pagesize);
			goto fail;
		}

		kret = vm_deallocate(mach_task_self(), newdata, 5*pagesize);
		if (kret != KERN_SUCCESS) {
			warnx("vm_deallocate of %d pages failed: %d", 5, kret);
			goto fail;
		}
	}
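	/*
	 * vm_read_list takes an array of address/size pairs and, on success,
	 * rewrites each entry's address field to point at a newly allocated
	 * copy of that range in the caller's address space.
	 */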
	// do a list read to repopulate slots 3 and 4
	{
		vm_read_entry_t readlist;

		readlist[0].address = (vm_offset_t)regionbuffers[5] + 10*pagesize;
		readlist[0].size = regionsizes[3]*pagesize;
		readlist[1].address = (vm_offset_t)regionbuffers[5] + 10*pagesize + regionsizes[3]*pagesize;
		readlist[1].size = regionsizes[4]*pagesize;

		kret = vm_read_list(mach_task_self(), readlist, 2);
		if (kret != KERN_SUCCESS) {
			warnx("vm_read_list failed: %d", kret);
			goto fail;
		}

		if (0 != memcmp((char *)readlist[0].address, regionbuffers[5] + 10*pagesize,
				regionsizes[3]*pagesize)) {
			warnx("vm_read_list comparison of allocation 0 failed");
			vm_deallocate(mach_task_self(), readlist[0].address, readlist[0].size);
			vm_deallocate(mach_task_self(), readlist[1].address, readlist[1].size);
			goto fail;
		}

		if (0 != memcmp((char *)readlist[1].address, regionbuffers[5] + 10*pagesize + regionsizes[3]*pagesize,
				regionsizes[4]*pagesize)) {
			warnx("vm_read_list comparison of allocation 1 failed");
			vm_deallocate(mach_task_self(), readlist[0].address, readlist[0].size);
			vm_deallocate(mach_task_self(), readlist[1].address, readlist[1].size);
			goto fail;
		}

		regionbuffers[3] = (char *)readlist[0].address;
		regionbuffers[4] = (char *)readlist[1].address;
	}
	// do a read_overwrite and copy, which should be about the same
	{
		vm_size_t count;	/* bytes actually copied back by vm_read_overwrite */

		kret = vm_read_overwrite(mach_task_self(), (vm_offset_t)regionbuffers[3],
				regionsizes[0]*pagesize,
				(vm_offset_t)regionbuffers[0],
				&count);
		if (kret != KERN_SUCCESS) {
			warnx("vm_read_overwrite of %d pages failed: %d", regionsizes[0], kret);
			goto fail;
		}

		kret = vm_copy(mach_task_self(), (vm_offset_t)regionbuffers[0],
				regionsizes[0]*pagesize,
				(vm_offset_t)regionbuffers[1]);
		if (kret != KERN_SUCCESS) {
			warnx("vm_copy of %d pages failed: %d", regionsizes[0], kret);
			goto fail;
		}

		if (0 != memcmp(regionbuffers[1], regionbuffers[3],
				regionsizes[0]*pagesize)) {
			warnx("vm_read_overwrite/vm_copy comparison failed");
			goto fail;
		}
	}
	// do a vm_write of our mach-o header and compare.
	kret = vm_write(mach_task_self(), (vm_address_t)regionbuffers[2],
			(vm_offset_t)&_mh_execute_header, pagesize);
	if (kret != KERN_SUCCESS) {
		warnx("vm_write of %d pages failed: %d", 1, kret);
		goto fail;
	}

	if (_mh_execute_header.magic != *(uint32_t *)regionbuffers[2]) {
		warnx("vm_write comparison failed");
		goto fail;
	}
	// check that the vm_protects above worked
	{
		vm_address_t addr = (vm_address_t)regionbuffers[5]+7*pagesize;
		vm_size_t size = pagesize;
		int _basic[VM_REGION_BASIC_INFO_COUNT];
		vm_region_basic_info_t basic = (vm_region_basic_info_t)_basic;
		int _basic64[VM_REGION_BASIC_INFO_COUNT_64];
		vm_region_basic_info_64_t basic64 = (vm_region_basic_info_64_t)_basic64;
		int _submap[VM_REGION_SUBMAP_INFO_COUNT];
		vm_region_submap_info_t submap = (vm_region_submap_info_t)_submap;
		mach_msg_type_number_t infocnt;
		mach_port_t objname;
		natural_t nesting_depth = 0;

		infocnt = VM_REGION_BASIC_INFO_COUNT;
		kret = vm_region(mach_task_self(), &addr, &size, VM_REGION_BASIC_INFO,
				(vm_region_info_t)basic, &infocnt, &objname);
		if (kret != KERN_SUCCESS) {
			warnx("vm_region(VM_REGION_BASIC_INFO) failed: %d", kret);
			goto fail;
		}
		if (VM_REGION_BASIC_INFO_COUNT != infocnt) {
			warnx("vm_region(VM_REGION_BASIC_INFO) returned a bad info count");
			goto fail;
		}

		// when we did the vm_read_list above, it should have split this region into
		// a 10 page sub-region
		if (addr != (vm_address_t)regionbuffers[5] || size != 10*pagesize) {
			warnx("vm_region(VM_REGION_BASIC_INFO) returned a bad region range");
			goto fail;
		}

		if (basic->protection != VM_PROT_READ) {
			warnx("vm_region(VM_REGION_BASIC_INFO) returned a bad protection");
			goto fail;
		}

		infocnt = VM_REGION_BASIC_INFO_COUNT_64;
		// intentionally use VM_REGION_BASIC_INFO and get up-converted
		kret = vm_region_64(mach_task_self(), &addr, &size, VM_REGION_BASIC_INFO,
				(vm_region_info_t)basic64, &infocnt, &objname);
		if (kret != KERN_SUCCESS) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) failed: %d", kret);
			goto fail;
		}
		if (VM_REGION_BASIC_INFO_COUNT_64 != infocnt) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) returned a bad info count");
			goto fail;
		}

		// when we did the vm_read_list above, it should have split this region into
		// a 10 page sub-region
		if (addr != (vm_address_t)regionbuffers[5] || size != 10*pagesize) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) returned a bad region range");
			goto fail;
		}

		if (basic64->protection != VM_PROT_READ) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) returned a bad protection");
			goto fail;
		}

		// try to compare some stuff. Particularly important for fields after offset
		if (basic->offset != basic64->offset ||
			basic->behavior != basic64->behavior ||
			basic->user_wired_count != basic64->user_wired_count) {
			warnx("vm_region and vm_region_64 did not agree");
			goto fail;
		}

#if !__LP64__
		infocnt = VM_REGION_SUBMAP_INFO_COUNT;
		kret = vm_region_recurse(mach_task_self(), &addr, &size,
				&nesting_depth, (vm_region_info_t)submap,
				&infocnt);
		if (kret != KERN_SUCCESS) {
			warnx("vm_region_recurse() failed: %d", kret);
			goto fail;
		}
		if (VM_REGION_SUBMAP_INFO_COUNT != infocnt) {
			warnx("vm_region_recurse() returned a bad info count");
			goto fail;
		}

		if (submap->pages_dirtied != 10) {
			warnx("vm_region_recurse() returned bad pages_dirtied");
			goto fail;
		}
#endif /* !__LP64__ */
	}
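	/*
	 * mach_make_memory_entry hands back a send right to a named entry object
	 * backing the given range.  That handle can be vm_map'd even after the
	 * original allocation is gone, and once its last send right is released
	 * a further vm_map is expected to fail.
	 */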
	// exercise mach_make_memory_entry/vm_map
	{
		vm_address_t addr1, addr2;
		vm_size_t size = pagesize;
		mach_port_t mem_handle = MACH_PORT_NULL;

		kret = vm_allocate(mach_task_self(), &addr1, size, VM_FLAGS_ANYWHERE);
		if (kret != KERN_SUCCESS) {
			warnx("vm_allocate failed: %d", kret);
			goto fail;
		}

		*(uint32_t *)(uintptr_t)addr1 = 'test';

		kret = mach_make_memory_entry(mach_task_self(),
				&size, addr1, VM_PROT_DEFAULT,
				&mem_handle, MACH_PORT_NULL);
		if (kret != KERN_SUCCESS) {
			warnx("mach_make_memory_entry failed: %d", kret);
			goto fail;
		}

		kret = vm_deallocate(mach_task_self(), addr1, size);
		if (kret != KERN_SUCCESS) {
			warnx("vm_deallocate failed: %d", kret);
			goto fail;
		}

		kret = vm_map(mach_task_self(), &addr2, size, 0, VM_FLAGS_ANYWHERE,
				mem_handle, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
				VM_INHERIT_NONE);
		if (kret != KERN_SUCCESS) {
			warnx("vm_map failed: %d", kret);
			goto fail;
		}

		if (*(uint32_t *)(uintptr_t)addr2 != 'test') {
			warnx("mapped data mismatch");
			goto fail;
		}

		kret = vm_deallocate(mach_task_self(), addr2, size);
		if (kret != KERN_SUCCESS) {
			warnx("vm_deallocate failed: %d", kret);
			goto fail;
		}

		// drop the only send right to the memory entry; mapping it again should now fail
		kret = mach_port_mod_refs(mach_task_self(), mem_handle, MACH_PORT_RIGHT_SEND, -1);
		if (kret != KERN_SUCCESS) {
			warnx("mach_port_mod_refs(-1) failed: %d", kret);
			goto fail;
		}

		kret = vm_map(mach_task_self(), &addr2, size, 0, VM_FLAGS_ANYWHERE,
				mem_handle, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
				VM_INHERIT_NONE);
		if (kret == KERN_SUCCESS) {
			warnx("vm_map succeeded when it should not have");
			goto fail;
		}
	}
	return 0;

fail:
	/* release any regions that are still allocated before reporting failure */
	for (i=0; i < sizeof(regionsizes)/sizeof(regionsizes[0]); i++) {
		if (regionbuffers[i]) {
			vm_deallocate(mach_task_self(), (vm_address_t)regionbuffers[i], regionsizes[i]*pagesize);
		}
	}

	return -1;
}