/*
 * Copyright 2008 Apple Inc. All rights reserved.
 */
#include <mach/mach.h>
#include <sys/param.h>		/* MIN() */
#include <mach-o/ldsyms.h>	/* _mh_execute_header */
#include <err.h>		/* warnx() */
#include <string.h>		/* memset(), memcmp() */
#include <unistd.h>		/* getpagesize() */
#include <stdint.h>		/* uint32_t */

extern int g_is_under_rosetta;
int machvm_tests( void * the_argp )
{
	int pagesize = getpagesize();
	int regionsizes[] = { 1, 3, 7, 13, 77, 1223 };	/* sizes must be in increasing order */
	char *regionbuffers[] = { NULL, NULL, NULL, NULL, NULL, NULL };
	int i;
	kern_return_t kret;
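	/*
	 * The test walks through the core Mach VM calls in order: vm_allocate,
	 * vm_deallocate, vm_protect, vm_read, vm_read_list, vm_read_overwrite,
	 * vm_copy, vm_write, and finally vm_region/vm_region_64 to verify the
	 * protection and region-splitting side effects of the earlier calls.
	 */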
	/* Use vm_allocate to grab some memory */
	for (i=0; i < sizeof(regionsizes)/sizeof(regionsizes[0]); i++) {
		vm_address_t addr = 0;

		kret = vm_allocate(mach_task_self(), &addr, regionsizes[i]*pagesize, VM_FLAGS_ANYWHERE);
		if (kret != KERN_SUCCESS) {
			warnx("vm_allocate of %d pages failed: %d", regionsizes[i], kret);
			goto done;
		}

		regionbuffers[i] = (char *)addr;
	}
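	/*
	 * Note: with VM_FLAGS_ANYWHERE the kernel picks the addresses and the new
	 * pages come back zero-filled; the loop above only records where each
	 * region landed.
	 */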
	/* deallocate one range without having touched it, scribble on another, then deallocate that one */
	kret = vm_deallocate(mach_task_self(), (vm_address_t)regionbuffers[4], regionsizes[4]*pagesize);
	if (kret != KERN_SUCCESS) {
		warnx("vm_deallocate of %d pages failed: %d", regionsizes[4], kret);
		goto done;
	}

	regionbuffers[4] = NULL;

	memset(regionbuffers[3], 0x4f, pagesize*MIN(3, regionsizes[3]));

	kret = vm_deallocate(mach_task_self(), (vm_address_t)regionbuffers[3], regionsizes[3]*pagesize);
	if (kret != KERN_SUCCESS) {
		warnx("vm_deallocate of %d pages failed: %d", regionsizes[3], kret);
		goto done;
	}

	regionbuffers[3] = NULL;
	// populate the largest buffer with a byte pattern that matches the page offset, then fix it to readonly
	for (i=0; i < regionsizes[5]; i++) {
		memset(regionbuffers[5] + i*pagesize, (unsigned char)i, pagesize);
	}

	kret = vm_protect(mach_task_self(), (vm_offset_t)regionbuffers[5], regionsizes[5]*pagesize, FALSE, VM_PROT_READ);
	if (kret != KERN_SUCCESS) {
		warnx("vm_protect of %d pages failed: %d", regionsizes[5], kret);
		goto done;
	}
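	/*
	 * vm_read() hands back a copy of the requested pages in freshly allocated
	 * memory within this task, so the copy must be released with
	 * vm_deallocate() after the comparison. Read access on the (now
	 * read-only) source region is all it needs.
	 */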
	// read the last few pages of the largest buffer and verify their contents
	{
		vm_offset_t newdata;
		mach_msg_type_number_t newcount;

		kret = vm_read(mach_task_self(), (vm_address_t)regionbuffers[5] + (regionsizes[5]-5)*pagesize, 5*pagesize,
					   &newdata, &newcount);
		if (kret != KERN_SUCCESS) {
			warnx("vm_read of %d pages failed: %d", 5, kret);
			goto done;
		}

		if (0 != memcmp((char *)newdata, regionbuffers[5] + (regionsizes[5]-5)*pagesize,
						5*pagesize)) {
			warnx("vm_read comparison of %d pages failed", 5);
			vm_deallocate(mach_task_self(), newdata, 5*pagesize);
			kret = KERN_FAILURE;
			goto done;
		}

		kret = vm_deallocate(mach_task_self(), newdata, 5*pagesize);
		if (kret != KERN_SUCCESS) {
			warnx("vm_deallocate of %d pages failed: %d", 5, kret);
			goto done;
		}
	}
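	/*
	 * vm_read_list() takes an array of address/size pairs and copies each
	 * range in one call; on return the address field of every entry points
	 * at the new copy in this task, which is why the buffers can be adopted
	 * back into regionbuffers[3] and [4] below.
	 */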
	// do a list read to repopulate slots 3 and 4
	{
		vm_read_entry_t readlist;

		readlist[0].address = (vm_offset_t)regionbuffers[5] + 10*pagesize;
		readlist[0].size = regionsizes[3]*pagesize;
		readlist[1].address = (vm_offset_t)regionbuffers[5] + 10*pagesize + regionsizes[3]*pagesize;
		readlist[1].size = regionsizes[4]*pagesize;

		kret = vm_read_list(mach_task_self(), readlist, 2);
		if (kret != KERN_SUCCESS) {
			warnx("vm_read_list failed: %d", kret);
			goto done;
		}

		if (0 != memcmp((char *)readlist[0].address, regionbuffers[5] + 10*pagesize,
						regionsizes[3]*pagesize)) {
			warnx("vm_read_list comparison of allocation 0 failed");
			vm_deallocate(mach_task_self(), readlist[0].address, readlist[0].size);
			vm_deallocate(mach_task_self(), readlist[1].address, readlist[1].size);
			kret = KERN_FAILURE;
			goto done;
		}

		if (0 != memcmp((char *)readlist[1].address, regionbuffers[5] + 10*pagesize + regionsizes[3]*pagesize,
						regionsizes[4]*pagesize)) {
			warnx("vm_read_list comparison of allocation 1 failed");
			vm_deallocate(mach_task_self(), readlist[0].address, readlist[0].size);
			vm_deallocate(mach_task_self(), readlist[1].address, readlist[1].size);
			kret = KERN_FAILURE;
			goto done;
		}

		regionbuffers[3] = (char *)readlist[0].address;
		regionbuffers[4] = (char *)readlist[1].address;
	}
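	/*
	 * vm_read_overwrite() copies into a buffer the caller already owns (no
	 * new allocation), while vm_copy() copies between two existing ranges;
	 * afterwards the first regionsizes[0] pages of regionbuffers[0], [1],
	 * and [3] should hold identical bytes.
	 */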
	// do a read_overwrite and copy, which should be about the same
	{
		vm_size_t count;

		kret = vm_read_overwrite(mach_task_self(), (vm_offset_t)regionbuffers[3],
								 regionsizes[0]*pagesize,
								 (vm_offset_t)regionbuffers[0],
								 &count);
		if (kret != KERN_SUCCESS) {
			warnx("vm_read_overwrite of %d pages failed: %d", regionsizes[0], kret);
			goto done;
		}

		kret = vm_copy(mach_task_self(), (vm_offset_t)regionbuffers[0],
					   regionsizes[0]*pagesize,
					   (vm_offset_t)regionbuffers[1]);
		if (kret != KERN_SUCCESS) {
			warnx("vm_copy of %d pages failed: %d", regionsizes[0], kret);
			goto done;
		}

		if (0 != memcmp(regionbuffers[1], regionbuffers[3],
						regionsizes[0]*pagesize)) {
			warnx("vm_read_overwrite/vm_copy comparison failed");
			kret = KERN_FAILURE;
			goto done;
		}
	}
	// do a vm_write of our mach-o header into a buffer and compare. Rosetta doesn't support this, though
	if (!g_is_under_rosetta) {
		kret = vm_write(mach_task_self(), (vm_address_t)regionbuffers[2],
						(vm_offset_t)&_mh_execute_header, pagesize);
		if (kret != KERN_SUCCESS) {
			warnx("vm_write of %d pages failed: %d", 1, kret);
			goto done;
		}

		if (_mh_execute_header.magic != *(uint32_t *)regionbuffers[2]) {
			warnx("vm_write comparison failed");
			kret = KERN_FAILURE;
			goto done;
		}
	}
	// check that the vm_protect above worked
	{
		vm_address_t addr = (vm_address_t)regionbuffers[5]+7*pagesize;
		vm_size_t size = pagesize;
		int _basic[VM_REGION_BASIC_INFO_COUNT];
		vm_region_basic_info_t basic = (vm_region_basic_info_t)_basic;
		int _basic64[VM_REGION_BASIC_INFO_COUNT_64];
		vm_region_basic_info_64_t basic64 = (vm_region_basic_info_64_t)_basic64;
		mach_msg_type_number_t infocnt;
		mach_port_t objname;
		infocnt = VM_REGION_BASIC_INFO_COUNT;
		kret = vm_region(mach_task_self(), &addr, &size, VM_REGION_BASIC_INFO,
						 (vm_region_info_t)basic, &infocnt, &objname);
		if (kret != KERN_SUCCESS) {
			warnx("vm_region(VM_REGION_BASIC_INFO) failed: %d", kret);
			goto done;
		}

		if (VM_REGION_BASIC_INFO_COUNT != infocnt) {
			warnx("vm_region(VM_REGION_BASIC_INFO) returned a bad info count");
			kret = KERN_FAILURE;
			goto done;
		}

		// when we did the vm_read_list above, it should have split this region into
		// a 10 page sub-region
		if (addr != (vm_address_t)regionbuffers[5] || size != 10*pagesize) {
			warnx("vm_region(VM_REGION_BASIC_INFO) returned a bad region range");
			kret = KERN_FAILURE;
			goto done;
		}

		if (basic->protection != VM_PROT_READ) {
			warnx("vm_region(VM_REGION_BASIC_INFO) returned a bad protection");
			kret = KERN_FAILURE;
			goto done;
		}
		infocnt = VM_REGION_BASIC_INFO_COUNT_64;
		// intentionally use VM_REGION_BASIC_INFO and get up-converted
		kret = vm_region_64(mach_task_self(), &addr, &size, VM_REGION_BASIC_INFO,
							(vm_region_info_t)basic64, &infocnt, &objname);
		if (kret != KERN_SUCCESS) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) failed: %d", kret);
			goto done;
		}

		if (VM_REGION_BASIC_INFO_COUNT_64 != infocnt) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) returned a bad info count");
			kret = KERN_FAILURE;
			goto done;
		}

		// when we did the vm_read_list above, it should have split this region into
		// a 10 page sub-region
		if (addr != (vm_address_t)regionbuffers[5] || size != 10*pagesize) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) returned a bad region range");
			kret = KERN_FAILURE;
			goto done;
		}

		if (basic64->protection != VM_PROT_READ) {
			warnx("vm_region_64(VM_REGION_BASIC_INFO) returned a bad protection");
			kret = KERN_FAILURE;
			goto done;
		}
		// try to compare some fields. Particularly important for fields after "offset",
		// which is where the 32-bit and 64-bit layouts diverge
		if (!g_is_under_rosetta) {
			if (basic->offset != basic64->offset ||
				basic->behavior != basic64->behavior ||
				basic->user_wired_count != basic64->user_wired_count) {
				warnx("vm_region and vm_region_64 did not agree");
				kret = KERN_FAILURE;
				goto done;
			}
		}
	}
	kret = KERN_SUCCESS;

done:
	for (i=0; i < sizeof(regionsizes)/sizeof(regionsizes[0]); i++) {
		if (regionbuffers[i]) {
			vm_deallocate(mach_task_self(), (vm_address_t)regionbuffers[i], regionsizes[i]*pagesize);
		}
	}

	return (kret == KERN_SUCCESS) ? 0 : 1;
}