/*
 * Copyright (c) 2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Make sure we don't accidentally include the external definitions of
 * the routines we're interposing on below.
 */
#define _mach_vm_user_
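/* _mach_vm_user_ appears to be the include guard of the MIG-generated
 * mach_vm user header, so pre-defining it keeps those prototypes out of
 * <mach/mach.h>. */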
#include <mach/mach.h>
#include <mach/mach_traps.h>
#include <mach/vm_map_internal.h>
#include <mach/mach_vm_internal.h>

#include "stack_logging_internal.h"
malloc_logger_t *__syscall_logger = NULL;	// This may get set by Libc's malloc stack logging initialization code.
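
/*
 * Each interposed routine below tries the kernel trap version of the call
 * first (when one exists) and falls back to the MIG routine if the trap
 * returns MACH_SEND_INVALID_DEST. When __syscall_logger is set, calls that
 * create or release address space are also reported to the malloc stack
 * logger.
 */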

kern_return_t
mach_vm_allocate(
    mach_port_name_t target,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    int flags)
{
    kern_return_t rv;

    rv = _kernelrpc_mach_vm_allocate_trap(target, address, size, flags);

    if (rv == MACH_SEND_INVALID_DEST) {
        rv = _kernelrpc_mach_vm_allocate(target, address, size, flags);
    }

    int userTagFlags = flags & VM_FLAGS_ALIAS_MASK;
    if (__syscall_logger && rv == KERN_SUCCESS && (userTagFlags != VM_MAKE_TAG(VM_MEMORY_STACK))) {
        __syscall_logger(stack_logging_type_vm_allocate | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0);
    }

    return rv;
}

kern_return_t
mach_vm_deallocate(
    mach_port_name_t target,
    mach_vm_address_t address,
    mach_vm_size_t size)
{
    kern_return_t rv;

    if (__syscall_logger) {
        __syscall_logger(stack_logging_type_vm_deallocate, (uintptr_t)target, (uintptr_t)address, (uintptr_t)size, 0, 0);
    }

    rv = _kernelrpc_mach_vm_deallocate_trap(target, address, size);

    if (rv == MACH_SEND_INVALID_DEST) {
        rv = _kernelrpc_mach_vm_deallocate(target, address, size);
    }

    return rv;
}

kern_return_t
mach_vm_protect(
    mach_port_name_t task,
    mach_vm_address_t address,
    mach_vm_size_t size,
    boolean_t set_maximum,
    vm_prot_t new_protection)
{
    kern_return_t rv;

    rv = _kernelrpc_mach_vm_protect_trap(task, address, size, set_maximum,
        new_protection);

    if (rv == MACH_SEND_INVALID_DEST) {
        rv = _kernelrpc_mach_vm_protect(task, address, size,
            set_maximum, new_protection);
    }

    return rv;
}
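
/*
 * The vm_* variants below take pointer-width vm_address_t/vm_size_t
 * arguments and simply forward to the mach_vm_* routines above.
 */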

kern_return_t
vm_allocate(
    mach_port_name_t task,
    vm_address_t *address,
    vm_size_t size,
    int flags)
{
    kern_return_t rv;
    mach_vm_address_t mach_addr;

    mach_addr = (mach_vm_address_t)*address;
    rv = mach_vm_allocate(task, &mach_addr, size, flags);
#if defined(__LP64__)
    *address = mach_addr;
#else
    *address = (vm_address_t)(mach_addr & ((vm_address_t)-1));
#endif

    return rv;
}

kern_return_t
vm_deallocate(
    mach_port_name_t task,
    vm_address_t address,
    vm_size_t size)
{
    kern_return_t rv;

    rv = mach_vm_deallocate(task, address, size);

    return rv;
}

kern_return_t
vm_protect(
    mach_port_name_t task,
    vm_address_t address,
    vm_size_t size,
    boolean_t set_maximum,
    vm_prot_t new_protection)
{
    kern_return_t rv;

    rv = mach_vm_protect(task, address, size, set_maximum, new_protection);

    return rv;
}

kern_return_t
mach_vm_map(
    mach_port_name_t target,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    memory_object_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
    kern_return_t rv = MACH_SEND_INVALID_DEST;

    if (object == MEMORY_OBJECT_NULL && max_protection == VM_PROT_ALL &&
        inheritance == VM_INHERIT_DEFAULT) {
        rv = _kernelrpc_mach_vm_map_trap(target, address, size, mask, flags,
            cur_protection);
    }

    if (rv == MACH_SEND_INVALID_DEST) {
        rv = _kernelrpc_mach_vm_map(target, address, size, mask, flags, object,
            offset, copy, cur_protection, max_protection, inheritance);
    }

    int userTagFlags = flags & VM_FLAGS_ALIAS_MASK;
    if (__syscall_logger && rv == KERN_SUCCESS && (userTagFlags != VM_MAKE_TAG(VM_MEMORY_STACK))) {
        int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem;
        __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0);
    }

    return rv;
}

kern_return_t
mach_vm_remap(
    mach_port_name_t target,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mach_port_name_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
    kern_return_t rv;

    rv = _kernelrpc_mach_vm_remap(target, address, size, mask, flags,
        src_task, src_address, copy, cur_protection, max_protection,
        inheritance);

    if (__syscall_logger && rv == KERN_SUCCESS) {
        int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem;
        int userTagFlags = flags & VM_FLAGS_ALIAS_MASK;
        __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0);
    }

    return rv;
}

kern_return_t
mach_vm_remap_new(
    mach_port_name_t target,
    mach_vm_address_t *address,
    mach_vm_size_t size,
    mach_vm_offset_t mask,
    int flags,
    mach_port_name_t src_task,
    mach_vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
    kern_return_t rv;

    /* {max,cur}_protection is inout */
    rv = _kernelrpc_mach_vm_remap_new(target, address, size, mask, flags,
        src_task, src_address, copy, cur_protection, max_protection,
        inheritance);

    if (__syscall_logger && rv == KERN_SUCCESS) {
        int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem;
        int userTagFlags = flags & VM_FLAGS_ALIAS_MASK;
        __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0);
    }

    return rv;
}

kern_return_t
mach_vm_read(
    mach_port_name_t target,
    mach_vm_address_t address,
    mach_vm_size_t size,
    vm_offset_t *data,
    mach_msg_type_number_t *dataCnt)
{
    kern_return_t rv;

    rv = _kernelrpc_mach_vm_read(target, address, size, data, dataCnt);

    if (__syscall_logger && rv == KERN_SUCCESS) {
        int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem;
        // The target argument is the remote task from which data is being read,
        // so pass mach_task_self() as the destination task receiving the allocation.
        __syscall_logger(eventTypeFlags, (uintptr_t)mach_task_self(), (uintptr_t)*dataCnt, 0, *data, 0);
    }

    return rv;
}

kern_return_t
vm_map(
    mach_port_name_t target,
    vm_address_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    mem_entry_name_port_t object,
    vm_offset_t offset,
    boolean_t copy,
    vm_prot_t cur_protection,
    vm_prot_t max_protection,
    vm_inherit_t inheritance)
{
    kern_return_t rv;

    rv = _kernelrpc_vm_map(target, address, size, mask, flags, object,
        offset, copy, cur_protection, max_protection, inheritance);

    if (__syscall_logger && rv == KERN_SUCCESS) {
        int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem;
        int userTagFlags = flags & VM_FLAGS_ALIAS_MASK;
        __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0);
    }

    return rv;
}

kern_return_t
vm_remap(
    mach_port_name_t target,
    vm_address_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    mach_port_name_t src_task,
    vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
    kern_return_t rv;

    rv = _kernelrpc_vm_remap(target, address, size, mask, flags,
        src_task, src_address, copy, cur_protection, max_protection,
        inheritance);

    if (__syscall_logger) {
        int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem;
        int userTagFlags = flags & VM_FLAGS_ALIAS_MASK;
        __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0);
    }

    return rv;
}

kern_return_t
vm_remap_new(
    mach_port_name_t target,
    vm_address_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    mach_port_name_t src_task,
    vm_address_t src_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance)
{
    kern_return_t rv;

    /* {max,cur}_protection is inout */
    rv = _kernelrpc_vm_remap_new(target, address, size, mask, flags,
        src_task, src_address, copy, cur_protection, max_protection,
        inheritance);

    if (__syscall_logger) {
        int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem;
        int userTagFlags = flags & VM_FLAGS_ALIAS_MASK;
        __syscall_logger(eventTypeFlags | userTagFlags, (uintptr_t)target, (uintptr_t)size, 0, (uintptr_t)*address, 0);
    }

    return rv;
}

kern_return_t
vm_read(
    mach_port_name_t target,
    vm_address_t address,
    vm_size_t size,
    vm_offset_t *data,
    mach_msg_type_number_t *dataCnt)
{
    kern_return_t rv;

    rv = _kernelrpc_vm_read(target, address, size, data, dataCnt);

    if (__syscall_logger && rv == KERN_SUCCESS) {
        int eventTypeFlags = stack_logging_type_vm_allocate | stack_logging_type_mapped_file_or_shared_mem;
        // The target argument is the remote task from which data is being read,
        // so pass mach_task_self() as the destination task receiving the allocation.
        __syscall_logger(eventTypeFlags, (uintptr_t)mach_task_self(), (uintptr_t)*dataCnt, 0, *data, 0);
    }

    return rv;
}
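
/*
 * No stack-logging hook for purgability control: it neither creates nor
 * frees an allocation. The trap-then-MIG fallback pattern still applies.
 */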

kern_return_t
mach_vm_purgable_control(
    mach_port_name_t target,
    mach_vm_offset_t address,
    vm_purgable_t control,
    int *state)
{
    kern_return_t rv;

    rv = _kernelrpc_mach_vm_purgable_control_trap(target, address, control, state);

    if (rv == MACH_SEND_INVALID_DEST) {
        rv = _kernelrpc_mach_vm_purgable_control(target, address, control, state);
    }

    return rv;
}

kern_return_t
vm_purgable_control(
    mach_port_name_t task,
    vm_offset_t address,
    vm_purgable_t control,
    int *state)
{
    return mach_vm_purgable_control(task,
               (mach_vm_offset_t) address,