/*
 * Copyright (c) 2000-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
#include <vm/vm_options.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/extmod_statistics.h>
#include <mach/mach_traps.h>
#include <mach/port.h>
#include <mach/task.h>
#include <mach/task_access.h>
#include <mach/task_special_ports.h>
#include <mach/time_value.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>

#include <sys/file_internal.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/vnode_internal.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/sysctl.h>
#include <sys/cprotect.h>
#include <sys/kpi_socket.h>
#include <sys/kas_info.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <security/audit/audit.h>
#include <security/mac.h>
#include <bsm/audit_kevents.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pageout.h>

#include <mach/shared_region.h>
#include <vm/vm_shared_region.h>

#include <vm/vm_protos.h>

#include <sys/kern_memorystatus.h>

#if CONFIG_MACF
#include <security/mac_framework.h>
#endif
int _shared_region_map_and_slide(struct proc*, int, unsigned int, struct shared_file_mapping_np*, uint32_t, user_addr_t, user_addr_t);
int shared_region_copyin_mappings(struct proc*, user_addr_t, unsigned int, struct shared_file_mapping_np*);
#if VM_MAP_DEBUG_APPLE_PROTECT
SYSCTL_INT(_vm, OID_AUTO, map_debug_apple_protect, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_map_debug_apple_protect, 0, "");
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */

#if VM_MAP_DEBUG_FOURK
SYSCTL_INT(_vm, OID_AUTO, map_debug_fourk, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_map_debug_fourk, 0, "");
#endif /* VM_MAP_DEBUG_FOURK */
#if DEVELOPMENT || DEBUG

static int
sysctl_kmem_alloc_contig SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
    vm_offset_t     kaddr;
    kern_return_t   kr;
    int     error = 0;
    int     size = 0;

    error = sysctl_handle_int(oidp, &size, 0, req);
    if (error || !req->newptr)
        return error;

    kr = kmem_alloc_contig(kernel_map, &kaddr, (vm_size_t)size, 0, 0, 0, 0, VM_KERN_MEMORY_IOKIT);

    if (kr == KERN_SUCCESS)
        kmem_free(kernel_map, kaddr, size);

    return error;
}

SYSCTL_PROC(_vm, OID_AUTO, kmem_alloc_contig, CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_LOCKED | CTLFLAG_MASKED,
            0, 0, &sysctl_kmem_alloc_contig, "I", "");
extern int vm_region_footprint;
SYSCTL_INT(_vm, OID_AUTO, region_footprint, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_region_footprint, 0, "");

#endif /* DEVELOPMENT || DEBUG */
#if CONFIG_EMBEDDED

#if DEVELOPMENT || DEBUG
extern int panic_on_unsigned_execute;
SYSCTL_INT(_vm, OID_AUTO, panic_on_unsigned_execute, CTLFLAG_RW | CTLFLAG_LOCKED, &panic_on_unsigned_execute, 0, "");
#endif /* DEVELOPMENT || DEBUG */

extern int log_executable_mem_entry;
extern int cs_executable_create_upl;
extern int cs_executable_mem_entry;
extern int cs_executable_wire;
SYSCTL_INT(_vm, OID_AUTO, log_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &log_executable_mem_entry, 0, "");
SYSCTL_INT(_vm, OID_AUTO, cs_executable_create_upl, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_create_upl, 0, "");
SYSCTL_INT(_vm, OID_AUTO, cs_executable_mem_entry, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_mem_entry, 0, "");
SYSCTL_INT(_vm, OID_AUTO, cs_executable_wire, CTLFLAG_RD | CTLFLAG_LOCKED, &cs_executable_wire, 0, "");
#endif /* CONFIG_EMBEDDED */
#if DEVELOPMENT || DEBUG
extern int radar_20146450;
SYSCTL_INT(_vm, OID_AUTO, radar_20146450, CTLFLAG_RW | CTLFLAG_LOCKED, &radar_20146450, 0, "");

extern int macho_printf;
SYSCTL_INT(_vm, OID_AUTO, macho_printf, CTLFLAG_RW | CTLFLAG_LOCKED, &macho_printf, 0, "");

extern int apple_protect_pager_data_request_debug;
SYSCTL_INT(_vm, OID_AUTO, apple_protect_pager_data_request_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &apple_protect_pager_data_request_debug, 0, "");

#if __arm__ || __arm64__
/* These are meant to support the page table accounting unit test. */
extern unsigned int arm_hardware_page_size;
extern unsigned int arm_pt_desc_size;
extern unsigned int arm_pt_root_size;
extern unsigned int free_page_size_tt_count;
extern unsigned int free_two_page_size_tt_count;
extern unsigned int free_tt_count;
extern unsigned int inuse_user_tteroot_count;
extern unsigned int inuse_kernel_tteroot_count;
extern unsigned int inuse_user_ttepages_count;
extern unsigned int inuse_kernel_ttepages_count;
extern unsigned int inuse_user_ptepages_count;
extern unsigned int inuse_kernel_ptepages_count;
SYSCTL_UINT(_vm, OID_AUTO, native_hw_pagesize, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_hardware_page_size, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, arm_pt_desc_size, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_pt_desc_size, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, arm_pt_root_size, CTLFLAG_RD | CTLFLAG_LOCKED, &arm_pt_root_size, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, free_1page_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_page_size_tt_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, free_2page_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_two_page_size_tt_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, free_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &free_tt_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, user_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_tteroot_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, kernel_tte_root, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_tteroot_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, user_tte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_ttepages_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, kernel_tte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_ttepages_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, user_pte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_user_ptepages_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, kernel_pte_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &inuse_kernel_ptepages_count, 0, "");
#endif /* __arm__ || __arm64__ */

#if __arm64__
extern int fourk_pager_data_request_debug;
SYSCTL_INT(_vm, OID_AUTO, fourk_pager_data_request_debug, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_pager_data_request_debug, 0, "");
#endif /* __arm64__ */
#endif /* DEVELOPMENT || DEBUG */
SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_compressor_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_compressor_pages, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_terminate, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_terminate, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_do_collapse_terminate_failure, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.do_collapse_terminate_failure, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_should_cow_but_wired, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.should_cow_but_wired, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_extra_cow, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_extra_cow, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_extra_cow_pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_extra_cow_pages, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_lookup_failure_write, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_lookup_failure_write, 0, "");
SYSCTL_INT(_vm, OID_AUTO, vm_create_upl_lookup_failure_copy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_counters.create_upl_lookup_failure_copy, 0, "");
#if VM_SCAN_FOR_SHADOW_CHAIN
static int vm_shadow_max_enabled = 0;   /* Disabled by default */
extern int proc_shadow_max(void);
static int
vm_shadow_max SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
    int value = 0;

    if (vm_shadow_max_enabled)
        value = proc_shadow_max();

    return SYSCTL_OUT(req, &value, sizeof(value));
}
SYSCTL_PROC(_vm, OID_AUTO, vm_shadow_max, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
            0, 0, &vm_shadow_max, "I", "");

SYSCTL_INT(_vm, OID_AUTO, vm_shadow_max_enabled, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_shadow_max_enabled, 0, "");

#endif /* VM_SCAN_FOR_SHADOW_CHAIN */
SYSCTL_INT(_vm, OID_AUTO, vm_debug_events, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_debug_events, 0, "");
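/*
 * Example (user space, a minimal sketch): any of the _vm sysctls registered
 * in this file can be inspected with sysctlbyname(3).  The name is "vm."
 * followed by the OID name used above, e.g. vm.vm_debug_events:
 *
 *      #include <stdio.h>
 *      #include <sys/sysctl.h>
 *
 *      int value = 0;
 *      size_t len = sizeof(value);
 *      if (sysctlbyname("vm.vm_debug_events", &value, &len, NULL, 0) == 0)
 *              printf("vm_debug_events = %d\n", value);
 */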
__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
    mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid);
/*
 * Sysctl's related to data/stack execution.  See osfmk/vm/vm_map.c
 */

#if DEVELOPMENT || DEBUG
extern int allow_stack_exec, allow_data_exec;

SYSCTL_INT(_vm, OID_AUTO, allow_stack_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_stack_exec, 0, "");
SYSCTL_INT(_vm, OID_AUTO, allow_data_exec, CTLFLAG_RW | CTLFLAG_LOCKED, &allow_data_exec, 0, "");

#if __arm64__
extern int fourk_binary_compatibility_unsafe;
extern int fourk_binary_compatibility_allow_wx;
SYSCTL_INT(_vm, OID_AUTO, fourk_binary_compatibility_unsafe, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_binary_compatibility_unsafe, 0, "");
SYSCTL_INT(_vm, OID_AUTO, fourk_binary_compatibility_allow_wx, CTLFLAG_RW | CTLFLAG_LOCKED, &fourk_binary_compatibility_allow_wx, 0, "");
#endif /* __arm64__ */
#endif /* DEVELOPMENT || DEBUG */
static const char *prot_values[] = {
    "none",
    "read-only",
    "write-only",
    "read-write",
    "execute-only",
    "read-execute",
    "write-execute",
    "read-write-execute"
};

void
log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot)
{
    printf("Data/Stack execution not permitted: %s[pid %d] at virtual address 0x%qx, protections were %s\n",
        current_proc()->p_comm, current_proc()->p_pid, vaddr,
        prot_values[prot & VM_PROT_ALL]);
}
/*
 * shared_region_unnest_logging: level of logging of unnesting events
 * 0    - no logging
 * 1    - throttled logging of unexpected unnesting events (default)
 * 2    - unthrottled logging of unexpected unnesting events
 * 3+   - unthrottled logging of all unnesting events
 */
int shared_region_unnest_logging = 1;

SYSCTL_INT(_vm, OID_AUTO, shared_region_unnest_logging, CTLFLAG_RW | CTLFLAG_LOCKED,
           &shared_region_unnest_logging, 0, "");

int vm_shared_region_unnest_log_interval = 10;
int shared_region_unnest_log_count_threshold = 5;
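/*
 * Example (user space, a minimal sketch): since the OID above is CTLFLAG_RW,
 * a privileged user can raise the unnesting log level at run time, e.g. to
 * get unthrottled logging of all unnesting events:
 *
 *      #include <sys/sysctl.h>
 *
 *      int level = 3;  // 3+ - unthrottled logging of all unnesting events
 *      sysctlbyname("vm.shared_region_unnest_logging", NULL, NULL,
 *                   &level, sizeof(level));
 *
 * (equivalent to "sysctl -w vm.shared_region_unnest_logging=3")
 */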
/*
 * Shared cache path enforcement.
 */

#ifndef CONFIG_EMBEDDED
static int scdir_enforce = 1;
static char scdir_path[] = "/var/db/dyld/";
#else /* CONFIG_EMBEDDED */
static int scdir_enforce = 0;
static char scdir_path[] = "/System/Library/Caches/com.apple.dyld/";
#endif /* CONFIG_EMBEDDED */

#ifndef SECURE_KERNEL
SYSCTL_INT(_vm, OID_AUTO, enforce_shared_cache_dir, CTLFLAG_RW | CTLFLAG_LOCKED, &scdir_enforce, 0, "");
#endif
/* These log rate throttling state variables aren't thread safe, but
 * are sufficient unto the task.
 */
static int64_t last_unnest_log_time = 0;
static int shared_region_unnest_log_count = 0;

void
log_unnest_badness(
    vm_map_t        m,
    vm_map_offset_t s,
    vm_map_offset_t e,
    boolean_t       is_nested_map,
    vm_map_offset_t lowest_unnestable_addr)
{
    struct timeval tv;

    if (shared_region_unnest_logging == 0)
        return;

    if (shared_region_unnest_logging <= 2 &&
        is_nested_map &&
        s >= lowest_unnestable_addr) {
        /*
         * Unnesting of writable map entries is fine.
         */
        return;
    }

    if (shared_region_unnest_logging <= 1) {
        microtime(&tv);
        if ((tv.tv_sec - last_unnest_log_time) <
            vm_shared_region_unnest_log_interval) {
            if (shared_region_unnest_log_count++ >
                shared_region_unnest_log_count_threshold)
                return;
        } else {
            last_unnest_log_time = tv.tv_sec;
            shared_region_unnest_log_count = 0;
        }
    }

    DTRACE_VM4(log_unnest_badness,
               vm_map_t, m,
               vm_map_offset_t, s,
               vm_map_offset_t, e,
               vm_map_offset_t, lowest_unnestable_addr);
    printf("%s[%d] triggered unnest of range 0x%qx->0x%qx of DYLD shared region in VM map %p. While not abnormal for debuggers, this increases system memory footprint until the target exits.\n", current_proc()->p_comm, current_proc()->p_pid, (uint64_t)s, (uint64_t)e, (void *) VM_KERNEL_ADDRPERM(m));
}
int
useracc(
    user_addr_t addr,
    user_size_t len,
    int         prot)
{
    vm_map_t map;

    map = current_map();
    return (vm_map_check_protection(
            map,
            vm_map_trunc_page(addr,
                              vm_map_page_mask(map)),
            vm_map_round_page(addr + len,
                              vm_map_page_mask(map)),
            prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
int
vslock(
    user_addr_t addr,
    user_size_t len)
{
    kern_return_t kret;
    vm_map_t      map;

    map = current_map();
    kret = vm_map_wire_kernel(map,
               vm_map_trunc_page(addr,
                                 vm_map_page_mask(map)),
               vm_map_round_page(addr + len,
                                 vm_map_page_mask(map)),
               VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_BSD,
               FALSE);

    switch (kret) {
    case KERN_SUCCESS:
        return (0);
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return (ENOMEM);
    case KERN_PROTECTION_FAILURE:
        return (EACCES);
    default:
        return (EINVAL);
    }
}

int
vsunlock(
    user_addr_t addr,
    user_size_t len,
    __unused int dirtied)
{
#if FIXME  /* [ */
    pmap_t          pmap;
    vm_page_t       pg;
    vm_map_offset_t vaddr;
    ppnum_t         paddr;
#endif  /* FIXME ] */
    kern_return_t   kret;
    vm_map_t        map;

    map = current_map();

#if FIXME  /* [ */
    if (dirtied) {
        pmap = get_task_pmap(current_task());
        for (vaddr = vm_map_trunc_page(addr, PAGE_MASK);
             vaddr < vm_map_round_page(addr + len, PAGE_MASK);
             vaddr += PAGE_SIZE) {
            paddr = pmap_extract(pmap, vaddr);
            pg = PHYS_TO_VM_PAGE(paddr);
            vm_page_set_modified(pg);
        }
    }
#endif  /* FIXME ] */
    kret = vm_map_unwire(map,
                 vm_map_trunc_page(addr,
                                   vm_map_page_mask(map)),
                 vm_map_round_page(addr + len,
                                   vm_map_page_mask(map)),
                 FALSE);
    switch (kret) {
    case KERN_SUCCESS:
        return (0);
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return (ENOMEM);
    case KERN_PROTECTION_FAILURE:
        return (EACCES);
    default:
        return (EINVAL);
    }
}
int
subyte(user_addr_t addr, int byte)
{
    char character;

    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int
suibyte(user_addr_t addr, int byte)
{
    char character;

    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int fubyte(user_addr_t addr)
{
    unsigned char byte;

    if (copyin(addr, (void *) &byte, sizeof(char)))
        return(-1);
    return(byte);
}

int fuibyte(user_addr_t addr)
{
    unsigned char byte;

    if (copyin(addr, (void *) &(byte), sizeof(char)))
        return(-1);
    return(byte);
}

int
suword(user_addr_t addr, long word)
{
    return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuword(user_addr_t addr)
{
    long word = 0;

    if (copyin(addr, (void *) &word, sizeof(int)))
        return(-1);
    return(word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

int
suiword(user_addr_t addr, long word)
{
    return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuiword(user_addr_t addr)
{
    long word = 0;

    if (copyin(addr, (void *) &word, sizeof(int)))
        return(-1);
    return(word);
}
/*
 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
 * fetching and setting of process-sized size_t and pointer values.
 */
int
sulong(user_addr_t addr, int64_t word)
{

    if (IS_64BIT_PROCESS(current_proc())) {
        return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);
    } else {
        return(suiword(addr, (long)word));
    }
}

int64_t
fulong(user_addr_t addr)
{
    int64_t longword;

    if (IS_64BIT_PROCESS(current_proc())) {
        if (copyin(addr, (void *)&longword, sizeof(longword)) != 0)
            return(-1);
        return(longword);
    } else {
        return((int64_t)fuiword(addr));
    }
}

int
suulong(user_addr_t addr, uint64_t uword)
{

    if (IS_64BIT_PROCESS(current_proc())) {
        return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);
    } else {
        return(suiword(addr, (uint32_t)uword));
    }
}

uint64_t
fuulong(user_addr_t addr)
{
    uint64_t ulongword;

    if (IS_64BIT_PROCESS(current_proc())) {
        if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0)
            return(-1ULL);
        return(ulongword);
    } else {
        return((uint64_t)fuiword(addr));
    }
}
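/*
 * Example (kernel, a minimal sketch): these helpers let common code store a
 * process-sized value into the current task without caring whether that task
 * is 32- or 64-bit.  "uaddr" is a hypothetical user address, e.g. taken from
 * a syscall argument:
 *
 *      user_addr_t uaddr = ...;
 *      if (sulong(uaddr, 0x1234) != 0)
 *              return EFAULT;          // copyout failed
 *      int64_t value = fulong(uaddr);  // -1 on fault (ambiguous with a
 *                                      // stored -1, as with fuword())
 */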
int
swapon(__unused proc_t procp, __unused struct swapon_args *uap, __unused int *retval)
{
    return(ENOTSUP);
}
/*
 * pid_for_task
 *
 * Find the BSD process ID for the Mach task associated with the given Mach port
 * name
 *
 * Parameters:  args        User argument descriptor (see below)
 *
 * Indirect parameters: args->t     Mach port name
 *                      args->pid   Process ID (returned value; see below)
 *
 * Returns:     KERN_SUCCESS        Success
 *              KERN_FAILURE        Not success
 *
 * Implicit returns: args->pid      Process ID
 *
 */
kern_return_t
pid_for_task(
    struct pid_for_task_args *args)
{
    mach_port_name_t t = args->t;
    user_addr_t      pid_addr = args->pid;
    proc_t           p;
    task_t           t1;
    int              pid = -1;
    kern_return_t    err = KERN_SUCCESS;

    AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
    AUDIT_ARG(mach_port1, t);

    t1 = port_name_to_task_inspect(t);

    if (t1 == TASK_NULL) {
        err = KERN_FAILURE;
        goto pftout;
    } else {
        p = get_bsdtask_info(t1);
        if (p) {
            pid = proc_pid(p);
            err = KERN_SUCCESS;
        } else if (is_corpsetask(t1)) {
            pid = task_pid(t1);
            err = KERN_SUCCESS;
        } else {
            err = KERN_FAILURE;
        }
    }
    task_deallocate(t1);
pftout:
    AUDIT_ARG(pid, pid);
    (void) copyout((char *) &pid, pid_addr, sizeof(int));
    AUDIT_MACH_SYSCALL_EXIT(err);
    return(err);
}
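/*
 * Example (user space, a minimal sketch): this Mach trap backs the
 * pid_for_task() routine exposed by libsyscall.  Mapping one's own task
 * port back to a PID:
 *
 *      #include <assert.h>
 *      #include <mach/mach.h>
 *      #include <mach/mach_traps.h>
 *      #include <unistd.h>
 *
 *      int pid = -1;
 *      if (pid_for_task(mach_task_self(), &pid) == KERN_SUCCESS)
 *              assert(pid == getpid());
 */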
/*
 * tfp_policy = KERN_TFP_POLICY_DENY; Deny Mode: None allowed except for self
 * tfp_policy = KERN_TFP_POLICY_DEFAULT; default mode: all posix checks and upcall via task port for authentication
 */
static int tfp_policy = KERN_TFP_POLICY_DEFAULT;

/*
 *  Routine:    task_for_pid_posix_check
 *  Purpose:
 *      Verify that the current process should be allowed to
 *      get the target process's task port. This is only
 *      permitted if:
 *      - The current process is root
 *      OR all of the following are true:
 *      - The target process's real, effective, and saved uids
 *        are the same as the current proc's euid,
 *      - The target process's group set is a subset of the
 *        calling process's group set, and
 *      - The target process hasn't switched credentials.
 *
 *  Returns:    TRUE: permitted
 *              FALSE: denied
 */
static int
task_for_pid_posix_check(proc_t target)
{
    kauth_cred_t targetcred, mycred;
    uid_t myuid;
    int allowed;

    /* No task_for_pid on bad targets */
    if (target->p_stat == SZOMB) {
        return FALSE;
    }

    mycred = kauth_cred_get();
    myuid = kauth_cred_getuid(mycred);

    /* If we're running as root, the check passes */
    if (kauth_cred_issuser(mycred))
        return TRUE;

    /* We're allowed to get our own task port */
    if (target == current_proc())
        return TRUE;

    /*
     * Under DENY, only root can get another proc's task port,
     * so no more checks are needed.
     */
    if (tfp_policy == KERN_TFP_POLICY_DENY) {
        return FALSE;
    }

    targetcred = kauth_cred_proc_ref(target);
    allowed = TRUE;

    /* Do target's ruid, euid, and saved uid match my euid? */
    if ((kauth_cred_getuid(targetcred) != myuid) ||
        (kauth_cred_getruid(targetcred) != myuid) ||
        (kauth_cred_getsvuid(targetcred) != myuid)) {
        allowed = FALSE;
        goto out;
    }

    /* Are target's groups a subset of my groups? */
    if (kauth_cred_gid_subset(targetcred, mycred, &allowed) ||
        allowed == 0) {
        allowed = FALSE;
        goto out;
    }

    /* Has target switched credentials? */
    if (target->p_flag & P_SUGID) {
        allowed = FALSE;
        goto out;
    }

out:
    kauth_cred_unref(&targetcred);
    return allowed;
}
/*
 *  __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__
 *
 *  Description: Waits for the user space daemon to respond to the request
 *               we made. Function declared non inline to be visible in
 *               stackshots and spindumps as well as debugging.
 */
__attribute__((noinline)) int __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(
    mach_port_t task_access_port, int32_t calling_pid, uint32_t calling_gid, int32_t target_pid)
{
    return check_task_access(task_access_port, calling_pid, calling_gid, target_pid);
}
/*
 *  Routine:    task_for_pid
 *  Purpose:
 *      Get the task port for another "process", named by its
 *      process ID on the same host as "target_task".
 *
 *      Only permitted to privileged processes, or processes
 *      with the same user ID.
 *
 *      Note: if pid == 0, an error is returned no matter who is calling.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
kern_return_t
task_for_pid(
    struct task_for_pid_args *args)
{
    mach_port_name_t target_tport = args->target_tport;
    int              pid = args->pid;
    user_addr_t      task_addr = args->t;
    proc_t           p = PROC_NULL;
    task_t           t1 = TASK_NULL;
    mach_port_name_t tret = MACH_PORT_NULL;
    ipc_port_t       tfpport;
    void             *sright;
    int              error = 0;

    AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
    AUDIT_ARG(pid, pid);
    AUDIT_ARG(mach_port1, target_tport);

    /* Always check if pid == 0 */
    if (pid == 0) {
        (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);
    }

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);
    }

    p = proc_find(pid);
    if (p == PROC_NULL) {
        error = KERN_FAILURE;
        goto tfpout;
    }

#if CONFIG_AUDIT
    AUDIT_ARG(process, p);
#endif

    if (!(task_for_pid_posix_check(p))) {
        error = KERN_FAILURE;
        goto tfpout;
    }

    if (p->task != TASK_NULL) {
        /* If we aren't root and target's task access port is set... */
        if (!kauth_cred_issuser(kauth_cred_get()) &&
            p != current_proc() &&
            (task_get_task_access_port(p->task, &tfpport) == 0) &&
            (tfpport != IPC_PORT_NULL)) {

            if (tfpport == IPC_PORT_DEAD) {
                error = KERN_PROTECTION_FAILURE;
                goto tfpout;
            }

            /* Call up to the task access server */
            error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);

            if (error != MACH_MSG_SUCCESS) {
                if (error == MACH_RCV_INTERRUPTED)
                    error = KERN_ABORTED;
                else
                    error = KERN_FAILURE;
                goto tfpout;
            }
        }
#if CONFIG_MACF
        error = mac_proc_check_get_task(kauth_cred_get(), p);
        if (error) {
            error = KERN_FAILURE;
            goto tfpout;
        }
#endif

        /* Grant task port access */
        task_reference(p->task);
        extmod_statistics_incr_task_for_pid(p->task);

        sright = (void *) convert_task_to_port(p->task);

        /* Check if the task has been corpsified */
        if (is_corpsetask(p->task)) {
            ipc_port_release_send(sright);
            error = KERN_FAILURE;
            goto tfpout;
        }

        tret = ipc_port_copyout_send(
                sright,
                get_task_ipcspace(current_task()));
    }
    error = KERN_SUCCESS;

tfpout:
    task_deallocate(t1);
    AUDIT_ARG(mach_port2, tret);
    (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
    if (p != PROC_NULL)
        proc_rele(p);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
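/*
 * Example (user space, a minimal sketch): task_for_pid() is reached through
 * the trap of the same name in libsyscall.  Subject to all of the checks
 * above (root/same-uid posix check, tfp policy, taskgated upcall, MACF):
 *
 *      #include <mach/mach.h>
 *      #include <mach/mach_traps.h>
 *
 *      mach_port_t task = MACH_PORT_NULL;
 *      kern_return_t kr = task_for_pid(mach_task_self(), target_pid, &task);
 *      // kr == KERN_SUCCESS grants a send right to the target's task port
 *
 * where target_pid is a hypothetical PID the caller is permitted to access.
 */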
/*
 *  Routine:    task_name_for_pid
 *  Purpose:
 *      Get the task name port for another "process", named by its
 *      process ID on the same host as "target_task".
 *
 *      Only permitted to privileged processes, or processes
 *      with the same user ID.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */

kern_return_t
task_name_for_pid(
    struct task_name_for_pid_args *args)
{
    mach_port_name_t target_tport = args->target_tport;
    int              pid = args->pid;
    user_addr_t      task_addr = args->t;
    proc_t           p = PROC_NULL;
    task_t           t1;
    mach_port_name_t tret;
    void             *sright;
    int              error = 0, refheld = 0;
    kauth_cred_t     target_cred;

    AUDIT_MACH_SYSCALL_ENTER(AUE_TASKNAMEFORPID);
    AUDIT_ARG(pid, pid);
    AUDIT_ARG(mach_port1, target_tport);

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);
    }

    p = proc_find(pid);
    if (p != PROC_NULL) {
        AUDIT_ARG(process, p);
        target_cred = kauth_cred_proc_ref(p);
        refheld = 1;

        if ((p->p_stat != SZOMB)
            && ((current_proc() == p)
                || kauth_cred_issuser(kauth_cred_get())
                || ((kauth_cred_getuid(target_cred) == kauth_cred_getuid(kauth_cred_get())) &&
                    ((kauth_cred_getruid(target_cred) == kauth_getruid()))))) {

            if (p->task != TASK_NULL) {
                task_reference(p->task);
#if CONFIG_MACF
                error = mac_proc_check_get_task_name(kauth_cred_get(), p);
                if (error) {
                    task_deallocate(p->task);
                    goto noperm;
                }
#endif
                sright = (void *)convert_task_name_to_port(p->task);
                tret = ipc_port_copyout_send(sright,
                        get_task_ipcspace(current_task()));
            } else
                tret = MACH_PORT_NULL;

            AUDIT_ARG(mach_port2, tret);
            (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
            task_deallocate(t1);
            error = KERN_SUCCESS;
            goto tnfpout;
        }
    }

#if CONFIG_MACF
noperm:
#endif
    task_deallocate(t1);
    tret = MACH_PORT_NULL;
    (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
    error = KERN_FAILURE;
tnfpout:
    if (refheld != 0)
        kauth_cred_unref(&target_cred);
    if (p != PROC_NULL)
        proc_rele(p);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
kern_return_t
pid_suspend(struct proc *p __unused, struct pid_suspend_args *args, int *ret)
{
    task_t target = NULL;
    proc_t targetproc = PROC_NULL;
    int    pid = args->pid;
    int    error = 0;

#if CONFIG_MACF
    error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_SUSPEND);
    if (error) {
        error = EPERM;
        goto out;
    }
#endif

    if (pid == 0) {
        error = EPERM;
        goto out;
    }

    targetproc = proc_find(pid);
    if (targetproc == PROC_NULL) {
        error = ESRCH;
        goto out;
    }

    if (!task_for_pid_posix_check(targetproc)) {
        error = EPERM;
        goto out;
    }

    target = targetproc->task;
#ifndef CONFIG_EMBEDDED
    if (target != TASK_NULL) {
        mach_port_t tfpport;

        /* If we aren't root and target's task access port is set... */
        if (!kauth_cred_issuser(kauth_cred_get()) &&
            targetproc != current_proc() &&
            (task_get_task_access_port(target, &tfpport) == 0) &&
            (tfpport != IPC_PORT_NULL)) {

            if (tfpport == IPC_PORT_DEAD) {
                error = EACCES;
                goto out;
            }

            /* Call up to the task access server */
            error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);

            if (error != MACH_MSG_SUCCESS) {
                if (error == MACH_RCV_INTERRUPTED)
                    error = EINTR;
                else
                    error = EPERM;
                goto out;
            }
        }
    }
#endif

    task_reference(target);
    error = task_pidsuspend(target);
    if (error) {
        if (error == KERN_INVALID_ARGUMENT) {
            error = EINVAL;
        } else {
            error = EPERM;
        }
    }
#if CONFIG_MEMORYSTATUS
    else {
        memorystatus_on_suspend(targetproc);
    }
#endif

    task_deallocate(target);

out:
    if (targetproc != PROC_NULL)
        proc_rele(targetproc);
    *ret = error;
    return error;
}
kern_return_t
pid_resume(struct proc *p __unused, struct pid_resume_args *args, int *ret)
{
    task_t target = NULL;
    proc_t targetproc = PROC_NULL;
    int    pid = args->pid;
    int    error = 0;

#if CONFIG_MACF
    error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_RESUME);
    if (error) {
        error = EPERM;
        goto out;
    }
#endif

    if (pid == 0) {
        error = EPERM;
        goto out;
    }

    targetproc = proc_find(pid);
    if (targetproc == PROC_NULL) {
        error = ESRCH;
        goto out;
    }

    if (!task_for_pid_posix_check(targetproc)) {
        error = EPERM;
        goto out;
    }

    target = targetproc->task;
#ifndef CONFIG_EMBEDDED
    if (target != TASK_NULL) {
        mach_port_t tfpport;

        /* If we aren't root and target's task access port is set... */
        if (!kauth_cred_issuser(kauth_cred_get()) &&
            targetproc != current_proc() &&
            (task_get_task_access_port(target, &tfpport) == 0) &&
            (tfpport != IPC_PORT_NULL)) {

            if (tfpport == IPC_PORT_DEAD) {
                error = EACCES;
                goto out;
            }

            /* Call up to the task access server */
            error = __KERNEL_WAITING_ON_TASKGATED_CHECK_ACCESS_UPCALL__(tfpport, proc_selfpid(), kauth_getgid(), pid);

            if (error != MACH_MSG_SUCCESS) {
                if (error == MACH_RCV_INTERRUPTED)
                    error = EINTR;
                else
                    error = EPERM;
                goto out;
            }
        }
    }
#endif

#if CONFIG_EMBEDDED
#if SOCKETS
    resume_proc_sockets(targetproc);
#endif /* SOCKETS */
#endif /* CONFIG_EMBEDDED */

    task_reference(target);

#if CONFIG_MEMORYSTATUS
    memorystatus_on_resume(targetproc);
#endif

    error = task_pidresume(target);
    if (error) {
        if (error == KERN_INVALID_ARGUMENT) {
            error = EINVAL;
        } else {
            if (error == KERN_MEMORY_ERROR) {
                psignal(targetproc, SIGKILL);
                error = EIO;
            } else
                error = EPERM;
        }
    }

    task_deallocate(target);

out:
    if (targetproc != PROC_NULL)
        proc_rele(targetproc);

    *ret = error;
    return error;
}
#if CONFIG_EMBEDDED
/*
 * Freeze the specified process (provided in args->pid), or find and freeze a PID.
 * When a process is specified, this call is blocking, otherwise we wake up the
 * freezer thread and do not block on a process being frozen.
 */
kern_return_t
pid_hibernate(struct proc *p __unused, struct pid_hibernate_args *args, int *ret)
{
    int    error = 0;
    proc_t targetproc = PROC_NULL;
    int    pid = args->pid;

#ifndef CONFIG_FREEZE
    #pragma unused(pid)
#else

#if CONFIG_MACF
    error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_HIBERNATE);
    if (error) {
        error = EPERM;
        goto out;
    }
#endif

    /*
     * If a pid has been provided, we obtain the process handle and call task_for_pid_posix_check().
     */

    if (pid >= 0) {
        targetproc = proc_find(pid);

        if (targetproc == PROC_NULL) {
            error = ESRCH;
            goto out;
        }

        if (!task_for_pid_posix_check(targetproc)) {
            error = EPERM;
            goto out;
        }
    }

    if (pid == -2) {
        vm_pageout_anonymous_pages();
    } else if (pid == -1) {
        memorystatus_on_inactivity(targetproc);
    } else {
        error = memorystatus_freeze_process_sync(targetproc);
    }

out:

#endif /* CONFIG_FREEZE */

    if (targetproc != PROC_NULL)
        proc_rele(targetproc);
    *ret = error;
    return error;
}
#endif /* CONFIG_EMBEDDED */
#if SOCKETS
int
shutdown_sockets_callout(proc_t p, void *arg)
{
    struct pid_shutdown_sockets_args *args = arg;
    int pid = args->pid;
    int level = args->level;
    struct filedesc *fdp;
    struct fileproc *fp;
    int i;

    proc_fdlock(p);
    fdp = p->p_fd;
    for (i = 0; i < fdp->fd_nfiles; i++) {
        fp = fdp->fd_ofiles[i];
        if (fp == NULL || (fdp->fd_ofileflags[i] & UF_RESERVED) != 0) {
            continue;
        }
        if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_SOCKET) {
            struct socket *so = (struct socket *)fp->f_fglob->fg_data;
            if (p->p_pid == pid || so->last_pid == pid ||
                ((so->so_flags & SOF_DELEGATED) && so->e_pid == pid)) {
                /* Call networking stack with socket and level */
                (void) socket_defunct(p, so, level);
            }
        }
#if NECP
        else if (FILEGLOB_DTYPE(fp->f_fglob) == DTYPE_NETPOLICY &&
            p->p_pid == pid) {
            necp_defunct_client(p, fp);
        }
#endif /* NECP */
    }
    proc_fdunlock(p);

    return (PROC_RETURNED);
}

int
pid_shutdown_sockets(struct proc *p __unused, struct pid_shutdown_sockets_args *args, int *ret)
{
    int    error = 0;
    proc_t targetproc = PROC_NULL;
    int    pid = args->pid;
    int    level = args->level;

    if (level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_SVC &&
        level != SHUTDOWN_SOCKET_LEVEL_DISCONNECT_ALL) {
        error = EINVAL;
        goto out;
    }

#if CONFIG_MACF
    error = mac_proc_check_suspend_resume(p, MAC_PROC_CHECK_SHUTDOWN_SOCKETS);
    if (error) {
        error = EPERM;
        goto out;
    }
#endif

    targetproc = proc_find(pid);
    if (targetproc == PROC_NULL) {
        error = ESRCH;
        goto out;
    }

    if (!task_for_pid_posix_check(targetproc)) {
        error = EPERM;
        goto out;
    }

    proc_iterate(PROC_ALLPROCLIST | PROC_NOWAITTRANS, shutdown_sockets_callout, args, NULL, NULL);

out:
    if (targetproc != PROC_NULL)
        proc_rele(targetproc);
    *ret = error;
    return error;
}

#endif /* SOCKETS */
static int
sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1,
    __unused int arg2, struct sysctl_req *req)
{
    int error = 0;
    int new_value;

    error = SYSCTL_OUT(req, arg1, sizeof(int));
    if (error || req->newptr == USER_ADDR_NULL)
        return(error);

    if (!kauth_cred_issuser(kauth_cred_get()))
        return(EPERM);

    if ((error = SYSCTL_IN(req, &new_value, sizeof(int)))) {
        goto out;
    }
    if ((new_value == KERN_TFP_POLICY_DENY)
        || (new_value == KERN_TFP_POLICY_DEFAULT))
        tfp_policy = new_value;
    else
        error = EINVAL;
out:
    return(error);
}

#if defined(SECURE_KERNEL)
static int kern_secure_kernel = 1;
#else
static int kern_secure_kernel = 0;
#endif

SYSCTL_INT(_kern, OID_AUTO, secure_kernel, CTLFLAG_RD | CTLFLAG_LOCKED, &kern_secure_kernel, 0, "");

SYSCTL_NODE(_kern, KERN_TFP, tfp, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "tfp");
SYSCTL_PROC(_kern_tfp, KERN_TFP_POLICY, policy, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &tfp_policy, sizeof(uint32_t), &sysctl_settfp_policy, "I", "policy");
SYSCTL_INT(_vm, OID_AUTO, shared_region_trace_level, CTLFLAG_RW | CTLFLAG_LOCKED,
           &shared_region_trace_level, 0, "");
SYSCTL_INT(_vm, OID_AUTO, shared_region_version, CTLFLAG_RD | CTLFLAG_LOCKED,
           &shared_region_version, 0, "");
SYSCTL_INT(_vm, OID_AUTO, shared_region_persistence, CTLFLAG_RW | CTLFLAG_LOCKED,
           &shared_region_persistence, 0, "");
/*
 * shared_region_check_np:
 *
 * This system call is intended for dyld.
 *
 * dyld calls this when any process starts to see if the process's shared
 * region is already set up and ready to use.
 * This call returns the base address of the first mapping in the
 * process's shared region.
 * dyld will then check what's mapped at that address.
 *
 * If the shared region is empty, dyld will then attempt to map the shared
 * cache file in the shared region via the shared_region_map_np() system call.
 *
 * If something's already mapped in the shared region, dyld will check if it
 * matches the shared cache it would like to use for that process.
 * If it matches, everything's ready and the process can proceed and use the
 * shared region.
 * If it doesn't match, dyld will unmap the shared region and map the shared
 * cache into the process's address space via mmap().
 *
 * ERROR VALUES
 * EINVAL   no shared region
 * ENOMEM   shared region is empty
 * EFAULT   bad address for "start_address"
 */
int
shared_region_check_np(
    __unused struct proc                *p,
    struct shared_region_check_np_args  *uap,
    __unused int                        *retvalp)
{
    vm_shared_region_t shared_region;
    mach_vm_offset_t   start_address = 0;
    int                error;
    kern_return_t      kr;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: %p [%d(%s)] -> check_np(0x%llx)\n",
         (void *)VM_KERNEL_ADDRPERM(current_thread()),
         p->p_pid, p->p_comm,
         (uint64_t)uap->start_address));

    /* retrieve the current tasks's shared region */
    shared_region = vm_shared_region_get(current_task());
    if (shared_region != NULL) {
        /* retrieve address of its first mapping... */
        kr = vm_shared_region_start_address(shared_region,
                                            &start_address);
        if (kr != KERN_SUCCESS) {
            error = ENOMEM;
        } else {
            /* ... and give it to the caller */
            error = copyout(&start_address,
                            (user_addr_t) uap->start_address,
                            sizeof (start_address));
            if (error) {
                SHARED_REGION_TRACE_ERROR(
                    ("shared_region: %p [%d(%s)] "
                     "check_np(0x%llx) "
                     "copyout(0x%llx) error %d\n",
                     (void *)VM_KERNEL_ADDRPERM(current_thread()),
                     p->p_pid, p->p_comm,
                     (uint64_t)uap->start_address, (uint64_t)start_address,
                     error));
            }
        }
        vm_shared_region_deallocate(shared_region);
    } else {
        /* no shared region ! */
        error = EINVAL;
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: %p [%d(%s)] check_np(0x%llx) <- 0x%llx %d\n",
         (void *)VM_KERNEL_ADDRPERM(current_thread()),
         p->p_pid, p->p_comm,
         (uint64_t)uap->start_address, (uint64_t)start_address, error));

    return error;
}
int
shared_region_copyin_mappings(
    struct proc                     *p,
    user_addr_t                     user_mappings,
    unsigned int                    mappings_count,
    struct shared_file_mapping_np   *mappings)
{
    int        error = 0;
    vm_size_t  mappings_size = 0;

    /* get the list of mappings the caller wants us to establish */
    mappings_size = (vm_size_t) (mappings_count * sizeof (mappings[0]));
    error = copyin(user_mappings,
                   mappings,
                   mappings_size);
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(): "
             "copyin(0x%llx, %d) failed (error=%d)\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (uint64_t)user_mappings, mappings_count, error));
    }
    return error;
}
/*
 * shared_region_map_np()
 *
 * This system call is intended for dyld.
 *
 * dyld uses this to map a shared cache file into a shared region.
 * This is usually done only the first time a shared cache is needed.
 * Subsequent processes will just use the populated shared region without
 * requiring any further setup.
 */
int
_shared_region_map_and_slide(
    struct proc                     *p,
    int                             fd,
    uint32_t                        mappings_count,
    struct shared_file_mapping_np   *mappings,
    uint32_t                        slide,
    user_addr_t                     slide_start,
    user_addr_t                     slide_size)
{
    int                         error;
    kern_return_t               kr;
    struct fileproc             *fp;
    struct vnode                *vp, *root_vp, *scdir_vp;
    struct vnode_attr           va;
    off_t                       fs;
    memory_object_size_t        file_size;
#if CONFIG_MACF
    vm_prot_t                   maxprot = VM_PROT_ALL;
#endif
    memory_object_control_t     file_control;
    struct vm_shared_region     *shared_region;
    uint32_t                    i;

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: %p [%d(%s)] -> map\n",
         (void *)VM_KERNEL_ADDRPERM(current_thread()),
         p->p_pid, p->p_comm));

    shared_region = NULL;
    fp = NULL;
    vp = NULL;
    scdir_vp = NULL;

    /* get file structure from file descriptor */
    error = fp_lookup(p, fd, &fp, 0);
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map: "
             "fd=%d lookup failed (error=%d)\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm, fd, error));
        goto done;
    }

    /* make sure we're attempting to map a vnode */
    if (FILEGLOB_DTYPE(fp->f_fglob) != DTYPE_VNODE) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map: "
             "fd=%d not a vnode (type=%d)\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             fd, FILEGLOB_DTYPE(fp->f_fglob)));
        error = EINVAL;
        goto done;
    }

    /* we need at least read permission on the file */
    if (! (fp->f_fglob->fg_flag & FREAD)) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map: "
             "fd=%d not readable\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm, fd));
        error = EPERM;
        goto done;
    }

    /* get vnode from file structure */
    error = vnode_getwithref((vnode_t) fp->f_fglob->fg_data);
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map: "
             "fd=%d getwithref failed (error=%d)\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm, fd, error));
        goto done;
    }
    vp = (struct vnode *) fp->f_fglob->fg_data;

    /* make sure the vnode is a regular file */
    if (vp->v_type != VREG) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "not a file (type=%d)\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (void *)VM_KERNEL_ADDRPERM(vp),
             vp->v_name, vp->v_type));
        error = EINVAL;
        goto done;
    }

#if CONFIG_MACF
    /* pass in 0 for the offset argument because AMFI does not need the offset
       of the shared cache */
    error = mac_file_check_mmap(vfs_context_ucred(vfs_context_current()),
            fp->f_fglob, VM_PROT_ALL, MAP_FILE, 0, &maxprot);
    if (error) {
        goto done;
    }
#endif /* MAC */

    /* make sure vnode is on the process's root volume */
    root_vp = p->p_fd->fd_rdir;
    if (root_vp == NULL) {
        root_vp = rootvnode;
    } else {
        /*
         * Chroot-ed processes can't use the shared_region.
         */
        error = EINVAL;
        goto done;
    }

    if (vp->v_mount != root_vp->v_mount) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "not on process's root volume\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
        error = EPERM;
        goto done;
    }

    /* make sure vnode is owned by "root" */
    VATTR_INIT(&va);
    VATTR_WANTED(&va, va_uid);
    error = vnode_getattr(vp, &va, vfs_context_current());
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "vnode_getattr(%p) failed (error=%d)\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
             (void *)VM_KERNEL_ADDRPERM(vp), error));
        goto done;
    }
    if (va.va_uid != 0) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "owned by uid=%d instead of 0\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (void *)VM_KERNEL_ADDRPERM(vp),
             vp->v_name, va.va_uid));
        error = EPERM;
        goto done;
    }

    if (scdir_enforce) {
        /* get vnode for scdir_path */
        error = vnode_lookup(scdir_path, 0, &scdir_vp, vfs_context_current());
        if (error) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                 "vnode_lookup(%s) failed (error=%d)\n",
                 (void *)VM_KERNEL_ADDRPERM(current_thread()),
                 p->p_pid, p->p_comm,
                 (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
                 scdir_path, error));
            goto done;
        }

        /* ensure parent is scdir_vp */
        if (vnode_parent(vp) != scdir_vp) {
            SHARED_REGION_TRACE_ERROR(
                ("shared_region: %p [%d(%s)] map(%p:'%s'): "
                 "shared cache file not in %s\n",
                 (void *)VM_KERNEL_ADDRPERM(current_thread()),
                 p->p_pid, p->p_comm,
                 (void *)VM_KERNEL_ADDRPERM(vp),
                 vp->v_name, scdir_path));
            error = EPERM;
            goto done;
        }
    }

    /* get vnode size */
    error = vnode_size(vp, &fs, vfs_context_current());
    if (error) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "vnode_size(%p) failed (error=%d)\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
             (void *)VM_KERNEL_ADDRPERM(vp), error));
        goto done;
    }
    file_size = fs;

    /* get the file's memory object handle */
    file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
    if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "no memory object\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
        error = EINVAL;
        goto done;
    }

    /* check that the mappings are properly covered by code signatures */
    if (!cs_enforcement(NULL)) {
        /* code signing is not enforced: no need to check */
    } else for (i = 0; i < mappings_count; i++) {
        if (mappings[i].sfm_init_prot & VM_PROT_ZF) {
            /* zero-filled mapping: not backed by the file */
            continue;
        }
        if (ubc_cs_is_range_codesigned(vp,
                                       mappings[i].sfm_file_offset,
                                       mappings[i].sfm_size)) {
            /* this mapping is fully covered by code signatures */
            continue;
        }
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "mapping #%d/%d [0x%llx:0x%llx:0x%llx:0x%x:0x%x] "
             "is not code-signed\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name,
             i, mappings_count,
             mappings[i].sfm_address,
             mappings[i].sfm_size,
             mappings[i].sfm_file_offset,
             mappings[i].sfm_max_prot,
             mappings[i].sfm_init_prot));
        error = EINVAL;
        goto done;
    }

    /* get the process's shared region (setup in vm_map_exec()) */
    shared_region = vm_shared_region_get(current_task());
    if (shared_region == NULL) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "no shared region\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name));
        error = EINVAL;
        goto done;
    }

    /* map the file into that shared region's submap */
    kr = vm_shared_region_map_file(shared_region,
                                   mappings_count,
                                   mappings,
                                   file_control,
                                   file_size,
                                   (void *) p->p_fd->fd_rdir,
                                   slide,
                                   slide_start,
                                   slide_size);
    if (kr != KERN_SUCCESS) {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(%p:'%s'): "
             "vm_shared_region_map_file() failed kr=0x%x\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             (void *)VM_KERNEL_ADDRPERM(vp), vp->v_name, kr));
        switch (kr) {
        case KERN_INVALID_ADDRESS:
            error = EFAULT;
            break;
        case KERN_PROTECTION_FAILURE:
            error = EPERM;
            break;
        case KERN_NO_SPACE:
            error = ENOMEM;
            break;
        case KERN_FAILURE:
        case KERN_INVALID_ARGUMENT:
        default:
            error = EINVAL;
            break;
        }
        goto done;
    }

    error = 0;

    vnode_lock_spin(vp);

    vp->v_flag |= VSHARED_DYLD;

    vnode_unlock(vp);

    /* update the vnode's access time */
    if (! (vnode_vfsvisflags(vp) & MNT_NOATIME)) {
        VATTR_INIT(&va);
        nanotime(&va.va_access_time);
        VATTR_SET_ACTIVE(&va, va_access_time);
        vnode_setattr(vp, &va, vfs_context_current());
    }

    if (p->p_flag & P_NOSHLIB) {
        /* signal that this process is now using split libraries */
        OSBitAndAtomic(~((uint32_t)P_NOSHLIB), &p->p_flag);
    }

done:
    if (vp != NULL) {
        /*
         * release the vnode...
         * ubc_map() still holds it for us in the non-error case
         */
        (void) vnode_put(vp);
        vp = NULL;
    }
    if (fp != NULL) {
        /* release the file descriptor */
        fp_drop(p, fd, fp, 0);
        fp = NULL;
    }
    if (scdir_vp != NULL) {
        (void)vnode_put(scdir_vp);
        scdir_vp = NULL;
    }

    if (shared_region != NULL) {
        vm_shared_region_deallocate(shared_region);
    }

    SHARED_REGION_TRACE_DEBUG(
        ("shared_region: %p [%d(%s)] <- map\n",
         (void *)VM_KERNEL_ADDRPERM(current_thread()),
         p->p_pid, p->p_comm));

    return error;
}
int
shared_region_map_and_slide_np(
    struct proc                                 *p,
    struct shared_region_map_and_slide_np_args  *uap,
    __unused int                                *retvalp)
{
    struct shared_file_mapping_np   *mappings;
    unsigned int                    mappings_count = uap->count;
    kern_return_t                   kr = KERN_SUCCESS;
    uint32_t                        slide = uap->slide;

#define SFM_MAX_STACK   8
    struct shared_file_mapping_np   stack_mappings[SFM_MAX_STACK];

    /* Is the process chrooted?? */
    if (p->p_fd->fd_rdir != NULL) {
        kr = EINVAL;
        goto done;
    }

    if ((kr = vm_shared_region_sliding_valid(slide)) != KERN_SUCCESS) {
        if (kr == KERN_INVALID_ARGUMENT) {
            /*
             * This will happen if we request sliding again
             * with the same slide value that was used earlier
             * for the very first sliding.
             */
            kr = KERN_SUCCESS;
        }
        goto done;
    }

    if (mappings_count == 0) {
        SHARED_REGION_TRACE_INFO(
            ("shared_region: %p [%d(%s)] map(): "
             "no mappings\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm));
        kr = 0; /* no mappings: we're done ! */
        goto done;
    } else if (mappings_count <= SFM_MAX_STACK) {
        mappings = &stack_mappings[0];
    } else {
        SHARED_REGION_TRACE_ERROR(
            ("shared_region: %p [%d(%s)] map(): "
             "too many mappings (%d)\n",
             (void *)VM_KERNEL_ADDRPERM(current_thread()),
             p->p_pid, p->p_comm,
             mappings_count));
        kr = KERN_FAILURE;
        goto done;
    }

    if ( (kr = shared_region_copyin_mappings(p, uap->mappings, uap->count, mappings))) {
        goto done;
    }

    kr = _shared_region_map_and_slide(p, uap->fd, mappings_count, mappings,
                                      slide,
                                      uap->slide_start, uap->slide_size);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

done:
    return kr;
}
/* sysctl overflow room */

SYSCTL_INT(_vm, OID_AUTO, pagesize, CTLFLAG_RD | CTLFLAG_LOCKED,
           (int *) &page_size, 0, "vm page size");
/* vm_page_free_target is provided as a makeshift solution for applications that want to
   allocate buffer space, possibly purgeable memory, but not cause inactive pages to be
   reclaimed. It allows the app to calculate how much memory is free outside the free target. */
extern unsigned int vm_page_free_target;
SYSCTL_INT(_vm, OID_AUTO, vm_page_free_target, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_free_target, 0, "Pageout daemon free target");
extern unsigned int vm_memory_pressure;
SYSCTL_INT(_vm, OID_AUTO, memory_pressure, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_memory_pressure, 0, "Memory pressure indicator");
static int
vm_ctl_page_free_wanted SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    unsigned int page_free_wanted;

    page_free_wanted = mach_vm_ctl_page_free_wanted();
    return SYSCTL_OUT(req, &page_free_wanted, sizeof (page_free_wanted));
}
SYSCTL_PROC(_vm, OID_AUTO, page_free_wanted,
            CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
            0, 0, vm_ctl_page_free_wanted, "I", "");
extern unsigned int vm_page_purgeable_count;
SYSCTL_INT(_vm, OID_AUTO, page_purgeable_count, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_purgeable_count, 0, "Purgeable page count");

extern unsigned int vm_page_purgeable_wired_count;
SYSCTL_INT(_vm, OID_AUTO, page_purgeable_wired_count, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_purgeable_wired_count, 0, "Wired purgeable page count");

extern unsigned int vm_pageout_purged_objects;
SYSCTL_INT(_vm, OID_AUTO, pageout_purged_objects, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_pageout_purged_objects, 0, "System purged object count");

extern int madvise_free_debug;
SYSCTL_INT(_vm, OID_AUTO, madvise_free_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
           &madvise_free_debug, 0, "zero-fill on madvise(MADV_FREE*)");
SYSCTL_INT(_vm, OID_AUTO, page_reusable_count, CTLFLAG_RD | CTLFLAG_LOCKED,
           &vm_page_stats_reusable.reusable_count, 0, "Reusable page count");
SYSCTL_QUAD(_vm, OID_AUTO, reusable_success, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.reusable_pages_success, "");
SYSCTL_QUAD(_vm, OID_AUTO, reusable_failure, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.reusable_pages_failure, "");
SYSCTL_QUAD(_vm, OID_AUTO, reusable_pages_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.reusable_pages_shared, "");
SYSCTL_QUAD(_vm, OID_AUTO, all_reusable_calls, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.all_reusable_calls, "");
SYSCTL_QUAD(_vm, OID_AUTO, partial_reusable_calls, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.partial_reusable_calls, "");
SYSCTL_QUAD(_vm, OID_AUTO, reuse_success, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.reuse_pages_success, "");
SYSCTL_QUAD(_vm, OID_AUTO, reuse_failure, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.reuse_pages_failure, "");
SYSCTL_QUAD(_vm, OID_AUTO, all_reuse_calls, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.all_reuse_calls, "");
SYSCTL_QUAD(_vm, OID_AUTO, partial_reuse_calls, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.partial_reuse_calls, "");
SYSCTL_QUAD(_vm, OID_AUTO, can_reuse_success, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.can_reuse_success, "");
SYSCTL_QUAD(_vm, OID_AUTO, can_reuse_failure, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.can_reuse_failure, "");
SYSCTL_QUAD(_vm, OID_AUTO, reusable_reclaimed, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.reusable_reclaimed, "");
SYSCTL_QUAD(_vm, OID_AUTO, reusable_nonwritable, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.reusable_nonwritable, "");
SYSCTL_QUAD(_vm, OID_AUTO, reusable_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.reusable_shared, "");
SYSCTL_QUAD(_vm, OID_AUTO, free_shared, CTLFLAG_RD | CTLFLAG_LOCKED,
            &vm_page_stats_reusable.free_shared, "");
extern unsigned int vm_page_free_count, vm_page_speculative_count;
SYSCTL_UINT(_vm, OID_AUTO, page_free_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_free_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_speculative_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_speculative_count, 0, "");

extern unsigned int vm_page_cleaned_count;
SYSCTL_UINT(_vm, OID_AUTO, page_cleaned_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_cleaned_count, 0, "Cleaned queue size");

extern unsigned int vm_page_pageable_internal_count, vm_page_pageable_external_count;
SYSCTL_UINT(_vm, OID_AUTO, page_pageable_internal_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_internal_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_pageable_external_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pageable_external_count, 0, "");

/* pageout counts */
extern unsigned int vm_pageout_inactive_dirty_internal, vm_pageout_inactive_dirty_external, vm_pageout_inactive_clean, vm_pageout_speculative_clean, vm_pageout_inactive_used;
extern unsigned int vm_pageout_freed_from_inactive_clean, vm_pageout_freed_from_speculative;
SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_internal, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_internal, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_dirty_external, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_dirty_external, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_clean, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, pageout_speculative_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_speculative_clean, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, pageout_inactive_used, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_inactive_used, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_inactive_clean, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_inactive_clean, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_speculative, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_speculative, 0, "");

extern unsigned int vm_pageout_freed_from_cleaned;
SYSCTL_UINT(_vm, OID_AUTO, pageout_freed_from_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_freed_from_cleaned, 0, "");
/* counts of pages entering the cleaned queue */
extern unsigned int vm_pageout_enqueued_cleaned, vm_pageout_enqueued_cleaned_from_inactive_dirty;
SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned, 0, ""); /* sum of next two */
SYSCTL_UINT(_vm, OID_AUTO, pageout_enqueued_cleaned_from_inactive_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_enqueued_cleaned_from_inactive_dirty, 0, "");

/* counts of pages leaving the cleaned queue */
extern unsigned int vm_pageout_cleaned_reclaimed, vm_pageout_cleaned_reactivated, vm_pageout_cleaned_reference_reactivated, vm_pageout_cleaned_volatile_reactivated, vm_pageout_cleaned_fault_reactivated, vm_pageout_cleaned_commit_reactivated, vm_pageout_cleaned_busy, vm_pageout_cleaned_nolock;
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reclaimed, 0, "Cleaned pages reclaimed");
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reactivated, 0, "Cleaned pages reactivated"); /* sum of all reactivated AND busy and nolock (even though those actually get reDEactivated) */
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_reference_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_reference_reactivated, 0, "Cleaned pages reference reactivated");
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_volatile_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_volatile_reactivated, 0, "Cleaned pages volatile reactivated");
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_fault_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_fault_reactivated, 0, "Cleaned pages fault reactivated");
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_commit_reactivated, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_commit_reactivated, 0, "Cleaned pages commit reactivated");
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_busy, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_busy, 0, "Cleaned pages busy (deactivated)");
SYSCTL_UINT(_vm, OID_AUTO, pageout_cleaned_nolock, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_cleaned_nolock, 0, "Cleaned pages no-lock (deactivated)");
/* counts of pages prefaulted when entering a memory object */
extern int64_t vm_prefault_nb_pages, vm_prefault_nb_bailout;
SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_pages, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_pages, "");
SYSCTL_QUAD(_vm, OID_AUTO, prefault_nb_bailout, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_prefault_nb_bailout, "");
#if defined (__x86_64__)
extern unsigned int vm_clump_promote_threshold;
SYSCTL_UINT(_vm, OID_AUTO, vm_clump_promote_threshold, CTLFLAG_RW | CTLFLAG_LOCKED, &vm_clump_promote_threshold, 0, "clump size threshold for promotes");
#if DEVELOPMENT || DEBUG
extern unsigned long vm_clump_stats[];
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats1, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[1], "free page allocations from clump of 1 page");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats2, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[2], "free page allocations from clump of 2 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats3, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[3], "free page allocations from clump of 3 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats4, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[4], "free page allocations from clump of 4 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats5, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[5], "free page allocations from clump of 5 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats6, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[6], "free page allocations from clump of 6 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats7, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[7], "free page allocations from clump of 7 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats8, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[8], "free page allocations from clump of 8 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats9, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[9], "free page allocations from clump of 9 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats10, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[10], "free page allocations from clump of 10 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats11, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[11], "free page allocations from clump of 11 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats12, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[12], "free page allocations from clump of 12 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats13, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[13], "free page allocations from clump of 13 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats14, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[14], "free page allocations from clump of 14 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats15, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[15], "free page allocations from clump of 15 pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_stats16, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_stats[16], "free page allocations from clump of 16 pages");
extern unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_alloc, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_allocs, "free page allocations");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_inserts, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_inserts, "free page insertions");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_inrange, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_inrange, "free page insertions that are part of vm_pages");
SYSCTL_LONG(_vm, OID_AUTO, vm_clump_promotes, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_clump_promotes, "pages promoted to head");
#endif /* DEVELOPMENT || DEBUG */
#endif /* __x86_64__ */
#if CONFIG_SECLUDED_MEMORY

SYSCTL_UINT(_vm, OID_AUTO, num_tasks_can_use_secluded_mem, CTLFLAG_RD | CTLFLAG_LOCKED, &num_tasks_can_use_secluded_mem, 0, "");
extern unsigned int vm_page_secluded_target;
extern unsigned int vm_page_secluded_count;
extern unsigned int vm_page_secluded_count_free;
extern unsigned int vm_page_secluded_count_inuse;
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_target, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_target, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count_free, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count_free, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_count_inuse, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded_count_inuse, 0, "");

extern struct vm_page_secluded_data vm_page_secluded;
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_eligible, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.eligible_for_secluded, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_success_free, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_success_free, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_success_other, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_success_other, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_locked, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_locked, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_state, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_state, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_failure_dirty, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_failure_dirty, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit, 0, "");
SYSCTL_UINT(_vm, OID_AUTO, page_secluded_grab_for_iokit_success, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_secluded.grab_for_iokit_success, 0, "");

extern uint64_t vm_pageout_secluded_burst_count;
SYSCTL_QUAD(_vm, OID_AUTO, pageout_secluded_burst_count, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_pageout_secluded_burst_count, "");

#endif /* CONFIG_SECLUDED_MEMORY */
#include <kern/thread.h>
#include <sys/user.h>

void vm_pageout_io_throttle(void);

void vm_pageout_io_throttle(void) {
    struct uthread *uthread = get_bsdthread_info(current_thread());

    /*
     * thread is marked as a low priority I/O type
     * and the I/O we issued while in this cleaning operation
     * collided with normal I/O operations... we'll
     * delay in order to mitigate the impact of this
     * task on the normal operation of the system
     */

    if (uthread->uu_lowpri_window) {
        throttle_lowpri_io(1);
    }

}
int
vm_pressure_monitor(
    __unused struct proc            *p,
    struct vm_pressure_monitor_args *uap,
    int                             *retval)
{
    kern_return_t kr;
    uint32_t      pages_reclaimed;
    uint32_t      pages_wanted;

    kr = mach_vm_pressure_monitor(
        (boolean_t) uap->wait_for_pressure,
        uap->nsecs_monitored,
        (uap->pages_reclaimed) ? &pages_reclaimed : NULL,
        &pages_wanted);

    switch (kr) {
    case KERN_SUCCESS:
        break;
    case KERN_ABORTED:
        return EINTR;
    default:
        return EINVAL;
    }

    if (uap->pages_reclaimed) {
        if (copyout((void *)&pages_reclaimed,
                    uap->pages_reclaimed,
                    sizeof (pages_reclaimed)) != 0) {
            return EFAULT;
        }
    }

    *retval = (int) pages_wanted;
    return 0;
}
int
kas_info(struct proc *p,
         struct kas_info_args *uap,
         int *retval __unused)
{
#ifdef SECURE_KERNEL
    (void)p;
    (void)uap;
    return ENOTSUP;
#else /* !SECURE_KERNEL */
    int          selector = uap->selector;
    user_addr_t  valuep = uap->value;
    user_addr_t  sizep = uap->size;
    user_size_t  size;
    int          error;

    if (!kauth_cred_issuser(kauth_cred_get())) {
        return EPERM;
    }

#if CONFIG_MACF
    error = mac_system_check_kas_info(kauth_cred_get(), selector);
    if (error) {
        return error;
    }
#endif

    if (IS_64BIT_PROCESS(p)) {
        user64_size_t size64;
        error = copyin(sizep, &size64, sizeof(size64));
        size = (user_size_t)size64;
    } else {
        user32_size_t size32;
        error = copyin(sizep, &size32, sizeof(size32));
        size = (user_size_t)size32;
    }
    if (error) {
        return error;
    }

    switch (selector) {
        case KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR:
            {
                uint64_t slide = vm_kernel_slide;

                if (sizeof(slide) != size) {
                    return EINVAL;
                }

                if (IS_64BIT_PROCESS(p)) {
                    user64_size_t size64 = (user64_size_t)size;
                    error = copyout(&size64, sizep, sizeof(size64));
                } else {
                    user32_size_t size32 = (user32_size_t)size;
                    error = copyout(&size32, sizep, sizeof(size32));
                }
                if (error) {
                    return error;
                }

                error = copyout(&slide, valuep, sizeof(slide));
                if (error) {
                    return error;
                }
            }
            break;
        default:
            return EINVAL;
    }

    return 0;
#endif /* !SECURE_KERNEL */
}
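/*
 * Example (user space, a minimal sketch): on non-SECURE_KERNEL builds, a
 * root process can retrieve the kernel text slide through this syscall:
 *
 *      #include <stdio.h>
 *      #include <stdint.h>
 *      #include <sys/kas_info.h>
 *
 *      uint64_t slide = 0;
 *      size_t size = sizeof(slide);
 *      if (kas_info(KAS_INFO_KERNEL_TEXT_SLIDE_SELECTOR, &slide, &size) == 0)
 *              printf("kernel slide: 0x%llx\n", slide);
 */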
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wcast-qual"
#pragma clang diagnostic ignored "-Wunused-function"

static void asserts() {
    static_assert(sizeof(vm_min_kernel_address) == sizeof(unsigned long));
    static_assert(sizeof(vm_max_kernel_address) == sizeof(unsigned long));
}

SYSCTL_ULONG(_vm, OID_AUTO, vm_min_kernel_address, CTLFLAG_RD, (unsigned long *) &vm_min_kernel_address, "");
SYSCTL_ULONG(_vm, OID_AUTO, vm_max_kernel_address, CTLFLAG_RD, (unsigned long *) &vm_max_kernel_address, "");
#pragma clang diagnostic pop

extern uint32_t vm_page_pages;
SYSCTL_UINT(_vm, OID_AUTO, pages, CTLFLAG_RD | CTLFLAG_LOCKED, &vm_page_pages, 0, "");
#if (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG)
extern void pmap_footprint_suspend(vm_map_t map, boolean_t suspend);
static int
sysctl_vm_footprint_suspend SYSCTL_HANDLER_ARGS
{
#pragma unused(oidp, arg1, arg2)
    int error = 0;
    int new_value;

    if (req->newptr == USER_ADDR_NULL) {
        return 0;
    }
    error = SYSCTL_IN(req, &new_value, sizeof(int));
    if (error) {
        return error;
    }
    pmap_footprint_suspend(current_map(), new_value);
    return 0;
}
SYSCTL_PROC(_vm, OID_AUTO, footprint_suspend,
            CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_ANYBODY | CTLFLAG_LOCKED | CTLFLAG_MASKED,
            0, 0, &sysctl_vm_footprint_suspend, "I", "");
#endif /* (__arm__ || __arm64__) && (DEVELOPMENT || DEBUG) */