2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * Mach Operating System
30 * Copyright (c) 1987 Carnegie-Mellon University
31 * All rights reserved. The CMU software License Agreement specifies
32 * the terms and conditions for use and redistribution.
35 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
36 * support for mandatory and extensible security protections. This notice
37 * is included in support of clause 2.2 (b) of the Apple Public License,
41 #include <meta_features.h>
43 #include <kern/task.h>
44 #include <kern/thread.h>
45 #include <kern/debug.h>
46 #include <kern/lock.h>
47 #include <mach/mach_traps.h>
48 #include <mach/port.h>
49 #include <mach/task.h>
50 #include <mach/task_access.h>
51 #include <mach/task_special_ports.h>
52 #include <mach/time_value.h>
53 #include <mach/vm_map.h>
54 #include <mach/vm_param.h>
55 #include <mach/vm_prot.h>
57 #include <sys/file_internal.h>
58 #include <sys/param.h>
59 #include <sys/systm.h>
61 #include <sys/namei.h>
62 #include <sys/proc_internal.h>
63 #include <sys/kauth.h>
66 #include <sys/vnode_internal.h>
67 #include <sys/mount.h>
68 #include <sys/trace.h>
69 #include <sys/kernel.h>
70 #include <sys/ubc_internal.h>
72 #include <sys/syslog.h>
74 #include <sys/sysproto.h>
76 #include <sys/sysctl.h>
78 #include <bsm/audit_kernel.h>
79 #include <bsm/audit_kevents.h>
81 #include <kern/kalloc.h>
82 #include <vm/vm_map.h>
83 #include <vm/vm_kern.h>
85 #include <machine/spl.h>
87 #include <mach/shared_region.h>
88 #include <vm/vm_shared_region.h>
90 #include <vm/vm_protos.h>
93 * Sysctl's related to data/stack execution. See osfmk/vm/vm_map.c
96 extern int allow_stack_exec
, allow_data_exec
;
98 SYSCTL_INT(_vm
, OID_AUTO
, allow_stack_exec
, CTLFLAG_RW
, &allow_stack_exec
, 0, "");
99 SYSCTL_INT(_vm
, OID_AUTO
, allow_data_exec
, CTLFLAG_RW
, &allow_data_exec
, 0, "");
101 #if CONFIG_NO_PRINTF_STRINGS
103 log_stack_execution_failure(__unused addr64_t a
, __unused vm_prot_t b
)
107 static const char *prot_values
[] = {
119 log_stack_execution_failure(addr64_t vaddr
, vm_prot_t prot
)
121 printf("Data/Stack execution not permitted: %s[pid %d] at virtual address 0x%qx, protections were %s\n",
122 current_proc()->p_comm
, current_proc()->p_pid
, vaddr
, prot_values
[prot
& VM_PROT_ALL
]);
133 return (vm_map_check_protection(
135 vm_map_trunc_page(addr
), vm_map_round_page(addr
+len
),
136 prot
== B_READ
? VM_PROT_READ
: VM_PROT_WRITE
));
145 kret
= vm_map_wire(current_map(), vm_map_trunc_page(addr
),
146 vm_map_round_page(addr
+len
),
147 VM_PROT_READ
| VM_PROT_WRITE
,FALSE
);
152 case KERN_INVALID_ADDRESS
:
155 case KERN_PROTECTION_FAILURE
:
166 __unused
int dirtied
)
171 vm_map_offset_t vaddr
;
178 pmap
= get_task_pmap(current_task());
179 for (vaddr
= vm_map_trunc_page(addr
);
180 vaddr
< vm_map_round_page(addr
+len
);
181 vaddr
+= PAGE_SIZE
) {
182 paddr
= pmap_extract(pmap
, vaddr
);
183 pg
= PHYS_TO_VM_PAGE(paddr
);
184 vm_page_set_modified(pg
);
191 kret
= vm_map_unwire(current_map(), vm_map_trunc_page(addr
),
192 vm_map_round_page(addr
+len
), FALSE
);
196 case KERN_INVALID_ADDRESS
:
199 case KERN_PROTECTION_FAILURE
:
213 character
= (char)byte
;
214 return (copyout((void *)&(character
), addr
, sizeof(char)) == 0 ? 0 : -1);
224 character
= (char)byte
;
225 return (copyout((void *)&(character
), addr
, sizeof(char)) == 0 ? 0 : -1);
228 int fubyte(user_addr_t addr
)
232 if (copyin(addr
, (void *) &byte
, sizeof(char)))
237 int fuibyte(user_addr_t addr
)
241 if (copyin(addr
, (void *) &(byte
), sizeof(char)))
251 return (copyout((void *) &word
, addr
, sizeof(int)) == 0 ? 0 : -1);
254 long fuword(user_addr_t addr
)
258 if (copyin(addr
, (void *) &word
, sizeof(int)))
263 /* suiword and fuiword are the same as suword and fuword, respectively */
270 return (copyout((void *) &word
, addr
, sizeof(int)) == 0 ? 0 : -1);
273 long fuiword(user_addr_t addr
)
277 if (copyin(addr
, (void *) &word
, sizeof(int)))
283 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
284 * fetching and setting of process-sized size_t and pointer values.
287 sulong(user_addr_t addr
, int64_t word
)
290 if (IS_64BIT_PROCESS(current_proc())) {
291 return(copyout((void *)&word
, addr
, sizeof(word
)) == 0 ? 0 : -1);
293 return(suiword(addr
, (long)word
));
298 fulong(user_addr_t addr
)
302 if (IS_64BIT_PROCESS(current_proc())) {
303 if (copyin(addr
, (void *)&longword
, sizeof(longword
)) != 0)
307 return((int64_t)fuiword(addr
));
312 suulong(user_addr_t addr
, uint64_t uword
)
315 if (IS_64BIT_PROCESS(current_proc())) {
316 return(copyout((void *)&uword
, addr
, sizeof(uword
)) == 0 ? 0 : -1);
318 return(suiword(addr
, (u_long
)uword
));
323 fuulong(user_addr_t addr
)
327 if (IS_64BIT_PROCESS(current_proc())) {
328 if (copyin(addr
, (void *)&ulongword
, sizeof(ulongword
)) != 0)
332 return((uint64_t)fuiword(addr
));
337 swapon(__unused proc_t procp
, __unused
struct swapon_args
*uap
, __unused
int *retval
)
345 struct pid_for_task_args
*args
)
347 mach_port_name_t t
= args
->t
;
348 user_addr_t pid_addr
= args
->pid
;
352 kern_return_t err
= KERN_SUCCESS
;
354 AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK
);
355 AUDIT_ARG(mach_port1
, t
);
357 t1
= port_name_to_task(t
);
359 if (t1
== TASK_NULL
) {
363 p
= get_bsdtask_info(t1
);
374 (void) copyout((char *) &pid
, pid_addr
, sizeof(int));
375 AUDIT_MACH_SYSCALL_EXIT(err
);
381 * tfp_policy = KERN_TFP_POLICY_DENY; Deny Mode: None allowed except for self
382 * tfp_policy = KERN_TFP_POLICY_DEFAULT; default mode: all posix checks and upcall via task port for authentication
385 static int tfp_policy
= KERN_TFP_POLICY_DEFAULT
;
388 * Routine: task_for_pid_posix_check
390 * Verify that the current process should be allowed to
391 * get the target process's task port. This is only
393 * - The current process is root
394 * OR all of the following are true:
395 * - The target process's real, effective, and saved uids
396 * are the same as the current proc's euid,
397 * - The target process's group set is a subset of the
398 * calling process's group set, and
399 * - The target process hasn't switched credentials.
401 * Returns: TRUE: permitted
405 task_for_pid_posix_check(proc_t target
)
407 kauth_cred_t targetcred
, mycred
;
411 /* No task_for_pid on bad targets */
412 if (target
== PROC_NULL
|| target
->p_stat
== SZOMB
) {
416 mycred
= kauth_cred_get();
417 myuid
= kauth_cred_getuid(mycred
);
419 /* If we're running as root, the check passes */
420 if (kauth_cred_issuser(mycred
))
423 /* We're allowed to get our own task port */
424 if (target
== current_proc())
428 * Under DENY, only root can get another proc's task port,
429 * so no more checks are needed.
431 if (tfp_policy
== KERN_TFP_POLICY_DENY
) {
435 targetcred
= kauth_cred_proc_ref(target
);
438 /* Do target's ruid, euid, and saved uid match my euid? */
439 if ((kauth_cred_getuid(targetcred
) != myuid
) ||
440 (targetcred
->cr_ruid
!= myuid
) ||
441 (targetcred
->cr_svuid
!= myuid
)) {
446 /* Are target's groups a subset of my groups? */
447 if (kauth_cred_gid_subset(targetcred
, mycred
, &allowed
) ||
453 /* Has target switched credentials? */
454 if (target
->p_flag
& P_SUGID
) {
460 kauth_cred_unref(&targetcred
);
465 * Routine: task_for_pid
467 * Get the task port for another "process", named by its
468 * process ID on the same host as "target_task".
470 * Only permitted to privileged processes, or processes
471 * with the same user ID.
473 * XXX This should be a BSD system call, not a Mach trap!!!
477 struct task_for_pid_args
*args
)
479 mach_port_name_t target_tport
= args
->target_tport
;
481 user_addr_t task_addr
= args
->t
;
482 struct uthread
*uthread
;
483 proc_t p
= PROC_NULL
;
484 task_t t1
= TASK_NULL
;
485 mach_port_name_t tret
= MACH_PORT_NULL
;
490 AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID
);
492 AUDIT_ARG(mach_port1
, target_tport
);
494 #if defined(SECURE_KERNEL)
496 (void ) copyout((char *)&t1
, task_addr
, sizeof(mach_port_name_t
));
497 AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE
);
498 return(KERN_FAILURE
);
502 t1
= port_name_to_task(target_tport
);
503 if (t1
== TASK_NULL
) {
504 (void) copyout((char *)&t1
, task_addr
, sizeof(mach_port_name_t
));
505 AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE
);
506 return(KERN_FAILURE
);
511 * Delayed binding of thread credential to process credential, if we
512 * are not running with an explicitly set thread credential.
514 uthread
= get_bsdthread_info(current_thread());
515 kauth_cred_uthread_update(uthread
, current_proc());
518 AUDIT_ARG(process
, p
);
520 if (!(task_for_pid_posix_check(p
))) {
521 error
= KERN_FAILURE
;
525 if (p
->task
!= TASK_NULL
) {
526 /* If we aren't root and target's task access port is set... */
527 if (!kauth_cred_issuser(kauth_cred_get()) &&
528 (task_get_task_access_port(p
->task
, &tfpport
) == 0) &&
529 (tfpport
!= IPC_PORT_NULL
)) {
531 if (tfpport
== IPC_PORT_DEAD
) {
532 error
= KERN_PROTECTION_FAILURE
;
536 /* Call up to the task access server */
537 error
= check_task_access(tfpport
, proc_selfpid(), kauth_getgid(), pid
);
539 if (error
!= MACH_MSG_SUCCESS
) {
540 if (error
== MACH_RCV_INTERRUPTED
)
541 error
= KERN_ABORTED
;
543 error
= KERN_FAILURE
;
548 error
= mac_proc_check_get_task(kauth_cred_get(), p
);
550 error
= KERN_FAILURE
;
555 /* Grant task port access */
556 task_reference(p
->task
);
557 sright
= (void *) convert_task_to_port(p
->task
);
558 tret
= ipc_port_copyout_send(
560 get_task_ipcspace(current_task()));
562 error
= KERN_SUCCESS
;
566 AUDIT_ARG(mach_port2
, tret
);
567 (void) copyout((char *) &tret
, task_addr
, sizeof(mach_port_name_t
));
570 AUDIT_MACH_SYSCALL_EXIT(error
);
575 * Routine: task_name_for_pid
577 * Get the task name port for another "process", named by its
578 * process ID on the same host as "target_task".
580 * Only permitted to privileged processes, or processes
581 * with the same user ID.
583 * XXX This should be a BSD system call, not a Mach trap!!!
588 struct task_name_for_pid_args
*args
)
590 mach_port_name_t target_tport
= args
->target_tport
;
592 user_addr_t task_addr
= args
->t
;
593 struct uthread
*uthread
;
594 proc_t p
= PROC_NULL
;
596 mach_port_name_t tret
;
598 int error
= 0, refheld
= 0;
599 kauth_cred_t target_cred
;
601 AUDIT_MACH_SYSCALL_ENTER(AUE_TASKNAMEFORPID
);
603 AUDIT_ARG(mach_port1
, target_tport
);
605 t1
= port_name_to_task(target_tport
);
606 if (t1
== TASK_NULL
) {
607 (void) copyout((char *)&t1
, task_addr
, sizeof(mach_port_name_t
));
608 AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE
);
609 return(KERN_FAILURE
);
614 * Delayed binding of thread credential to process credential, if we
615 * are not running with an explicitly set thread credential.
617 uthread
= get_bsdthread_info(current_thread());
618 kauth_cred_uthread_update(uthread
, current_proc());
621 AUDIT_ARG(process
, p
);
622 if (p
!= PROC_NULL
) {
623 target_cred
= kauth_cred_proc_ref(p
);
626 if ((p
->p_stat
!= SZOMB
)
627 && ((current_proc() == p
)
628 || kauth_cred_issuser(kauth_cred_get())
629 || ((kauth_cred_getuid(target_cred
) == kauth_cred_getuid(kauth_cred_get())) &&
630 ((target_cred
->cr_ruid
== kauth_cred_get()->cr_ruid
))))) {
632 if (p
->task
!= TASK_NULL
) {
633 task_reference(p
->task
);
635 error
= mac_proc_check_get_task_name(kauth_cred_get(), p
);
637 task_deallocate(p
->task
);
641 sright
= (void *)convert_task_name_to_port(p
->task
);
642 tret
= ipc_port_copyout_send(sright
,
643 get_task_ipcspace(current_task()));
645 tret
= MACH_PORT_NULL
;
647 AUDIT_ARG(mach_port2
, tret
);
648 (void) copyout((char *)&tret
, task_addr
, sizeof(mach_port_name_t
));
650 error
= KERN_SUCCESS
;
659 tret
= MACH_PORT_NULL
;
660 (void) copyout((char *) &tret
, task_addr
, sizeof(mach_port_name_t
));
661 error
= KERN_FAILURE
;
664 kauth_cred_unref(&target_cred
);
667 AUDIT_MACH_SYSCALL_EXIT(error
);
672 sysctl_settfp_policy(__unused
struct sysctl_oid
*oidp
, void *arg1
,
673 __unused
int arg2
, struct sysctl_req
*req
)
678 error
= SYSCTL_OUT(req
, arg1
, sizeof(int));
679 if (error
|| req
->newptr
== USER_ADDR_NULL
)
685 if ((error
= SYSCTL_IN(req
, &new_value
, sizeof(int)))) {
688 if ((new_value
== KERN_TFP_POLICY_DENY
)
689 || (new_value
== KERN_TFP_POLICY_DEFAULT
))
690 tfp_policy
= new_value
;
/*
 * Read-only flag exported as kern.secure_kernel: 1 on SECURE_KERNEL
 * builds, 0 otherwise. (#else/#endif reconstructed — the two
 * conflicting definitions under one #if make the split unambiguous.)
 */
#if defined(SECURE_KERNEL)
static int kern_secure_kernel = 1;
#else
static int kern_secure_kernel = 0;
#endif
704 SYSCTL_INT(_kern
, OID_AUTO
, secure_kernel
, CTLFLAG_RD
, &kern_secure_kernel
, 0, "");
706 SYSCTL_NODE(_kern
, KERN_TFP
, tfp
, CTLFLAG_RW
|CTLFLAG_LOCKED
, 0, "tfp");
707 SYSCTL_PROC(_kern_tfp
, KERN_TFP_POLICY
, policy
, CTLTYPE_INT
| CTLFLAG_RW
,
708 &tfp_policy
, sizeof(uint32_t), &sysctl_settfp_policy
,"I","policy");
710 SYSCTL_INT(_vm
, OID_AUTO
, shared_region_trace_level
, CTLFLAG_RW
,
711 &shared_region_trace_level
, 0, "");
712 SYSCTL_INT(_vm
, OID_AUTO
, shared_region_version
, CTLFLAG_RD
,
713 &shared_region_version
, 0, "");
714 SYSCTL_INT(_vm
, OID_AUTO
, shared_region_persistence
, CTLFLAG_RW
,
715 &shared_region_persistence
, 0, "");
718 * shared_region_check_np:
720 * This system call is intended for dyld.
722 * dyld calls this when any process starts to see if the process's shared
723 * region is already set up and ready to use.
724 * This call returns the base address of the first mapping in the
725 * process's shared region's first mapping.
726 * dyld will then check what's mapped at that address.
728 * If the shared region is empty, dyld will then attempt to map the shared
729 * cache file in the shared region via the shared_region_map_np() system call.
731 * If something's already mapped in the shared region, dyld will check if it
732 * matches the shared cache it would like to use for that process.
733 * If it matches, everything's ready and the process can proceed and use the
735 * If it doesn't match, dyld will unmap the shared region and map the shared
736 * cache into the process's address space via mmap().
739 * EINVAL no shared region
740 * ENOMEM shared region is empty
741 * EFAULT bad address for "start_address"
744 shared_region_check_np(
745 __unused
struct proc
*p
,
746 struct shared_region_check_np_args
*uap
,
747 __unused
int *retvalp
)
749 vm_shared_region_t shared_region
;
750 mach_vm_offset_t start_address
;
754 SHARED_REGION_TRACE_DEBUG(
755 ("shared_region: %p [%d(%s)] -> check_np(0x%llx)\n",
756 current_thread(), p
->p_pid
, p
->p_comm
,
757 (uint64_t)uap
->start_address
));
759 /* retrieve the current tasks's shared region */
760 shared_region
= vm_shared_region_get(current_task());
761 if (shared_region
!= NULL
) {
762 /* retrieve address of its first mapping... */
763 kr
= vm_shared_region_start_address(shared_region
,
765 if (kr
!= KERN_SUCCESS
) {
768 /* ... and give it to the caller */
769 error
= copyout(&start_address
,
770 (user_addr_t
) uap
->start_address
,
771 sizeof (start_address
));
773 SHARED_REGION_TRACE_ERROR(
774 ("shared_region: %p [%d(%s)] "
776 "copyout(0x%llx) error %d\n",
777 current_thread(), p
->p_pid
, p
->p_comm
,
778 (uint64_t)uap
->start_address
, (uint64_t)start_address
,
782 vm_shared_region_deallocate(shared_region
);
784 /* no shared region ! */
788 SHARED_REGION_TRACE_DEBUG(
789 ("shared_region: %p [%d(%s)] check_np(0x%llx) <- 0x%llx %d\n",
790 current_thread(), p
->p_pid
, p
->p_comm
,
791 (uint64_t)uap
->start_address
, (uint64_t)start_address
, error
));
797 * shared_region_map_np()
799 * This system call is intended for dyld.
801 * dyld uses this to map a shared cache file into a shared region.
802 * This is usually done only the first time a shared cache is needed.
803 * Subsequent processes will just use the populated shared region without
804 * requiring any further setup.
807 shared_region_map_np(
809 struct shared_region_map_np_args
*uap
,
810 __unused
int *retvalp
)
816 struct vnode
*vp
, *root_vp
;
817 struct vnode_attr va
;
819 memory_object_size_t file_size
;
820 user_addr_t user_mappings
;
821 struct shared_file_mapping_np
*mappings
;
822 #define SFM_MAX_STACK 4
823 struct shared_file_mapping_np stack_mappings
[SFM_MAX_STACK
];
824 unsigned int mappings_count
;
825 vm_size_t mappings_size
;
826 memory_object_control_t file_control
;
827 struct vm_shared_region
*shared_region
;
829 SHARED_REGION_TRACE_DEBUG(
830 ("shared_region: %p [%d(%s)] -> map\n",
831 current_thread(), p
->p_pid
, p
->p_comm
));
833 shared_region
= NULL
;
840 /* get file descriptor for shared region cache file */
843 /* get file structure from file descriptor */
844 error
= fp_lookup(p
, fd
, &fp
, 0);
846 SHARED_REGION_TRACE_ERROR(
847 ("shared_region: %p [%d(%s)] map: "
848 "fd=%d lookup failed (error=%d)\n",
849 current_thread(), p
->p_pid
, p
->p_comm
, fd
, error
));
853 /* make sure we're attempting to map a vnode */
854 if (fp
->f_fglob
->fg_type
!= DTYPE_VNODE
) {
855 SHARED_REGION_TRACE_ERROR(
856 ("shared_region: %p [%d(%s)] map: "
857 "fd=%d not a vnode (type=%d)\n",
858 current_thread(), p
->p_pid
, p
->p_comm
,
859 fd
, fp
->f_fglob
->fg_type
));
864 /* we need at least read permission on the file */
865 if (! (fp
->f_fglob
->fg_flag
& FREAD
)) {
866 SHARED_REGION_TRACE_ERROR(
867 ("shared_region: %p [%d(%s)] map: "
868 "fd=%d not readable\n",
869 current_thread(), p
->p_pid
, p
->p_comm
, fd
));
874 /* get vnode from file structure */
875 error
= vnode_getwithref((vnode_t
) fp
->f_fglob
->fg_data
);
877 SHARED_REGION_TRACE_ERROR(
878 ("shared_region: %p [%d(%s)] map: "
879 "fd=%d getwithref failed (error=%d)\n",
880 current_thread(), p
->p_pid
, p
->p_comm
, fd
, error
));
883 vp
= (struct vnode
*) fp
->f_fglob
->fg_data
;
885 /* make sure the vnode is a regular file */
886 if (vp
->v_type
!= VREG
) {
887 SHARED_REGION_TRACE_ERROR(
888 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
889 "not a file (type=%d)\n",
890 current_thread(), p
->p_pid
, p
->p_comm
,
891 vp
, vp
->v_name
, vp
->v_type
));
896 /* make sure vnode is on the process's root volume */
897 root_vp
= p
->p_fd
->fd_rdir
;
898 if (root_vp
== NULL
) {
901 if (vp
->v_mount
!= root_vp
->v_mount
) {
902 SHARED_REGION_TRACE_ERROR(
903 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
904 "not on process's root volume\n",
905 current_thread(), p
->p_pid
, p
->p_comm
,
911 /* make sure vnode is owned by "root" */
913 VATTR_WANTED(&va
, va_uid
);
914 error
= vnode_getattr(vp
, &va
, vfs_context_current());
916 SHARED_REGION_TRACE_ERROR(
917 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
918 "vnode_getattr(%p) failed (error=%d)\n",
919 current_thread(), p
->p_pid
, p
->p_comm
,
920 vp
, vp
->v_name
, vp
, error
));
923 if (va
.va_uid
!= 0) {
924 SHARED_REGION_TRACE_ERROR(
925 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
926 "owned by uid=%d instead of 0\n",
927 current_thread(), p
->p_pid
, p
->p_comm
,
928 vp
, vp
->v_name
, va
.va_uid
));
934 error
= vnode_size(vp
, &fs
, vfs_context_current());
936 SHARED_REGION_TRACE_ERROR(
937 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
938 "vnode_size(%p) failed (error=%d)\n",
939 current_thread(), p
->p_pid
, p
->p_comm
,
940 vp
, vp
->v_name
, vp
, error
));
945 /* get the file's memory object handle */
946 file_control
= ubc_getobject(vp
, UBC_HOLDOBJECT
);
947 if (file_control
== MEMORY_OBJECT_CONTROL_NULL
) {
948 SHARED_REGION_TRACE_ERROR(
949 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
950 "no memory object\n",
951 current_thread(), p
->p_pid
, p
->p_comm
,
957 /* get the list of mappings the caller wants us to establish */
958 mappings_count
= uap
->count
; /* number of mappings */
959 mappings_size
= (vm_size_t
) (mappings_count
* sizeof (mappings
[0]));
960 if (mappings_count
== 0) {
961 SHARED_REGION_TRACE_INFO(
962 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
964 current_thread(), p
->p_pid
, p
->p_comm
,
966 error
= 0; /* no mappings: we're done ! */
968 } else if (mappings_count
<= SFM_MAX_STACK
) {
969 mappings
= &stack_mappings
[0];
971 SHARED_REGION_TRACE_ERROR(
972 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
973 "too many mappings (%d)\n",
974 current_thread(), p
->p_pid
, p
->p_comm
,
975 vp
, vp
->v_name
, mappings_count
));
980 user_mappings
= uap
->mappings
; /* the mappings, in user space */
981 error
= copyin(user_mappings
,
985 SHARED_REGION_TRACE_ERROR(
986 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
987 "copyin(0x%llx, %d) failed (error=%d)\n",
988 current_thread(), p
->p_pid
, p
->p_comm
,
989 vp
, vp
->v_name
, (uint64_t)user_mappings
, mappings_count
, error
));
993 /* get the process's shared region (setup in vm_map_exec()) */
994 shared_region
= vm_shared_region_get(current_task());
995 if (shared_region
== NULL
) {
996 SHARED_REGION_TRACE_ERROR(
997 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
998 "no shared region\n",
999 current_thread(), p
->p_pid
, p
->p_comm
,
1004 /* map the file into that shared region's submap */
1005 kr
= vm_shared_region_map_file(shared_region
,
1010 (void *) p
->p_fd
->fd_rdir
);
1011 if (kr
!= KERN_SUCCESS
) {
1012 SHARED_REGION_TRACE_ERROR(
1013 ("shared_region: %p [%d(%s)] map(%p:'%s'): "
1014 "vm_shared_region_map_file() failed kr=0x%x\n",
1015 current_thread(), p
->p_pid
, p
->p_comm
,
1016 vp
, vp
->v_name
, kr
));
1018 case KERN_INVALID_ADDRESS
:
1021 case KERN_PROTECTION_FAILURE
:
1028 case KERN_INVALID_ARGUMENT
:
1037 * The mapping was successful. Let the buffer cache know
1038 * that we've mapped that file with these protections. This
1039 * prevents the vnode from getting recycled while it's mapped.
1041 (void) ubc_map(vp
, VM_PROT_READ
);
1044 /* update the vnode's access time */
1045 if (! (vnode_vfsvisflags(vp
) & MNT_NOATIME
)) {
1047 nanotime(&va
.va_access_time
);
1048 VATTR_SET_ACTIVE(&va
, va_access_time
);
1049 vnode_setattr(vp
, &va
, vfs_context_current());
1052 if (p
->p_flag
& P_NOSHLIB
) {
1053 /* signal that this process is now using split libraries */
1054 OSBitAndAtomic(~((uint32_t)P_NOSHLIB
), (UInt32
*)&p
->p_flag
);
1060 * release the vnode...
1061 * ubc_map() still holds it for us in the non-error case
1063 (void) vnode_put(vp
);
1067 /* release the file descriptor */
1068 fp_drop(p
, fd
, fp
, 0);
1072 if (shared_region
!= NULL
) {
1073 vm_shared_region_deallocate(shared_region
);
1076 SHARED_REGION_TRACE_DEBUG(
1077 ("shared_region: %p [%d(%s)] <- map\n",
1078 current_thread(), p
->p_pid
, p
->p_comm
));
1084 /* sysctl overflow room */
1086 /* vm_page_free_target is provided as a makeshift solution for applications that want to
1087 allocate buffer space, possibly purgeable memory, but not cause inactive pages to be
1088 reclaimed. It allows the app to calculate how much memory is free outside the free target. */
1089 extern unsigned int vm_page_free_target
;
1090 SYSCTL_INT(_vm
, OID_AUTO
, vm_page_free_target
, CTLFLAG_RD
,
1091 &vm_page_free_target
, 0, "Pageout daemon free target");