/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/mach_traps.h>
#include <mach/time_value.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/file_internal.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/vnode_internal.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/sysctl.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <vm/vm_protos.h>
log_nx_failure(addr64_t vaddr, vm_prot_t prot)
{
    printf("NX failure: %s - vaddr=%qx, prot=%x\n",
           current_proc()->p_comm, vaddr, prot);
}
    return (vm_map_check_protection(
            vm_map_trunc_page(addr), vm_map_round_page(addr+len),
            prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));

    kret = vm_map_wire(current_map(), vm_map_trunc_page(addr),
            vm_map_round_page(addr+len),
            VM_PROT_READ | VM_PROT_WRITE, FALSE);

    case KERN_INVALID_ADDRESS:
    case KERN_PROTECTION_FAILURE:
    __unused int dirtied)
{
    vm_map_offset_t vaddr;

    pmap = get_task_pmap(current_task());
    for (vaddr = vm_map_trunc_page(addr);
         vaddr < vm_map_round_page(addr+len);
         vaddr += PAGE_SIZE) {
        paddr = pmap_extract(pmap, vaddr);
        pg = PHYS_TO_VM_PAGE(paddr);
        vm_page_set_modified(pg);
    }

    kret = vm_map_unwire(current_map(), vm_map_trunc_page(addr),
            vm_map_round_page(addr+len), FALSE);

    case KERN_INVALID_ADDRESS:
    case KERN_PROTECTION_FAILURE:
    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

int fubyte(user_addr_t addr)

    if (copyin(addr, (void *) &byte, sizeof(char)))

int fuibyte(user_addr_t addr)

    if (copyin(addr, (void *) &(byte), sizeof(char)))

    return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuword(user_addr_t addr)

    if (copyin(addr, (void *) &word, sizeof(int)))

/* suiword and fuiword are the same as suword and fuword, respectively */

    return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuiword(user_addr_t addr)

    if (copyin(addr, (void *) &word, sizeof(int)))

/*
 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
 * fetching and setting of process-sized size_t and pointer values.
 */
sulong(user_addr_t addr, int64_t word)

    if (IS_64BIT_PROCESS(current_proc())) {
        return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);

    return(suiword(addr, (long)word));

fulong(user_addr_t addr)

    if (IS_64BIT_PROCESS(current_proc())) {
        if (copyin(addr, (void *)&longword, sizeof(longword)) != 0)

    return((int64_t)fuiword(addr));

suulong(user_addr_t addr, uint64_t uword)

    if (IS_64BIT_PROCESS(current_proc())) {
        return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);

    return(suiword(addr, (u_long)uword));

fuulong(user_addr_t addr)

    if (IS_64BIT_PROCESS(current_proc())) {
        if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0)

    return((uint64_t)fuiword(addr));
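/*
 * Illustrative sketch (not in the original source): a hypothetical helper
 * copying one user-pointer-sized value from one user address to another with
 * the fulong()/sulong() accessors above, so the same code handles both 32-bit
 * and 64-bit user processes.  The helper name and error handling are
 * assumptions made only for this example.
 */
#if 0	/* hypothetical example, for documentation only */
static int
copy_user_long(user_addr_t src, user_addr_t dst)
{
    int64_t value;

    value = fulong(src);    /* reads 4 or 8 bytes depending on the process */
    if (value == -1)
        return (EFAULT);    /* -1 can also be a legal value; real callers must disambiguate */
    if (sulong(dst, value) != 0)
        return (EFAULT);
    return (0);
}
#endif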
swapon(__unused struct proc *procp, __unused struct swapon_args *uap, __unused int *retval)
    struct pid_for_task_args *args)

    mach_port_name_t    t = args->t;
    user_addr_t         pid_addr = args->pid;

    kern_return_t       err = KERN_SUCCESS;
    boolean_t           funnel_state;

    AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
    AUDIT_ARG(mach_port1, t);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    t1 = port_name_to_task(t);

    if (t1 == TASK_NULL) {

    p = get_bsdtask_info(t1);

    (void) copyout((char *) &pid, pid_addr, sizeof(int));
    thread_funnel_set(kernel_flock, funnel_state);
    AUDIT_MACH_SYSCALL_EXIT(err);
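/*
 * Illustrative user-space sketch (not part of this file): pid_for_task() is
 * the Mach trap whose kernel side is implemented above; given a task port it
 * returns the BSD pid backing that task.
 */
#if 0	/* user-space example, shown for documentation only */
#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_traps.h>

int
main(void)
{
    int pid = -1;
    kern_return_t kr;

    /* ask for the pid behind our own task port */
    kr = pid_for_task(mach_task_self(), &pid);
    if (kr != KERN_SUCCESS) {
        fprintf(stderr, "pid_for_task failed: %d\n", kr);
        return 1;
    }
    printf("my pid is %d\n", pid);
    return 0;
}
#endif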
/*
 * Routine:	task_for_pid
 *
 *	Get the task port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.
 *
 *	XXX This should be a BSD system call, not a Mach trap!!!
 */
/*
 * tfp_policy = KERN_TFP_POLICY_DENY; Deny Mode: None allowed except for self
 * tfp_policy = KERN_TFP_POLICY_PERMISSIVE; Permissive Mode: all permissive; related ones allowed or privileged
 * tfp_policy = KERN_TFP_POLICY_RESTRICTED; Restricted Mode: self access allowed; setgid (to tfp_group) are allowed for other tasks
 */
static int tfp_policy = KERN_TFP_POLICY_RESTRICTED;
/* the group is inited to the kmem group and is modifiable by sysctl */
static int tfp_group_inited = 0;	/* policy groups are loaded ... */
static gid_t tfp_group_ronly = 0;	/* procview group */
static gid_t tfp_group_rw = 0;		/* procmod group */
    struct task_for_pid_args *args)

    mach_port_name_t    target_tport = args->target_tport;
    user_addr_t         task_addr = args->t;
    struct uthread      *uthread;

    mach_port_name_t    tret;

    boolean_t           funnel_state;
    boolean_t           ispermitted = FALSE;
    char                procname[MAXCOMLEN+1];

    AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);

    AUDIT_ARG(mach_port1, target_tport);

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    /*
     * Delayed binding of thread credential to process credential, if we
     * are not running with an explicitly set thread credential.
     */
    uthread = get_bsdthread_info(current_thread());
    if (uthread->uu_ucred != p1->p_ucred &&
        (uthread->uu_flag & UT_SETUID) == 0) {
        kauth_cred_t old = uthread->uu_ucred;

        uthread->uu_ucred = p1->p_ucred;
        kauth_cred_ref(uthread->uu_ucred);

        kauth_cred_rele(old);

    AUDIT_ARG(process, p);

    switch (tfp_policy) {

    case KERN_TFP_POLICY_PERMISSIVE:
        /* self or suser or related ones */
        if ((p != (struct proc *) 0)
            && (p1 != (struct proc *) 0)

                || !(suser(kauth_cred_get(), 0))
                || ((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get())) &&
                    ((p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid))
                    && ((p->p_flag & P_SUGID) == 0))

            && (p->p_stat != SZOMB)

    case KERN_TFP_POLICY_RESTRICTED:
        /* self or suser or setgid and related ones only */
        if ((p != (struct proc *) 0)
            && (p1 != (struct proc *) 0)

                || !(suser(kauth_cred_get(), 0))
                || (((tfp_group_inited != 0) &&
                     ((kauth_cred_ismember_gid(kauth_cred_get(),
                        tfp_group_ronly, &is_member) == 0) && is_member)
                     || ((kauth_cred_ismember_gid(kauth_cred_get(),
                        tfp_group_rw, &is_member) == 0) && is_member)

                    && ((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get())) &&
                        ((p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid))
                        && ((p->p_flag & P_SUGID) == 0))

            && (p->p_stat != SZOMB)

    case KERN_TFP_POLICY_DENY:
        /* self or suser only */
        /* do not return task port of other task at all */
        if ((p1 != (struct proc *) 0) && (p != (struct proc *) 0) && (p->p_stat != SZOMB)
            && ((p1 == p) || !(suser(kauth_cred_get(), 0))))

    if (ispermitted == TRUE) {
        if (p->task != TASK_NULL) {
            task_reference(p->task);
            sright = (void *)convert_task_to_port(p->task);
            tret = ipc_port_copyout_send(
                    get_task_ipcspace(current_task()));

            tret = MACH_PORT_NULL;

        AUDIT_ARG(mach_port2, tret);
        (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));

        error = KERN_SUCCESS;
        /*
         * There is no guarantee that p_comm is null-terminated, and the
         * kernel implementations of the string functions are not complete,
         * so bzero the buffer to ensure stale info is not leaked out.
         */
        bzero(&procname[0], MAXCOMLEN+1);
        strncpy(&procname[0], &p1->p_comm[0], MAXCOMLEN);
        if (tfp_policy != KERN_TFP_POLICY_PERMISSIVE)
            log(LOG_NOTICE, "(%d: %s)tfp: failed on %d:\n",
                ((p1 != PROC_NULL)?(p1->p_pid):0), &procname[0],
                ((p != PROC_NULL)?(p->p_pid):0));

        tret = MACH_PORT_NULL;
        (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
        error = KERN_FAILURE;

    thread_funnel_set(kernel_flock, funnel_state);
    AUDIT_MACH_SYSCALL_EXIT(error);
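/*
 * Illustrative user-space sketch (not part of this file): how a privileged or
 * same-uid client typically obtains another process's task port via the
 * task_for_pid() Mach trap whose kernel side is implemented above.  The pid
 * value (1234) is hypothetical.
 */
#if 0	/* user-space example, shown for documentation only */
#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_traps.h>
#include <mach/mach_error.h>

int
main(void)
{
    mach_port_t task = MACH_PORT_NULL;
    int pid = 1234;	/* hypothetical target pid */
    kern_return_t kr;

    /* ask the kernel for the target's task port */
    kr = task_for_pid(mach_task_self(), pid, &task);
    if (kr != KERN_SUCCESS) {
        /* fails when the tfp policy above denies access */
        fprintf(stderr, "task_for_pid: %s\n", mach_error_string(kr));
        return 1;
    }
    printf("got task port 0x%x for pid %d\n", task, pid);
    mach_port_deallocate(mach_task_self(), task);
    return 0;
}
#endif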
/*
 * Routine:	task_name_for_pid
 *
 *	Get the task name port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.
 *
 *	XXX This should be a BSD system call, not a Mach trap!!!
 */
    struct task_name_for_pid_args *args)

    mach_port_name_t    target_tport = args->target_tport;
    user_addr_t         task_addr = args->t;
    struct uthread      *uthread;

    mach_port_name_t    tret;

    boolean_t           funnel_state;

    AUDIT_MACH_SYSCALL_ENTER(AUE_TASKNAMEFORPID);

    AUDIT_ARG(mach_port1, target_tport);

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    /*
     * Delayed binding of thread credential to process credential, if we
     * are not running with an explicitly set thread credential.
     */
    uthread = get_bsdthread_info(current_thread());
    if (uthread->uu_ucred != p1->p_ucred &&
        (uthread->uu_flag & UT_SETUID) == 0) {
        kauth_cred_t old = uthread->uu_ucred;

        uthread->uu_ucred = p1->p_ucred;
        kauth_cred_ref(uthread->uu_ucred);

        kauth_cred_rele(old);

    AUDIT_ARG(process, p);

    if ((p != (struct proc *) 0)
        && (p->p_stat != SZOMB)
        && (p1 != (struct proc *) 0)

            || !(suser(kauth_cred_get(), 0))
            || ((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get())) &&
                ((p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid)))))

        if (p->task != TASK_NULL)

            task_reference(p->task);
            sright = (void *)convert_task_name_to_port(p->task);
            tret = ipc_port_copyout_send(
                    get_task_ipcspace(current_task()));

            tret = MACH_PORT_NULL;

        AUDIT_ARG(mach_port2, tret);
        (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));

        error = KERN_SUCCESS;

        tret = MACH_PORT_NULL;
        (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
        error = KERN_FAILURE;

    thread_funnel_set(kernel_flock, funnel_state);
    AUDIT_MACH_SYSCALL_EXIT(error);
sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1,
    __unused int arg2, struct sysctl_req *req)

    error = SYSCTL_OUT(req, arg1, sizeof(int));
    if (error || req->newptr == USER_ADDR_NULL)

    if ((error = SYSCTL_IN(req, &new_value, sizeof(int)))) {

    if ((new_value == KERN_TFP_POLICY_DENY)
        || (new_value == KERN_TFP_POLICY_PERMISSIVE)
        || (new_value == KERN_TFP_POLICY_RESTRICTED))
        tfp_policy = new_value;
sysctl_settfp_groups(__unused struct sysctl_oid *oidp, void *arg1,
    __unused int arg2, struct sysctl_req *req)

    error = SYSCTL_OUT(req, arg1, sizeof(int));
    if (error || req->newptr == USER_ADDR_NULL)

    /*
     * Once set, this cannot be reset until the next boot.  launchd sets it
     * during its pid 1 init and no one can set it after that.
     */
    if (tfp_group_inited != 0)

    if ((error = SYSCTL_IN(req, &new_value, sizeof(int)))) {

    if (new_value >= 100)

    if (arg1 == &tfp_group_ronly)
        tfp_group_ronly = new_value;
    else if (arg1 == &tfp_group_rw)
        tfp_group_rw = new_value;

    if ((tfp_group_ronly != 0) && (tfp_group_rw != 0))
        tfp_group_inited = 1;
SYSCTL_NODE(_kern, KERN_TFP, tfp, CTLFLAG_RW, 0, "tfp");
SYSCTL_PROC(_kern_tfp, KERN_TFP_POLICY, policy, CTLTYPE_INT | CTLFLAG_RW,
    &tfp_policy, sizeof(uint32_t), &sysctl_settfp_policy, "I", "policy");
SYSCTL_PROC(_kern_tfp, KERN_TFP_READ_GROUP, read_group, CTLTYPE_INT | CTLFLAG_RW,
    &tfp_group_ronly, sizeof(uint32_t), &sysctl_settfp_groups, "I", "read_group");
SYSCTL_PROC(_kern_tfp, KERN_TFP_RW_GROUP, rw_group, CTLTYPE_INT | CTLFLAG_RW,
    &tfp_group_rw, sizeof(uint32_t), &sysctl_settfp_groups, "I", "rw_group");
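/*
 * Illustrative user-space sketch (not part of this file): reading and setting
 * the task_for_pid policy knobs declared above.  The sysctl names follow from
 * the SYSCTL_NODE/SYSCTL_PROC declarations ("kern.tfp.policy",
 * "kern.tfp.read_group", "kern.tfp.rw_group"); the write shown here is only
 * an example and requires privilege.
 */
#if 0	/* user-space example, shown for documentation only */
#include <stdio.h>
#include <sys/types.h>
#include <sys/sysctl.h>

int
main(void)
{
    int policy;
    size_t len = sizeof(policy);

    /* read the current policy (one of the KERN_TFP_POLICY_* values) */
    if (sysctlbyname("kern.tfp.policy", &policy, &len, NULL, 0) == 0)
        printf("kern.tfp.policy = %d\n", policy);

    /* setting it goes through sysctl_settfp_policy(), which validates the value: */
    /* int new_policy = ...; */
    /* sysctlbyname("kern.tfp.policy", NULL, NULL, &new_policy, sizeof(new_policy)); */
    return 0;
}
#endif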
SYSCTL_INT(_vm, OID_AUTO, shared_region_trace_level, CTLFLAG_RW,
    &shared_region_trace_level, 0, "");
/*
 * shared_region_make_private_np:
 *
 * This system call is for "dyld" only.
 *
 * It creates a private copy of the current process's "shared region" for
 * split libraries.  "dyld" uses this when the shared region is full or
 * it needs to load a split library that conflicts with an already loaded one
 * that this process doesn't need.  "dyld" specifies a set of address ranges
 * that it wants to keep in the now-private "shared region".  These cover
 * the set of split libraries that the process needs so far.  The kernel needs
 * to deallocate the rest of the shared region, so that it's available for
 * more libraries for this process.
 */
shared_region_make_private_np(
    struct shared_region_make_private_np_args *uap,
    __unused int *retvalp)

    boolean_t           using_shared_regions;
    user_addr_t         user_ranges;
    unsigned int        range_count;
    struct shared_region_range_np   *ranges;
    shared_region_mapping_t         shared_region;
    struct shared_region_task_mappings task_mapping_info;
    shared_region_mapping_t         next;

    range_count = uap->rangeCount;
    user_ranges = uap->ranges;

        SHARED_REGION_TRACE_INFO,
        ("shared_region: %p [%d(%s)] "
         "make_private(rangecount=%d)\n",
         current_thread(), p->p_pid, p->p_comm, range_count));

    /* allocate kernel space for the "ranges" */
    if (range_count != 0) {
        kr = kmem_alloc(kernel_map,
                (vm_offset_t *) &ranges,
                (vm_size_t) (range_count * sizeof (ranges[0])));
        if (kr != KERN_SUCCESS) {

        /* copy "ranges" from user-space */
        error = copyin(user_ranges,
                (range_count * sizeof (ranges[0])));

    if (p->p_flag & P_NOSHLIB) {
        /* no split library has been mapped for this process so far */
        using_shared_regions = FALSE;

        /* this process has already mapped some split libraries */
        using_shared_regions = TRUE;

    /*
     * Get a private copy of the current shared region.
     * Do not chain it to the system-wide shared region, as we'll want
     * to map other split libraries in place of the old ones.  We want
     * to completely detach from the system-wide shared region and go our
     * own way after this point, not sharing anything with other processes.
     */
    error = clone_system_shared_regions(using_shared_regions,
                FALSE, /* chain_regions */

    /* get info on the newly allocated shared region */
    vm_get_shared_region(current_task(), &shared_region);
    task_mapping_info.self = (vm_offset_t) shared_region;
    shared_region_mapping_info(shared_region,
                &(task_mapping_info.text_region),
                &(task_mapping_info.text_size),
                &(task_mapping_info.data_region),
                &(task_mapping_info.data_size),
                &(task_mapping_info.region_mappings),
                &(task_mapping_info.client_base),
                &(task_mapping_info.alternate_base),
                &(task_mapping_info.alternate_next),
                &(task_mapping_info.fs_base),
                &(task_mapping_info.system),
                &(task_mapping_info.flags),

    /*
     * We now have our private copy of the shared region, as it was before
     * the call to clone_system_shared_regions().  We now need to clean it
     * up and keep only the memory areas described by the "ranges" array.
     */
    kr = shared_region_cleanup(range_count, ranges, &task_mapping_info);

    if (ranges != NULL) {
        kmem_free(kernel_map,
                (vm_offset_t) ranges,
                range_count * sizeof (ranges[0]));

        SHARED_REGION_TRACE_INFO,
        ("shared_region: %p [%d(%s)] "
         "make_private(rangecount=%d) -> %d "
         "shared_region=%p[%x,%x,%x]\n",
         current_thread(), p->p_pid, p->p_comm,
         range_count, error, shared_region,
         task_mapping_info.fs_base,
         task_mapping_info.system,
         task_mapping_info.flags));
/*
 * shared_region_map_file_np:
 *
 * This system call is for "dyld" only.
 *
 * "dyld" wants to map parts of a split library in the shared region.
 * We get a file descriptor on the split library to be mapped and a set
 * of mapping instructions, describing which parts of the file to map in
 * which areas of the shared segment and with what protection.
 * The "shared region" is split into 2 areas:
 * 0x90000000 - 0xa0000000 : read-only area (for TEXT and LINKEDIT sections),
 * 0xa0000000 - 0xb0000000 : writable area (for DATA sections).
 */
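/*
 * Worked example (illustrative, addresses hypothetical): a mapping whose
 * sfm_address is 0x90a3c000 lies in the read-only TEXT area shown above
 * (0x90000000 - 0xa0000000); its offset relative to the start of the shared
 * region is 0x90a3c000 - 0x90000000 = 0x00a3c000.  A mapping at 0xa0124000
 * lies in the writable DATA area.  An address such as 0x42000000 falls in
 * neither area, which is the "not in shared segment" case handled below by
 * either sliding the mappings (if the caller passed a slide_p pointer) or
 * failing the call.
 */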
shared_region_map_file_np(
    struct shared_region_map_file_np_args *uap,
    __unused int *retvalp)

    unsigned int        mapping_count;
    user_addr_t         user_mappings;  /* 64-bit */
    user_addr_t         user_slide_p;   /* 64-bit */
    struct shared_file_mapping_np   *mappings;

    mach_vm_offset_t    slide;

    struct vfs_context  context;
    memory_object_control_t file_control;
    memory_object_size_t    file_size;
    shared_region_mapping_t shared_region;
    struct shared_region_task_mappings task_mapping_info;
    shared_region_mapping_t next;
    shared_region_mapping_t default_shared_region;
    boolean_t           using_default_region;

    mach_vm_offset_t    base_offset, end_offset;
    mach_vm_offset_t    original_base_offset;
    boolean_t           mappings_in_segment;
#define SFM_MAX_STACK   6
    struct shared_file_mapping_np   stack_mappings[SFM_MAX_STACK];

    /* get file descriptor for split library from arguments */

    /* get file structure from file descriptor */
    error = fp_lookup(p, fd, &fp, 0);

        SHARED_REGION_TRACE_ERROR,
        ("shared_region: %p [%d(%s)] map_file: "
         "fd=%d lookup failed (error=%d)\n",
         current_thread(), p->p_pid, p->p_comm, fd, error));

    /* make sure we're attempting to map a vnode */
    if (fp->f_fglob->fg_type != DTYPE_VNODE) {

            SHARED_REGION_TRACE_ERROR,
            ("shared_region: %p [%d(%s)] map_file: "
             "fd=%d not a vnode (type=%d)\n",
             current_thread(), p->p_pid, p->p_comm,
             fd, fp->f_fglob->fg_type));

    /* we need at least read permission on the file */
    if (! (fp->f_fglob->fg_flag & FREAD)) {

            SHARED_REGION_TRACE_ERROR,
            ("shared_region: %p [%d(%s)] map_file: "
             "fd=%d not readable\n",
             current_thread(), p->p_pid, p->p_comm, fd));

    /* get vnode from file structure */
    error = vnode_getwithref((vnode_t)fp->f_fglob->fg_data);

        SHARED_REGION_TRACE_ERROR,
        ("shared_region: %p [%d(%s)] map_file: "
         "fd=%d getwithref failed (error=%d)\n",
         current_thread(), p->p_pid, p->p_comm, fd, error));

    vp = (struct vnode *) fp->f_fglob->fg_data;

    /* make sure the vnode is a regular file */
    if (vp->v_type != VREG) {

            SHARED_REGION_TRACE_ERROR,
            ("shared_region: %p [%d(%s)] map_file(%p:'%s'): "
             "not a file (type=%d)\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, vp->v_type));

    context.vc_ucred = kauth_cred_get();
    if ((error = vnode_size(vp, &fs, &context)) != 0) {

        SHARED_REGION_TRACE_ERROR,
        ("shared_region: %p [%d(%s)] "
         "map_file(%p:'%s'): "
         "vnode_size(%p) failed (error=%d)\n",
         current_thread(), p->p_pid, p->p_comm,
         vp, vp->v_name, vp));

    /*
     * Get the list of mappings the caller wants us to establish.
     */
    mapping_count = uap->mappingCount;  /* the number of mappings */
    if (mapping_count == 0) {

            SHARED_REGION_TRACE_INFO,
            ("shared_region: %p [%d(%s)] map_file(%p:'%s'): "
             current_thread(), p->p_pid, p->p_comm,

        error = 0;  /* no mappings: we're done ! */

    } else if (mapping_count <= SFM_MAX_STACK) {
        mappings = &stack_mappings[0];

        kr = kmem_alloc(kernel_map,
                (vm_offset_t *) &mappings,
                (vm_size_t) (mapping_count *
                     sizeof (mappings[0])));
        if (kr != KERN_SUCCESS) {

                SHARED_REGION_TRACE_ERROR,
                ("shared_region: %p [%d(%s)] "
                 "map_file(%p:'%s'): "
                 "failed to allocate %d mappings (kr=0x%x)\n",
                 current_thread(), p->p_pid, p->p_comm,
                 vp, vp->v_name, mapping_count, kr));

    user_mappings = uap->mappings;  /* the mappings, in user space */
    error = copyin(user_mappings,
            (mapping_count * sizeof (mappings[0])));

            SHARED_REGION_TRACE_ERROR,
            ("shared_region: %p [%d(%s)] map_file(%p:'%s'): "
             "failed to copyin %d mappings (error=%d)\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, mapping_count, error));
    /*
     * If the caller provides a "slide" pointer, it means they're OK
     * with us moving the mappings around to make them fit.
     */
    user_slide_p = uap->slide_p;

    /*
     * Make each mapping address relative to the beginning of the
     * shared region.  Check that all mappings are in the shared region.
     * Compute the maximum set of protections required to tell the
     * buffer cache how we mapped the file (see call to ubc_map() below).
     */
    max_prot = VM_PROT_NONE;

    mappings_in_segment = TRUE;
    for (j = 0; j < mapping_count; j++) {
        mach_vm_offset_t segment;
        segment = (mappings[j].sfm_address &
               GLOBAL_SHARED_SEGMENT_MASK);
        if (segment != GLOBAL_SHARED_TEXT_SEGMENT &&
            segment != GLOBAL_SHARED_DATA_SEGMENT) {
            /* this mapping is not in the shared region... */
            if (user_slide_p == NULL) {
                /* ... and we can't slide it in: fail */
                SHARED_REGION_TRACE(
                    SHARED_REGION_TRACE_CONFLICT,
                    ("shared_region: %p [%d(%s)] "
                     "map_file(%p:'%s'): "
                     "mapping %p not in shared segment & "
                     current_thread(), p->p_pid, p->p_comm,
                     mappings[j].sfm_address));

                /* expect all mappings to be outside */
                mappings_in_segment = FALSE;
            } else if (mappings_in_segment != FALSE) {
                /* other mappings were not outside: fail */
                SHARED_REGION_TRACE(
                    SHARED_REGION_TRACE_CONFLICT,
                    ("shared_region: %p [%d(%s)] "
                     "map_file(%p:'%s'): "
                     "mapping %p not in shared segment & "
                     "other mappings in shared segment\n",
                     current_thread(), p->p_pid, p->p_comm,
                     mappings[j].sfm_address));

            /* we'll try and slide that mapping in the segments */

            /* expect all mappings to be inside */
            mappings_in_segment = TRUE;
        } else if (mappings_in_segment != TRUE) {
            /* other mappings were not inside: fail */
            SHARED_REGION_TRACE(
                SHARED_REGION_TRACE_CONFLICT,
                ("shared_region: %p [%d(%s)] "
                 "map_file(%p:'%s'): "
                 "mapping %p in shared segment & "
                 "others in shared segment\n",
                 current_thread(), p->p_pid, p->p_comm,
                 mappings[j].sfm_address));

        /* get a relative offset inside the shared segments */
        mappings[j].sfm_address -= GLOBAL_SHARED_TEXT_SEGMENT;

        if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK)

            base_offset = (mappings[j].sfm_address &
                       SHARED_TEXT_REGION_MASK);

        if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK) +
            mappings[j].sfm_size > end_offset) {

                (mappings[j].sfm_address &
                 SHARED_TEXT_REGION_MASK) +
                mappings[j].sfm_size;

        max_prot |= mappings[j].sfm_max_prot;

    /* Make all mappings relative to the base_offset */
    base_offset = vm_map_trunc_page(base_offset);
    end_offset = vm_map_round_page(end_offset);
    for (j = 0; j < mapping_count; j++) {
        mappings[j].sfm_address -= base_offset;

    original_base_offset = base_offset;
    if (mappings_in_segment == FALSE) {
        /*
         * We're trying to map a library that was not pre-bound to
         * be in the shared segments.  We want to try and slide it
         * back into the shared segments but as far back as possible,
         * so that it doesn't clash with pre-bound libraries.  Set
         * the base_offset to the end of the region, so that it can't
         * possibly fit there and will have to be slid.
         */
        base_offset = SHARED_TEXT_REGION_SIZE - end_offset;
    /* get the file's memory object handle */
    UBCINFOCHECK("shared_region_map_file_np", vp);
    file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
    if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
        SHARED_REGION_TRACE(
            SHARED_REGION_TRACE_ERROR,
            ("shared_region: %p [%d(%s)] map_file(%p:'%s'): "
             "ubc_getobject() failed\n",
             current_thread(), p->p_pid, p->p_comm,

    /*
     * Get info about the current process's shared region.
     * This might change if we decide we need to clone the shared region.
     */
    vm_get_shared_region(current_task(), &shared_region);
    task_mapping_info.self = (vm_offset_t) shared_region;
    shared_region_mapping_info(shared_region,
                &(task_mapping_info.text_region),
                &(task_mapping_info.text_size),
                &(task_mapping_info.data_region),
                &(task_mapping_info.data_size),
                &(task_mapping_info.region_mappings),
                &(task_mapping_info.client_base),
                &(task_mapping_info.alternate_base),
                &(task_mapping_info.alternate_next),
                &(task_mapping_info.fs_base),
                &(task_mapping_info.system),
                &(task_mapping_info.flags),

    /*
     * Are we using the system's current shared region
     * for this environment ?
     */
    default_shared_region =
        lookup_default_shared_region(ENV_DEFAULT_ROOT,
                task_mapping_info.system);
    if (shared_region == default_shared_region) {
        using_default_region = TRUE;

        using_default_region = FALSE;

    shared_region_mapping_dealloc(default_shared_region);

    if (vp->v_mount != rootvnode->v_mount &&
        using_default_region) {
        /*
         * The split library is not on the root filesystem.  We don't
         * want to pollute the system-wide ("default") shared region
         *
         * Reject the mapping.  The caller (dyld) should "privatize"
         * (via shared_region_make_private()) the shared region and
         * try to establish the mapping privately for this process.
         */
        SHARED_REGION_TRACE(
            SHARED_REGION_TRACE_CONFLICT,
            ("shared_region: %p [%d(%s)] "
             "map_file(%p:'%s'): "
             "not on root volume\n",
             current_thread(), p->p_pid, p->p_comm,

    /*
     * Map the split library.
     */
    kr = map_shared_file(mapping_count,
            (user_slide_p) ? &slide : NULL);

    if (kr == KERN_SUCCESS) {
        /*
         * The mapping was successful.  Let the buffer cache know
         * that we've mapped that file with these protections.  This
         * prevents the vnode from getting recycled while it's mapped.
         */
        (void) ubc_map(vp, max_prot);

        SHARED_REGION_TRACE(
            SHARED_REGION_TRACE_CONFLICT,
            ("shared_region: %p [%d(%s)] "
             "map_file(%p:'%s'): "
             "map_shared_file failed, kr=0x%x\n",
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, kr));

        case KERN_INVALID_ADDRESS:

        case KERN_PROTECTION_FAILURE:

        case KERN_INVALID_ARGUMENT:

    if (p->p_flag & P_NOSHLIB) {
        /* signal that this process is now using split libraries */
        p->p_flag &= ~P_NOSHLIB;

        /*
         * The caller provided a pointer to a "slide" offset.  Let
         * them know by how much we slid the mappings.
         */
        if (mappings_in_segment == FALSE) {
            /*
             * We faked the base_offset earlier, so undo that
             * and take into account the real base_offset.
             */
            slide += SHARED_TEXT_REGION_SIZE - end_offset;
            slide -= original_base_offset;

            /*
             * The mappings were slid into the shared segments
             * and "slide" is relative to the beginning of the
             * shared segments.  Adjust it to be absolute.
             */
            slide += GLOBAL_SHARED_TEXT_SEGMENT;

        error = copyout(&slide,

        SHARED_REGION_TRACE(
            SHARED_REGION_TRACE_CONFLICT,
            ("shared_region: %p [%d(%s)] "
             "map_file(%p:'%s'): "
             current_thread(), p->p_pid, p->p_comm,
             vp, vp->v_name, slide));

    /*
     * release the vnode...
     * ubc_map() still holds it for us in the non-error case
     */
    (void) vnode_put(vp);

    /* release the file descriptor */
    fp_drop(p, fd, fp, 0);

    if (mappings != NULL &&
        mappings != &stack_mappings[0]) {
        kmem_free(kernel_map,
                (vm_offset_t) mappings,
                mapping_count * sizeof (mappings[0]));
load_shared_file(struct proc *p, struct load_shared_file_args *uap,
        __unused int *retval)

    caddr_t     mapped_file_addr = uap->mfa;
    u_long      mapped_file_size = uap->mfs;
    caddr_t     *base_address = uap->ba;
    int         map_cnt = uap->map_cnt;
    sf_mapping_t    *mappings = uap->mappings;
    char        *filename = uap->filename;
    int         *flags = uap->flags;
    struct vnode    *vp = 0;
    struct nameidata    nd, *ndp;

    struct vfs_context  context;

    memory_object_control_t file_control;
    sf_mapping_t    *map_list;

    int         default_regions = 0;

    shared_region_mapping_t shared_region;
    struct shared_region_task_mappings task_mapping_info;
    shared_region_mapping_t next;

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    AUDIT_ARG(addr, CAST_USER_ADDR_T(base_address));
    /* Retrieve the base address */
    if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {

    if ( (error = copyin(CAST_USER_ADDR_T(flags), &local_flags, sizeof (int))) ) {

    if(local_flags & QUERY_IS_SYSTEM_REGION) {
        shared_region_mapping_t default_shared_region;
        vm_get_shared_region(current_task(), &shared_region);
        task_mapping_info.self = (vm_offset_t)shared_region;

        shared_region_mapping_info(shared_region,
                &(task_mapping_info.text_region),
                &(task_mapping_info.text_size),
                &(task_mapping_info.data_region),
                &(task_mapping_info.data_size),
                &(task_mapping_info.region_mappings),
                &(task_mapping_info.client_base),
                &(task_mapping_info.alternate_base),
                &(task_mapping_info.alternate_next),
                &(task_mapping_info.fs_base),
                &(task_mapping_info.system),
                &(task_mapping_info.flags), &next);

        default_shared_region =
            lookup_default_shared_region(
                task_mapping_info.system);
        if (shared_region == default_shared_region) {
            local_flags = SYSTEM_REGION_BACKED;

        shared_region_mapping_dealloc(default_shared_region);

        error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int));

    caller_flags = local_flags;
    kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
            (vm_size_t)(MAXPATHLEN));
    if (kret != KERN_SUCCESS) {

    kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
    if (kret != KERN_SUCCESS) {
        kmem_free(kernel_map, (vm_offset_t)filename_str,
                (vm_size_t)(MAXPATHLEN));

    if ( (error = copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
        goto lsf_bailout_free;

    if ( (error = copyinstr(CAST_USER_ADDR_T(filename), filename_str,
            MAXPATHLEN, (size_t *)&dummy)) ) {
        goto lsf_bailout_free;

    /*
     * Get a vnode for the target file
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE32,
        CAST_USER_ADDR_T(filename_str), &context);

    if ((error = namei(ndp))) {
        goto lsf_bailout_free;

    if (vp->v_type != VREG) {
        goto lsf_bailout_free_vput;

    UBCINFOCHECK("load_shared_file", vp);

    if ((error = vnode_size(vp, &file_size, &context)) != 0)
        goto lsf_bailout_free_vput;

    file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
    if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
        goto lsf_bailout_free_vput;

    if(file_size != mapped_file_size) {
        goto lsf_bailout_free_vput;

    if(p->p_flag & P_NOSHLIB) {
        p->p_flag = p->p_flag & ~P_NOSHLIB;

    /* load alternate regions if the caller has requested. */
    /* Note: the new regions are "clean slates" */
    if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
        error = clone_system_shared_regions(FALSE,
                    TRUE, /* chain_regions */

            goto lsf_bailout_free_vput;

    vm_get_shared_region(current_task(), &shared_region);
    task_mapping_info.self = (vm_offset_t)shared_region;

    shared_region_mapping_info(shared_region,
            &(task_mapping_info.text_region),
            &(task_mapping_info.text_size),
            &(task_mapping_info.data_region),
            &(task_mapping_info.data_size),
            &(task_mapping_info.region_mappings),
            &(task_mapping_info.client_base),
            &(task_mapping_info.alternate_base),
            &(task_mapping_info.alternate_next),
            &(task_mapping_info.fs_base),
            &(task_mapping_info.system),
            &(task_mapping_info.flags), &next);

        shared_region_mapping_t default_shared_region;
        default_shared_region =
            lookup_default_shared_region(
                task_mapping_info.system);
        if(shared_region == default_shared_region) {
            default_regions = 1;

        shared_region_mapping_dealloc(default_shared_region);

    /* If we are running on a removable file system we must not */
    /* be in a set of shared regions or the file system will not */
    if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
        && (lsf_mapping_pool_gauge() < 75)) {
        /* We don't want to run out of shared memory */
        /* map entries by starting too many private versions */
        /* of the shared library structures */

        error2 = clone_system_shared_regions(!(p->p_flag & P_NOSHLIB),
                    TRUE, /* chain_regions */

            goto lsf_bailout_free_vput;

        local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
        vm_get_shared_region(current_task(), &shared_region);
        shared_region_mapping_info(shared_region,
                &(task_mapping_info.text_region),
                &(task_mapping_info.text_size),
                &(task_mapping_info.data_region),
                &(task_mapping_info.data_size),
                &(task_mapping_info.region_mappings),
                &(task_mapping_info.client_base),
                &(task_mapping_info.alternate_base),
                &(task_mapping_info.alternate_next),
                &(task_mapping_info.fs_base),
                &(task_mapping_info.system),
                &(task_mapping_info.flags), &next);

    /* This is a work-around to allow executables which have been */
    /* built without knowledge of the proper shared segment to */
    /* load. This code has been architected as a shared region */
    /* handler, the knowledge of where the regions are loaded is */
    /* problematic for the extension of shared regions as it will */
    /* not be easy to know what region an item should go into. */
    /* The code below however will get around a short term problem */
    /* with executables which believe they are loading at zero. */

    if (((unsigned int)local_base &
        (~(task_mapping_info.text_size - 1))) !=
        task_mapping_info.client_base) {
        if(local_flags & ALTERNATE_LOAD_SITE) {
            local_base = (caddr_t)(
                (unsigned int)local_base &
                (task_mapping_info.text_size - 1));
            local_base = (caddr_t)((unsigned int)local_base
                | task_mapping_info.client_base);

            goto lsf_bailout_free_vput;

    if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
                (vm_offset_t *)&local_base,
                map_cnt, map_list, file_control,
                &task_mapping_info, &local_flags))) {

        case KERN_INVALID_ARGUMENT:

        case KERN_INVALID_ADDRESS:

        case KERN_PROTECTION_FAILURE:
            /* save EAUTH for authentication in this */

        if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
            printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
            for(i=0; i<map_cnt; i++) {
                printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
                    , i, map_list[i].mapping_offset,
                    map_list[i].file_offset,
                    map_list[i].protection);

        local_flags |= SYSTEM_REGION_BACKED;
        if(!(error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int)))) {
            error = copyout(&local_base,
                CAST_USER_ADDR_T(base_address), sizeof (caddr_t));

lsf_bailout_free_vput:

    kmem_free(kernel_map, (vm_offset_t)filename_str,
            (vm_size_t)(MAXPATHLEN));
    kmem_free(kernel_map, (vm_offset_t)map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
reset_shared_file(__unused struct proc *p, struct reset_shared_file_args *uap,
        __unused register int *retval)

    caddr_t     *base_address = uap->ba;
    int         map_cnt = uap->map_cnt;
    sf_mapping_t    *mappings = uap->mappings;

    sf_mapping_t    *map_list;

    vm_offset_t map_address;

    shared_region_mapping_t shared_region;
    struct shared_region_task_mappings task_mapping_info;
    shared_region_mapping_t next;

    AUDIT_ARG(addr, CAST_DOWN(user_addr_t, base_address));
    /* Retrieve the base address */
    if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {

    if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
        != GLOBAL_SHARED_TEXT_SEGMENT) {

    kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
    if (kret != KERN_SUCCESS) {

        copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
        kmem_free(kernel_map, (vm_offset_t)map_list,
                (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

    vm_get_shared_region(current_task(), &shared_region);
    task_mapping_info.self = (vm_offset_t) shared_region;
    shared_region_mapping_info(shared_region,
            &(task_mapping_info.text_region),
            &(task_mapping_info.text_size),
            &(task_mapping_info.data_region),
            &(task_mapping_info.data_size),
            &(task_mapping_info.region_mappings),
            &(task_mapping_info.client_base),
            &(task_mapping_info.alternate_base),
            &(task_mapping_info.alternate_next),
            &(task_mapping_info.fs_base),
            &(task_mapping_info.system),
            &(task_mapping_info.flags),

    for (i = 0; i < map_cnt; i++) {
        if((map_list[i].mapping_offset
            & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
            map_address = (vm_offset_t)
                (local_base + map_list[i].mapping_offset);
            vm_deallocate(current_map(),

            vm_map(current_map(), &map_address,
                map_list[i].size, 0,
                SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
                task_mapping_info.data_region,
                ((unsigned int)local_base
                    & SHARED_DATA_REGION_MASK) +
                (map_list[i].mapping_offset
                    & SHARED_DATA_REGION_MASK),

                VM_PROT_READ, VM_INHERIT_SHARE);

    kmem_free(kernel_map, (vm_offset_t)map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
new_system_shared_regions(__unused struct proc *p,
        __unused struct new_system_shared_regions_args *uap,
        register int *retval)

    /* clear all of our existing defaults */
    remove_all_shared_regions();
clone_system_shared_regions(
    int shared_regions_active,

    shared_region_mapping_t new_shared_region;
    shared_region_mapping_t next;
    shared_region_mapping_t old_shared_region;
    struct shared_region_task_mappings old_info;
    struct shared_region_task_mappings new_info;

    vm_get_shared_region(current_task(), &old_shared_region);
    old_info.self = (vm_offset_t)old_shared_region;
    shared_region_mapping_info(old_shared_region,
            &(old_info.text_region),
            &(old_info.text_size),
            &(old_info.data_region),
            &(old_info.data_size),
            &(old_info.region_mappings),
            &(old_info.client_base),
            &(old_info.alternate_base),
            &(old_info.alternate_next),
            &(old_info.fs_base),
            &(old_info.flags), &next);

    if (shared_regions_active ||
        base_vnode == ENV_DEFAULT_ROOT) {
        if (shared_file_create_system_region(&new_shared_region,

        if (old_shared_region &&
            base_vnode == ENV_DEFAULT_ROOT) {
            base_vnode = old_info.fs_base;

            lookup_default_shared_region(base_vnode,

        if (new_shared_region == NULL) {
            shared_file_boot_time_init(base_vnode,

            vm_get_shared_region(current_task(),
                    &new_shared_region);

    vm_set_shared_region(current_task(), new_shared_region);

    if (old_shared_region)
        shared_region_mapping_dealloc(old_shared_region);

    new_info.self = (vm_offset_t)new_shared_region;
    shared_region_mapping_info(new_shared_region,
            &(new_info.text_region),
            &(new_info.text_size),
            &(new_info.data_region),
            &(new_info.data_size),
            &(new_info.region_mappings),
            &(new_info.client_base),
            &(new_info.alternate_base),
            &(new_info.alternate_next),
            &(new_info.fs_base),
            &(new_info.flags), &next);
    if(shared_regions_active) {
        if(vm_region_clone(old_info.text_region, new_info.text_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 1");
            shared_region_mapping_dealloc(new_shared_region);

        if (vm_region_clone(old_info.data_region, new_info.data_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 2");
            shared_region_mapping_dealloc(new_shared_region);

        if (chain_regions) {
            /*
             * We want a "shadowed" clone, a private superset of the old
             * shared region.  The info about the old mappings is still
             */
            shared_region_object_chain_attach(
                new_shared_region, old_shared_region);

            /*
             * We want a completely detached clone with no link to
             * the old shared region.  We'll be removing some mappings
             * in our private, cloned, shared region, so the old mappings
             * will become irrelevant to us.  Since we have a private
             * "shared region" now, it isn't going to be shared with
             * anyone else and we won't need to maintain mappings info.
             */
            shared_region_object_chain_detached(new_shared_region);

    if (vm_map_region_replace(current_map(), old_info.text_region,
            new_info.text_region, old_info.client_base,
            old_info.client_base+old_info.text_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 3");
        shared_region_mapping_dealloc(new_shared_region);

    if(vm_map_region_replace(current_map(), old_info.data_region,
            new_info.data_region,
            old_info.client_base + old_info.text_size,
            old_info.client_base
            + old_info.text_size + old_info.data_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 4");
        shared_region_mapping_dealloc(new_shared_region);

    vm_set_shared_region(current_task(), new_shared_region);

    /* consume the reference which wasn't accounted for in object */
    if (!shared_regions_active || !chain_regions)
        shared_region_mapping_dealloc(old_shared_region);

    SHARED_REGION_TRACE(
        SHARED_REGION_TRACE_INFO,
        ("shared_region: %p task=%p "
         "clone(active=%d, base=0x%x,chain=%d) "
         "old=%p[%x,%x,%x] new=%p[%x,%x,%x]\n",
         current_thread(), current_task(),
         shared_regions_active, base_vnode, chain_regions,
/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file     */

struct profile_names_header {
    unsigned int    number_of_profiles;
    unsigned int    user_id;
    unsigned int    version;
    off_t           element_array;
    unsigned int    spare1;
    unsigned int    spare2;
    unsigned int    spare3;

struct profile_element {
    unsigned int    mod_date;

struct global_profile {
    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     buf_ptr;

struct global_profile_cache {
    struct global_profile   profiles[3];

/* forward declarations */
int bsd_open_page_cache_files(unsigned int user,
        struct global_profile **profile);
void bsd_close_page_cache_files(struct global_profile *profile);
int bsd_search_page_cache_data_base(
        struct profile_names_header *database,
        unsigned int mod_date,
        unsigned int *profile_size);

struct global_profile_cache global_user_profile_cache =
    {3, 0, {{NULL, NULL, 0, 0, 0, 0},
            {NULL, NULL, 0, 0, 0, 0},
            {NULL, NULL, 0, 0, 0, 0}} };
/* BSD_OPEN_PAGE_CACHE_FILES:                                        */
/* Caller provides a user id.  This id was used in                  */
/* prepare_profile_database to create two unique absolute           */
/* file paths to the associated profile files.  These files         */
/* are either opened or bsd_open_page_cache_files returns an        */
/* error.  The header of the names file is then consulted.          */
/* The header and the vnodes for the names and data files are       */
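/*
 * Worked example (illustrative): bsd_open_page_cache_files() below builds the
 * two absolute paths by appending the user id, formatted with "%x", to
 * "/var/vm/app_profile/".  For user id 501 (0x1f5) the files consulted would
 * therefore be "/var/vm/app_profile/1f5_data" and
 * "/var/vm/app_profile/1f5_names".
 */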
bsd_open_page_cache_files(
    struct global_profile **profile)

    const char *cache_path = "/var/vm/app_profile/";

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     names_buf;
    vm_offset_t     buf_ptr;

    int     profile_names_length;
    int     profile_data_length;
    char    *profile_data_string;
    char    *profile_names_string;

    struct vfs_context  context;

    struct nameidata    nd_names;
    struct nameidata    nd_data;

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    for(i = 0; i < global_user_profile_cache.max_ele; i++) {
        if((global_user_profile_cache.profiles[i].user == user)
            && (global_user_profile_cache.profiles[i].data_vp

            *profile = &global_user_profile_cache.profiles[i];
            /* already in cache, we're done */
            if ((*profile)->busy) {
                /*
                 * drop funnel and wait
                 */
                (void)tsleep((void *)
                    PRIBIO, "app_profile", 0);

            (*profile)->busy = 1;
            (*profile)->age = global_user_profile_cache.age;

            /*
             * entries in cache are held with a valid
             * usecount... take an iocount which will
             * be dropped in "bsd_close_page_cache_files"
             * which is called after the read or writes to
             * these files are done
             */
            if ( (vnode_getwithref((*profile)->data_vp)) ) {

                vnode_rele((*profile)->data_vp);
                vnode_rele((*profile)->names_vp);

                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

            if ( (vnode_getwithref((*profile)->names_vp)) ) {

                vnode_put((*profile)->data_vp);
                vnode_rele((*profile)->data_vp);
                vnode_rele((*profile)->names_vp);

                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

    global_user_profile_cache.age += 1;

    lru = global_user_profile_cache.age;

    for(i = 0; i < global_user_profile_cache.max_ele; i++) {
        /* Skip entry if it is in the process of being reused */
        if(global_user_profile_cache.profiles[i].data_vp ==
            (struct vnode *)0xFFFFFFFF)

        /* Otherwise grab the first empty entry */
        if(global_user_profile_cache.profiles[i].data_vp == NULL) {
            *profile = &global_user_profile_cache.profiles[i];
            (*profile)->age = global_user_profile_cache.age;

        /* Otherwise grab the oldest entry */
        if(global_user_profile_cache.profiles[i].age < lru) {
            lru = global_user_profile_cache.profiles[i].age;
            *profile = &global_user_profile_cache.profiles[i];

    /* Did we set it? */
    if (*profile == NULL) {
        /*
         * No entries are available; this can only happen if all
         * of them are currently in the process of being reused;
         * if this happens, we sleep on the address of the first
         * element, and restart.  This is less than ideal, but we
         * know it will work because we know that there will be a
         * wakeup on any entry currently in the process of being
         *
         * XXX Recommend a two handed clock and more than 3 total
         * XXX cache entries at some point in the future.
         */
        /*
         * drop funnel and wait
         */
        (void)tsleep((void *)
            &global_user_profile_cache.profiles[0],
            PRIBIO, "app_profile", 0);

    /*
     * If it's currently busy, we've picked the one at the end of the
     * LRU list, but it's currently being actively used.  We sleep on
     * its address and restart.
     */
    if ((*profile)->busy) {
        /*
         * drop funnel and wait
         */
        (void)tsleep((void *)
            PRIBIO, "app_profile", 0);

    (*profile)->busy = 1;
    (*profile)->user = user;

    /*
     * put dummy value in for now to get competing request to wait
     * above until we are finished
     *
     * Save the data_vp before setting it, so we can set it before
     * we kmem_free() or vrele().  If we don't do this, then we
     * have a potential funnel race condition we have to deal with.
     */
    data_vp = (*profile)->data_vp;
    (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

    /*
     * Age the cache here in all cases; this guarantees that we won't
     * be reusing only one entry over and over, once the system reaches
     */
    global_user_profile_cache.age += 1;

    if(data_vp != NULL) {
        kmem_free(kernel_map,
            (*profile)->buf_ptr, 4 * PAGE_SIZE);
        if ((*profile)->names_vp) {
            vnode_rele((*profile)->names_vp);
            (*profile)->names_vp = NULL;

        vnode_rele(data_vp);

    /* Try to open the appropriate user's profile files */
    /* If neither file is present, try to create them   */
    /* If one file is present and the other not, fail.  */
    /* If the files do exist, check them for the app_file */
    /* requested and read it in if present              */

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&profile_data_string, PATH_MAX);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    /* Split the buffer in half since we know the size of */
    /* our file path and our allocation is adequate for   */
    /* both file path names                               */
    profile_names_string = profile_data_string + (PATH_MAX/2);

    strcpy(profile_data_string, cache_path);
    strcpy(profile_names_string, cache_path);
    profile_names_length = profile_data_length
        = strlen(profile_data_string);
    substring = profile_data_string + profile_data_length;
    sprintf(substring, "%x_data", user);
    substring = profile_names_string + profile_names_length;
    sprintf(substring, "%x_names", user);

    /* We now have the absolute file names */

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
        UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
    NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
        UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);

    if ( (error = vn_open(&nd_data, FREAD | FWRITE, 0)) ) {

        printf("bsd_open_page_cache_files: CacheData file not found %s\n",
            profile_data_string);

        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    data_vp = nd_data.ni_vp;

    if ( (error = vn_open(&nd_names, FREAD | FWRITE, 0)) ) {
        printf("bsd_open_page_cache_files: NamesData file not found %s\n",
            profile_data_string);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);

        vnode_rele(data_vp);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    names_vp = nd_names.ni_vp;

    if ((error = vnode_size(names_vp, &file_size, &context)) != 0) {
        printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);

        vnode_rele(names_vp);
        vnode_put(names_vp);
        vnode_rele(data_vp);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    if(size > 4 * PAGE_SIZE)
        size = 4 * PAGE_SIZE;
    buf_ptr = names_buf;

        error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
            UIO_SYSSPACE32, IO_NODELOCKED, kauth_cred_get(),
        resid = (vm_size_t) resid_int;
        if((error) || (size == resid)) {

            kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);
            kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);

            vnode_rele(names_vp);
            vnode_put(names_vp);
            vnode_rele(data_vp);

            (*profile)->data_vp = NULL;
            (*profile)->busy = 0;

        buf_ptr += size - resid;
        resid_off += size - resid;

    kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);

    (*profile)->names_vp = names_vp;
    (*profile)->data_vp = data_vp;
    (*profile)->buf_ptr = names_buf;

    /*
     * at this point, both the names_vp and the data_vp have
     * a valid usecount and an iocount held
     */

bsd_close_page_cache_files(
    struct global_profile *profile)

    vnode_put(profile->data_vp);
    vnode_put(profile->names_vp);
int
bsd_read_page_cache_file(
	unsigned int	user,
	int		*fid,
	int		*mod,
	char		*app_name,
	struct vnode	*app_vp,
	vm_offset_t	*buffer,
	vm_offset_t	*bufsize)
{
	boolean_t	funnel_state;

	struct proc	*p;
	int		error;
	vm_size_t	resid;

	off_t		profile;
	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vnode_attr	va;
	struct vfs_context	context;

	kern_return_t	ret;

	struct vnode	*names_vp;
	struct vnode	*data_vp;

	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them  */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	error = bsd_open_page_cache_files(user, &uid_files);
	if(error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fileid);
	VATTR_WANTED(&va, va_modify_time);

	if ((error = vnode_getattr(app_vp, &va, &context))) {
		printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	*fid = (u_long)va.va_fileid;
	*mod = va.va_modify_time.tv_sec;

	if (bsd_search_page_cache_data_base(
			names_vp,
			(struct profile_names_header *)names_buf,
			app_name,
			(unsigned int) va.va_modify_time.tv_sec,
			(u_long)va.va_fileid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if(profile_size == 0) {
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return EINVAL;
		}
		ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size));
		if(ret) {
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return ENOMEM;
		}
		*bufsize = profile_size;
		while(profile_size) {
			int resid_int;

			error = vn_rdwr(UIO_READ, data_vp,
				(caddr_t) *buffer, profile_size,
				profile, UIO_SYSSPACE32, IO_NODELOCKED,
				kauth_cred_get(), &resid_int, p);
			resid = (vm_size_t) resid_int;
			if((error) || (profile_size == resid)) {
				bsd_close_page_cache_files(uid_files);
				kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
				thread_funnel_set(kernel_flock, funnel_state);
				return EINVAL;
			}
			profile += profile_size - resid;
			profile_size = resid;
		}
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}
}
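
/*
 * bsd_search_page_cache_data_base scans the names database for an
 * entry whose inode, mod date and (12-byte) name match the request.
 * The first four pages of the names file are already cached in the
 * buffer passed in as "database"; if the element array extends past
 * that, the remainder is paged in four pages at a time from vp into a
 * temporary kernel buffer.  On a hit, *profile and *profile_size give
 * the offset and size of the profile in the data file; both are left
 * zero when no match is found.
 */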
int
bsd_search_page_cache_data_base(
	struct vnode			*vp,
	struct profile_names_header	*database,
	char				*app_name,
	unsigned int			mod_date,
	unsigned int			inode,
	off_t				*profile,
	unsigned int			*profile_size)
{
	struct proc		*p;

	unsigned int		i;
	struct profile_element	*element;
	unsigned int		ele_total;
	unsigned int		extended_list = 0;
	off_t			file_off = 0;
	vm_size_t		size;
	off_t			resid_off;
	vm_size_t		resid;
	vm_offset_t		local_buf = 0;

	int			error;
	kern_return_t		ret;

	p = current_proc();

	if(((vm_offset_t)database->element_array) !=
			sizeof(struct profile_names_header)) {
		return EINVAL;
	}
	element = (struct profile_element *)(
			(vm_offset_t)database->element_array +
			(vm_offset_t)database);

	ele_total = database->number_of_profiles;

	*profile = 0;
	*profile_size = 0;

	while(ele_total) {
		/* note: code assumes header + n*ele comes out on a page boundary */
		if(((local_buf == 0) && (sizeof(struct profile_names_header) +
			(ele_total * sizeof(struct profile_element)))
					> (PAGE_SIZE * 4)) ||
			((local_buf != 0) &&
				(ele_total * sizeof(struct profile_element))
					> (PAGE_SIZE * 4))) {
			extended_list = ele_total;
			if(element == (struct profile_element *)
				((vm_offset_t)database->element_array +
					(vm_offset_t)database)) {
				ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
			} else {
				ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
			}
			extended_list -= ele_total;
		}
		for (i=0; i<ele_total; i++) {
			if((mod_date == element[i].mod_date)
					&& (inode == element[i].inode)) {
				if(strncmp(element[i].name, app_name, 12) == 0) {
					*profile = element[i].addr;
					*profile_size = element[i].size;
					if(local_buf != 0) {
						kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
					}
					return 0;
				}
			}
		}
		if(extended_list == 0)
			break;
		if(local_buf == 0) {
			ret = kmem_alloc(kernel_map, &local_buf, 4 * PAGE_SIZE);
			if(ret != KERN_SUCCESS) {
				return ENOMEM;
			}
		}
		element = (struct profile_element *)local_buf;
		ele_total = extended_list;
		extended_list = 0;
		file_off += 4 * PAGE_SIZE;
		if((ele_total * sizeof(struct profile_element)) >
					(PAGE_SIZE * 4)) {
			size = PAGE_SIZE * 4;
		} else {
			size = ele_total * sizeof(struct profile_element);
		}
		resid_off = 0;
		while(size) {
			int resid_int;

			error = vn_rdwr(UIO_READ, vp,
				CAST_DOWN(caddr_t, (local_buf + resid_off)),
				size, file_off + resid_off, UIO_SYSSPACE32,
				IO_NODELOCKED, kauth_cred_get(), &resid_int, p);
			resid = (vm_size_t) resid_int;
			if((error) || (size == resid)) {
				if(local_buf != 0) {
					kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
				}
				return EINVAL;
			}
			resid_off += size-resid;
			size = resid;
		}
	}
	if(local_buf != 0) {
		kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
	}
	return 0;
}
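
/*
 * bsd_write_page_cache_file appends a new profile for file_name: the
 * profile data is appended at the end of the per-user data file, and a
 * profile_element describing it (offset, size, inode, mod date, name)
 * is added to the names file, whose header is then rewritten with the
 * updated profile count.  If a matching entry already exists, the
 * write is skipped.
 */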
int
bsd_write_page_cache_file(
	unsigned int	user,
	char		*file_name,
	caddr_t		buffer,
	vm_size_t	size,
	int		mod,
	int		fid)
{
	struct proc		*p;
	int			resid;
	off_t			resid_off;
	int			error;
	boolean_t		funnel_state;
	off_t			file_size;
	struct vfs_context	context;
	off_t			profile;
	unsigned int		profile_size;

	vm_offset_t	names_buf;
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	struct profile_names_header *profile_header;
	off_t		name_offset;
	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	error = bsd_open_page_cache_files(user, &uid_files);
	if(error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/* Stat data file for size */

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	if ((error = vnode_size(data_vp, &file_size, &context)) != 0) {
		printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	if (bsd_search_page_cache_data_base(names_vp,
			(struct profile_names_header *)names_buf,
			file_name, (unsigned int) mod,
			fid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if(profile_size == 0) {
			unsigned int	header_size;
			vm_offset_t	buf_ptr;

			/* Our Write case */

			/* read header for last entry */
			profile_header =
				(struct profile_names_header *)names_buf;
			name_offset = sizeof(struct profile_names_header) +
				(sizeof(struct profile_element)
					* profile_header->number_of_profiles);
			profile_header->number_of_profiles += 1;

			if(name_offset < PAGE_SIZE * 4) {
				struct profile_element	*name;
				/* write new entry */
				name = (struct profile_element *)
					(names_buf + (vm_offset_t)name_offset);
				name->addr = file_size;
				name->size = size;
				name->mod_date = mod;
				name->inode = fid;
				strncpy (name->name, file_name, 12);
			} else {
				unsigned int	ele_size;
				struct profile_element	name;
				/* write new entry */
				name.addr = file_size;
				name.size = size;
				name.mod_date = mod;
				name.inode = fid;
				strncpy (name.name, file_name, 12);
				/* write element out separately */
				ele_size = sizeof(struct profile_element);
				buf_ptr = (vm_offset_t)&name;
				resid_off = name_offset;

				while(ele_size) {
					error = vn_rdwr(UIO_WRITE, names_vp,
						(caddr_t)buf_ptr,
						ele_size, resid_off,
						UIO_SYSSPACE32, IO_NODELOCKED,
						kauth_cred_get(), &resid, p);
					if(error) {
						printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
						bsd_close_page_cache_files(
							uid_files);
						thread_funnel_set(
							kernel_flock,
							funnel_state);
						return error;
					}
					buf_ptr += (vm_offset_t)ele_size-resid;
					resid_off += ele_size-resid;
					ele_size = resid;
				}
			}

			if(name_offset < PAGE_SIZE * 4) {
				header_size = name_offset +
					sizeof(struct profile_element);
			} else {
				header_size =
					sizeof(struct profile_names_header);
			}
			buf_ptr = (vm_offset_t)profile_header;
			resid_off = 0;

			/* write names file header */
			while(header_size) {
				error = vn_rdwr(UIO_WRITE, names_vp,
					(caddr_t)buf_ptr,
					header_size, resid_off,
					UIO_SYSSPACE32, IO_NODELOCKED,
					kauth_cred_get(), &resid, p);
				if(error) {
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buf_ptr += (vm_offset_t)header_size-resid;
				resid_off += header_size-resid;
				header_size = resid;
			}
			/* write profile to data file */
			resid_off = file_size;
			while(size) {
				error = vn_rdwr(UIO_WRITE, data_vp,
					(caddr_t)buffer, size, resid_off,
					UIO_SYSSPACE32, IO_NODELOCKED,
					kauth_cred_get(), &resid, p);
				if(error) {
					printf("bsd_write_page_cache_file: Can't write profile data %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buffer += size-resid;
				resid_off += size-resid;
				size = resid;
			}
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return 0;
		}
		/* Someone else wrote a twin profile before us */
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}
}
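
/*
 * prepare_profile_database creates the pair of profile files for the
 * given user under /var/vm/app_profile/ and writes an initial
 * names-file header (zero profiles, version 1), chowning both files
 * to that user.
 */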
int
prepare_profile_database(int user)
{
	const char *cache_path = "/var/vm/app_profile/";
	struct proc	*p;
	int		error;
	int		resid;
	off_t		resid_off;
	vm_size_t	size;

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	char		*substring;

	struct vnode_attr	va;
	struct vfs_context	context;

	struct profile_names_header *profile_header;
	kern_return_t	ret;

	struct nameidata nd_names;
	struct nameidata nd_data;

	p = current_proc();

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);
	if(ret) {
		return ENOMEM;
	}

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
			= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);
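
	/*
	 * For example (assuming uid 0x1f5), the two paths built above
	 * would be "/var/vm/app_profile/1f5_data" and
	 * "/var/vm/app_profile/1f5_names".
	 */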
	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
	if(ret) {
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		return ENOMEM;
	}

	NDINIT(&nd_names, LOOKUP, FOLLOW,
		UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
	NDINIT(&nd_data, LOOKUP, FOLLOW,
		UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);

	if ( (error = vn_open(&nd_data,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

		return 0;
	}
	data_vp = nd_data.ni_vp;

	if ( (error = vn_open(&nd_names,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
		printf("prepare_profile_database: Can't create CacheNames %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

		vnode_rele(data_vp);
		vnode_put(data_vp);

		return error;
	}
	names_vp = nd_names.ni_vp;

	/* Write Header for new names file */

	profile_header = (struct profile_names_header *)names_buf;

	profile_header->number_of_profiles = 0;
	profile_header->user_id = user;
	profile_header->version = 1;
	profile_header->element_array =
			sizeof(struct profile_names_header);
	profile_header->spare1 = 0;
	profile_header->spare2 = 0;
	profile_header->spare3 = 0;

	size = sizeof(struct profile_names_header);
	buf_ptr = (vm_offset_t)profile_header;
	resid_off = 0;

	while(size) {
		error = vn_rdwr(UIO_WRITE, names_vp,
			(caddr_t)buf_ptr, size, resid_off,
			UIO_SYSSPACE32, IO_NODELOCKED,
			kauth_cred_get(), &resid, p);
		if(error) {
			printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string,
				PATH_MAX);

			vnode_rele(names_vp);
			vnode_put(names_vp);
			vnode_rele(data_vp);
			vnode_put(data_vp);

			return error;
		}
		buf_ptr += size-resid;
		resid_off += size-resid;
		size = resid;
	}

	VATTR_INIT(&va);
	VATTR_SET(&va, va_uid, user);

	error = vnode_setattr(names_vp, &va, &context);
	if(error) {
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_names_string);
	}
	vnode_rele(names_vp);
	vnode_put(names_vp);

	VATTR_INIT(&va);
	VATTR_SET(&va, va_uid, user);
	error = vnode_setattr(data_vp, &va, &context);
	if(error) {
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_data_string);
	}
	vnode_rele(data_vp);
	vnode_put(data_vp);

	kmem_free(kernel_map,
		(vm_offset_t)profile_data_string, PATH_MAX);
	kmem_free(kernel_map,
		(vm_offset_t)names_buf, 4 * PAGE_SIZE);
	return 0;
}