/*
 * Copyright (c) 2006 Apple Computer, Inc. All Rights Reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/mach_traps.h>
#include <mach/time_value.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/file_internal.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/vnode_internal.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/sysproto.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <vm/vm_protos.h>
	return (vm_map_check_protection(
			current_map(),
			vm_map_trunc_page(addr), vm_map_round_page(addr+len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
	kret = vm_map_wire(current_map(), vm_map_trunc_page(addr),
			vm_map_round_page(addr+len),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);

	case KERN_INVALID_ADDRESS:
	case KERN_PROTECTION_FAILURE:
	__unused int	dirtied)

	vm_map_offset_t	vaddr;

	pmap = get_task_pmap(current_task());
	for (vaddr = vm_map_trunc_page(addr);
	     vaddr < vm_map_round_page(addr+len);
	     vaddr += PAGE_SIZE) {
		paddr = pmap_extract(pmap, vaddr);
		pg = PHYS_TO_VM_PAGE(paddr);
		vm_page_set_modified(pg);
	}

	kret = vm_map_unwire(current_map(), vm_map_trunc_page(addr),
			vm_map_round_page(addr+len), FALSE);

	case KERN_INVALID_ADDRESS:
	case KERN_PROTECTION_FAILURE:
	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

int fubyte(user_addr_t addr)

	if (copyin(addr, (void *) &byte, sizeof(char)))

int fuibyte(user_addr_t addr)

	if (copyin(addr, (void *) &(byte), sizeof(char)))

	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuword(user_addr_t addr)

	if (copyin(addr, (void *) &word, sizeof(int)))

/* suiword and fuiword are the same as suword and fuword, respectively */

	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuiword(user_addr_t addr)

	if (copyin(addr, (void *) &word, sizeof(int)))
/*
 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
 * fetching and setting of process-sized size_t and pointer values.
 */
sulong(user_addr_t addr, int64_t word)

	if (IS_64BIT_PROCESS(current_proc())) {
		return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);
	return(suiword(addr, (long)word));

fulong(user_addr_t addr)

	if (IS_64BIT_PROCESS(current_proc())) {
		if (copyin(addr, (void *)&longword, sizeof(longword)) != 0)
	return((int64_t)fuiword(addr));

suulong(user_addr_t addr, uint64_t uword)

	if (IS_64BIT_PROCESS(current_proc())) {
		return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);
	return(suiword(addr, (u_long)uword));

fuulong(user_addr_t addr)

	if (IS_64BIT_PROCESS(current_proc())) {
		if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0)
	return((uint64_t)fuiword(addr));
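/*
 * Illustrative sketch (not from the original source): how a kernel routine
 * might use fulong()/sulong() to read and update a pointer-sized value in a
 * user process, letting these helpers pick 4- or 8-byte accesses based on
 * IS_64BIT_PROCESS().  The helper below is hypothetical and compiled out.
 */
#if 0
static int
example_increment_user_counter(user_addr_t counter_addr)
{
	int64_t value;

	value = fulong(counter_addr);	/* -1 on fault (or a legitimate -1) */
	if (sulong(counter_addr, value + 1) != 0)
		return (EFAULT);
	return (0);
}
#endif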
swapon(__unused struct proc *procp, __unused struct swapon_args *uap, __unused int *retval)

	struct pid_for_task_args *args)

	mach_port_name_t	t = args->t;
	user_addr_t		pid_addr = args->pid;
	kern_return_t		err = KERN_SUCCESS;
	boolean_t		funnel_state;

	AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
	AUDIT_ARG(mach_port1, t);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {

	p = get_bsdtask_info(t1);

	(void) copyout((char *) &pid, pid_addr, sizeof(int));
	thread_funnel_set(kernel_flock, funnel_state);
	AUDIT_MACH_SYSCALL_EXIT(err);
/*
 * Routine:	task_for_pid
 *
 *	Get the task port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
	struct task_for_pid_args *args)

	mach_port_name_t	target_tport = args->target_tport;
	user_addr_t		task_addr = args->t;
	struct uthread		*uthread;
	mach_port_name_t	tret;
	boolean_t		funnel_state;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(mach_port1, target_tport);

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	p1 = get_bsdtask_info(t1);	/* XXX current proc */

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	uthread = get_bsdthread_info(current_thread());
	if (uthread->uu_ucred != p1->p_ucred &&
	    (uthread->uu_flag & UT_SETUID) == 0) {
		kauth_cred_t old = uthread->uu_ucred;
		uthread->uu_ucred = p1->p_ucred;
		kauth_cred_ref(uthread->uu_ucred);
		kauth_cred_rele(old);

	AUDIT_ARG(process, p);

	    (p != (struct proc *) 0)
	    && (p1 != (struct proc *) 0)
	    || !(suser(kauth_cred_get(), 0))
	    || ((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get()))
		&& (p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid)
		&& ((p->p_flag & P_SUGID) == 0))
	    && (p->p_stat != SZOMB)

		if (p->task != TASK_NULL) {
			task_reference(p->task);
			sright = (void *)convert_task_to_port(p->task);
			tret = ipc_port_copyout_send(
				get_task_ipcspace(current_task()));
			tret = MACH_PORT_NULL;
		AUDIT_ARG(mach_port2, tret);
		(void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
		error = KERN_SUCCESS;

		tret = MACH_PORT_NULL;
		(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
		error = KERN_FAILURE;

	thread_funnel_set(kernel_flock, funnel_state);
	AUDIT_MACH_SYSCALL_EXIT(error);
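/*
 * Illustrative sketch (not from the original source): the user-space view of
 * this trap, reached through the task_for_pid() call.  The caller must be
 * privileged or share the target's user ID, as enforced above.
 */
#if 0
#include <mach/mach.h>
#include <mach/mach_error.h>
#include <stdio.h>

static void
example_lookup_task(int pid)
{
	mach_port_name_t task = MACH_PORT_NULL;
	kern_return_t kr;

	kr = task_for_pid(mach_task_self(), pid, &task);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "task_for_pid(%d): %s\n", pid, mach_error_string(kr));
		return;
	}
	/* ... use the task port, then release the send right ... */
	mach_port_deallocate(mach_task_self(), task);
}
#endif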
/*
 * shared_region_make_private_np:
 *
 * This system call is for "dyld" only.
 *
 * It creates a private copy of the current process's "shared region" for
 * split libraries.  "dyld" uses this when the shared region is full or
 * it needs to load a split library that conflicts with an already loaded one
 * that this process doesn't need.  "dyld" specifies a set of address ranges
 * that it wants to keep in the now-private "shared region".  These cover
 * the set of split libraries that the process needs so far.  The kernel needs
 * to deallocate the rest of the shared region, so that it's available for
 * more libraries for this process.
 */
shared_region_make_private_np(
	struct shared_region_make_private_np_args *uap,
	__unused int *retvalp)

	boolean_t		using_shared_regions;
	user_addr_t		user_ranges;
	unsigned int		range_count;
	vm_size_t		ranges_size;
	struct shared_region_range_np		*ranges;
	shared_region_mapping_t			shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t			next;

	range_count = uap->rangeCount;
	user_ranges = uap->ranges;
	ranges_size = (vm_size_t) (range_count * sizeof (ranges[0]));

	/* allocate kernel space for the "ranges" */
	if (range_count != 0) {
		if ((mach_vm_size_t) ranges_size !=
		    (mach_vm_size_t) range_count * sizeof (ranges[0])) {
			/* 32-bit integer overflow */
		kr = kmem_alloc(kernel_map,
				(vm_offset_t *) &ranges,
		if (kr != KERN_SUCCESS) {

		/* copy "ranges" from user-space */
		error = copyin(user_ranges,

	if (p->p_flag & P_NOSHLIB) {
		/* no split library has been mapped for this process so far */
		using_shared_regions = FALSE;
		/* this process has already mapped some split libraries */
		using_shared_regions = TRUE;

	/*
	 * Get a private copy of the current shared region.
	 * Do not chain it to the system-wide shared region, as we'll want
	 * to map other split libraries in place of the old ones.  We want
	 * to completely detach from the system-wide shared region and go our
	 * own way after this point, not sharing anything with other processes.
	 */
	error = clone_system_shared_regions(using_shared_regions,
					    FALSE, /* chain_regions */

	/* get info on the newly allocated shared region */
	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t) shared_region;
	shared_region_mapping_info(shared_region,
				   &(task_mapping_info.text_region),
				   &(task_mapping_info.text_size),
				   &(task_mapping_info.data_region),
				   &(task_mapping_info.data_size),
				   &(task_mapping_info.region_mappings),
				   &(task_mapping_info.client_base),
				   &(task_mapping_info.alternate_base),
				   &(task_mapping_info.alternate_next),
				   &(task_mapping_info.fs_base),
				   &(task_mapping_info.system),
				   &(task_mapping_info.flags),

	/*
	 * We now have our private copy of the shared region, as it was before
	 * the call to clone_system_shared_regions().  We now need to clean it
	 * up and keep only the memory areas described by the "ranges" array.
	 */
	kr = shared_region_cleanup(range_count, ranges, &task_mapping_info);

	if (ranges != NULL) {
		kmem_free(kernel_map,
			  (vm_offset_t) ranges,
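/*
 * Illustrative sketch (not from the original source): the shape of the
 * "ranges" array a dyld-like caller would hand to this call.  The field
 * layout (address then size) and the user-space entry point shown here are
 * assumptions made for the example only.
 */
#if 0
	struct shared_region_range_np keep_ranges[] = {
		{ 0x90000000, 0x00200000 },	/* split-library TEXT still in use */
		{ 0xa0000000, 0x00100000 },	/* matching DATA range */
	};
	error = shared_region_make_private_np(sizeof (keep_ranges) /
					      sizeof (keep_ranges[0]),
					      keep_ranges);
#endif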
/*
 * shared_region_map_file_np:
 *
 * This system call is for "dyld" only.
 *
 * "dyld" wants to map parts of a split library in the shared region.
 * We get a file descriptor on the split library to be mapped and a set
 * of mapping instructions, describing which parts of the file to map in
 * which areas of the shared segment and with what protection.
 * The "shared region" is split in 2 areas:
 * 0x90000000 - 0xa0000000 : read-only area (for TEXT and LINKEDIT sections),
 * 0xa0000000 - 0xb0000000 : writable area (for DATA sections).
 */
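/*
 * Worked example (added for clarity; not in the original source): a mapping
 * whose sfm_address is 0x90b24000 lies in the read-only area above, so
 * masking it with the segment mask (0xF0000000 in the usual definition of
 * GLOBAL_SHARED_SEGMENT_MASK) yields GLOBAL_SHARED_TEXT_SEGMENT, and the
 * offset kept inside the region is 0x00b24000.  An address of 0xa0b24000
 * resolves to the writable DATA area the same way.
 */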
shared_region_map_file_np(
	struct shared_region_map_file_np_args *uap,
	__unused int *retvalp)

	unsigned int		mapping_count;
	user_addr_t		user_mappings;	/* 64-bit */
	user_addr_t		user_slide_p;	/* 64-bit */
	struct shared_file_mapping_np	*mappings;
	vm_size_t		mappings_size;
	mach_vm_offset_t	slide;
	struct vfs_context	context;
	memory_object_control_t	file_control;
	memory_object_size_t	file_size;
	shared_region_mapping_t	shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t	next;
	shared_region_mapping_t	default_shared_region;
	boolean_t		using_default_region;
	mach_vm_offset_t	base_offset, end_offset;
	mach_vm_offset_t	original_base_offset;
	boolean_t		mappings_in_segment;
#define SFM_MAX_STACK	6
	struct shared_file_mapping_np	stack_mappings[SFM_MAX_STACK];
	/* get file descriptor for split library from arguments */

	/* get file structure from file descriptor */
	error = fp_lookup(p, fd, &fp, 0);

	/* make sure we're attempting to map a vnode */
	if (fp->f_fglob->fg_type != DTYPE_VNODE) {

	/* we need at least read permission on the file */
	if (! (fp->f_fglob->fg_flag & FREAD)) {

	/* get vnode from file structure */
	error = vnode_getwithref((vnode_t)fp->f_fglob->fg_data);

	vp = (struct vnode *) fp->f_fglob->fg_data;

	/* make sure the vnode is a regular file */
	if (vp->v_type != VREG) {

	context.vc_ucred = kauth_cred_get();
	if ((error = vnode_size(vp, &fs, &context)) != 0)
	/*
	 * Get the list of mappings the caller wants us to establish.
	 */
	mapping_count = uap->mappingCount;	/* the number of mappings */
	mappings_size = (vm_size_t) (mapping_count * sizeof (mappings[0]));
	if (mapping_count == 0) {
		error = 0;	/* no mappings: we're done ! */
	} else if (mapping_count <= SFM_MAX_STACK) {
		mappings = &stack_mappings[0];
		if ((mach_vm_size_t) mappings_size !=
		    (mach_vm_size_t) mapping_count * sizeof (mappings[0])) {
			/* 32-bit integer overflow */
		kr = kmem_alloc(kernel_map,
				(vm_offset_t *) &mappings,
		if (kr != KERN_SUCCESS) {

		user_mappings = uap->mappings;	/* the mappings, in user space */
		error = copyin(user_mappings,

	/*
	 * If the caller provides a "slide" pointer, it means they're OK
	 * with us moving the mappings around to make them fit.
	 */
	user_slide_p = uap->slide_p;
	/*
	 * Make each mapping address relative to the beginning of the
	 * shared region.  Check that all mappings are in the shared region.
	 * Compute the maximum set of protections required to tell the
	 * buffer cache how we mapped the file (see call to ubc_map() below).
	 */
	max_prot = VM_PROT_NONE;
	mappings_in_segment = TRUE;
	for (j = 0; j < mapping_count; j++) {
		mach_vm_offset_t segment;
		segment = (mappings[j].sfm_address &
			   GLOBAL_SHARED_SEGMENT_MASK);
		if (segment != GLOBAL_SHARED_TEXT_SEGMENT &&
		    segment != GLOBAL_SHARED_DATA_SEGMENT) {
			/* this mapping is not in the shared region... */
			if (user_slide_p == NULL) {
				/* ... and we can't slide it in: fail */
			/* expect all mappings to be outside */
			mappings_in_segment = FALSE;
		} else if (mappings_in_segment != FALSE) {
			/* other mappings were not outside: fail */
			/* we'll try and slide that mapping in the segments */
			/* expect all mappings to be inside */
			mappings_in_segment = TRUE;
		} else if (mappings_in_segment != TRUE) {
			/* other mappings were not inside: fail */
		/* get a relative offset inside the shared segments */
		mappings[j].sfm_address -= GLOBAL_SHARED_TEXT_SEGMENT;
		if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK)
			base_offset = (mappings[j].sfm_address &
				       SHARED_TEXT_REGION_MASK);
		if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK) +
		    mappings[j].sfm_size > end_offset) {
				(mappings[j].sfm_address &
				 SHARED_TEXT_REGION_MASK) +
				mappings[j].sfm_size;
		max_prot |= mappings[j].sfm_max_prot;

	/* Make all mappings relative to the base_offset */
	base_offset = vm_map_trunc_page(base_offset);
	end_offset = vm_map_round_page(end_offset);
	for (j = 0; j < mapping_count; j++) {
		mappings[j].sfm_address -= base_offset;
	original_base_offset = base_offset;
	if (mappings_in_segment == FALSE) {
		/*
		 * We're trying to map a library that was not pre-bound to
		 * be in the shared segments.  We want to try and slide it
		 * back into the shared segments but as far back as possible,
		 * so that it doesn't clash with pre-bound libraries.  Set
		 * the base_offset to the end of the region, so that it can't
		 * possibly fit there and will have to be slid.
		 */
		base_offset = SHARED_TEXT_REGION_SIZE - end_offset;
	/* get the file's memory object handle */
	UBCINFOCHECK("shared_region_map_file_np", vp);
	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
	/*
	 * Get info about the current process's shared region.
	 * This might change if we decide we need to clone the shared region.
	 */
	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t) shared_region;
	shared_region_mapping_info(shared_region,
				   &(task_mapping_info.text_region),
				   &(task_mapping_info.text_size),
				   &(task_mapping_info.data_region),
				   &(task_mapping_info.data_size),
				   &(task_mapping_info.region_mappings),
				   &(task_mapping_info.client_base),
				   &(task_mapping_info.alternate_base),
				   &(task_mapping_info.alternate_next),
				   &(task_mapping_info.fs_base),
				   &(task_mapping_info.system),
				   &(task_mapping_info.flags),
	/*
	 * Are we using the system's current shared region
	 * for this environment ?
	 */
	default_shared_region =
		lookup_default_shared_region(ENV_DEFAULT_ROOT,
					     task_mapping_info.system);
	if (shared_region == default_shared_region) {
		using_default_region = TRUE;
		using_default_region = FALSE;
	shared_region_mapping_dealloc(default_shared_region);

	if (vp->v_mount != rootvnode->v_mount &&
	    using_default_region) {
		/*
		 * The split library is not on the root filesystem.  We don't
		 * want to pollute the system-wide ("default") shared region.
		 * Reject the mapping.  The caller (dyld) should "privatize"
		 * (via shared_region_make_private()) the shared region and
		 * try to establish the mapping privately for this process.
		 */
	/*
	 * Map the split library.
	 */
	kr = map_shared_file(mapping_count,
			     (user_slide_p) ? &slide : NULL);

		/*
		 * The mapping was successful.  Let the buffer cache know
		 * that we've mapped that file with these protections.  This
		 * prevents the vnode from getting recycled while it's mapped.
		 */
		(void) ubc_map(vp, max_prot);
	case KERN_INVALID_ADDRESS:
	case KERN_PROTECTION_FAILURE:
	case KERN_INVALID_ARGUMENT:
	if (p->p_flag & P_NOSHLIB) {
		/* signal that this process is now using split libraries */
		p->p_flag &= ~P_NOSHLIB;

		/*
		 * The caller provided a pointer to a "slide" offset.  Let
		 * them know by how much we slid the mappings.
		 */
		if (mappings_in_segment == FALSE) {
			/*
			 * We faked the base_offset earlier, so undo that
			 * and take into account the real base_offset.
			 */
			slide += SHARED_TEXT_REGION_SIZE - end_offset;
			slide -= original_base_offset;
			/*
			 * The mappings were slid into the shared segments
			 * and "slide" is relative to the beginning of the
			 * shared segments.  Adjust it to be absolute.
			 */
			slide += GLOBAL_SHARED_TEXT_SEGMENT;
		error = copyout(&slide,
	/*
	 * release the vnode...
	 * ubc_map() still holds it for us in the non-error case
	 */
	(void) vnode_put(vp);

	/* release the file descriptor */
	fp_drop(p, fd, fp, 0);

	if (mappings != NULL &&
	    mappings != &stack_mappings[0]) {
		kmem_free(kernel_map,
			  (vm_offset_t) mappings,
	__unused struct proc *p,
	__unused struct load_shared_file_args *uap,
	__unused int *retval)

	__unused struct proc *p,
	__unused struct reset_shared_file_args *uap,
	__unused int *retval)

new_system_shared_regions(
	__unused struct proc *p,
	__unused struct new_system_shared_regions_args *uap,
	__unused int *retval)
clone_system_shared_regions(
	int shared_regions_active,

	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
				   &(old_info.text_region),
				   &(old_info.text_size),
				   &(old_info.data_region),
				   &(old_info.data_size),
				   &(old_info.region_mappings),
				   &(old_info.client_base),
				   &(old_info.alternate_base),
				   &(old_info.alternate_next),
				   &(old_info.flags), &next);
	if ((shared_regions_active) ||
	    (base_vnode == ENV_DEFAULT_ROOT)) {
		if (shared_file_create_system_region(&new_shared_region))
		lookup_default_shared_region(
			base_vnode, old_info.system);
		if(new_shared_region == NULL) {
			shared_file_boot_time_init(
				base_vnode, old_info.system);
			vm_get_shared_region(current_task(), &new_shared_region);
	vm_set_shared_region(current_task(), new_shared_region);
	if(old_shared_region)
		shared_region_mapping_dealloc(old_shared_region);
	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
				   &(new_info.text_region),
				   &(new_info.text_size),
				   &(new_info.data_region),
				   &(new_info.data_size),
				   &(new_info.region_mappings),
				   &(new_info.client_base),
				   &(new_info.alternate_base),
				   &(new_info.alternate_next),
				   &(new_info.fs_base),
				   &(new_info.flags), &next);
	if(shared_regions_active) {
		if(vm_region_clone(old_info.text_region, new_info.text_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);
		if (vm_region_clone(old_info.data_region, new_info.data_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 2");
			shared_region_mapping_dealloc(new_shared_region);
		if (chain_regions) {
			/*
			 * We want a "shadowed" clone, a private superset of the old
			 * shared region.  The info about the old mappings is still
			 */
			shared_region_object_chain_attach(
				new_shared_region, old_shared_region);
			/*
			 * We want a completely detached clone with no link to
			 * the old shared region.  We'll be removing some mappings
			 * in our private, cloned, shared region, so the old mappings
			 * will become irrelevant to us.  Since we have a private
			 * "shared region" now, it isn't going to be shared with
			 * anyone else and we won't need to maintain mappings info.
			 */
			shared_region_object_chain_detached(new_shared_region);
	if (vm_map_region_replace(current_map(), old_info.text_region,
				  new_info.text_region, old_info.client_base,
				  old_info.client_base+old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);
	if(vm_map_region_replace(current_map(), old_info.data_region,
				 new_info.data_region,
				 old_info.client_base + old_info.text_size,
				 old_info.client_base
				 + old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);
	vm_set_shared_region(current_task(), new_shared_region);

	/* consume the reference which wasn't accounted for in object */
	if (!shared_regions_active || !chain_regions)
		shared_region_mapping_dealloc(old_shared_region);
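/*
 * Illustrative note (added, not in the original source):
 * shared_region_make_private_np() above calls this routine with
 * chain_regions == FALSE to obtain a fully detached private clone; a caller
 * that only wants a shadowed private superset of the current region would
 * pass TRUE instead, e.g. (argument values are hypothetical):
 */
#if 0
	error = clone_system_shared_regions(TRUE,	/* shared_regions_active */
					    TRUE,	/* chain_regions: keep chain */
					    ENV_DEFAULT_ROOT);
#endif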
/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file     */

struct profile_names_header {
	unsigned int	number_of_profiles;
	unsigned int	user_id;
	unsigned int	version;
	off_t		element_array;
	unsigned int	spare1;
	unsigned int	spare2;
	unsigned int	spare3;

struct profile_element {
	unsigned int	mod_date;

struct global_profile {
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	buf_ptr;

struct global_profile_cache {
	struct global_profile	profiles[3];

/* forward declarations */
int bsd_open_page_cache_files(unsigned int user,
			      struct global_profile **profile);
void bsd_close_page_cache_files(struct global_profile *profile);
int bsd_search_page_cache_data_base(
	struct profile_names_header *database,
	unsigned int mod_date,
	unsigned int *profile_size);

struct global_profile_cache global_user_profile_cache =
	{3, 0, {{NULL, NULL, 0, 0, 0, 0},
		{NULL, NULL, 0, 0, 0, 0},
		{NULL, NULL, 0, 0, 0, 0}} };
/* BSD_OPEN_PAGE_CACHE_FILES:                                  */
/* Caller provides a user id.  This id was used in             */
/* prepare_profile_database to create two unique absolute      */
/* file paths to the associated profile files.  These files    */
/* are either opened or bsd_open_page_cache_files returns an   */
/* error.  The header of the names file is then consulted.     */
/* The header and the vnodes for the names and data files are  */
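/* Illustrative example (added, not in the original source): for user id   */
/* 502 (0x1f6), the two absolute paths built by prepare_profile_database() */
/* and reopened here are "/var/vm/app_profile/1f6_data" and                */
/* "/var/vm/app_profile/1f6_names".                                        */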
bsd_open_page_cache_files(
	struct global_profile **profile)

	const char	*cache_path = "/var/vm/app_profile/";
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;
	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	struct vfs_context context;
	struct nameidata nd_names;
	struct nameidata nd_data;

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();
	for(i = 0; i<global_user_profile_cache.max_ele; i++) {
		if((global_user_profile_cache.profiles[i].user == user)
		   && (global_user_profile_cache.profiles[i].data_vp
			*profile = &global_user_profile_cache.profiles[i];
			/* already in cache, we're done */
			if ((*profile)->busy) {
				/*
				 * drop funnel and wait
				 */
				(void)tsleep((void *)
					     PRIBIO, "app_profile", 0);
			(*profile)->busy = 1;
			(*profile)->age = global_user_profile_cache.age;

			/*
			 * entries in cache are held with a valid
			 * usecount... take an iocount which will
			 * be dropped in "bsd_close_page_cache_files"
			 * which is called after the read or writes to
			 * these files are done
			 */
			if ( (vnode_getwithref((*profile)->data_vp)) ) {
				vnode_rele((*profile)->data_vp);
				vnode_rele((*profile)->names_vp);
				(*profile)->data_vp = NULL;
				(*profile)->busy = 0;
			if ( (vnode_getwithref((*profile)->names_vp)) ) {
				vnode_put((*profile)->data_vp);
				vnode_rele((*profile)->data_vp);
				vnode_rele((*profile)->names_vp);
				(*profile)->data_vp = NULL;
				(*profile)->busy = 0;
	global_user_profile_cache.age+=1;

	lru = global_user_profile_cache.age;
	for(i = 0; i<global_user_profile_cache.max_ele; i++) {
		/* Skip entry if it is in the process of being reused */
		if(global_user_profile_cache.profiles[i].data_vp ==
		   (struct vnode *)0xFFFFFFFF)
		/* Otherwise grab the first empty entry */
		if(global_user_profile_cache.profiles[i].data_vp == NULL) {
			*profile = &global_user_profile_cache.profiles[i];
			(*profile)->age = global_user_profile_cache.age;
		/* Otherwise grab the oldest entry */
		if(global_user_profile_cache.profiles[i].age < lru) {
			lru = global_user_profile_cache.profiles[i].age;
			*profile = &global_user_profile_cache.profiles[i];

	/* Did we set it? */
	if (*profile == NULL) {
		/*
		 * No entries are available; this can only happen if all
		 * of them are currently in the process of being reused;
		 * if this happens, we sleep on the address of the first
		 * element, and restart.  This is less than ideal, but we
		 * know it will work because we know that there will be a
		 * wakeup on any entry currently in the process of being
		 * XXX Recommend a two handed clock and more than 3 total
		 * XXX cache entries at some point in the future.
		 */
		/*
		 * drop funnel and wait
		 */
		(void)tsleep((void *)
			     &global_user_profile_cache.profiles[0],
			     PRIBIO, "app_profile", 0);

	/*
	 * If it's currently busy, we've picked the one at the end of the
	 * LRU list, but it's currently being actively used.  We sleep on
	 * its address and restart.
	 */
	if ((*profile)->busy) {
		/*
		 * drop funnel and wait
		 */
		(void)tsleep((void *)
			     PRIBIO, "app_profile", 0);
	(*profile)->busy = 1;
	(*profile)->user = user;

	/*
	 * put dummy value in for now to get competing request to wait
	 * above until we are finished
	 *
	 * Save the data_vp before setting it, so we can set it before
	 * we kmem_free() or vrele().  If we don't do this, then we
	 * have a potential funnel race condition we have to deal with.
	 */
	data_vp = (*profile)->data_vp;
	(*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

	/*
	 * Age the cache here in all cases; this guarantees that we won't
	 * be reusing only one entry over and over, once the system reaches
	 */
	global_user_profile_cache.age+=1;

	if(data_vp != NULL) {
		kmem_free(kernel_map,
			  (*profile)->buf_ptr, 4 * PAGE_SIZE);
		if ((*profile)->names_vp) {
			vnode_rele((*profile)->names_vp);
			(*profile)->names_vp = NULL;
		vnode_rele(data_vp);

	/* Try to open the appropriate user's profile files */
	/* If neither file is present, try to create them   */
	/* If one file is present and the other not, fail.  */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *)&profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names                                */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
		= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);

	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			  (vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
	       UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
	NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
	       UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);

	if ( (error = vn_open(&nd_data, FREAD | FWRITE, 0)) ) {
		printf("bsd_open_page_cache_files: CacheData file not found %s\n",
		       profile_data_string);
		kmem_free(kernel_map,
			  (vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			  (vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
	data_vp = nd_data.ni_vp;

	if ( (error = vn_open(&nd_names, FREAD | FWRITE, 0)) ) {
		printf("bsd_open_page_cache_files: NamesData file not found %s\n",
		       profile_data_string);
		kmem_free(kernel_map,
			  (vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			  (vm_offset_t)profile_data_string, PATH_MAX);
		vnode_rele(data_vp);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
	names_vp = nd_names.ni_vp;

	if ((error = vnode_size(names_vp, &file_size, &context)) != 0) {
		printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
		kmem_free(kernel_map,
			  (vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			  (vm_offset_t)names_buf, 4 * PAGE_SIZE);
		vnode_rele(names_vp);
		vnode_put(names_vp);
		vnode_rele(data_vp);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	if(size > 4 * PAGE_SIZE)
		size = 4 * PAGE_SIZE;
	buf_ptr = names_buf;

		error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
				UIO_SYSSPACE32, IO_NODELOCKED, kauth_cred_get(), &resid, p);
		if((error) || (size == resid)) {
			kmem_free(kernel_map,
				  (vm_offset_t)profile_data_string, PATH_MAX);
			kmem_free(kernel_map,
				  (vm_offset_t)names_buf, 4 * PAGE_SIZE);
			vnode_rele(names_vp);
			vnode_put(names_vp);
			vnode_rele(data_vp);
			(*profile)->data_vp = NULL;
			(*profile)->busy = 0;
		buf_ptr += size-resid;
		resid_off += size-resid;

	kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);

	(*profile)->names_vp = names_vp;
	(*profile)->data_vp = data_vp;
	(*profile)->buf_ptr = names_buf;

	/*
	 * at this point, both the names_vp and the data_vp have
	 * a valid usecount and an iocount held
	 */
bsd_close_page_cache_files(
	struct global_profile *profile)

	vnode_put(profile->data_vp);
	vnode_put(profile->names_vp);
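/*
 * Illustrative sketch (added, not in the original source): the pattern the
 * readers/writers below follow -- open the per-user profile files, use the
 * cached vnodes and names buffer, then drop the iocounts again.
 */
#if 0
	struct global_profile *uid_files;

	if (bsd_open_page_cache_files(user, &uid_files) == 0) {
		/* ... vn_rdwr() against uid_files->names_vp / data_vp ... */
		bsd_close_page_cache_files(uid_files);
	}
#endif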
bsd_read_page_cache_file(
	struct vnode	*app_vp,
	vm_offset_t	*buffer,
	vm_offset_t	*bufsize)

	boolean_t	funnel_state;
	unsigned int	profile_size;
	vm_offset_t	names_buf;
	struct vnode_attr va;
	struct vfs_context context;
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* Try to open the appropriate user's profile files */
	/* If neither file is present, try to create them   */
	/* If one file is present and the other not, fail.  */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	error = bsd_open_page_cache_files(user, &uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	VATTR_WANTED(&va, va_fileid);
	VATTR_WANTED(&va, va_modify_time);

	if ((error = vnode_getattr(app_vp, &va, &context))) {
		printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	*fid = (u_long)va.va_fileid;
	*mod = va.va_modify_time.tv_sec;

	if (bsd_search_page_cache_data_base(
		    (struct profile_names_header *)names_buf,
		    (unsigned int) va.va_modify_time.tv_sec,
		    (u_long)va.va_fileid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */
		if(profile_size == 0) {
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
		ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size));
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
		*bufsize = profile_size;
		while(profile_size) {
			error = vn_rdwr(UIO_READ, data_vp,
					(caddr_t) *buffer, profile_size,
					profile, UIO_SYSSPACE32, IO_NODELOCKED,
					kauth_cred_get(), &resid, p);
			if((error) || (profile_size == resid)) {
				bsd_close_page_cache_files(uid_files);
				kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
				thread_funnel_set(kernel_flock, funnel_state);
			profile += profile_size - resid;
			profile_size = resid;
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
	bsd_close_page_cache_files(uid_files);
	thread_funnel_set(kernel_flock, funnel_state);
bsd_search_page_cache_data_base(
	struct profile_names_header *database,
	unsigned int	mod_date,
	unsigned int	*profile_size)

	struct profile_element *element;
	unsigned int	ele_total;
	unsigned int	extended_list = 0;
	vm_offset_t	local_buf = 0;

	if(((vm_offset_t)database->element_array) !=
	   sizeof(struct profile_names_header)) {
	element = (struct profile_element *)(
		(vm_offset_t)database->element_array +
		(vm_offset_t)database);
	ele_total = database->number_of_profiles;

	/* note: code assumes header + n*ele comes out on a page boundary */
	if(((local_buf == 0) && (sizeof(struct profile_names_header) +
		(ele_total * sizeof(struct profile_element)))
		> (PAGE_SIZE * 4)) ||
	   ((local_buf != 0) &&
		(ele_total * sizeof(struct profile_element))
		> (PAGE_SIZE * 4))) {
		extended_list = ele_total;
		if(element == (struct profile_element *)
		   ((vm_offset_t)database->element_array +
		    (vm_offset_t)database)) {
			ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
			ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
		extended_list -= ele_total;
	for (i=0; i<ele_total; i++) {
		if((mod_date == element[i].mod_date)
		   && (inode == element[i].inode)) {
			if(strncmp(element[i].name, app_name, 12) == 0) {
				*profile = element[i].addr;
				*profile_size = element[i].size;
				if(local_buf != 0) {
					kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
	if(extended_list == 0)
	if(local_buf == 0) {
		ret = kmem_alloc(kernel_map, &local_buf, 4 * PAGE_SIZE);
		if(ret != KERN_SUCCESS) {
	element = (struct profile_element *)local_buf;
	ele_total = extended_list;

	file_off += 4 * PAGE_SIZE;
	if((ele_total * sizeof(struct profile_element)) >
		size = PAGE_SIZE * 4;
		size = ele_total * sizeof(struct profile_element);

		error = vn_rdwr(UIO_READ, vp,
				CAST_DOWN(caddr_t, (local_buf + resid_off)),
				size, file_off + resid_off, UIO_SYSSPACE32,
				IO_NODELOCKED, kauth_cred_get(), &resid, p);
		if((error) || (size == resid)) {
			if(local_buf != 0) {
				kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
		resid_off += size-resid;
	if(local_buf != 0) {
		kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
bsd_write_page_cache_file(

	boolean_t	funnel_state;
	struct vfs_context context;
	unsigned int	profile_size;
	vm_offset_t	names_buf;
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	struct profile_names_header *profile_header;
	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	error = bsd_open_page_cache_files(user, &uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/* Stat data file for size */

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	if ((error = vnode_size(data_vp, &file_size, &context)) != 0) {
		printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	if (bsd_search_page_cache_data_base(names_vp,
		    (struct profile_names_header *)names_buf,
		    file_name, (unsigned int) mod,
		    fid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */
		if(profile_size == 0) {
			unsigned int	header_size;
			vm_offset_t	buf_ptr;

			/* Our Write case */

			/* read header for last entry */
				(struct profile_names_header *)names_buf;
			name_offset = sizeof(struct profile_names_header) +
				(sizeof(struct profile_element)
					* profile_header->number_of_profiles);
			profile_header->number_of_profiles += 1;

			if(name_offset < PAGE_SIZE * 4) {
				struct profile_element	*name;
				/* write new entry */
				name = (struct profile_element *)
					(names_buf + (vm_offset_t)name_offset);
				name->addr = file_size;
				name->mod_date = mod;
				strncpy (name->name, file_name, 12);
				unsigned int	ele_size;
				struct profile_element	name;
				/* write new entry */
				name.addr = file_size;
				name.mod_date = mod;
				strncpy (name.name, file_name, 12);
				/* write element out separately */
				ele_size = sizeof(struct profile_element);
				buf_ptr = (vm_offset_t)&name;
				resid_off = name_offset;

					error = vn_rdwr(UIO_WRITE, names_vp,
							ele_size, resid_off,
							UIO_SYSSPACE32, IO_NODELOCKED,
							kauth_cred_get(), &resid, p);
						printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
						bsd_close_page_cache_files(
					buf_ptr += (vm_offset_t)
					resid_off += ele_size-resid;

			if(name_offset < PAGE_SIZE * 4) {
				header_size = name_offset +
					sizeof(struct profile_element);
					sizeof(struct profile_names_header);
			buf_ptr = (vm_offset_t)profile_header;

			/* write names file header */
			while(header_size) {
				error = vn_rdwr(UIO_WRITE, names_vp,
						header_size, resid_off,
						UIO_SYSSPACE32, IO_NODELOCKED,
						kauth_cred_get(), &resid, p);
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						kernel_flock, funnel_state);
				buf_ptr += (vm_offset_t)header_size-resid;
				resid_off += header_size-resid;
				header_size = resid;
			/* write profile to data file */
			resid_off = file_size;
				error = vn_rdwr(UIO_WRITE, data_vp,
						(caddr_t)buffer, size, resid_off,
						UIO_SYSSPACE32, IO_NODELOCKED,
						kauth_cred_get(), &resid, p);
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						kernel_flock, funnel_state);
				buffer += size-resid;
				resid_off += size-resid;
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
		/* Someone else wrote a twin profile before us */
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
	bsd_close_page_cache_files(uid_files);
	thread_funnel_set(kernel_flock, funnel_state);
prepare_profile_database(int user)

	const char	*cache_path = "/var/vm/app_profile/";
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;
	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	struct vnode_attr va;
	struct vfs_context context;
	struct profile_names_header *profile_header;
	struct nameidata nd_names;
	struct nameidata nd_data;

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *)&profile_data_string, PATH_MAX);

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names                                */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
		= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);

	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
			 (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			  (vm_offset_t)profile_data_string, PATH_MAX);

	NDINIT(&nd_names, LOOKUP, FOLLOW,
	       UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
	NDINIT(&nd_data, LOOKUP, FOLLOW,
	       UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);

	if ( (error = vn_open(&nd_data,
			      O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
		kmem_free(kernel_map,
			  (vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			  (vm_offset_t)profile_data_string, PATH_MAX);
	data_vp = nd_data.ni_vp;

	if ( (error = vn_open(&nd_names,
			      O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
		printf("prepare_profile_database: Can't create CacheNames %s\n",
		       profile_data_string);
		kmem_free(kernel_map,
			  (vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			  (vm_offset_t)profile_data_string, PATH_MAX);
		vnode_rele(data_vp);
	names_vp = nd_names.ni_vp;

	/* Write Header for new names file */

	profile_header = (struct profile_names_header *)names_buf;

	profile_header->number_of_profiles = 0;
	profile_header->user_id = user;
	profile_header->version = 1;
	profile_header->element_array =
		sizeof(struct profile_names_header);
	profile_header->spare1 = 0;
	profile_header->spare2 = 0;
	profile_header->spare3 = 0;

	size = sizeof(struct profile_names_header);
	buf_ptr = (vm_offset_t)profile_header;

		error = vn_rdwr(UIO_WRITE, names_vp,
				(caddr_t)buf_ptr, size, resid_off,
				UIO_SYSSPACE32, IO_NODELOCKED,
				kauth_cred_get(), &resid, p);
			printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
			kmem_free(kernel_map,
				  (vm_offset_t)names_buf, 4 * PAGE_SIZE);
			kmem_free(kernel_map,
				  (vm_offset_t)profile_data_string,
			vnode_rele(names_vp);
			vnode_put(names_vp);
			vnode_rele(data_vp);
		buf_ptr += size-resid;
		resid_off += size-resid;

	VATTR_SET(&va, va_uid, user);
	error = vnode_setattr(names_vp, &va, &context);
		printf("prepare_profile_database: "
		       "Can't set user %s\n", profile_names_string);
	vnode_rele(names_vp);
	vnode_put(names_vp);

	VATTR_SET(&va, va_uid, user);
	error = vnode_setattr(data_vp, &va, &context);
		printf("prepare_profile_database: "
		       "Can't set user %s\n", profile_data_string);
	vnode_rele(data_vp);

	kmem_free(kernel_map,
		  (vm_offset_t)profile_data_string, PATH_MAX);
	kmem_free(kernel_map,
		  (vm_offset_t)names_buf, 4 * PAGE_SIZE);