/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/mach_traps.h>
#include <mach/time_value.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/file_internal.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/vnode_internal.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/sysproto.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <vm/vm_protos.h>
    return (vm_map_check_protection(
            vm_map_trunc_page(addr), vm_map_round_page(addr+len),
            prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
    kret = vm_map_wire(current_map(), vm_map_trunc_page(addr),
            vm_map_round_page(addr+len),
            VM_PROT_READ | VM_PROT_WRITE, FALSE);

    case KERN_INVALID_ADDRESS:
    case KERN_PROTECTION_FAILURE:

    __unused int dirtied)

    vm_map_offset_t vaddr;

    pmap = get_task_pmap(current_task());
    for (vaddr = vm_map_trunc_page(addr);
         vaddr < vm_map_round_page(addr+len);
         vaddr += PAGE_SIZE) {
        paddr = pmap_extract(pmap, vaddr);
        pg = PHYS_TO_VM_PAGE(paddr);
        vm_page_set_modified(pg);
    }

    kret = vm_map_unwire(current_map(), vm_map_trunc_page(addr),
            vm_map_round_page(addr+len), FALSE);

    case KERN_INVALID_ADDRESS:
    case KERN_PROTECTION_FAILURE:
    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

int fubyte(user_addr_t addr)

    if (copyin(addr, (void *) &byte, sizeof(char)))

int fuibyte(user_addr_t addr)

    if (copyin(addr, (void *) &(byte), sizeof(char)))

    return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuword(user_addr_t addr)

    if (copyin(addr, (void *) &word, sizeof(int)))

/* suiword and fuiword are the same as suword and fuword, respectively */

    return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuiword(user_addr_t addr)

    if (copyin(addr, (void *) &word, sizeof(int)))
/*
 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
 * fetching and setting of process-sized size_t and pointer values.
 */
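/*
 * Usage sketch (hypothetical kernel-side caller; the variable names below
 * are invented for the example): fetch and store a user pointer-sized value
 * without knowing whether the current task is 32-bit or 64-bit.
 *
 *	user_addr_t slot;		// user address of a pointer-sized slot
 *	int64_t val;
 *
 *	val = fulong(slot);		// 64-bit copyin for 64-bit tasks,
 *					// fuiword() for 32-bit tasks
 *	if (sulong(slot, val) != 0)	// store it back; the value is
 *		return (EFAULT);	// truncated for a 32-bit task
 */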
sulong(user_addr_t addr, int64_t word)

    if (IS_64BIT_PROCESS(current_proc())) {
        return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);

    return(suiword(addr, (long)word));

fulong(user_addr_t addr)

    if (IS_64BIT_PROCESS(current_proc())) {
        if (copyin(addr, (void *)&longword, sizeof(longword)) != 0)

    return((int64_t)fuiword(addr));

suulong(user_addr_t addr, uint64_t uword)

    if (IS_64BIT_PROCESS(current_proc())) {
        return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);

    return(suiword(addr, (u_long)uword));

fuulong(user_addr_t addr)

    if (IS_64BIT_PROCESS(current_proc())) {
        if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0)

    return((uint64_t)fuiword(addr));
swapon(__unused struct proc *procp, __unused struct swapon_args *uap, __unused int *retval)
    struct pid_for_task_args *args)

    mach_port_name_t    t = args->t;
    user_addr_t         pid_addr = args->pid;

    kern_return_t       err = KERN_SUCCESS;
    boolean_t           funnel_state;

    AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
    AUDIT_ARG(mach_port1, t);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    t1 = port_name_to_task(t);

    if (t1 == TASK_NULL) {

    p = get_bsdtask_info(t1);

    (void) copyout((char *) &pid, pid_addr, sizeof(int));
    thread_funnel_set(kernel_flock, funnel_state);
    AUDIT_MACH_SYSCALL_EXIT(err);
/*
 * Routine:	task_for_pid
 *
 *	Get the task port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
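/*
 * Caller-side sketch (illustrative; "pid" is a hypothetical variable):
 * user space normally reaches this trap through the task_for_pid()
 * wrapper declared in <mach/mach_traps.h>.
 *
 *	#include <mach/mach.h>
 *	#include <mach/mach_traps.h>
 *
 *	mach_port_name_t task = MACH_PORT_NULL;
 *	kern_return_t kr;
 *
 *	kr = task_for_pid(mach_task_self(), pid, &task);
 *	if (kr != KERN_SUCCESS)
 *		;	// not privileged / different uid / no such process
 */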
    struct task_for_pid_args *args)

    mach_port_name_t    target_tport = args->target_tport;

    user_addr_t         task_addr = args->t;
    struct uthread      *uthread;

    mach_port_name_t    tret;

    boolean_t           funnel_state;

    AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);

    AUDIT_ARG(mach_port1, target_tport);

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    p1 = get_bsdtask_info(t1);	/* XXX current proc */

    /*
     * Delayed binding of thread credential to process credential, if we
     * are not running with an explicitly set thread credential.
     */
    uthread = get_bsdthread_info(current_thread());
    if (uthread->uu_ucred != p1->p_ucred &&
        (uthread->uu_flag & UT_SETUID) == 0) {
        kauth_cred_t old = uthread->uu_ucred;

        uthread->uu_ucred = p1->p_ucred;
        kauth_cred_ref(uthread->uu_ucred);

        kauth_cred_rele(old);

    AUDIT_ARG(process, p);

        (p != (struct proc *) 0)
        && (p1 != (struct proc *) 0)

        || !(suser(kauth_cred_get(), 0))
        || ((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get()))
            && (p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid)
            && ((p->p_flag & P_SUGID) == 0))

        && (p->p_stat != SZOMB)

        if (p->task != TASK_NULL) {
            task_reference(p->task);
            sright = (void *)convert_task_to_port(p->task);
            tret = ipc_port_copyout_send(
                get_task_ipcspace(current_task()));

            tret = MACH_PORT_NULL;

        AUDIT_ARG(mach_port2, tret);
        (void ) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));

        error = KERN_SUCCESS;

        tret = MACH_PORT_NULL;
        (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
        error = KERN_FAILURE;

    thread_funnel_set(kernel_flock, funnel_state);
    AUDIT_MACH_SYSCALL_EXIT(error);
/*
 * Try and cap the number of mappings the user might be trying to deal with,
 * so that we don't end up allocating insane amounts of wired memory in the
 * kernel based on bogus user arguments.
 * There are 2 shared regions (TEXT and DATA).  The size of each submap
 * is SHARED_TEXT_REGION_SIZE and we can have at most 1 VM map entry per page,
 * so the maximum number of mappings we could ever have to deal with is...
 */
#define SHARED_REGION_MAX_MAPPINGS ((2 * SHARED_TEXT_REGION_SIZE) >> PAGE_SHIFT)
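/*
 * For example (assuming the 256 MB submaps described further below and
 * 4 KB pages, i.e. SHARED_TEXT_REGION_SIZE == 0x10000000 and
 * PAGE_SHIFT == 12), this works out to
 * (2 * 0x10000000) >> 12 == 0x20000 == 131072 mappings at most.
 */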
/*
 * shared_region_make_private_np:
 *
 * This system call is for "dyld" only.
 *
 * It creates a private copy of the current process's "shared region" for
 * split libraries.  "dyld" uses this when the shared region is full or
 * it needs to load a split library that conflicts with an already loaded one
 * that this process doesn't need.  "dyld" specifies a set of address ranges
 * that it wants to keep in the now-private "shared region".  These cover
 * the set of split libraries that the process needs so far.  The kernel needs
 * to deallocate the rest of the shared region, so that it's available for
 * more libraries for this process.
 */
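/*
 * Caller-side sketch (illustrative only; the user-space prototype and the
 * srr_* field names are assumptions based on the argument structure used
 * below, not a documented interface): dyld passes the address ranges it
 * wants to keep, and everything else in the now-private region becomes
 * available again.
 *
 *	struct shared_region_range_np ranges[2] = {
 *		{ .srr_address = 0x90000000ULL, .srr_size = 0x200000 },
 *		{ .srr_address = 0xa0000000ULL, .srr_size = 0x100000 },
 *	};
 *
 *	err = shared_region_make_private_np(2, ranges);
 */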
shared_region_make_private_np(

    struct shared_region_make_private_np_args   *uap,
    __unused int                                *retvalp)

    boolean_t           using_shared_regions;
    user_addr_t         user_ranges;
    unsigned int        range_count;
    vm_size_t           ranges_size;
    struct shared_region_range_np       *ranges;
    shared_region_mapping_t             shared_region;
    struct shared_region_task_mappings  task_mapping_info;
    shared_region_mapping_t             next;

    range_count = uap->rangeCount;
    user_ranges = uap->ranges;
    ranges_size = (vm_size_t) (range_count * sizeof (ranges[0]));

    /* allocate kernel space for the "ranges" */
    if (range_count != 0) {
        if (range_count > SHARED_REGION_MAX_MAPPINGS) {

        if ((mach_vm_size_t) ranges_size !=
            (mach_vm_size_t) range_count * sizeof (ranges[0])) {
            /* 32-bit integer overflow */

        kr = kmem_alloc(kernel_map,
                (vm_offset_t *) &ranges,

        if (kr != KERN_SUCCESS) {

        /* copy "ranges" from user-space */
        error = copyin(user_ranges,

    if (p->p_flag & P_NOSHLIB) {
        /* no split library has been mapped for this process so far */
        using_shared_regions = FALSE;

        /* this process has already mapped some split libraries */
        using_shared_regions = TRUE;

    /*
     * Get a private copy of the current shared region.
     * Do not chain it to the system-wide shared region, as we'll want
     * to map other split libraries in place of the old ones.  We want
     * to completely detach from the system-wide shared region and go our
     * own way after this point, not sharing anything with other processes.
     */
    error = clone_system_shared_regions(using_shared_regions,
                                        FALSE, /* chain_regions */

    /* get info on the newly allocated shared region */
    vm_get_shared_region(current_task(), &shared_region);
    task_mapping_info.self = (vm_offset_t) shared_region;
    shared_region_mapping_info(shared_region,
                   &(task_mapping_info.text_region),
                   &(task_mapping_info.text_size),
                   &(task_mapping_info.data_region),
                   &(task_mapping_info.data_size),
                   &(task_mapping_info.region_mappings),
                   &(task_mapping_info.client_base),
                   &(task_mapping_info.alternate_base),
                   &(task_mapping_info.alternate_next),
                   &(task_mapping_info.fs_base),
                   &(task_mapping_info.system),
                   &(task_mapping_info.flags),

    /*
     * We now have our private copy of the shared region, as it was before
     * the call to clone_system_shared_regions().  We now need to clean it
     * up and keep only the memory areas described by the "ranges" array.
     */
    kr = shared_region_cleanup(range_count, ranges, &task_mapping_info);

    if (ranges != NULL) {
        kmem_free(kernel_map,
              (vm_offset_t) ranges,
/*
 * shared_region_map_file_np:
 *
 * This system call is for "dyld" only.
 *
 * "dyld" wants to map parts of a split library in the shared region.
 * We get a file descriptor on the split library to be mapped and a set
 * of mapping instructions, describing which parts of the file to map in
 * which areas of the shared segment and with what protection.
 * The "shared region" is split in 2 areas:
 * 0x90000000 - 0xa0000000 : read-only area (for TEXT and LINKEDIT sections),
 * 0xa0000000 - 0xb0000000 : writable area (for DATA sections).
 */
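/*
 * Caller-side sketch (illustrative only; the user-space prototype is an
 * assumption, and only sfm_address, sfm_size and sfm_max_prot are relied
 * on below): for each segment of the split library, dyld describes where
 * it should land in the shared region.
 *
 *	struct shared_file_mapping_np mappings[2];
 *	uint64_t slide = 0;
 *
 *	mappings[0].sfm_address  = 0x90200000ULL;	// read-only (TEXT) area
 *	mappings[0].sfm_size     = 0x8000;
 *	mappings[0].sfm_max_prot = VM_PROT_READ | VM_PROT_EXECUTE;
 *
 *	mappings[1].sfm_address  = 0xa0200000ULL;	// writable (DATA) area
 *	mappings[1].sfm_size     = 0x1000;
 *	mappings[1].sfm_max_prot = VM_PROT_READ | VM_PROT_WRITE;
 *
 *	err = shared_region_map_file_np(fd, 2, mappings, &slide);
 */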
shared_region_map_file_np(

    struct shared_region_map_file_np_args   *uap,
    __unused int                            *retvalp)

    unsigned int                    mapping_count;
    user_addr_t                     user_mappings;  /* 64-bit */
    user_addr_t                     user_slide_p;   /* 64-bit */
    struct shared_file_mapping_np   *mappings;
    vm_size_t                       mappings_size;

    mach_vm_offset_t                slide;

    struct vfs_context              context;
    memory_object_control_t         file_control;
    memory_object_size_t            file_size;
    shared_region_mapping_t         shared_region;
    struct shared_region_task_mappings  task_mapping_info;
    shared_region_mapping_t         next;
    shared_region_mapping_t         default_shared_region;
    boolean_t                       using_default_region;

    mach_vm_offset_t                base_offset, end_offset;
    mach_vm_offset_t                original_base_offset;
    boolean_t                       mappings_in_segment;
#define SFM_MAX_STACK   6
    struct shared_file_mapping_np   stack_mappings[SFM_MAX_STACK];

    /* get file descriptor for split library from arguments */

    /* get file structure from file descriptor */
    error = fp_lookup(p, fd, &fp, 0);

    /* make sure we're attempting to map a vnode */
    if (fp->f_fglob->fg_type != DTYPE_VNODE) {

    /* we need at least read permission on the file */
    if (! (fp->f_fglob->fg_flag & FREAD)) {

    /* get vnode from file structure */
    error = vnode_getwithref((vnode_t)fp->f_fglob->fg_data);

    vp = (struct vnode *) fp->f_fglob->fg_data;

    /* make sure the vnode is a regular file */
    if (vp->v_type != VREG) {

    context.vc_ucred = kauth_cred_get();
    if ((error = vnode_size(vp, &fs, &context)) != 0)

    /*
     * Get the list of mappings the caller wants us to establish.
     */
    mapping_count = uap->mappingCount;  /* the number of mappings */
    mappings_size = (vm_size_t) (mapping_count * sizeof (mappings[0]));
    if (mapping_count == 0) {
        error = 0;  /* no mappings: we're done ! */

    } else if (mapping_count <= SFM_MAX_STACK) {
        mappings = &stack_mappings[0];

        if (mapping_count > SHARED_REGION_MAX_MAPPINGS) {

        if ((mach_vm_size_t) mappings_size !=
            (mach_vm_size_t) mapping_count * sizeof (mappings[0])) {
            /* 32-bit integer overflow */

        kr = kmem_alloc(kernel_map,
                (vm_offset_t *) &mappings,

        if (kr != KERN_SUCCESS) {

    user_mappings = uap->mappings;  /* the mappings, in user space */
    error = copyin(user_mappings,

    /*
     * If the caller provides a "slide" pointer, it means they're OK
     * with us moving the mappings around to make them fit.
     */
    user_slide_p = uap->slide_p;

    /*
     * Make each mapping address relative to the beginning of the
     * shared region.  Check that all mappings are in the shared region.
     * Compute the maximum set of protections required to tell the
     * buffer cache how we mapped the file (see call to ubc_map() below).
     */
    max_prot = VM_PROT_NONE;

    mappings_in_segment = TRUE;
    for (j = 0; j < mapping_count; j++) {
        mach_vm_offset_t segment;
        segment = (mappings[j].sfm_address &
                   GLOBAL_SHARED_SEGMENT_MASK);
        if (segment != GLOBAL_SHARED_TEXT_SEGMENT &&
            segment != GLOBAL_SHARED_DATA_SEGMENT) {
            /* this mapping is not in the shared region... */
            if (user_slide_p == NULL) {
                /* ... and we can't slide it in: fail */

                /* expect all mappings to be outside */
                mappings_in_segment = FALSE;
            } else if (mappings_in_segment != FALSE) {
                /* other mappings were not outside: fail */

            /* we'll try and slide that mapping in the segments */

                /* expect all mappings to be inside */
                mappings_in_segment = TRUE;
            } else if (mappings_in_segment != TRUE) {
                /* other mappings were not inside: fail */

        /* get a relative offset inside the shared segments */
        mappings[j].sfm_address -= GLOBAL_SHARED_TEXT_SEGMENT;

        if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK)

            base_offset = (mappings[j].sfm_address &
                           SHARED_TEXT_REGION_MASK);

        if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK) +
            mappings[j].sfm_size > end_offset) {

                (mappings[j].sfm_address &
                 SHARED_TEXT_REGION_MASK) +
                mappings[j].sfm_size;

        max_prot |= mappings[j].sfm_max_prot;

    /* Make all mappings relative to the base_offset */
    base_offset = vm_map_trunc_page(base_offset);
    end_offset = vm_map_round_page(end_offset);
    for (j = 0; j < mapping_count; j++) {
        mappings[j].sfm_address -= base_offset;

    original_base_offset = base_offset;
    if (mappings_in_segment == FALSE) {
        /*
         * We're trying to map a library that was not pre-bound to
         * be in the shared segments.  We want to try and slide it
         * back into the shared segments but as far back as possible,
         * so that it doesn't clash with pre-bound libraries.  Set
         * the base_offset to the end of the region, so that it can't
         * possibly fit there and will have to be slid.
         */
        base_offset = SHARED_TEXT_REGION_SIZE - end_offset;

    /* get the file's memory object handle */
    UBCINFOCHECK("shared_region_map_file_np", vp);
    file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
    if (file_control == MEMORY_OBJECT_CONTROL_NULL) {

    /*
     * Get info about the current process's shared region.
     * This might change if we decide we need to clone the shared region.
     */
    vm_get_shared_region(current_task(), &shared_region);
    task_mapping_info.self = (vm_offset_t) shared_region;
    shared_region_mapping_info(shared_region,
                   &(task_mapping_info.text_region),
                   &(task_mapping_info.text_size),
                   &(task_mapping_info.data_region),
                   &(task_mapping_info.data_size),
                   &(task_mapping_info.region_mappings),
                   &(task_mapping_info.client_base),
                   &(task_mapping_info.alternate_base),
                   &(task_mapping_info.alternate_next),
                   &(task_mapping_info.fs_base),
                   &(task_mapping_info.system),
                   &(task_mapping_info.flags),

    /*
     * Are we using the system's current shared region
     * for this environment ?
     */
    default_shared_region =
        lookup_default_shared_region(ENV_DEFAULT_ROOT,
                                     task_mapping_info.system);
    if (shared_region == default_shared_region) {
        using_default_region = TRUE;

        using_default_region = FALSE;

    shared_region_mapping_dealloc(default_shared_region);

    if (vp->v_mount != rootvnode->v_mount &&
        using_default_region) {
        /*
         * The split library is not on the root filesystem.  We don't
         * want to pollute the system-wide ("default") shared region
         * Reject the mapping.  The caller (dyld) should "privatize"
         * (via shared_region_make_private()) the shared region and
         * try to establish the mapping privately for this process.
         */

    /*
     * Map the split library.
     */
    kr = map_shared_file(mapping_count,

                 (user_slide_p) ? &slide : NULL);

        /*
         * The mapping was successful.  Let the buffer cache know
         * that we've mapped that file with these protections.  This
         * prevents the vnode from getting recycled while it's mapped.
         */
        (void) ubc_map(vp, max_prot);

    case KERN_INVALID_ADDRESS:

    case KERN_PROTECTION_FAILURE:

    case KERN_INVALID_ARGUMENT:

    if (p->p_flag & P_NOSHLIB) {
        /* signal that this process is now using split libraries */
        p->p_flag &= ~P_NOSHLIB;

        /*
         * The caller provided a pointer to a "slide" offset.  Let
         * them know by how much we slid the mappings.
         */
        if (mappings_in_segment == FALSE) {
            /*
             * We faked the base_offset earlier, so undo that
             * and take into account the real base_offset.
             */
            slide += SHARED_TEXT_REGION_SIZE - end_offset;
            slide -= original_base_offset;

            /*
             * The mappings were slid into the shared segments
             * and "slide" is relative to the beginning of the
             * shared segments.  Adjust it to be absolute.
             */
            slide += GLOBAL_SHARED_TEXT_SEGMENT;

        error = copyout(&slide,

    /*
     * release the vnode...
     * ubc_map() still holds it for us in the non-error case
     */
    (void) vnode_put(vp);

    /* release the file descriptor */
    fp_drop(p, fd, fp, 0);

    if (mappings != NULL &&
        mappings != &stack_mappings[0]) {
        kmem_free(kernel_map,
              (vm_offset_t) mappings,
    __unused struct proc *p,
    __unused struct load_shared_file_args *uap,
    __unused int *retval)

    __unused struct proc *p,
    __unused struct reset_shared_file_args *uap,
    __unused int *retval)

new_system_shared_regions(
    __unused struct proc *p,
    __unused struct new_system_shared_regions_args *uap,
    __unused int *retval)
clone_system_shared_regions(
    int     shared_regions_active,

    shared_region_mapping_t new_shared_region;
    shared_region_mapping_t next;
    shared_region_mapping_t old_shared_region;
    struct shared_region_task_mappings old_info;
    struct shared_region_task_mappings new_info;

    vm_get_shared_region(current_task(), &old_shared_region);
    old_info.self = (vm_offset_t)old_shared_region;
    shared_region_mapping_info(old_shared_region,
                   &(old_info.text_region),
                   &(old_info.text_size),
                   &(old_info.data_region),
                   &(old_info.data_size),
                   &(old_info.region_mappings),
                   &(old_info.client_base),
                   &(old_info.alternate_base),
                   &(old_info.alternate_next),

                   &(old_info.flags), &next);

    if ((shared_regions_active) ||
        (base_vnode == ENV_DEFAULT_ROOT)) {
        if (shared_file_create_system_region(&new_shared_region))

            lookup_default_shared_region(
                base_vnode, old_info.system);
        if(new_shared_region == NULL) {
            shared_file_boot_time_init(
                base_vnode, old_info.system);
            vm_get_shared_region(current_task(), &new_shared_region);

            vm_set_shared_region(current_task(), new_shared_region);

    if(old_shared_region)
        shared_region_mapping_dealloc(old_shared_region);

    new_info.self = (vm_offset_t)new_shared_region;
    shared_region_mapping_info(new_shared_region,
                   &(new_info.text_region),
                   &(new_info.text_size),
                   &(new_info.data_region),
                   &(new_info.data_size),
                   &(new_info.region_mappings),
                   &(new_info.client_base),
                   &(new_info.alternate_base),
                   &(new_info.alternate_next),
                   &(new_info.fs_base),

                   &(new_info.flags), &next);

    if(shared_regions_active) {
        if(vm_region_clone(old_info.text_region, new_info.text_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 1");
            shared_region_mapping_dealloc(new_shared_region);

        if (vm_region_clone(old_info.data_region, new_info.data_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 2");
            shared_region_mapping_dealloc(new_shared_region);

        if (chain_regions) {
            /*
             * We want a "shadowed" clone, a private superset of the old
             * shared region.  The info about the old mappings is still
             */
            shared_region_object_chain_attach(
                new_shared_region, old_shared_region);

    if (!chain_regions) {
        /*
         * We want a completely detached clone with no link to
         * the old shared region.  We'll be removing some mappings
         * in our private, cloned, shared region, so the old mappings
         * will become irrelevant to us.  Since we have a private
         * "shared region" now, it isn't going to be shared with
         * anyone else and we won't need to maintain mappings info.
         */
        shared_region_object_chain_detached(new_shared_region);

    if (vm_map_region_replace(current_map(), old_info.text_region,
                  new_info.text_region, old_info.client_base,
                  old_info.client_base+old_info.text_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 3");
        shared_region_mapping_dealloc(new_shared_region);

    if(vm_map_region_replace(current_map(), old_info.data_region,
                 new_info.data_region,
                 old_info.client_base + old_info.text_size,
                 old_info.client_base
                     + old_info.text_size + old_info.data_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 4");
        shared_region_mapping_dealloc(new_shared_region);

    vm_set_shared_region(current_task(), new_shared_region);

    /* consume the reference which wasn't accounted for in object */

    if (!shared_regions_active || !chain_regions)
        shared_region_mapping_dealloc(old_shared_region);
/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file     */

struct profile_names_header {
    unsigned int    number_of_profiles;
    unsigned int    user_id;
    unsigned int    version;
    off_t           element_array;
    unsigned int    spare1;
    unsigned int    spare2;
    unsigned int    spare3;

struct profile_element {
    unsigned int    mod_date;

struct global_profile {
    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     buf_ptr;

struct global_profile_cache {
    struct global_profile   profiles[3];

/* forward declarations */
int bsd_open_page_cache_files(unsigned int user,
                  struct global_profile **profile);
void bsd_close_page_cache_files(struct global_profile *profile);
int bsd_search_page_cache_data_base(
    struct profile_names_header *database,
    unsigned int mod_date,
    unsigned int *profile_size);

struct global_profile_cache global_user_profile_cache =
    {3, 0, {{NULL, NULL, 0, 0, 0, 0},
        {NULL, NULL, 0, 0, 0, 0},
        {NULL, NULL, 0, 0, 0, 0}} };
/* BSD_OPEN_PAGE_CACHE_FILES: */
/* Caller provides a user id.  This id was used in */
/* prepare_profile_database to create two unique absolute */
/* file paths to the associated profile files.  These files */
/* are either opened or bsd_open_page_cache_files returns an */
/* error.  The header of the names file is then consulted. */
/* The header and the vnodes for the names and data files are */
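/* For example (illustrative; the user id value is made up): for user id */
/* 0x1f5, the two absolute paths built from cache_path and the "%x_names" */
/* and "%x_data" formats below would be */
/*     /var/vm/app_profile/1f5_names */
/*     /var/vm/app_profile/1f5_data */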
bsd_open_page_cache_files(

    struct global_profile **profile)

    const char  *cache_path = "/var/vm/app_profile/";

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     names_buf;
    vm_offset_t     buf_ptr;

    int             profile_names_length;
    int             profile_data_length;
    char            *profile_data_string;
    char            *profile_names_string;

    struct vfs_context context;

    struct nameidata nd_names;
    struct nameidata nd_data;

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    for(i = 0; i<global_user_profile_cache.max_ele; i++) {
        if((global_user_profile_cache.profiles[i].user == user)
            && (global_user_profile_cache.profiles[i].data_vp

            *profile = &global_user_profile_cache.profiles[i];
            /* already in cache, we're done */
            if ((*profile)->busy) {
                /*
                 * drop funnel and wait
                 */
                (void)tsleep((void *)

                    PRIBIO, "app_profile", 0);

            (*profile)->busy = 1;
            (*profile)->age = global_user_profile_cache.age;

            /*
             * entries in cache are held with a valid
             * usecount... take an iocount which will
             * be dropped in "bsd_close_page_cache_files"
             * which is called after the read or writes to
             * these files are done
             */
            if ( (vnode_getwithref((*profile)->data_vp)) ) {

                vnode_rele((*profile)->data_vp);
                vnode_rele((*profile)->names_vp);

                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

            if ( (vnode_getwithref((*profile)->names_vp)) ) {

                vnode_put((*profile)->data_vp);
                vnode_rele((*profile)->data_vp);
                vnode_rele((*profile)->names_vp);

                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

    global_user_profile_cache.age+=1;

    lru = global_user_profile_cache.age;
    for(i = 0; i<global_user_profile_cache.max_ele; i++) {
        /* Skip entry if it is in the process of being reused */
        if(global_user_profile_cache.profiles[i].data_vp ==
                        (struct vnode *)0xFFFFFFFF)

        /* Otherwise grab the first empty entry */
        if(global_user_profile_cache.profiles[i].data_vp == NULL) {
            *profile = &global_user_profile_cache.profiles[i];
            (*profile)->age = global_user_profile_cache.age;

        /* Otherwise grab the oldest entry */
        if(global_user_profile_cache.profiles[i].age < lru) {
            lru = global_user_profile_cache.profiles[i].age;
            *profile = &global_user_profile_cache.profiles[i];

    /* Did we set it? */
    if (*profile == NULL) {
        /*
         * No entries are available; this can only happen if all
         * of them are currently in the process of being reused;
         * if this happens, we sleep on the address of the first
         * element, and restart.  This is less than ideal, but we
         * know it will work because we know that there will be a
         * wakeup on any entry currently in the process of being
         *
         * XXX Recommend a two handed clock and more than 3 total
         * XXX cache entries at some point in the future.
         */
        /*
         * drop funnel and wait
         */
        (void)tsleep((void *)
            &global_user_profile_cache.profiles[0],
            PRIBIO, "app_profile", 0);

    /*
     * If it's currently busy, we've picked the one at the end of the
     * LRU list, but it's currently being actively used.  We sleep on
     * its address and restart.
     */
    if ((*profile)->busy) {
        /*
         * drop funnel and wait
         */
        (void)tsleep((void *)

            PRIBIO, "app_profile", 0);

    (*profile)->busy = 1;
    (*profile)->user = user;

    /*
     * put dummy value in for now to get competing request to wait
     * above until we are finished
     *
     * Save the data_vp before setting it, so we can set it before
     * we kmem_free() or vrele().  If we don't do this, then we
     * have a potential funnel race condition we have to deal with.
     */
    data_vp = (*profile)->data_vp;
    (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

    /*
     * Age the cache here in all cases; this guarantees that we won't
     * be reusing only one entry over and over, once the system reaches
     */
    global_user_profile_cache.age+=1;

    if(data_vp != NULL) {
        kmem_free(kernel_map,
            (*profile)->buf_ptr, 4 * PAGE_SIZE);
        if ((*profile)->names_vp) {
            vnode_rele((*profile)->names_vp);
            (*profile)->names_vp = NULL;

        vnode_rele(data_vp);
    /* Try to open the appropriate users profile files */
    /* If neither file is present, try to create them  */
    /* If one file is present and the other not, fail. */
    /* If the files do exist, check them for the app_file */
    /* requested and read it in if present */

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&profile_data_string, PATH_MAX);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    /* Split the buffer in half since we know the size of */
    /* our file path and our allocation is adequate for   */
    /* both file path names                                */
    profile_names_string = profile_data_string + (PATH_MAX/2);

    strcpy(profile_data_string, cache_path);
    strcpy(profile_names_string, cache_path);
    profile_names_length = profile_data_length
            = strlen(profile_data_string);
    substring = profile_data_string + profile_data_length;
    sprintf(substring, "%x_data", user);
    substring = profile_names_string + profile_names_length;
    sprintf(substring, "%x_names", user);

    /* We now have the absolute file names */

    ret = kmem_alloc(kernel_map,
            (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
            UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
    NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
            UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);

    if ( (error = vn_open(&nd_data, FREAD | FWRITE, 0)) ) {

        printf("bsd_open_page_cache_files: CacheData file not found %s\n",
            profile_data_string);

        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    data_vp = nd_data.ni_vp;

    if ( (error = vn_open(&nd_names, FREAD | FWRITE, 0)) ) {
        printf("bsd_open_page_cache_files: NamesData file not found %s\n",
            profile_data_string);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);

        vnode_rele(data_vp);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    names_vp = nd_names.ni_vp;

    if ((error = vnode_size(names_vp, &file_size, &context)) != 0) {
        printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);

        vnode_rele(names_vp);
        vnode_put(names_vp);
        vnode_rele(data_vp);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    if(size > 4 * PAGE_SIZE)
        size = 4 * PAGE_SIZE;
    buf_ptr = names_buf;

        error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,

            UIO_SYSSPACE32, IO_NODELOCKED, kauth_cred_get(), &resid, p);
        if((error) || (size == resid)) {

            kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);
            kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);

            vnode_rele(names_vp);
            vnode_put(names_vp);
            vnode_rele(data_vp);

            (*profile)->data_vp = NULL;
            (*profile)->busy = 0;

        buf_ptr += size-resid;
        resid_off += size-resid;

    kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);

    (*profile)->names_vp = names_vp;
    (*profile)->data_vp = data_vp;
    (*profile)->buf_ptr = names_buf;

    /*
     * at this point, both the names_vp and the data_vp have
     * a valid usecount and an iocount held
     */
bsd_close_page_cache_files(
    struct global_profile *profile)

    vnode_put(profile->data_vp);
    vnode_put(profile->names_vp);
bsd_read_page_cache_file(

    struct vnode    *app_vp,
    vm_offset_t     *buffer,
    vm_offset_t     *bufsize)

    boolean_t       funnel_state;

    unsigned int    profile_size;

    vm_offset_t     names_buf;
    struct vnode_attr va;
    struct vfs_context context;

    struct vnode    *names_vp;
    struct vnode    *data_vp;

    struct global_profile *uid_files;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    /* Try to open the appropriate users profile files */
    /* If neither file is present, try to create them  */
    /* If one file is present and the other not, fail. */
    /* If the files do exist, check them for the app_file */
    /* requested and read it in if present */

    error = bsd_open_page_cache_files(user, &uid_files);

        thread_funnel_set(kernel_flock, funnel_state);

    names_vp = uid_files->names_vp;
    data_vp = uid_files->data_vp;
    names_buf = uid_files->buf_ptr;

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_modify_time);

    if ((error = vnode_getattr(app_vp, &va, &context))) {
        printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);

    *fid = (u_long)va.va_fileid;
    *mod = va.va_modify_time.tv_sec;

    if (bsd_search_page_cache_data_base(
            (struct profile_names_header *)names_buf,
            (unsigned int) va.va_modify_time.tv_sec,
            (u_long)va.va_fileid, &profile, &profile_size) == 0) {
        /* profile is an offset in the profile data base */
        /* It is zero if no profile data was found */

        if(profile_size == 0) {

            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);

        ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size));

            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);

        *bufsize = profile_size;
        while(profile_size) {
            error = vn_rdwr(UIO_READ, data_vp,
                (caddr_t) *buffer, profile_size,
                profile, UIO_SYSSPACE32, IO_NODELOCKED,
                kauth_cred_get(), &resid, p);
            if((error) || (profile_size == resid)) {
                bsd_close_page_cache_files(uid_files);
                kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
                thread_funnel_set(kernel_flock, funnel_state);

            profile += profile_size - resid;
            profile_size = resid;

        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);

        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
bsd_search_page_cache_data_base(

    struct profile_names_header *database,

    unsigned int    mod_date,

    unsigned int    *profile_size)

    struct profile_element  *element;
    unsigned int            ele_total;
    unsigned int            extended_list = 0;

    vm_offset_t             local_buf = 0;

    if(((vm_offset_t)database->element_array) !=
                sizeof(struct profile_names_header)) {

    element = (struct profile_element *)(
            (vm_offset_t)database->element_array +
                        (vm_offset_t)database);

    ele_total = database->number_of_profiles;

        /* note: code assumes header + n*ele comes out on a page boundary */
        if(((local_buf == 0) && (sizeof(struct profile_names_header) +
            (ele_total * sizeof(struct profile_element)))
            > (PAGE_SIZE * 4)) ||
            ((local_buf != 0) &&
                (ele_total * sizeof(struct profile_element))
                > (PAGE_SIZE * 4))) {
            extended_list = ele_total;
            if(element == (struct profile_element *)
                ((vm_offset_t)database->element_array +
                        (vm_offset_t)database)) {
                ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;

                ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);

            extended_list -= ele_total;

        for (i=0; i<ele_total; i++) {
            if((mod_date == element[i].mod_date)
                    && (inode == element[i].inode)) {
                if(strncmp(element[i].name, app_name, 12) == 0) {
                    *profile = element[i].addr;
                    *profile_size = element[i].size;
                    if(local_buf != 0) {
                        kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);

        if(extended_list == 0)

        if(local_buf == 0) {
            ret = kmem_alloc(kernel_map, &local_buf, 4 * PAGE_SIZE);
            if(ret != KERN_SUCCESS) {

        element = (struct profile_element *)local_buf;
        ele_total = extended_list;

        file_off += 4 * PAGE_SIZE;
        if((ele_total * sizeof(struct profile_element)) >

            size = PAGE_SIZE * 4;

            size = ele_total * sizeof(struct profile_element);

            error = vn_rdwr(UIO_READ, vp,
                CAST_DOWN(caddr_t, (local_buf + resid_off)),
                size, file_off + resid_off, UIO_SYSSPACE32,
                IO_NODELOCKED, kauth_cred_get(), &resid, p);
            if((error) || (size == resid)) {
                if(local_buf != 0) {
                    kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);

            resid_off += size-resid;

    if(local_buf != 0) {
        kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
bsd_write_page_cache_file(

    boolean_t       funnel_state;

    struct vfs_context context;

    unsigned int    profile_size;

    vm_offset_t     names_buf;
    struct vnode    *names_vp;
    struct vnode    *data_vp;
    struct profile_names_header *profile_header;

    struct global_profile *uid_files;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    error = bsd_open_page_cache_files(user, &uid_files);

        thread_funnel_set(kernel_flock, funnel_state);

    names_vp = uid_files->names_vp;
    data_vp = uid_files->data_vp;
    names_buf = uid_files->buf_ptr;

    /* Stat data file for size */

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    if ((error = vnode_size(data_vp, &file_size, &context)) != 0) {
        printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);

    if (bsd_search_page_cache_data_base(names_vp,
            (struct profile_names_header *)names_buf,
            file_name, (unsigned int) mod,
            fid, &profile, &profile_size) == 0) {
        /* profile is an offset in the profile data base */
        /* It is zero if no profile data was found */

        if(profile_size == 0) {
            unsigned int    header_size;
            vm_offset_t     buf_ptr;

            /* Our Write case */

            /* read header for last entry */

                (struct profile_names_header *)names_buf;
            name_offset = sizeof(struct profile_names_header) +
                (sizeof(struct profile_element)
                    * profile_header->number_of_profiles);
            profile_header->number_of_profiles += 1;

            if(name_offset < PAGE_SIZE * 4) {
                struct profile_element  *name;
                /* write new entry */
                name = (struct profile_element *)
                    (names_buf + (vm_offset_t)name_offset);
                name->addr = file_size;

                name->mod_date = mod;

                strncpy (name->name, file_name, 12);

                unsigned int    ele_size;
                struct profile_element  name;
                /* write new entry */
                name.addr = file_size;

                name.mod_date = mod;

                strncpy (name.name, file_name, 12);
                /* write element out separately */
                ele_size = sizeof(struct profile_element);
                buf_ptr = (vm_offset_t)&name;
                resid_off = name_offset;

                    error = vn_rdwr(UIO_WRITE, names_vp,

                            ele_size, resid_off,
                            UIO_SYSSPACE32, IO_NODELOCKED,
                            kauth_cred_get(), &resid, p);

                        printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
                        bsd_close_page_cache_files(

                    buf_ptr += (vm_offset_t)

                    resid_off += ele_size-resid;

            if(name_offset < PAGE_SIZE * 4) {
                header_size = name_offset +
                    sizeof(struct profile_element);

                    sizeof(struct profile_names_header);

            buf_ptr = (vm_offset_t)profile_header;

            /* write names file header */
            while(header_size) {
                error = vn_rdwr(UIO_WRITE, names_vp,

                        header_size, resid_off,
                        UIO_SYSSPACE32, IO_NODELOCKED,
                        kauth_cred_get(), &resid, p);

                    printf("bsd_write_page_cache_file: Can't write header %x\n", user);
                    bsd_close_page_cache_files(

                        kernel_flock, funnel_state);

                buf_ptr += (vm_offset_t)header_size-resid;
                resid_off += header_size-resid;
                header_size = resid;

            /* write profile to data file */
            resid_off = file_size;

                error = vn_rdwr(UIO_WRITE, data_vp,
                        (caddr_t)buffer, size, resid_off,
                        UIO_SYSSPACE32, IO_NODELOCKED,
                        kauth_cred_get(), &resid, p);

                    printf("bsd_write_page_cache_file: Can't write header %x\n", user);
                    bsd_close_page_cache_files(

                        kernel_flock, funnel_state);

                buffer += size-resid;
                resid_off += size-resid;

            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);

        /* Someone else wrote a twin profile before us */
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);

        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
prepare_profile_database(int user)

    const char  *cache_path = "/var/vm/app_profile/";

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     names_buf;
    vm_offset_t     buf_ptr;

    int             profile_names_length;
    int             profile_data_length;
    char            *profile_data_string;
    char            *profile_names_string;

    struct vnode_attr va;
    struct vfs_context context;

    struct profile_names_header *profile_header;

    struct nameidata nd_names;
    struct nameidata nd_data;

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&profile_data_string, PATH_MAX);

    /* Split the buffer in half since we know the size of */
    /* our file path and our allocation is adequate for   */
    /* both file path names                                */
    profile_names_string = profile_data_string + (PATH_MAX/2);

    strcpy(profile_data_string, cache_path);
    strcpy(profile_names_string, cache_path);
    profile_names_length = profile_data_length
            = strlen(profile_data_string);
    substring = profile_data_string + profile_data_length;
    sprintf(substring, "%x_data", user);
    substring = profile_names_string + profile_names_length;
    sprintf(substring, "%x_names", user);

    /* We now have the absolute file names */

    ret = kmem_alloc(kernel_map,
            (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);

    NDINIT(&nd_names, LOOKUP, FOLLOW,
            UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
    NDINIT(&nd_data, LOOKUP, FOLLOW,
            UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);

    if ( (error = vn_open(&nd_data,
                  O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);

    data_vp = nd_data.ni_vp;

    if ( (error = vn_open(&nd_names,
                  O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
        printf("prepare_profile_database: Can't create CacheNames %s\n",
            profile_data_string);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);

        vnode_rele(data_vp);

    names_vp = nd_names.ni_vp;

    /* Write Header for new names file */

    profile_header = (struct profile_names_header *)names_buf;

    profile_header->number_of_profiles = 0;
    profile_header->user_id = user;
    profile_header->version = 1;
    profile_header->element_array =
                sizeof(struct profile_names_header);
    profile_header->spare1 = 0;
    profile_header->spare2 = 0;
    profile_header->spare3 = 0;

    size = sizeof(struct profile_names_header);
    buf_ptr = (vm_offset_t)profile_header;

        error = vn_rdwr(UIO_WRITE, names_vp,
                (caddr_t)buf_ptr, size, resid_off,
                UIO_SYSSPACE32, IO_NODELOCKED,
                kauth_cred_get(), &resid, p);

            printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
            kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
            kmem_free(kernel_map,
                (vm_offset_t)profile_data_string,

            vnode_rele(names_vp);
            vnode_put(names_vp);
            vnode_rele(data_vp);

        buf_ptr += size-resid;
        resid_off += size-resid;

    VATTR_SET(&va, va_uid, user);

    error = vnode_setattr(names_vp, &va, &context);

        printf("prepare_profile_database: "
            "Can't set user %s\n", profile_names_string);

    vnode_rele(names_vp);
    vnode_put(names_vp);

    VATTR_SET(&va, va_uid, user);
    error = vnode_setattr(data_vp, &va, &context);

        printf("prepare_profile_database: "
            "Can't set user %s\n", profile_data_string);

    vnode_rele(data_vp);

    kmem_free(kernel_map,
        (vm_offset_t)profile_data_string, PATH_MAX);
    kmem_free(kernel_map,
        (vm_offset_t)names_buf, 4 * PAGE_SIZE);