/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/mach_traps.h>
#include <mach/time_value.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/file_internal.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/vnode_internal.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/sysproto.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <vm/vm_protos.h>
    return (vm_map_check_protection(
            current_map(),
            vm_map_trunc_page(addr), vm_map_round_page(addr+len),
            prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
    kret = vm_map_wire(current_map(), vm_map_trunc_page(addr),
            vm_map_round_page(addr+len),
            VM_PROT_READ | VM_PROT_WRITE, FALSE);
    case KERN_INVALID_ADDRESS:
    case KERN_PROTECTION_FAILURE:
    __unused int dirtied)

    vm_map_offset_t vaddr;

    pmap = get_task_pmap(current_task());
    for (vaddr = vm_map_trunc_page(addr);
         vaddr < vm_map_round_page(addr+len);
         vaddr += PAGE_SIZE) {
        paddr = pmap_extract(pmap, vaddr);
        pg = PHYS_TO_VM_PAGE(paddr);
        vm_page_set_modified(pg);
    }
    kret = vm_map_unwire(current_map(), vm_map_trunc_page(addr),
            vm_map_round_page(addr+len), FALSE);

    case KERN_INVALID_ADDRESS:
    case KERN_PROTECTION_FAILURE:
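/*
 * Illustrative sketch (not part of the original source): the usual pattern a
 * driver-style routine follows around a user buffer, wiring it with vslock()
 * before touching it and unwiring with vsunlock() afterwards.  The helper
 * name and the EFAULT mapping are assumptions made only for this example.
 */
#if 0   /* example only, not compiled */
static int
example_wire_and_copy(user_addr_t uaddr, user_size_t ulen, void *kbuf)
{
    int error;

    if (vslock(uaddr, ulen) != 0)           /* wire the user pages */
        return (EFAULT);
    error = copyin(uaddr, kbuf, ulen);      /* safe to access while wired */
    vsunlock(uaddr, ulen, 0 /* not dirtied */);
    return (error ? EFAULT : 0);
}
#endif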
    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
int fubyte(user_addr_t addr)

    if (copyin(addr, (void *) &byte, sizeof(char)))

int fuibyte(user_addr_t addr)

    if (copyin(addr, (void *) &(byte), sizeof(char)))
    return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuword(user_addr_t addr)

    if (copyin(addr, (void *) &word, sizeof(int)))

/* suiword and fuiword are the same as suword and fuword, respectively */

    return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuiword(user_addr_t addr)

    if (copyin(addr, (void *) &word, sizeof(int)))
/*
 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
 * fetching and setting of process-sized size_t and pointer values.
 */
sulong(user_addr_t addr, int64_t word)

    if (IS_64BIT_PROCESS(current_proc())) {
        return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);
    } else {
        return(suiword(addr, (long)word));
    }

fulong(user_addr_t addr)

    if (IS_64BIT_PROCESS(current_proc())) {
        if (copyin(addr, (void *)&longword, sizeof(longword)) != 0)
            return(-1);
        return(longword);
    } else {
        return((int64_t)fuiword(addr));
    }

suulong(user_addr_t addr, uint64_t uword)

    if (IS_64BIT_PROCESS(current_proc())) {
        return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);
    } else {
        return(suiword(addr, (u_long)uword));
    }

fuulong(user_addr_t addr)

    if (IS_64BIT_PROCESS(current_proc())) {
        if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0)
            return(-1ULL);
        return(ulongword);
    } else {
        return((uint64_t)fuiword(addr));
    }
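/*
 * Illustrative sketch (not part of the original source): how a kernel routine
 * might use fulong()/sulong() to read and update a pointer-sized user value
 * without caring whether the calling process is 32- or 64-bit; the routines
 * above either copy 64 bits directly or fall back to fuiword()/suiword().
 * The function name is hypothetical.
 */
#if 0   /* example only, not compiled */
static int
example_bump_user_counter(user_addr_t counter_addr)
{
    int64_t value;

    value = fulong(counter_addr);           /* fetch process-sized value */
    if (value == -1)
        return (EFAULT);                    /* (or a legitimate -1 value) */
    if (sulong(counter_addr, value + 1) != 0)
        return (EFAULT);
    return (0);
}
#endif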
swapon(__unused struct proc *procp, __unused struct swapon_args *uap, __unused int *retval)

pid_for_task(
    struct pid_for_task_args *args)
{
    mach_port_name_t    t = args->t;
    user_addr_t         pid_addr = args->pid;
    kern_return_t       err = KERN_SUCCESS;
    boolean_t           funnel_state;

    AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
    AUDIT_ARG(mach_port1, t);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    t1 = port_name_to_task(t);

    if (t1 == TASK_NULL) {

    p = get_bsdtask_info(t1);

    (void) copyout((char *) &pid, pid_addr, sizeof(int));
    thread_funnel_set(kernel_flock, funnel_state);
    AUDIT_MACH_SYSCALL_EXIT(err);
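/*
 * Illustrative sketch (not part of the original source): pid_for_task() is a
 * Mach trap, so a user-space caller reaches it through the corresponding
 * library wrapper, passing a task port and receiving the BSD pid that the
 * code above copies out.  Error handling is abbreviated.
 */
#if 0   /* example only, user-space, not compiled */
    mach_port_name_t port = mach_task_self();
    int pid = -1;

    if (pid_for_task(port, &pid) == KERN_SUCCESS)
        printf("my pid is %d\n", pid);
#endif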
/*
 * Routine:  task_for_pid
 *
 *  Get the task port for another "process", named by its
 *  process ID on the same host as "target_task".
 *
 *  Only permitted to privileged processes, or processes
 *  with the same user ID.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
task_for_pid(
    struct task_for_pid_args *args)
{
    mach_port_name_t    target_tport = args->target_tport;
    user_addr_t         task_addr = args->t;
    struct uthread      *uthread;
    mach_port_name_t    tret;
    boolean_t           funnel_state;

    AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
    AUDIT_ARG(mach_port1, target_tport);

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return(KERN_FAILURE);
    }

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    p1 = get_bsdtask_info(t1);  /* XXX current proc */

    /*
     * Delayed binding of thread credential to process credential, if we
     * are not running with an explicitly set thread credential.
     */
    uthread = get_bsdthread_info(current_thread());
    if (uthread->uu_ucred != p1->p_ucred &&
        (uthread->uu_flag & UT_SETUID) == 0) {
        kauth_cred_t old = uthread->uu_ucred;
        uthread->uu_ucred = p1->p_ucred;
        kauth_cred_ref(uthread->uu_ucred);
        kauth_cred_rele(old);
    }

    AUDIT_ARG(process, p);

    if (
        (p != (struct proc *) 0)
        && (p1 != (struct proc *) 0)
        || !(suser(kauth_cred_get(), 0))
        || ((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get()))
            && (p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid)
            && ((p->p_flag & P_SUGID) == 0))
        && (p->p_stat != SZOMB)
        ) {
        if (p->task != TASK_NULL) {
            task_reference(p->task);
            sright = (void *)convert_task_to_port(p->task);
            tret = ipc_port_copyout_send(sright,
                    get_task_ipcspace(current_task()));
        } else
            tret = MACH_PORT_NULL;
        AUDIT_ARG(mach_port2, tret);
        (void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
        error = KERN_SUCCESS;
    } else {
        tret = MACH_PORT_NULL;
        (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
        error = KERN_FAILURE;
    }
    thread_funnel_set(kernel_flock, funnel_state);
    AUDIT_MACH_SYSCALL_EXIT(error);
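/*
 * Illustrative sketch (not part of the original source): the user-space view
 * of the trap above.  A caller passes its own task port plus a pid and gets
 * back a send right to the target task if the uid/suser checks above allow
 * it.  Error handling is abbreviated.
 */
#if 0   /* example only, user-space, not compiled */
    mach_port_name_t target_task = MACH_PORT_NULL;
    kern_return_t kr;

    kr = task_for_pid(mach_task_self(), target_pid, &target_task);
    if (kr != KERN_SUCCESS)
        /* not privileged, different uid, or P_SUGID target */ ;
#endif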
/*
 * shared_region_make_private_np:
 *
 * This system call is for "dyld" only.
 *
 * It creates a private copy of the current process's "shared region" for
 * split libraries.  "dyld" uses this when the shared region is full or
 * it needs to load a split library that conflicts with an already loaded one
 * that this process doesn't need.  "dyld" specifies a set of address ranges
 * that it wants to keep in the now-private "shared region".  These cover
 * the set of split libraries that the process needs so far.  The kernel needs
 * to deallocate the rest of the shared region, so that it's available for
 * more libraries for this process.
 */
shared_region_make_private_np(
    struct proc *p,
    struct shared_region_make_private_np_args *uap,
    __unused int *retvalp)
{
    boolean_t                           using_shared_regions;
    user_addr_t                         user_ranges;
    unsigned int                        range_count;
    struct shared_region_range_np       *ranges;
    shared_region_mapping_t             shared_region;
    struct shared_region_task_mappings  task_mapping_info;
    shared_region_mapping_t             next;

    range_count = uap->rangeCount;
    user_ranges = uap->ranges;

    /* allocate kernel space for the "ranges" */
    if (range_count != 0) {
        kr = kmem_alloc(kernel_map,
                (vm_offset_t *) &ranges,
                (vm_size_t) (range_count * sizeof (ranges[0])));
        if (kr != KERN_SUCCESS) {

        /* copy "ranges" from user-space */
        error = copyin(user_ranges,
                ranges,
                (range_count * sizeof (ranges[0])));
    }

    if (p->p_flag & P_NOSHLIB) {
        /* no split library has been mapped for this process so far */
        using_shared_regions = FALSE;
    } else {
        /* this process has already mapped some split libraries */
        using_shared_regions = TRUE;
    }

    /*
     * Get a private copy of the current shared region.
     * Do not chain it to the system-wide shared region, as we'll want
     * to map other split libraries in place of the old ones.  We want
     * to completely detach from the system-wide shared region and go our
     * own way after this point, not sharing anything with other processes.
     */
    error = clone_system_shared_regions(using_shared_regions,
            FALSE, /* chain_regions */
            ENV_DEFAULT_ROOT);

    /* get info on the newly allocated shared region */
    vm_get_shared_region(current_task(), &shared_region);
    task_mapping_info.self = (vm_offset_t) shared_region;
    shared_region_mapping_info(shared_region,
            &(task_mapping_info.text_region),
            &(task_mapping_info.text_size),
            &(task_mapping_info.data_region),
            &(task_mapping_info.data_size),
            &(task_mapping_info.region_mappings),
            &(task_mapping_info.client_base),
            &(task_mapping_info.alternate_base),
            &(task_mapping_info.alternate_next),
            &(task_mapping_info.fs_base),
            &(task_mapping_info.system),
            &(task_mapping_info.flags),
            &next);

    /*
     * We now have our private copy of the shared region, as it was before
     * the call to clone_system_shared_regions().  We now need to clean it
     * up and keep only the memory areas described by the "ranges" array.
     */
    kr = shared_region_cleanup(range_count, ranges, &task_mapping_info);

    if (ranges != NULL) {
        kmem_free(kernel_map,
                (vm_offset_t) ranges,
                range_count * sizeof (ranges[0]));
    }
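/*
 * Illustrative sketch (not part of the original source): the shape of a
 * dyld-style call into shared_region_make_private_np().  The user-space
 * wrapper signature and the srr_address/srr_size field names are assumptions
 * made only for this example; the syscall simply receives "rangeCount"
 * entries describing which parts of the now-private shared region to keep,
 * as handled by the copyin/cleanup code above.
 */
#if 0   /* example only, user-space, not compiled */
    struct shared_region_range_np keep[2] = {
        { .srr_address = 0x90000000, .srr_size = 0x200000 },   /* libA TEXT */
        { .srr_address = 0xa0000000, .srr_size = 0x20000 },    /* libA DATA */
    };

    if (shared_region_make_private_np(2, keep) != 0)
        /* fall back to mapping the library outside the shared region */ ;
#endif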
/*
 * shared_region_map_file_np:
 *
 * This system call is for "dyld" only.
 *
 * "dyld" wants to map parts of a split library in the shared region.
 * We get a file descriptor on the split library to be mapped and a set
 * of mapping instructions, describing which parts of the file to map in
 * which areas of the shared segment and with what protection.
 * The "shared region" is split in 2 areas:
 * 0x90000000 - 0xa0000000 : read-only area (for TEXT and LINKEDIT sections),
 * 0xa0000000 - 0xb0000000 : writable area (for DATA sections).
 */
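/*
 * Illustrative note (not part of the original source): the mapping
 * classification loop in the function below masks each mapping address with
 * GLOBAL_SHARED_SEGMENT_MASK and compares the result against the two segment
 * bases named in the comment above.  For example, assuming the usual
 * constants, an address such as 0x90012000 masks to 0x90000000 (the read-only
 * TEXT area) while 0xa0004000 masks to 0xa0000000 (the writable DATA area);
 * anything else is "outside" the shared region and can only be accepted if
 * the caller supplied a slide pointer.
 */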
shared_region_map_file_np(
    struct proc *p,
    struct shared_region_map_file_np_args *uap,
    __unused int *retvalp)
{
    unsigned int                        mapping_count;
    user_addr_t                         user_mappings;  /* 64-bit */
    user_addr_t                         user_slide_p;   /* 64-bit */
    struct shared_file_mapping_np       *mappings;
    mach_vm_offset_t                    slide;
    struct vfs_context                  context;
    memory_object_control_t             file_control;
    memory_object_size_t                file_size;
    shared_region_mapping_t             shared_region;
    struct shared_region_task_mappings  task_mapping_info;
    shared_region_mapping_t             next;
    shared_region_mapping_t             default_shared_region;
    boolean_t                           using_default_region;
    mach_vm_offset_t                    base_offset, end_offset;
    mach_vm_offset_t                    original_base_offset;
    boolean_t                           mappings_in_segment;
#define SFM_MAX_STACK   6
    struct shared_file_mapping_np       stack_mappings[SFM_MAX_STACK];

    /* get file descriptor for split library from arguments */

    /* get file structure from file descriptor */
    error = fp_lookup(p, fd, &fp, 0);

    /* make sure we're attempting to map a vnode */
    if (fp->f_fglob->fg_type != DTYPE_VNODE) {

    /* we need at least read permission on the file */
    if (! (fp->f_fglob->fg_flag & FREAD)) {

    /* get vnode from file structure */
    error = vnode_getwithref((vnode_t)fp->f_fglob->fg_data);

    vp = (struct vnode *) fp->f_fglob->fg_data;

    /* make sure the vnode is a regular file */
    if (vp->v_type != VREG) {

    context.vc_ucred = kauth_cred_get();
    if ((error = vnode_size(vp, &fs, &context)) != 0)

    /*
     * Get the list of mappings the caller wants us to establish.
     */
    mapping_count = uap->mappingCount;  /* the number of mappings */
    if (mapping_count == 0) {
        error = 0;  /* no mappings: we're done ! */
    } else if (mapping_count <= SFM_MAX_STACK) {
        mappings = &stack_mappings[0];
    } else {
        kr = kmem_alloc(kernel_map,
                (vm_offset_t *) &mappings,
                (vm_size_t) (mapping_count *
                    sizeof (mappings[0])));
        if (kr != KERN_SUCCESS) {

    user_mappings = uap->mappings;  /* the mappings, in user space */
    error = copyin(user_mappings,
            mappings,
            (mapping_count * sizeof (mappings[0])));

    /*
     * If the caller provides a "slide" pointer, it means they're OK
     * with us moving the mappings around to make them fit.
     */
    user_slide_p = uap->slide_p;

    /*
     * Make each mapping address relative to the beginning of the
     * shared region.  Check that all mappings are in the shared region.
     * Compute the maximum set of protections required to tell the
     * buffer cache how we mapped the file (see call to ubc_map() below).
     */
    max_prot = VM_PROT_NONE;

    mappings_in_segment = TRUE;
    for (j = 0; j < mapping_count; j++) {
        mach_vm_offset_t segment;
        segment = (mappings[j].sfm_address &
                GLOBAL_SHARED_SEGMENT_MASK);
        if (segment != GLOBAL_SHARED_TEXT_SEGMENT &&
            segment != GLOBAL_SHARED_DATA_SEGMENT) {
            /* this mapping is not in the shared region... */
            if (user_slide_p == NULL) {
                /* ... and we can't slide it in: fail */

                /* expect all mappings to be outside */
                mappings_in_segment = FALSE;
            } else if (mappings_in_segment != FALSE) {
                /* other mappings were not outside: fail */

            /* we'll try and slide that mapping in the segments */

                /* expect all mappings to be inside */
                mappings_in_segment = TRUE;
            } else if (mappings_in_segment != TRUE) {
                /* other mappings were not inside: fail */

            /* get a relative offset inside the shared segments */
            mappings[j].sfm_address -= GLOBAL_SHARED_TEXT_SEGMENT;

        if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK)
            < base_offset) {
            base_offset = (mappings[j].sfm_address &
                    SHARED_TEXT_REGION_MASK);
        }
        if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK) +
            mappings[j].sfm_size > end_offset) {
            end_offset =
                (mappings[j].sfm_address &
                 SHARED_TEXT_REGION_MASK) +
                mappings[j].sfm_size;
        }
        max_prot |= mappings[j].sfm_max_prot;
    }

    /* Make all mappings relative to the base_offset */
    base_offset = vm_map_trunc_page(base_offset);
    end_offset = vm_map_round_page(end_offset);
    for (j = 0; j < mapping_count; j++) {
        mappings[j].sfm_address -= base_offset;
    }
    original_base_offset = base_offset;
    if (mappings_in_segment == FALSE) {
        /*
         * We're trying to map a library that was not pre-bound to
         * be in the shared segments.  We want to try and slide it
         * back into the shared segments but as far back as possible,
         * so that it doesn't clash with pre-bound libraries.  Set
         * the base_offset to the end of the region, so that it can't
         * possibly fit there and will have to be slid.
         */
        base_offset = SHARED_TEXT_REGION_SIZE - end_offset;
    }

    /* get the file's memory object handle */
    UBCINFOCHECK("shared_region_map_file_np", vp);
    file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
    if (file_control == MEMORY_OBJECT_CONTROL_NULL) {

    /*
     * Get info about the current process's shared region.
     * This might change if we decide we need to clone the shared region.
     */
    vm_get_shared_region(current_task(), &shared_region);
    task_mapping_info.self = (vm_offset_t) shared_region;
    shared_region_mapping_info(shared_region,
            &(task_mapping_info.text_region),
            &(task_mapping_info.text_size),
            &(task_mapping_info.data_region),
            &(task_mapping_info.data_size),
            &(task_mapping_info.region_mappings),
            &(task_mapping_info.client_base),
            &(task_mapping_info.alternate_base),
            &(task_mapping_info.alternate_next),
            &(task_mapping_info.fs_base),
            &(task_mapping_info.system),
            &(task_mapping_info.flags),
            &next);

    /*
     * Are we using the system's current shared region
     * for this environment ?
     */
    default_shared_region =
        lookup_default_shared_region(ENV_DEFAULT_ROOT,
                task_mapping_info.system);
    if (shared_region == default_shared_region) {
        using_default_region = TRUE;
    } else {
        using_default_region = FALSE;
    }
    shared_region_mapping_dealloc(default_shared_region);

    if (vp->v_mount != rootvnode->v_mount &&
        using_default_region) {
        /*
         * The split library is not on the root filesystem.  We don't
         * want to pollute the system-wide ("default") shared region
         * with it.
         * Reject the mapping.  The caller (dyld) should "privatize"
         * (via shared_region_make_private()) the shared region and
         * try to establish the mapping privately for this process.
         */

    /*
     * Map the split library.
     */
    kr = map_shared_file(mapping_count,
            mappings,
            file_control,
            file_size,
            &task_mapping_info,
            base_offset,
            (user_slide_p) ? &slide : NULL);

    /*
     * The mapping was successful.  Let the buffer cache know
     * that we've mapped that file with these protections.  This
     * prevents the vnode from getting recycled while it's mapped.
     */
    (void) ubc_map(vp, max_prot);

    case KERN_INVALID_ADDRESS:
    case KERN_PROTECTION_FAILURE:
    case KERN_INVALID_ARGUMENT:

    if (p->p_flag & P_NOSHLIB) {
        /* signal that this process is now using split libraries */
        p->p_flag &= ~P_NOSHLIB;
    }

    /*
     * The caller provided a pointer to a "slide" offset.  Let
     * them know by how much we slid the mappings.
     */
    if (mappings_in_segment == FALSE) {
        /*
         * We faked the base_offset earlier, so undo that
         * and take into account the real base_offset.
         */
        slide += SHARED_TEXT_REGION_SIZE - end_offset;
        slide -= original_base_offset;
    } else {
        /*
         * The mappings were slid into the shared segments
         * and "slide" is relative to the beginning of the
         * shared segments.  Adjust it to be absolute.
         */
        slide += GLOBAL_SHARED_TEXT_SEGMENT;
    }
    error = copyout(&slide,
            user_slide_p,
            sizeof (slide));

    /*
     * release the vnode...
     * ubc_map() still holds it for us in the non-error case
     */
    (void) vnode_put(vp);

    /* release the file descriptor */
    fp_drop(p, fd, fp, 0);

    if (mappings != NULL &&
        mappings != &stack_mappings[0]) {
        kmem_free(kernel_map,
                (vm_offset_t) mappings,
                mapping_count * sizeof (mappings[0]));
    }
load_shared_file(struct proc *p, struct load_shared_file_args *uap,
        __unused int *retval)
{
    caddr_t         mapped_file_addr = uap->mfa;
    u_long          mapped_file_size = uap->mfs;
    caddr_t         *base_address = uap->ba;
    int             map_cnt = uap->map_cnt;
    sf_mapping_t    *mappings = uap->mappings;
    char            *filename = uap->filename;
    int             *flags = uap->flags;
    struct vnode    *vp = 0;
    struct nameidata nd, *ndp;

    struct vfs_context context;

    memory_object_control_t file_control;
    sf_mapping_t    *map_list;

    int             default_regions = 0;

    shared_region_mapping_t shared_region;
    struct shared_region_task_mappings task_mapping_info;
    shared_region_mapping_t next;

    context.vc_ucred = kauth_cred_get();

    AUDIT_ARG(addr, CAST_USER_ADDR_T(base_address));
    /* Retrieve the base address */
    if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {

    if ( (error = copyin(CAST_USER_ADDR_T(flags), &local_flags, sizeof (int))) ) {

    if(local_flags & QUERY_IS_SYSTEM_REGION) {
        shared_region_mapping_t default_shared_region;
        vm_get_shared_region(current_task(), &shared_region);
        task_mapping_info.self = (vm_offset_t)shared_region;

        shared_region_mapping_info(shared_region,
                &(task_mapping_info.text_region),
                &(task_mapping_info.text_size),
                &(task_mapping_info.data_region),
                &(task_mapping_info.data_size),
                &(task_mapping_info.region_mappings),
                &(task_mapping_info.client_base),
                &(task_mapping_info.alternate_base),
                &(task_mapping_info.alternate_next),
                &(task_mapping_info.fs_base),
                &(task_mapping_info.system),
                &(task_mapping_info.flags), &next);

        default_shared_region =
            lookup_default_shared_region(ENV_DEFAULT_ROOT,
                task_mapping_info.system);
        if (shared_region == default_shared_region) {
            local_flags = SYSTEM_REGION_BACKED;
        }
        shared_region_mapping_dealloc(default_shared_region);
        error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int));
    }

    caller_flags = local_flags;
    kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
            (vm_size_t)(MAXPATHLEN));
    if (kret != KERN_SUCCESS) {

    kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
    if (kret != KERN_SUCCESS) {
        kmem_free(kernel_map, (vm_offset_t)filename_str,
                (vm_size_t)(MAXPATHLEN));

    if ( (error = copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
        goto lsf_bailout_free;
    }
    if ( (error = copyinstr(CAST_USER_ADDR_T(filename), filename_str,
            MAXPATHLEN, (size_t *)&dummy)) ) {
        goto lsf_bailout_free;
    }

    /*
     * Get a vnode for the target file
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE32,
            CAST_USER_ADDR_T(filename_str), &context);

    if ((error = namei(ndp))) {
        goto lsf_bailout_free;
    }

    if (vp->v_type != VREG) {
        goto lsf_bailout_free_vput;
    }

    UBCINFOCHECK("load_shared_file", vp);

    if ((error = vnode_size(vp, &file_size, &context)) != 0)
        goto lsf_bailout_free_vput;

    file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
    if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
        goto lsf_bailout_free_vput;
    }

    if(file_size != mapped_file_size) {
        goto lsf_bailout_free_vput;
    }

    if(p->p_flag & P_NOSHLIB) {
        p->p_flag = p->p_flag & ~P_NOSHLIB;
    }

    /* load alternate regions if the caller has requested.  */
    /* Note: the new regions are "clean slates" */
    if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
        error = clone_system_shared_regions(FALSE,
                TRUE, /* chain_regions */
            goto lsf_bailout_free_vput;
    }

    vm_get_shared_region(current_task(), &shared_region);
    task_mapping_info.self = (vm_offset_t)shared_region;

    shared_region_mapping_info(shared_region,
            &(task_mapping_info.text_region),
            &(task_mapping_info.text_size),
            &(task_mapping_info.data_region),
            &(task_mapping_info.data_size),
            &(task_mapping_info.region_mappings),
            &(task_mapping_info.client_base),
            &(task_mapping_info.alternate_base),
            &(task_mapping_info.alternate_next),
            &(task_mapping_info.fs_base),
            &(task_mapping_info.system),
            &(task_mapping_info.flags), &next);

        shared_region_mapping_t default_shared_region;
        default_shared_region =
            lookup_default_shared_region(ENV_DEFAULT_ROOT,
                task_mapping_info.system);
        if(shared_region == default_shared_region) {
            default_regions = 1;
        }
        shared_region_mapping_dealloc(default_shared_region);

    /* If we are running on a removable file system we must not */
    /* be in a set of shared regions or the file system will not */
    /* be shared. */
    if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
            && (lsf_mapping_pool_gauge() < 75)) {
        /* We don't want to run out of shared memory */
        /* map entries by starting too many private versions */
        /* of the shared library structures */

        error2 = clone_system_shared_regions(!(p->p_flag & P_NOSHLIB),
                TRUE, /* chain_regions */
            goto lsf_bailout_free_vput;

        local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
        vm_get_shared_region(current_task(), &shared_region);
        shared_region_mapping_info(shared_region,
                &(task_mapping_info.text_region),
                &(task_mapping_info.text_size),
                &(task_mapping_info.data_region),
                &(task_mapping_info.data_size),
                &(task_mapping_info.region_mappings),
                &(task_mapping_info.client_base),
                &(task_mapping_info.alternate_base),
                &(task_mapping_info.alternate_next),
                &(task_mapping_info.fs_base),
                &(task_mapping_info.system),
                &(task_mapping_info.flags), &next);
    }

    /* This is a work-around to allow executables which have been */
    /* built without knowledge of the proper shared segment to */
    /* load.  This code has been architected as a shared region */
    /* handler, the knowledge of where the regions are loaded is */
    /* problematic for the extension of shared regions as it will */
    /* not be easy to know what region an item should go into. */
    /* The code below however will get around a short term problem */
    /* with executables which believe they are loading at zero. */

    if (((unsigned int)local_base &
            (~(task_mapping_info.text_size - 1))) !=
            task_mapping_info.client_base) {
        if(local_flags & ALTERNATE_LOAD_SITE) {
            local_base = (caddr_t)(
                    (unsigned int)local_base &
                    (task_mapping_info.text_size - 1));
            local_base = (caddr_t)((unsigned int)local_base
                    | task_mapping_info.client_base);
        } else {
            goto lsf_bailout_free_vput;
        }
    }

    if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
            mapped_file_size,
            (vm_offset_t *)&local_base,
            map_cnt, map_list, file_control,
            &task_mapping_info, &local_flags))) {

        case KERN_INVALID_ARGUMENT:
        case KERN_INVALID_ADDRESS:
        case KERN_PROTECTION_FAILURE:
            /* save EAUTH for authentication in this */

        if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
            printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
            for(i=0; i<map_cnt; i++) {
                printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
                        , i, map_list[i].mapping_offset,
                        map_list[i].size,
                        map_list[i].file_offset,
                        map_list[i].protection);
            }
        }
    } else {
        local_flags |= SYSTEM_REGION_BACKED;
        if(!(error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int)))) {
            error = copyout(&local_base,
                    CAST_USER_ADDR_T(base_address), sizeof (caddr_t));
        }
    }

lsf_bailout_free_vput:

    kmem_free(kernel_map, (vm_offset_t)filename_str,
            (vm_size_t)(MAXPATHLEN));
    kmem_free(kernel_map, (vm_offset_t)map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
reset_shared_file(__unused struct proc *p, struct reset_shared_file_args *uap,
        __unused register int *retval)
{
    caddr_t         *base_address = uap->ba;
    int             map_cnt = uap->map_cnt;
    sf_mapping_t    *mappings = uap->mappings;

    sf_mapping_t    *map_list;

    vm_offset_t     map_address;

    AUDIT_ARG(addr, CAST_DOWN(user_addr_t, base_address));
    /* Retrieve the base address */
    if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {

    if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
            != GLOBAL_SHARED_TEXT_SEGMENT) {

    kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
    if (kret != KERN_SUCCESS) {

    if ( (error =
            copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
        kmem_free(kernel_map, (vm_offset_t)map_list,
                (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

    for (i = 0; i<map_cnt; i++) {
        if((map_list[i].mapping_offset
                & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
            map_address = (vm_offset_t)
                    (local_base + map_list[i].mapping_offset);
            vm_deallocate(current_map(),

            vm_map(current_map(), &map_address,
                    map_list[i].size, 0,
                    SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
                    shared_data_region_handle,
                    ((unsigned int)local_base
                        & SHARED_DATA_REGION_MASK) +
                    (map_list[i].mapping_offset
                        & SHARED_DATA_REGION_MASK),
                    TRUE, VM_PROT_READ,
                    VM_PROT_READ, VM_INHERIT_SHARE);
        }
    }

    kmem_free(kernel_map, (vm_offset_t)map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
new_system_shared_regions(__unused struct proc *p,
        __unused struct new_system_shared_regions_args *uap,
        register int *retval)

    /* clear all of our existing defaults */
    remove_all_shared_regions();
clone_system_shared_regions(
    int shared_regions_active,
    int chain_regions,
    int base_vnode)
{
    shared_region_mapping_t new_shared_region;
    shared_region_mapping_t next;
    shared_region_mapping_t old_shared_region;
    struct shared_region_task_mappings old_info;
    struct shared_region_task_mappings new_info;

    vm_get_shared_region(current_task(), &old_shared_region);
    old_info.self = (vm_offset_t)old_shared_region;
    shared_region_mapping_info(old_shared_region,
            &(old_info.text_region),
            &(old_info.text_size),
            &(old_info.data_region),
            &(old_info.data_size),
            &(old_info.region_mappings),
            &(old_info.client_base),
            &(old_info.alternate_base),
            &(old_info.alternate_next),
            &(old_info.fs_base),
            &(old_info.system),
            &(old_info.flags), &next);

    if ((shared_regions_active) ||
        (base_vnode == ENV_DEFAULT_ROOT)) {
        if (shared_file_create_system_region(&new_shared_region))
    } else {
        new_shared_region =
            lookup_default_shared_region(
                base_vnode, old_info.system);
        if(new_shared_region == NULL) {
            shared_file_boot_time_init(
                base_vnode, old_info.system);
            vm_get_shared_region(current_task(), &new_shared_region);
        } else {
            vm_set_shared_region(current_task(), new_shared_region);
        }
        if(old_shared_region)
            shared_region_mapping_dealloc(old_shared_region);
    }
    new_info.self = (vm_offset_t)new_shared_region;
    shared_region_mapping_info(new_shared_region,
            &(new_info.text_region),
            &(new_info.text_size),
            &(new_info.data_region),
            &(new_info.data_size),
            &(new_info.region_mappings),
            &(new_info.client_base),
            &(new_info.alternate_base),
            &(new_info.alternate_next),
            &(new_info.fs_base),
            &(new_info.system),
            &(new_info.flags), &next);
    if(shared_regions_active) {
        if(vm_region_clone(old_info.text_region, new_info.text_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 1");
            shared_region_mapping_dealloc(new_shared_region);
        }
        if (vm_region_clone(old_info.data_region, new_info.data_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 2");
            shared_region_mapping_dealloc(new_shared_region);
        }
        if (chain_regions) {
            /*
             * We want a "shadowed" clone, a private superset of the old
             * shared region.  The info about the old mappings is still
             * valid for us.
             */
            shared_region_object_chain_attach(
                new_shared_region, old_shared_region);
        } else {
            /*
             * We want a completely detached clone with no link to
             * the old shared region.  We'll be removing some mappings
             * in our private, cloned, shared region, so the old mappings
             * will become irrelevant to us.  Since we have a private
             * "shared region" now, it isn't going to be shared with
             * anyone else and we won't need to maintain mappings info.
             */
            shared_region_object_chain_detached(new_shared_region);
        }
    }
    if (vm_map_region_replace(current_map(), old_info.text_region,
            new_info.text_region, old_info.client_base,
            old_info.client_base+old_info.text_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 3");
        shared_region_mapping_dealloc(new_shared_region);
    }
    if(vm_map_region_replace(current_map(), old_info.data_region,
            new_info.data_region,
            old_info.client_base + old_info.text_size,
            old_info.client_base
                + old_info.text_size + old_info.data_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 4");
        shared_region_mapping_dealloc(new_shared_region);
    }
    vm_set_shared_region(current_task(), new_shared_region);

    /* consume the reference which wasn't accounted for in object */
    if (!shared_regions_active || !chain_regions)
        shared_region_mapping_dealloc(old_shared_region);
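/*
 * Illustrative note (not part of the original source): the callers in this
 * file use the chain_regions argument in opposite ways.
 * shared_region_make_private_np() passes FALSE to get a fully detached
 * private copy that it then prunes with shared_region_cleanup(), while
 * load_shared_file() passes TRUE so the clone stays "shadowed" on top of the
 * system-wide region and keeps the old mapping information.
 */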
/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file */

struct profile_names_header {
    unsigned int    number_of_profiles;
    unsigned int    user_id;
    unsigned int    version;
    off_t           element_array;
    unsigned int    spare1;
    unsigned int    spare2;
    unsigned int    spare3;
};

struct profile_element {
    off_t           addr;
    vm_size_t       size;
    unsigned int    mod_date;
    unsigned int    inode;
    char            name[12];
};

struct global_profile {
    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     buf_ptr;
    unsigned int    user;
    unsigned int    age;
    unsigned int    busy;
};

struct global_profile_cache {
    int                     max_ele;
    unsigned int            age;
    struct global_profile   profiles[3];
};

/* forward declarations */
int bsd_open_page_cache_files(unsigned int user,
        struct global_profile **profile);
void bsd_close_page_cache_files(struct global_profile *profile);
int bsd_search_page_cache_data_base(
        struct vnode                    *vp,
        struct profile_names_header     *database,
        char                            *app_name,
        unsigned int                    mod_date,
        unsigned int                    inode,
        off_t                           *profile,
        unsigned int                    *profile_size);

struct global_profile_cache global_user_profile_cache =
    {3, 0, {{NULL, NULL, 0, 0, 0, 0},
            {NULL, NULL, 0, 0, 0, 0},
            {NULL, NULL, 0, 0, 0, 0}} };
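/*
 * Illustrative sketch (not part of the original source): how the two per-user
 * profile files relate, as implied by the structures above.  The names file
 * starts with a struct profile_names_header whose element_array field gives
 * the offset of an array of struct profile_element entries; each element's
 * addr/size pair in turn locates that application's profile bytes inside the
 * companion data file.
 */
#if 0   /* example only, not compiled */
    struct profile_names_header *hdr = (struct profile_names_header *)names_buf;
    struct profile_element *elements = (struct profile_element *)
            ((vm_offset_t)hdr + (vm_offset_t)hdr->element_array);
    /* profile bytes for entry i live at offset elements[i].addr,  */
    /* length elements[i].size, in the corresponding "_data" file. */
#endif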
/* BSD_OPEN_PAGE_CACHE_FILES: */
/* Caller provides a user id.  This id was used in */
/* prepare_profile_database to create two unique absolute */
/* file paths to the associated profile files.  These files */
/* are either opened or bsd_open_page_cache_files returns an */
/* error.  The header of the names file is then consulted. */
/* The header and the vnodes for the names and data files are */
/* returned. */
bsd_open_page_cache_files(
    unsigned int    user,
    struct global_profile **profile)
{
    const char *cache_path = "/var/vm/app_profile/";

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     names_buf;
    vm_offset_t     buf_ptr;

    int             profile_names_length;
    int             profile_data_length;
    char            *profile_data_string;
    char            *profile_names_string;

    struct vfs_context context;

    struct nameidata nd_names;
    struct nameidata nd_data;

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    for(i = 0; i<global_user_profile_cache.max_ele; i++) {
        if((global_user_profile_cache.profiles[i].user == user)
                && (global_user_profile_cache.profiles[i].data_vp != NULL)) {
            *profile = &global_user_profile_cache.profiles[i];
            /* already in cache, we're done */
            if ((*profile)->busy) {
                /*
                 * drop funnel and wait
                 */
                (void)tsleep((void *)
                        *profile,
                        PRIBIO, "app_profile", 0);
            }
            (*profile)->busy = 1;
            (*profile)->age = global_user_profile_cache.age;

            /*
             * entries in cache are held with a valid
             * usecount... take an iocount which will
             * be dropped in "bsd_close_page_cache_files"
             * which is called after the read or writes to
             * these files are done
             */
            if ( (vnode_getwithref((*profile)->data_vp)) ) {

                vnode_rele((*profile)->data_vp);
                vnode_rele((*profile)->names_vp);

                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

            if ( (vnode_getwithref((*profile)->names_vp)) ) {

                vnode_put((*profile)->data_vp);
                vnode_rele((*profile)->data_vp);
                vnode_rele((*profile)->names_vp);

                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

    global_user_profile_cache.age+=1;

    lru = global_user_profile_cache.age;

    for(i = 0; i<global_user_profile_cache.max_ele; i++) {
        /* Skip entry if it is in the process of being reused */
        if(global_user_profile_cache.profiles[i].data_vp ==
                (struct vnode *)0xFFFFFFFF)

        /* Otherwise grab the first empty entry */
        if(global_user_profile_cache.profiles[i].data_vp == NULL) {
            *profile = &global_user_profile_cache.profiles[i];
            (*profile)->age = global_user_profile_cache.age;

        /* Otherwise grab the oldest entry */
        if(global_user_profile_cache.profiles[i].age < lru) {
            lru = global_user_profile_cache.profiles[i].age;
            *profile = &global_user_profile_cache.profiles[i];

    /* Did we set it? */
    if (*profile == NULL) {
        /*
         * No entries are available; this can only happen if all
         * of them are currently in the process of being reused;
         * if this happens, we sleep on the address of the first
         * element, and restart.  This is less than ideal, but we
         * know it will work because we know that there will be a
         * wakeup on any entry currently in the process of being
         * reused.
         *
         * XXX Reccomend a two handed clock and more than 3 total
         * XXX cache entries at some point in the future.
         */
        /*
         * drop funnel and wait
         */
        (void)tsleep((void *)
                &global_user_profile_cache.profiles[0],
                PRIBIO, "app_profile", 0);

    /*
     * If it's currently busy, we've picked the one at the end of the
     * LRU list, but it's currently being actively used.  We sleep on
     * its address and restart.
     */
    if ((*profile)->busy) {
        /*
         * drop funnel and wait
         */
        (void)tsleep((void *)
                *profile,
                PRIBIO, "app_profile", 0);

    (*profile)->busy = 1;
    (*profile)->user = user;

    /*
     * put dummy value in for now to get competing request to wait
     * above until we are finished
     *
     * Save the data_vp before setting it, so we can set it before
     * we kmem_free() or vrele().  If we don't do this, then we
     * have a potential funnel race condition we have to deal with.
     */
    data_vp = (*profile)->data_vp;
    (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

    /*
     * Age the cache here in all cases; this guarantees that we won't
     * be reusing only one entry over and over, once the system reaches
     * steady state.
     */
    global_user_profile_cache.age+=1;

    if(data_vp != NULL) {
        kmem_free(kernel_map,
                (*profile)->buf_ptr, 4 * PAGE_SIZE);
        if ((*profile)->names_vp) {
            vnode_rele((*profile)->names_vp);
            (*profile)->names_vp = NULL;
        }
        vnode_rele(data_vp);
    }

    /* Try to open the appropriate users profile files */
    /* If neither file is present, try to create them  */
    /* If one file is present and the other not, fail. */
    /* If the files do exist, check them for the app_file */
    /* requested and read it in if present */

    ret = kmem_alloc(kernel_map,
            (vm_offset_t *)&profile_data_string, PATH_MAX);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    /* Split the buffer in half since we know the size of */
    /* our file path and our allocation is adequate for */
    /* both file path names */
    profile_names_string = profile_data_string + (PATH_MAX/2);

    strcpy(profile_data_string, cache_path);
    strcpy(profile_names_string, cache_path);
    profile_names_length = profile_data_length
        = strlen(profile_data_string);
    substring = profile_data_string + profile_data_length;
    sprintf(substring, "%x_data", user);
    substring = profile_names_string + profile_names_length;
    sprintf(substring, "%x_names", user);
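    /*
     * Illustrative note (not part of the original source): for uid 501
     * (0x1f5) the two sprintf() calls above yield the absolute paths
     * "/var/vm/app_profile/1f5_data" and "/var/vm/app_profile/1f5_names",
     * built in the two halves of the PATH_MAX-sized buffer.
     */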
    /* We now have the absolute file names */

    ret = kmem_alloc(kernel_map,
            (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

        kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
            UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
    NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
            UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);

    if ( (error = vn_open(&nd_data, FREAD | FWRITE, 0)) ) {
        printf("bsd_open_page_cache_files: CacheData file not found %s\n",
                profile_data_string);
        kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    data_vp = nd_data.ni_vp;

    if ( (error = vn_open(&nd_names, FREAD | FWRITE, 0)) ) {
        printf("bsd_open_page_cache_files: NamesData file not found %s\n",
                profile_data_string);
        kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);

        vnode_rele(data_vp);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    names_vp = nd_names.ni_vp;

    if ((error = vnode_size(names_vp, &file_size, &context)) != 0) {
        printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
        kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);
        kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);

        vnode_rele(names_vp);
        vnode_put(names_vp);
        vnode_rele(data_vp);

        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;

    if(size > 4 * PAGE_SIZE)
        size = 4 * PAGE_SIZE;
    buf_ptr = names_buf;

        error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
                size, resid_off,
                UIO_SYSSPACE32, IO_NODELOCKED, kauth_cred_get(), &resid, p);
        if((error) || (size == resid)) {

            kmem_free(kernel_map,
                    (vm_offset_t)profile_data_string, PATH_MAX);
            kmem_free(kernel_map,
                    (vm_offset_t)names_buf, 4 * PAGE_SIZE);

            vnode_rele(names_vp);
            vnode_put(names_vp);
            vnode_rele(data_vp);

            (*profile)->data_vp = NULL;
            (*profile)->busy = 0;

        buf_ptr += size-resid;
        resid_off += size-resid;

    kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);

    (*profile)->names_vp = names_vp;
    (*profile)->data_vp = data_vp;
    (*profile)->buf_ptr = names_buf;

    /*
     * at this point, both the names_vp and the data_vp have
     * a valid usecount and an iocount held
     */
void
bsd_close_page_cache_files(
    struct global_profile *profile)
{
    vnode_put(profile->data_vp);
    vnode_put(profile->names_vp);
bsd_read_page_cache_file(
    unsigned int    user,
    int             *fid,
    int             *mod,
    char            *app_name,
    struct vnode    *app_vp,
    vm_offset_t     *buffer,
    vm_offset_t     *bufsize)
{
    boolean_t       funnel_state;

    unsigned int    profile_size;

    vm_offset_t     names_buf;
    struct vnode_attr   va;
    struct vfs_context  context;

    struct vnode    *names_vp;
    struct vnode    *data_vp;

    struct global_profile *uid_files;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    /* Try to open the appropriate users profile files */
    /* If neither file is present, try to create them  */
    /* If one file is present and the other not, fail. */
    /* If the files do exist, check them for the app_file */
    /* requested and read it in if present */

    error = bsd_open_page_cache_files(user, &uid_files);

        thread_funnel_set(kernel_flock, funnel_state);

    names_vp = uid_files->names_vp;
    data_vp = uid_files->data_vp;
    names_buf = uid_files->buf_ptr;

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    VATTR_WANTED(&va, va_fileid);
    VATTR_WANTED(&va, va_modify_time);

    if ((error = vnode_getattr(app_vp, &va, &context))) {
        printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
    }

    *fid = (u_long)va.va_fileid;
    *mod = va.va_modify_time.tv_sec;

    if (bsd_search_page_cache_data_base(
            names_vp,
            (struct profile_names_header *)names_buf,
            app_name,
            (unsigned int) va.va_modify_time.tv_sec,
            (u_long)va.va_fileid, &profile, &profile_size) == 0) {
        /* profile is an offset in the profile data base */
        /* It is zero if no profile data was found */

        if(profile_size == 0) {
            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);
        }
        ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size));

            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);

        *bufsize = profile_size;
        while(profile_size) {
            error = vn_rdwr(UIO_READ, data_vp,
                    (caddr_t) *buffer, profile_size,
                    profile, UIO_SYSSPACE32, IO_NODELOCKED,
                    kauth_cred_get(), &resid, p);
            if((error) || (profile_size == resid)) {
                bsd_close_page_cache_files(uid_files);
                kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
                thread_funnel_set(kernel_flock, funnel_state);
            }
            profile += profile_size - resid;
            profile_size = resid;
        }
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
    } else {
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
    }
bsd_search_page_cache_data_base(
    struct vnode                    *vp,
    struct profile_names_header     *database,
    char                            *app_name,
    unsigned int                    mod_date,
    unsigned int                    inode,
    off_t                           *profile,
    unsigned int                    *profile_size)
{
    struct profile_element  *element;
    unsigned int            ele_total;
    unsigned int            extended_list = 0;

    vm_offset_t             local_buf = 0;

    if(((vm_offset_t)database->element_array) !=
            sizeof(struct profile_names_header)) {

    element = (struct profile_element *)(
            (vm_offset_t)database->element_array +
            (vm_offset_t)database);

    ele_total = database->number_of_profiles;

        /* note: code assumes header + n*ele comes out on a page boundary */
        if(((local_buf == 0) && (sizeof(struct profile_names_header) +
                (ele_total * sizeof(struct profile_element)))
                > (PAGE_SIZE * 4)) ||
                ((local_buf != 0) &&
                (ele_total * sizeof(struct profile_element))
                > (PAGE_SIZE * 4))) {
            extended_list = ele_total;
            if(element == (struct profile_element *)
                    ((vm_offset_t)database->element_array +
                    (vm_offset_t)database)) {
                ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
            } else {
                ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
            }
            extended_list -= ele_total;
        }
        for (i=0; i<ele_total; i++) {
            if((mod_date == element[i].mod_date)
                    && (inode == element[i].inode)) {
                if(strncmp(element[i].name, app_name, 12) == 0) {
                    *profile = element[i].addr;
                    *profile_size = element[i].size;
                    if(local_buf != 0) {
                        kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
                    }
                }
            }
        }
        if(extended_list == 0)

        if(local_buf == 0) {
            ret = kmem_alloc(kernel_map, &local_buf, 4 * PAGE_SIZE);
            if(ret != KERN_SUCCESS) {

        element = (struct profile_element *)local_buf;
        ele_total = extended_list;

        file_off += 4 * PAGE_SIZE;
        if((ele_total * sizeof(struct profile_element)) >
                (PAGE_SIZE * 4)) {
            size = PAGE_SIZE * 4;
        } else {
            size = ele_total * sizeof(struct profile_element);
        }

            error = vn_rdwr(UIO_READ, vp,
                    CAST_DOWN(caddr_t, (local_buf + resid_off)),
                    size, file_off + resid_off, UIO_SYSSPACE32,
                    IO_NODELOCKED, kauth_cred_get(), &resid, p);
            if((error) || (size == resid)) {
                if(local_buf != 0) {
                    kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
                }
            }
            resid_off += size-resid;

    if(local_buf != 0) {
        kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
    }
bsd_write_page_cache_file(
    unsigned int    user,
    char            *file_name,
    caddr_t         buffer,
    vm_size_t       size,
    int             mod,
    int             fid)
{
    boolean_t       funnel_state;

    struct vfs_context context;

    unsigned int    profile_size;

    vm_offset_t     names_buf;
    struct vnode    *names_vp;
    struct vnode    *data_vp;
    struct profile_names_header *profile_header;

    struct global_profile *uid_files;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    error = bsd_open_page_cache_files(user, &uid_files);

        thread_funnel_set(kernel_flock, funnel_state);

    names_vp = uid_files->names_vp;
    data_vp = uid_files->data_vp;
    names_buf = uid_files->buf_ptr;

    /* Stat data file for size */

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    if ((error = vnode_size(data_vp, &file_size, &context)) != 0) {
        printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
    }

    if (bsd_search_page_cache_data_base(names_vp,
            (struct profile_names_header *)names_buf,
            file_name, (unsigned int) mod,
            fid, &profile, &profile_size) == 0) {
        /* profile is an offset in the profile data base */
        /* It is zero if no profile data was found */

        if(profile_size == 0) {
            unsigned int    header_size;
            vm_offset_t     buf_ptr;

            /* Our Write case */

            /* read header for last entry */
            profile_header =
                (struct profile_names_header *)names_buf;
            name_offset = sizeof(struct profile_names_header) +
                (sizeof(struct profile_element)
                    * profile_header->number_of_profiles);
            profile_header->number_of_profiles += 1;

            if(name_offset < PAGE_SIZE * 4) {
                struct profile_element  *name;
                /* write new entry */
                name = (struct profile_element *)
                    (names_buf + (vm_offset_t)name_offset);
                name->addr = file_size;
                name->mod_date = mod;
                strncpy (name->name, file_name, 12);
            } else {
                unsigned int    ele_size;
                struct profile_element  name;
                /* write new entry */
                name.addr = file_size;
                name.mod_date = mod;
                strncpy (name.name, file_name, 12);
                /* write element out separately */
                ele_size = sizeof(struct profile_element);
                buf_ptr = (vm_offset_t)&name;
                resid_off = name_offset;

                error = vn_rdwr(UIO_WRITE, names_vp,
                        (caddr_t)buf_ptr,
                        ele_size, resid_off,
                        UIO_SYSSPACE32, IO_NODELOCKED,
                        kauth_cred_get(), &resid, p);

                    printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
                    bsd_close_page_cache_files(
                        uid_files);

                buf_ptr += (vm_offset_t)
                    ele_size-resid;
                resid_off += ele_size-resid;
            }

            if(name_offset < PAGE_SIZE * 4) {
                header_size = name_offset +
                    sizeof(struct profile_element);
            } else {
                header_size =
                    sizeof(struct profile_names_header);
            }
            buf_ptr = (vm_offset_t)profile_header;

            /* write names file header */
            while(header_size) {
                error = vn_rdwr(UIO_WRITE, names_vp,
                        (caddr_t)buf_ptr,
                        header_size, resid_off,
                        UIO_SYSSPACE32, IO_NODELOCKED,
                        kauth_cred_get(), &resid, p);

                    printf("bsd_write_page_cache_file: Can't write header %x\n", user);
                    bsd_close_page_cache_files(
                        uid_files);
                    thread_funnel_set(
                        kernel_flock, funnel_state);

                buf_ptr += (vm_offset_t)header_size-resid;
                resid_off += header_size-resid;
                header_size = resid;
            }
            /* write profile to data file */
            resid_off = file_size;

            error = vn_rdwr(UIO_WRITE, data_vp,
                    (caddr_t)buffer, size, resid_off,
                    UIO_SYSSPACE32, IO_NODELOCKED,
                    kauth_cred_get(), &resid, p);

                printf("bsd_write_page_cache_file: Can't write header %x\n", user);
                bsd_close_page_cache_files(
                    uid_files);
                thread_funnel_set(
                    kernel_flock, funnel_state);

            buffer += size-resid;
            resid_off += size-resid;

            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);
        }
        /* Someone else wrote a twin profile before us */
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
    } else {
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
    }
prepare_profile_database(int user)
{
    const char *cache_path = "/var/vm/app_profile/";

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     names_buf;
    vm_offset_t     buf_ptr;

    int             profile_names_length;
    int             profile_data_length;
    char            *profile_data_string;
    char            *profile_names_string;

    struct vnode_attr   va;
    struct vfs_context  context;

    struct profile_names_header *profile_header;

    struct nameidata nd_names;
    struct nameidata nd_data;

    context.vc_proc = p;
    context.vc_ucred = kauth_cred_get();

    ret = kmem_alloc(kernel_map,
            (vm_offset_t *)&profile_data_string, PATH_MAX);

    /* Split the buffer in half since we know the size of */
    /* our file path and our allocation is adequate for */
    /* both file path names */
    profile_names_string = profile_data_string + (PATH_MAX/2);

    strcpy(profile_data_string, cache_path);
    strcpy(profile_names_string, cache_path);
    profile_names_length = profile_data_length
        = strlen(profile_data_string);
    substring = profile_data_string + profile_data_length;
    sprintf(substring, "%x_data", user);
    substring = profile_names_string + profile_names_length;
    sprintf(substring, "%x_names", user);

    /* We now have the absolute file names */

    ret = kmem_alloc(kernel_map,
            (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

        kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);

    NDINIT(&nd_names, LOOKUP, FOLLOW,
            UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
    NDINIT(&nd_data, LOOKUP, FOLLOW,
            UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);

    if ( (error = vn_open(&nd_data,
            O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
        kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);

    data_vp = nd_data.ni_vp;

    if ( (error = vn_open(&nd_names,
            O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
        printf("prepare_profile_database: Can't create CacheNames %s\n",
                profile_data_string);
        kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);

        vnode_rele(data_vp);

    names_vp = nd_names.ni_vp;

    /* Write Header for new names file */

    profile_header = (struct profile_names_header *)names_buf;

    profile_header->number_of_profiles = 0;
    profile_header->user_id = user;
    profile_header->version = 1;
    profile_header->element_array =
        sizeof(struct profile_names_header);
    profile_header->spare1 = 0;
    profile_header->spare2 = 0;
    profile_header->spare3 = 0;

    size = sizeof(struct profile_names_header);
    buf_ptr = (vm_offset_t)profile_header;

        error = vn_rdwr(UIO_WRITE, names_vp,
                (caddr_t)buf_ptr, size, resid_off,
                UIO_SYSSPACE32, IO_NODELOCKED,
                kauth_cred_get(), &resid, p);

            printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
            kmem_free(kernel_map,
                    (vm_offset_t)names_buf, 4 * PAGE_SIZE);
            kmem_free(kernel_map,
                    (vm_offset_t)profile_data_string,
                    PATH_MAX);

            vnode_rele(names_vp);
            vnode_put(names_vp);
            vnode_rele(data_vp);

        buf_ptr += size-resid;
        resid_off += size-resid;

    VATTR_SET(&va, va_uid, user);

    error = vnode_setattr(names_vp, &va, &context);

        printf("prepare_profile_database: "
                "Can't set user %s\n", profile_names_string);

    vnode_rele(names_vp);
    vnode_put(names_vp);

    VATTR_SET(&va, va_uid, user);
    error = vnode_setattr(data_vp, &va, &context);

        printf("prepare_profile_database: "
                "Can't set user %s\n", profile_data_string);

    vnode_rele(data_vp);

    kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
    kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);