/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
extern shared_region_mapping_t	system_shared_region;
extern zone_t			lsf_zone;
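/*
 * useracc() checks whether the user range [addr, addr+len) permits the
 * requested access (read for B_READ, otherwise write) in the current
 * task's address map.
 */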
useracc(addr, len, prot)
    caddr_t addr;
    u_int   len;
    int     prot;
{
    return (vm_map_check_protection(
            current_map(),
            trunc_page(addr), round_page(addr+len),
            prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
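/*
 * vslock() wires the user range [addr, addr+len) into physical memory
 * with read/write protection and maps the Mach return code from
 * vm_map_wire() onto a BSD errno.
 */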
int
vslock(addr, len)
    caddr_t addr;
    int     len;
{
    int kret;

    kret = vm_map_wire(current_map(), trunc_page(addr),
            round_page(addr+len),
            VM_PROT_READ | VM_PROT_WRITE, FALSE);

    switch (kret) {
    case KERN_SUCCESS:
        return (0);
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return (ENOMEM);
    case KERN_PROTECTION_FAILURE:
        return (EACCES);
    default:
        return (EINVAL);
    }
}
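/*
 * vsunlock() undoes vslock(): if the caller dirtied the buffer, the pages
 * are marked modified via the pmap before the range is unwired.
 */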
int
vsunlock(addr, len, dirtied)
    caddr_t addr;
    int     len;
    int     dirtied;
{
    pmap_t      pmap;
    vm_page_t   pg;
    vm_offset_t vaddr, paddr;
    int         kret;

    if (dirtied) {
        pmap = get_task_pmap(current_task());
        for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
                vaddr += PAGE_SIZE) {
            paddr = pmap_extract(pmap, vaddr);
            pg = PHYS_TO_VM_PAGE(paddr);
            vm_page_set_modified(pg);
        }
    }

    kret = vm_map_unwire(current_map(), trunc_page(addr),
                round_page(addr+len), FALSE);
    switch (kret) {
    case KERN_SUCCESS:
        return (0);
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return (ENOMEM);
    case KERN_PROTECTION_FAILURE:
        return (EACCES);
    default:
        return (EINVAL);
    }
}
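/*
 * subyte/suibyte/fubyte/fuibyte and suword/suiword/fuword/fuiword store
 * and fetch single bytes and words in user space via copyout()/copyin(),
 * returning -1 on failure.
 */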
#if defined(sun) || BALANCE || defined(m88k)
#else   /* defined(sun) || BALANCE || defined(m88k) */

subyte(addr, byte)
    void *addr;
    int byte;
{
    char character;

    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

suibyte(addr, byte)
    void *addr;
    int byte;
{
    char character;

    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int fubyte(addr)
    void *addr;
{
    unsigned char byte;

    if (copyin(addr, (void *)&byte, sizeof(char)))
        return (-1);
    return (byte);
}

int fuibyte(addr)
    void *addr;
{
    unsigned char byte;

    if (copyin(addr, (void *)&(byte), sizeof(char)))
        return (-1);
    return (byte);
}

suword(addr, word)
    void *addr;
    long word;
{
    return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuword(addr)
    void *addr;
{
    long word;

    if (copyin(addr, (void *)&word, sizeof(int)))
        return (-1);
    return (word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

suiword(addr, word)
    void *addr;
    long word;
{
    return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuiword(addr)
    void *addr;
{
    long word;

    if (copyin(addr, (void *)&word, sizeof(int)))
        return (-1);
    return (word);
}
#endif  /* defined(sun) || BALANCE || defined(m88k) || defined(i386) */
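/*
 * pid_for_task: given a task port name, copy the BSD process ID of the
 * corresponding process out to user space.  Runs under the kernel funnel.
 */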
kern_return_t
pid_for_task(t, x)
    mach_port_t t;
    int         *x;
{
    struct proc     *p;
    task_t          t1;
    extern task_t   port_name_to_task(mach_port_t t);
    int             pid = -1;
    kern_return_t   err = KERN_SUCCESS;
    boolean_t       funnel_state;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    t1 = port_name_to_task(t);

    if (t1 == TASK_NULL) {
        err = KERN_FAILURE;
    } else {
        p = get_bsdtask_info(t1);
        if (p) {
            pid = p->p_pid;
            err = KERN_SUCCESS;
        } else {
            err = KERN_FAILURE;
        }
        task_deallocate(t1);
    }
    (void) copyout((char *) &pid, (char *) x, sizeof(*x));
    thread_funnel_set(kernel_flock, funnel_state);
    return (err);
}
/*
 *	Routine:	task_for_pid
 *
 *	Get the task port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
    mach_port_t target_tport;
    int         pid;
    mach_port_t *t;
{
    struct proc     *p;
    struct proc     *p1;
    task_t          t1;
    mach_port_t     tret;
    extern task_t   port_name_to_task(mach_port_t tp);
    void            *sright;
    int             error = 0;
    boolean_t       funnel_state;

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
        return (KERN_FAILURE);
    }

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

restart:
    p1 = get_bsdtask_info(t1);
    if (((p = pfind(pid)) != (struct proc *) 0)
        && (p1 != (struct proc *) 0)
        && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
        ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
        || !(suser(p1->p_ucred, &p1->p_acflag)))
        && (p->p_stat != SZOMB)) {
        if (p->task != TASK_NULL) {
            if (!task_reference_try(p->task)) {
                mutex_pause(); /* temp loss of funnel */
                goto restart;
            }
            sright = (void *)convert_task_to_port(p->task);
            tret = ipc_port_copyout_send(sright,
                get_task_ipcspace(current_task()));
        } else
            tret = MACH_PORT_NULL;
        (void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
        task_deallocate(t1);
        error = KERN_SUCCESS;
        goto tfpout;
    }
    task_deallocate(t1);
    tret = MACH_PORT_NULL;
    (void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
    error = KERN_FAILURE;
tfpout:
    thread_funnel_set(kernel_flock, funnel_state);
    return (error);
}
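/*
 * load_shared_file() maps the given file into the task's shared region
 * (the system region or a freshly cloned private one) according to the
 * caller-supplied list of sf_mapping_t entries.
 */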
struct load_shared_file_args {
    char        *filename;
    caddr_t     mfa;
    u_long      mfs;
    caddr_t     *ba;
    int         map_cnt;
    sf_mapping_t    *mappings;
    int         *flags;
};

int
load_shared_file(
    struct proc     *p,
    struct load_shared_file_args *uap,
    int         *retval)
{
    caddr_t     mapped_file_addr = uap->mfa;
    u_long      mapped_file_size = uap->mfs;
    caddr_t     *base_address = uap->ba;
    int         map_cnt = uap->map_cnt;
    sf_mapping_t    *mappings = uap->mappings;
    char        *filename = uap->filename;
    int         *flags = uap->flags;
    struct vnode    *vp = 0;
    struct nameidata    nd, *ndp;
    char        *filename_str;
    int         error;
    kern_return_t   kr;

    struct vattr    vattr;
    memory_object_control_t file_control;
    sf_mapping_t    *map_list;
    caddr_t     local_base;
    int         local_flags;
    int         caller_flags;
    int         i;
    int         default_regions = 0;
    vm_size_t   dummy;
    kern_return_t   kret;

    shared_region_mapping_t shared_region;
    struct shared_region_task_mappings  task_mapping_info;
    shared_region_mapping_t next;

    ndp = &nd;

    /* Retrieve the base address */
    if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
        goto lsf_bailout;
    }
    if (error = copyin(flags, &local_flags, sizeof (int))) {
        goto lsf_bailout;
    }

    if (local_flags & QUERY_IS_SYSTEM_REGION) {
        vm_get_shared_region(current_task(), &shared_region);
        if (shared_region == system_shared_region) {
            local_flags = SYSTEM_REGION_BACKED;
        }
        error = copyout(&local_flags, flags, sizeof (int));
        goto lsf_bailout;
    }

    caller_flags = local_flags;
    kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
            (vm_size_t)(MAXPATHLEN));
    if (kret != KERN_SUCCESS) {
        error = ENOMEM;
        goto lsf_bailout;
    }
    kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
    if (kret != KERN_SUCCESS) {
        kmem_free(kernel_map, (vm_offset_t)filename_str,
                (vm_size_t)(MAXPATHLEN));
        error = ENOMEM;
        goto lsf_bailout;
    }

    if (error =
        copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
        goto lsf_bailout_free;
    }

    if (error = copyinstr(filename,
            filename_str, MAXPATHLEN, (size_t *)&dummy)) {
        goto lsf_bailout_free;
    }

    /*
     * Get a vnode for the target file
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
        filename_str, p);

    if ((error = namei(ndp))) {
        goto lsf_bailout_free;
    }

    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        goto lsf_bailout_free_vput;
    }

    UBCINFOCHECK("load_shared_file", vp);

    if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
        goto lsf_bailout_free_vput;
    }

    file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
    if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
        error = EINVAL;
        goto lsf_bailout_free_vput;
    }

    vm_get_shared_region(current_task(), &shared_region);
    if (shared_region == system_shared_region) {
        default_regions = 1;
    }
    if (((vp->v_mount != rootvnode->v_mount)
        && (shared_region == system_shared_region))
        && (lsf_mapping_pool_gauge() < 75)) {
        /* We don't want to run out of shared memory */
        /* map entries by starting too many private versions */
        /* of the shared library structures */
        if (p->p_flag & P_NOSHLIB) {
            error = clone_system_shared_regions(FALSE);
        } else {
            error = clone_system_shared_regions(TRUE);
        }
        if (error) {
            goto lsf_bailout_free_vput;
        }
        local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
        vm_get_shared_region(current_task(), &shared_region);
    }

    if (vattr.va_size != mapped_file_size) {
        error = EINVAL;
        goto lsf_bailout_free_vput;
    }

    if (p->p_flag & P_NOSHLIB) {
        p->p_flag = p->p_flag & ~P_NOSHLIB;
    }

    /* load alternate regions if the caller has requested.  */
    /* Note: the new regions are "clean slates" */
    if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
        error = clone_system_shared_regions(FALSE);
        if (error) {
            goto lsf_bailout_free_vput;
        }
        vm_get_shared_region(current_task(), &shared_region);
    }

    task_mapping_info.self = (vm_offset_t)shared_region;

    shared_region_mapping_info(shared_region,
            &(task_mapping_info.text_region),
            &(task_mapping_info.text_size),
            &(task_mapping_info.data_region),
            &(task_mapping_info.data_size),
            &(task_mapping_info.region_mappings),
            &(task_mapping_info.client_base),
            &(task_mapping_info.alternate_base),
            &(task_mapping_info.alternate_next),
            &(task_mapping_info.flags), &next);

    /* This is a work-around to allow executables which have been */
    /* built without knowledge of the proper shared segment to    */
    /* load.  This code has been architected as a shared region   */
    /* handler, the knowledge of where the regions are loaded is  */
    /* problematic for the extension of shared regions as it will */
    /* not be easy to know what region an item should go into.    */
    /* The code below however will get around a short term problem */
    /* with executables which believe they are loading at zero.   */

    if (((unsigned int)local_base &
        (~(task_mapping_info.text_size - 1))) !=
        task_mapping_info.client_base) {
        if (local_flags & ALTERNATE_LOAD_SITE) {
            local_base = (caddr_t)(
                (unsigned int)local_base &
                (task_mapping_info.text_size - 1));
            local_base = (caddr_t)((unsigned int)local_base
                | task_mapping_info.client_base);
        } else {
            error = EINVAL;
            goto lsf_bailout_free_vput;
        }
    }

    if ((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
            mapped_file_size,
            (vm_offset_t *)&local_base,
            map_cnt, map_list, file_control,
            &task_mapping_info, &local_flags))) {
        switch (kr) {
        case KERN_INVALID_ARGUMENT:
            error = EINVAL;
            break;
        case KERN_INVALID_ADDRESS:
            error = EACCES;
            break;
        case KERN_PROTECTION_FAILURE:
            /* save EAUTH for authentication in this */
            /* routine when locking the file or its  */
            /* component; the file is self-contained */
            error = EPERM;
            break;
        default:
            error = EINVAL;
        }
        if ((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
            printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
            for (i = 0; i < map_cnt; i++) {
                printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
                    , i, map_list[i].mapping_offset,
                    map_list[i].size,
                    map_list[i].file_offset,
                    map_list[i].protection);
            }
        }
    } else {
        if (default_regions)
            local_flags |= SYSTEM_REGION_BACKED;
        if (!(error = copyout(&local_flags, flags, sizeof (int)))) {
            error = copyout(&local_base,
                base_address, sizeof (caddr_t));
        }
    }

lsf_bailout_free_vput:
    vput(vp);

lsf_bailout_free:
    kmem_free(kernel_map, (vm_offset_t)filename_str,
        (vm_size_t)(MAXPATHLEN));
    kmem_free(kernel_map, (vm_offset_t)map_list,
        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
    return error;
}
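/*
 * reset_shared_file() resets the caller's writable copies of shared
 * library data back to read-only, copy-on-write mappings of the global
 * shared data region for each mapping in the supplied list.
 */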
struct reset_shared_file_args {
    caddr_t     *ba;
    int         map_cnt;
    sf_mapping_t    *mappings;
};

int
reset_shared_file(
    struct proc     *p,
    struct reset_shared_file_args *uap,
    int         *retval)
{
    caddr_t     *base_address = uap->ba;
    int         map_cnt = uap->map_cnt;
    sf_mapping_t    *mappings = uap->mappings;
    int         error;
    kern_return_t   kret;

    sf_mapping_t    *map_list;
    caddr_t     local_base;
    vm_offset_t map_address;
    int         i;

    /* Retrieve the base address */
    if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
        return (error);
    }

    if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
            != GLOBAL_SHARED_TEXT_SEGMENT) {
        return (EINVAL);
    }

    kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
    if (kret != KERN_SUCCESS) {
        return (ENOMEM);
    }

    if (error =
        copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
        kmem_free(kernel_map, (vm_offset_t)map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
        return (error);
    }

    for (i = 0; i < map_cnt; i++) {
        if ((map_list[i].mapping_offset
            & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
            map_address = (vm_offset_t)
                (local_base + map_list[i].mapping_offset);
            vm_deallocate(current_map(),
                map_address,
                map_list[i].size);
            vm_map(current_map(), &map_address,
                map_list[i].size, 0, SHARED_LIB_ALIAS,
                shared_data_region_handle,
                ((unsigned int)local_base
                   & SHARED_DATA_REGION_MASK) +
                (map_list[i].mapping_offset
                   & SHARED_DATA_REGION_MASK),
                TRUE, VM_PROT_READ,
                VM_PROT_READ, VM_INHERIT_SHARE);
        }
    }

    kmem_free(kernel_map, (vm_offset_t)map_list,
        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

    return (error);
}
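/*
 * new_system_shared_regions() installs a fresh set of system shared
 * regions (normally only done at boot) while leaving the calling task
 * on its original regions.
 */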
struct new_system_shared_regions_args {
    int dummy;
};

int
new_system_shared_regions(
    struct proc     *p,
    struct new_system_shared_regions_args *uap,
    int         *retval)
{
    shared_region_mapping_t regions;
    shared_region_mapping_t new_regions;

    /* get current shared region info for  */
    /* restoration after new system shared */
    /* regions are in place                */
    vm_get_shared_region(current_task(), &regions);

    /* usually only called at boot time    */
    /* shared_file_boot_time_init creates  */
    /* a new set of system shared regions  */
    /* and places them as the system       */
    /* shared regions.                     */
    shared_file_boot_time_init();

    /* set current task back to its        */
    /* original regions.                   */
    vm_get_shared_region(current_task(), &new_regions);
    shared_region_mapping_dealloc(new_regions);

    vm_set_shared_region(current_task(), regions);

    *retval = 0;
    return (0);
}
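/*
 * clone_system_shared_regions() gives the current task its own private
 * copy of the system shared regions, cloning the existing text and data
 * regions when shared_regions_active is TRUE.
 */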
int
clone_system_shared_regions(shared_regions_active)
    int shared_regions_active;
{
    shared_region_mapping_t new_shared_region;
    shared_region_mapping_t next;
    shared_region_mapping_t old_shared_region;
    struct shared_region_task_mappings old_info;
    struct shared_region_task_mappings new_info;

    if (shared_file_create_system_region(&new_shared_region))
        return (ENOMEM);
    vm_get_shared_region(current_task(), &old_shared_region);
    old_info.self = (vm_offset_t)old_shared_region;
    shared_region_mapping_info(old_shared_region,
        &(old_info.text_region),
        &(old_info.text_size),
        &(old_info.data_region),
        &(old_info.data_size),
        &(old_info.region_mappings),
        &(old_info.client_base),
        &(old_info.alternate_base),
        &(old_info.alternate_next),
        &(old_info.flags), &next);
    new_info.self = (vm_offset_t)new_shared_region;
    shared_region_mapping_info(new_shared_region,
        &(new_info.text_region),
        &(new_info.text_size),
        &(new_info.data_region),
        &(new_info.data_size),
        &(new_info.region_mappings),
        &(new_info.client_base),
        &(new_info.alternate_base),
        &(new_info.alternate_next),
        &(new_info.flags), &next);

    if (shared_regions_active) {
        if (vm_region_clone(old_info.text_region, new_info.text_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 1");
            shared_region_mapping_dealloc(new_shared_region);
            return (EINVAL);
        }
        if (vm_region_clone(old_info.data_region, new_info.data_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 2");
            shared_region_mapping_dealloc(new_shared_region);
            return (EINVAL);
        }
        shared_region_object_chain_attach(
            new_shared_region, old_shared_region);
    }
    if (vm_map_region_replace(current_map(), old_info.text_region,
            new_info.text_region, old_info.client_base,
            old_info.client_base+old_info.text_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 3");
        shared_region_mapping_dealloc(new_shared_region);
        return (EINVAL);
    }
    if (vm_map_region_replace(current_map(), old_info.data_region,
            new_info.data_region,
            old_info.client_base + old_info.text_size,
            old_info.client_base
                + old_info.text_size + old_info.data_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 4");
        shared_region_mapping_dealloc(new_shared_region);
        return (EINVAL);
    }
    vm_set_shared_region(current_task(), new_shared_region);

    /* consume the reference which wasn't accounted for in object */
    /* chain attach */
    if (!shared_regions_active)
        shared_region_mapping_dealloc(old_shared_region);

    return (0);
}
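/*
 * Per-user page-in profile support.  The structures below describe the
 * on-disk "names" and "data" profile files and the small in-kernel cache
 * of open profile file pairs.
 */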
extern vm_map_t bsd_pageable_map;

/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file     */

struct profile_names_header {
    unsigned int    number_of_profiles;
    unsigned int    user_id;
    unsigned int    version;
    off_t           element_array;
    unsigned int    spare1;
    unsigned int    spare2;
    unsigned int    spare3;
};

struct profile_element {
    off_t           addr;
    vm_size_t       size;
    unsigned int    mod_date;
    unsigned int    inode;
    char            name[12];
};

struct global_profile {
    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     buf_ptr;
    unsigned int    user;
    unsigned int    age;
    unsigned int    busy;
};

struct global_profile_cache {
    int                     max_ele;
    unsigned int            age;
    struct global_profile   profiles[3];
};

struct global_profile_cache global_user_profile_cache =
    {3, 0, NULL, NULL, NULL, 0, 0, 0,
        NULL, NULL, NULL, 0, 0, 0,
        NULL, NULL, NULL, 0, 0, 0 };
/* BSD_OPEN_PAGE_CACHE_FILES:                                  */
/* Caller provides a user id.  This id was used in            */
/* prepare_profile_database to create two unique absolute     */
/* file paths to the associated profile files.  These files   */
/* are either opened or bsd_open_page_cache_files returns an  */
/* error.  The header of the names file is then consulted.    */
/* The header and the vnodes for the names and data files are */
/* returned.                                                   */

int
bsd_open_page_cache_files(
    unsigned int    user,
    struct global_profile **profile)
{
    char        *cache_path = "/var/vm/app_profile/";
    struct proc *p;
    int         error;
    int         resid;
    off_t       resid_off;
    unsigned int    lru;
    vm_size_t   size;

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t names_buf;
    vm_offset_t buf_ptr;

    int         profile_names_length;
    int         profile_data_length;
    char        *profile_data_string;
    char        *profile_names_string;
    char        *substring;

    struct vattr    vattr;

    struct profile_names_header *profile_header;
    kern_return_t   ret;

    struct nameidata    nd_names;
    struct nameidata    nd_data;

    int         i;

    p = current_proc();

restart:
    for (i = 0; i < global_user_profile_cache.max_ele; i++) {
        if ((global_user_profile_cache.profiles[i].user == user)
            && (global_user_profile_cache.profiles[i].data_vp
                                != NULL)) {
            *profile = &global_user_profile_cache.profiles[i];
            /* already in cache, we're done */
            if ((*profile)->busy) {
                /*
                 * drop funnel and wait
                 */
                (void)tsleep((void *)
                    *profile,
                    PRIBIO, "app_profile", 0);
                goto restart;
            }
            (*profile)->busy = 1;
            (*profile)->age = global_user_profile_cache.age;
            global_user_profile_cache.age += 1;
            return 0;
        }
    }

    lru = global_user_profile_cache.age;
    for (i = 0; i < global_user_profile_cache.max_ele; i++) {
        if (global_user_profile_cache.profiles[i].data_vp == NULL) {
            *profile = &global_user_profile_cache.profiles[i];
            (*profile)->age = global_user_profile_cache.age;
            global_user_profile_cache.age += 1;
            break;
        }
        if (global_user_profile_cache.profiles[i].age < lru) {
            lru = global_user_profile_cache.profiles[i].age;
            *profile = &global_user_profile_cache.profiles[i];
        }
    }

    if ((*profile)->busy) {
        /*
         * drop funnel and wait
         */
        (void)tsleep((void *)
            &(global_user_profile_cache),
            PRIBIO, "app_profile", 0);
        goto restart;
    }
    (*profile)->busy = 1;
    (*profile)->user = user;

    if ((*profile)->data_vp != NULL) {
        kmem_free(kernel_map,
            (*profile)->buf_ptr, 4 * PAGE_SIZE);
        if ((*profile)->names_vp) {
            vrele((*profile)->names_vp);
            (*profile)->names_vp = NULL;
        }
        if ((*profile)->data_vp) {
            vrele((*profile)->data_vp);
            (*profile)->data_vp = NULL;
        }
    }

    /* put dummy value in for now to get */
    /* competing request to wait above   */
    /* until we are finished             */
    (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

    /* Try to open the appropriate users profile files */
    /* If neither file is present, try to create them  */
    /* If one file is present and the other not, fail. */
    /* If the files do exist, check them for the app_file */
    /* requested and read it in if present */

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&profile_data_string, PATH_MAX);
    if (ret) {
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        return ENOMEM;
    }

    /* Split the buffer in half since we know the size of */
    /* our file path and our allocation is adequate for   */
    /* both file path names                                */
    profile_names_string = profile_data_string + (PATH_MAX/2);

    strcpy(profile_data_string, cache_path);
    strcpy(profile_names_string, cache_path);
    profile_names_length = profile_data_length
        = strlen(profile_data_string);
    substring = profile_data_string + profile_data_length;
    sprintf(substring, "%x_data", user);
    substring = profile_names_string + profile_names_length;
    sprintf(substring, "%x_names", user);

    /* We now have the absolute file names */

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
    if (ret) {
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        return ENOMEM;
    }

    NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
        UIO_SYSSPACE, profile_names_string, p);
    NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
        UIO_SYSSPACE, profile_data_string, p);
    if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
        printf("bsd_open_page_cache_files: CacheData file not found %s\n",
            profile_data_string);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        return error;
    }

    data_vp = nd_data.ni_vp;
    VOP_UNLOCK(data_vp, 0, p);

    if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
        printf("bsd_open_page_cache_files: NamesData file not found %s\n",
            profile_data_string);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        vrele(data_vp);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        return error;
    }
    names_vp = nd_names.ni_vp;

    if (error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
        printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        vput(names_vp);
        vrele(data_vp);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        return error;
    }

    size = vattr.va_size;
    if (size > 4 * PAGE_SIZE)
        size = 4 * PAGE_SIZE;
    buf_ptr = names_buf;
    resid_off = 0;

    while (size) {
        error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
            size, resid_off,
            UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
        if ((error) || (size == resid)) {
            kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);
            kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
            vput(names_vp);
            vrele(data_vp);
            (*profile)->data_vp = NULL;
            (*profile)->busy = 0;
            return error;
        }
        buf_ptr += size-resid;
        resid_off += size-resid;
        size = resid;
    }

    VOP_UNLOCK(names_vp, 0, p);
    kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
    (*profile)->names_vp = names_vp;
    (*profile)->data_vp = data_vp;
    (*profile)->buf_ptr = names_buf;
    return 0;
}
void
bsd_close_page_cache_files(
    struct global_profile *profile)
{
    profile->busy = 0;
    wakeup(profile);
}
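/*
 * bsd_read_page_cache_file() looks up the named application in the
 * per-user profile database and, if a profile is found, reads it into a
 * freshly allocated kernel buffer.
 */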
int
bsd_read_page_cache_file(
    unsigned int    user,
    int         *fid,
    int         *mod,
    char        *app_name,
    struct vnode    *app_vp,
    vm_offset_t *buffer,
    vm_offset_t *buf_size)
{
    boolean_t   funnel_state;

    struct proc *p;
    int         error;
    int         resid;

    off_t       profile;
    unsigned int    profile_size;

    vm_offset_t names_buf;
    struct vattr    vattr;

    kern_return_t   ret;

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    struct vnode    *vp1;
    struct vnode    *vp2;

    struct global_profile *uid_files;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    /* Try to open the appropriate users profile files */
    /* If neither file is present, try to create them  */
    /* If one file is present and the other not, fail. */
    /* If the files do exist, check them for the app_file */
    /* requested and read it in if present */

    error = bsd_open_page_cache_files(user, &uid_files);
    if (error) {
        thread_funnel_set(kernel_flock, funnel_state);
        return EINVAL;
    }

    p = current_proc();

    names_vp = uid_files->names_vp;
    data_vp = uid_files->data_vp;
    names_buf = uid_files->buf_ptr;

    /*
     * Get locks on both files, get the vnode with the lowest address first
     */
    if ((unsigned int)names_vp < (unsigned int)data_vp) {
        vp1 = names_vp;
        vp2 = data_vp;
    } else {
        vp1 = data_vp;
        vp2 = names_vp;
    }
    error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }
    error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
        VOP_UNLOCK(vp1, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }

    if (error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }

    *fid = vattr.va_fileid;
    *mod = vattr.va_mtime.tv_sec;

    if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
            (unsigned int) vattr.va_mtime.tv_sec,
            vattr.va_fileid, &profile, &profile_size) == 0) {
        /* profile is an offset in the profile data base */
        /* It is zero if no profile data was found */

        if (profile_size == 0) {
            VOP_UNLOCK(names_vp, 0, p);
            VOP_UNLOCK(data_vp, 0, p);
            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);
            return 0;
        }
        ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size));
        if (ret) {
            VOP_UNLOCK(names_vp, 0, p);
            VOP_UNLOCK(data_vp, 0, p);
            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);
            return ENOMEM;
        }
        *buf_size = profile_size;
        while (profile_size) {
            error = vn_rdwr(UIO_READ, data_vp,
                (caddr_t) *buffer, profile_size,
                profile, UIO_SYSSPACE, IO_NODELOCKED,
                p->p_ucred, &resid, p);
            if ((error) || (profile_size == resid)) {
                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                bsd_close_page_cache_files(uid_files);
                kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
                thread_funnel_set(kernel_flock, funnel_state);
                return EINVAL;
            }
            profile += profile_size - resid;
            profile_size = resid;
        }
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return 0;
    } else {
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return EINVAL;
    }
}
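/*
 * bsd_search_page_cache_data_base() scans the names file for an entry
 * matching the application name, modification date and inode, returning
 * the profile's offset and size within the data file.
 */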
int
bsd_search_page_cache_data_base(
    struct vnode            *vp,
    struct profile_names_header *database,
    char                *app_name,
    unsigned int            mod_date,
    unsigned int            inode,
    off_t               *profile,
    unsigned int            *profile_size)
{
    struct proc *p;

    unsigned int    i;
    struct profile_element  *element;
    unsigned int    ele_total;
    unsigned int    extended_list = 0;
    off_t       file_off = 0;
    unsigned int    size;
    off_t       resid_off;
    int         resid;
    vm_offset_t local_buf = NULL;

    int         error;
    kern_return_t   ret;

    p = current_proc();

    if (((vm_offset_t)database->element_array) !=
                sizeof(struct profile_names_header)) {
        return EINVAL;
    }
    element = (struct profile_element *)(
            (vm_offset_t)database->element_array +
            (vm_offset_t)database);

    ele_total = database->number_of_profiles;

    *profile = 0;
    *profile_size = 0;
    while (ele_total) {
        /* note: code assumes header + n*ele comes out on a page boundary */
        if (((local_buf == 0) && (sizeof(struct profile_names_header) +
            (ele_total * sizeof(struct profile_element)))
            > (PAGE_SIZE * 4)) ||
            ((local_buf != 0) &&
            (ele_total * sizeof(struct profile_element))
            > (PAGE_SIZE * 4))) {
            extended_list = ele_total;
            if (element == (struct profile_element *)
                ((vm_offset_t)database->element_array +
                (vm_offset_t)database)) {
                ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
            } else {
                ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
            }
            extended_list -= ele_total;
        }
        for (i = 0; i < ele_total; i++) {
            if ((mod_date == element[i].mod_date)
                && (inode == element[i].inode)) {
                if (strncmp(element[i].name, app_name, 12) == 0) {
                    *profile = element[i].addr;
                    *profile_size = element[i].size;
                    if (local_buf != NULL) {
                        kmem_free(kernel_map,
                            (vm_offset_t)local_buf, 4 * PAGE_SIZE);
                    }
                    return 0;
                }
            }
        }
        if (extended_list == 0)
            break;
        if (local_buf == NULL) {
            ret = kmem_alloc(kernel_map,
                (vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
            if (ret != KERN_SUCCESS) {
                return ENOMEM;
            }
        }
        element = (struct profile_element *)local_buf;
        ele_total = extended_list;
        extended_list = 0;
        file_off += 4 * PAGE_SIZE;
        if ((ele_total * sizeof(struct profile_element)) >
                            (PAGE_SIZE * 4)) {
            size = PAGE_SIZE * 4;
        } else {
            size = ele_total * sizeof(struct profile_element);
        }
        resid_off = 0;
        while (size) {
            error = vn_rdwr(UIO_READ, vp,
                (caddr_t)(local_buf + resid_off),
                size, file_off + resid_off, UIO_SYSSPACE,
                IO_NODELOCKED, p->p_ucred, &resid, p);
            if ((error) || (size == resid)) {
                if (local_buf != NULL) {
                    kmem_free(kernel_map,
                        (vm_offset_t)local_buf,
                        4 * PAGE_SIZE);
                }
                return EINVAL;
            }
            resid_off += size-resid;
            size = resid;
        }
    }
    if (local_buf != NULL) {
        kmem_free(kernel_map,
            (vm_offset_t)local_buf, 4 * PAGE_SIZE);
    }
    return 0;
}
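/*
 * bsd_write_page_cache_file() appends a new profile to the per-user data
 * file and records its location in the names file, unless a matching
 * entry already exists.
 */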
int
bsd_write_page_cache_file(
    unsigned int    user,
    char        *file_name,
    caddr_t     buffer,
    vm_size_t   size,
    int         mod,
    int         fid)
{
    struct proc     *p;
    struct nameidata    nd;
    struct vnode        *vp = 0;
    int         resid;
    off_t           resid_off;
    int         error;
    boolean_t       funnel_state;
    struct vattr        data_vattr;

    off_t           profile;
    unsigned int        profile_size;

    vm_offset_t names_buf;
    struct vnode    *names_vp;
    struct vnode    *data_vp;
    struct vnode    *vp1;
    struct vnode    *vp2;

    struct profile_names_header *profile_header;
    off_t           name_offset;

    struct global_profile *uid_files;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    error = bsd_open_page_cache_files(user, &uid_files);
    if (error) {
        thread_funnel_set(kernel_flock, funnel_state);
        return EINVAL;
    }

    p = current_proc();

    names_vp = uid_files->names_vp;
    data_vp = uid_files->data_vp;
    names_buf = uid_files->buf_ptr;

    /*
     * Get locks on both files, get the vnode with the lowest address first
     */
    if ((unsigned int)names_vp < (unsigned int)data_vp) {
        vp1 = names_vp;
        vp2 = data_vp;
    } else {
        vp1 = data_vp;
        vp2 = names_vp;
    }

    error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }
    error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
        VOP_UNLOCK(vp1, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }

    /* Stat data file for size */

    if (error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }

    if (bsd_search_page_cache_data_base(names_vp,
            (struct profile_names_header *)names_buf,
            file_name, (unsigned int) mod,
            fid, &profile, &profile_size) == 0) {
        /* profile is an offset in the profile data base */
        /* It is zero if no profile data was found */

        if (profile_size == 0) {
            unsigned int    header_size;
            vm_offset_t buf_ptr;

            /* Our Write case */

            /* read header for last entry */
            profile_header =
                (struct profile_names_header *)names_buf;
            name_offset = sizeof(struct profile_names_header) +
                (sizeof(struct profile_element)
                    * profile_header->number_of_profiles);
            profile_header->number_of_profiles += 1;

            if (name_offset < PAGE_SIZE * 4) {
                struct profile_element  *name;
                /* write new entry */
                name = (struct profile_element *)
                    (names_buf + (vm_offset_t)name_offset);
                name->addr = data_vattr.va_size;
                name->size = size;
                name->mod_date = mod;
                name->inode = fid;
                strncpy (name->name, file_name, 12);
            } else {
                unsigned int    ele_size;
                struct profile_element  name;
                /* write new entry */
                name.addr = data_vattr.va_size;
                name.size = size;
                name.mod_date = mod;
                name.inode = fid;
                strncpy (name.name, file_name, 12);
                /* write element out separately */
                ele_size = sizeof(struct profile_element);
                buf_ptr = (vm_offset_t)&name;
                resid_off = name_offset;

                while (ele_size) {
                    error = vn_rdwr(UIO_WRITE, names_vp,
                            (caddr_t)buf_ptr,
                            ele_size, resid_off,
                            UIO_SYSSPACE, IO_NODELOCKED,
                            p->p_ucred, &resid, p);
                    if (error) {
                        printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
                        VOP_UNLOCK(names_vp, 0, p);
                        VOP_UNLOCK(data_vp, 0, p);
                        bsd_close_page_cache_files(
                            uid_files);
                        thread_funnel_set(
                            kernel_flock, funnel_state);
                        return error;
                    }
                    buf_ptr += (vm_offset_t)
                            ele_size-resid;
                    resid_off += ele_size-resid;
                    ele_size = resid;
                }
            }

            if (name_offset < PAGE_SIZE * 4) {
                header_size = name_offset +
                    sizeof(struct profile_element);
            } else {
                header_size =
                    sizeof(struct profile_names_header);
            }
            buf_ptr = (vm_offset_t)profile_header;
            resid_off = 0;

            /* write names file header */
            while (header_size) {
                error = vn_rdwr(UIO_WRITE, names_vp,
                        (caddr_t)buf_ptr,
                        header_size, resid_off,
                        UIO_SYSSPACE, IO_NODELOCKED,
                        p->p_ucred, &resid, p);
                if (error) {
                    VOP_UNLOCK(names_vp, 0, p);
                    VOP_UNLOCK(data_vp, 0, p);
                    printf("bsd_write_page_cache_file: Can't write header %x\n", user);
                    bsd_close_page_cache_files(
                        uid_files);
                    thread_funnel_set(
                        kernel_flock, funnel_state);
                    return error;
                }
                buf_ptr += (vm_offset_t)header_size-resid;
                resid_off += header_size-resid;
                header_size = resid;
            }
            /* write profile to data file */
            resid_off = data_vattr.va_size;
            while (size) {
                error = vn_rdwr(UIO_WRITE, data_vp,
                        (caddr_t)buffer, size, resid_off,
                        UIO_SYSSPACE, IO_NODELOCKED,
                        p->p_ucred, &resid, p);
                if (error) {
                    VOP_UNLOCK(names_vp, 0, p);
                    VOP_UNLOCK(data_vp, 0, p);
                    printf("bsd_write_page_cache_file: Can't write header %x\n", user);
                    bsd_close_page_cache_files(
                        uid_files);
                    thread_funnel_set(
                        kernel_flock, funnel_state);
                    return error;
                }
                buffer += size-resid;
                resid_off += size-resid;
                size = resid;
            }
            VOP_UNLOCK(names_vp, 0, p);
            VOP_UNLOCK(data_vp, 0, p);
            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);
            return 0;
        }
        /* Someone else wrote a twin profile before us */
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return 0;
    } else {
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return EINVAL;
    }
}
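/*
 * prepare_profile_database() creates the per-user "<uid>_names" and
 * "<uid>_data" files under /var/vm/app_profile/, writes an empty
 * names-file header, and sets both files' owner to the given user.
 */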
int
prepare_profile_database(int user)
{
    char        *cache_path = "/var/vm/app_profile/";
    struct proc *p;
    int         error;
    int         resid;
    off_t       resid_off;
    vm_size_t   size;

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t names_buf;
    vm_offset_t buf_ptr;

    int         profile_names_length;
    int         profile_data_length;
    char        *profile_data_string;
    char        *profile_names_string;
    char        *substring;

    struct vattr    vattr;

    struct profile_names_header *profile_header;
    kern_return_t   ret;

    struct nameidata    nd_names;
    struct nameidata    nd_data;

    p = current_proc();

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&profile_data_string, PATH_MAX);
    if (ret) {
        return ENOMEM;
    }

    /* Split the buffer in half since we know the size of */
    /* our file path and our allocation is adequate for   */
    /* both file path names                                */
    profile_names_string = profile_data_string + (PATH_MAX/2);

    strcpy(profile_data_string, cache_path);
    strcpy(profile_names_string, cache_path);
    profile_names_length = profile_data_length
        = strlen(profile_data_string);
    substring = profile_data_string + profile_data_length;
    sprintf(substring, "%x_data", user);
    substring = profile_names_string + profile_names_length;
    sprintf(substring, "%x_names", user);

    /* We now have the absolute file names */

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
    if (ret) {
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        return ENOMEM;
    }

    NDINIT(&nd_names, LOOKUP, FOLLOW,
        UIO_SYSSPACE, profile_names_string, p);
    NDINIT(&nd_data, LOOKUP, FOLLOW,
        UIO_SYSSPACE, profile_data_string, p);

    if (error = vn_open(&nd_data,
        O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        return 0;
    }

    data_vp = nd_data.ni_vp;
    VOP_UNLOCK(data_vp, 0, p);

    if (error = vn_open(&nd_names,
        O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
        printf("prepare_profile_database: Can't create CacheNames %s\n",
            profile_data_string);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        vrele(data_vp);
        return error;
    }

    names_vp = nd_names.ni_vp;

    /* Write Header for new names file */

    profile_header = (struct profile_names_header *)names_buf;

    profile_header->number_of_profiles = 0;
    profile_header->user_id = user;
    profile_header->version = 1;
    profile_header->element_array =
        sizeof(struct profile_names_header);
    profile_header->spare1 = 0;
    profile_header->spare2 = 0;
    profile_header->spare3 = 0;

    size = sizeof(struct profile_names_header);
    buf_ptr = (vm_offset_t)profile_header;
    resid_off = 0;

    while (size) {
        error = vn_rdwr(UIO_WRITE, names_vp,
                (caddr_t)buf_ptr, size, resid_off,
                UIO_SYSSPACE, IO_NODELOCKED,
                p->p_ucred, &resid, p);
        if (error) {
            printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
            kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
            kmem_free(kernel_map,
                (vm_offset_t)profile_data_string,
                PATH_MAX);
            vput(names_vp);
            vrele(data_vp);
            return error;
        }
        buf_ptr += size-resid;
        resid_off += size-resid;
        size = resid;
    }

    VATTR_NULL(&vattr);
    vattr.va_uid = user;
    error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);
    if (error) {
        printf("prepare_profile_database: "
            "Can't set user %s\n", profile_names_string);
    }
    vput(names_vp);

    error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        vrele(data_vp);
        printf("prepare_profile_database: cannot lock data file %s\n",
            profile_data_string);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        return error;
    }
    VATTR_NULL(&vattr);
    vattr.va_uid = user;
    error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);
    if (error) {
        printf("prepare_profile_database: "
            "Can't set user %s\n", profile_data_string);
    }
    vput(data_vp);

    kmem_free(kernel_map,
        (vm_offset_t)profile_data_string, PATH_MAX);
    kmem_free(kernel_map,
        (vm_offset_t)names_buf, 4 * PAGE_SIZE);
    return 0;
}