/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
extern shared_region_mapping_t system_shared_region;
extern zone_t lsf_zone;
int
useracc(addr, len, prot)
	caddr_t	addr;
	u_int	len;
	int	prot;
{
	return (vm_map_check_protection(
			current_map(),
			trunc_page(addr), round_page(addr+len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
int
vslock(addr, len)
	caddr_t	addr;
	int	len;
{
	kern_return_t kret;

	kret = vm_map_wire(current_map(), trunc_page(addr),
			round_page(addr+len),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);

	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
int
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	int	len;
	int	dirtied;
{
	pmap_t		pmap;
	vm_page_t	pg;
	vm_offset_t	vaddr, paddr;
	kern_return_t	kret;

	if (dirtied) {
		pmap = get_task_pmap(current_task());
		for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
				vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}

	kret = vm_map_unwire(current_map(), trunc_page(addr),
			round_page(addr+len), FALSE);

	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
#if	defined(sun) || BALANCE || defined(m88k)
#else	/*defined(sun) || BALANCE || defined(m88k)*/

int
subyte(addr, byte)
	void	*addr;
	int	byte;
{
	char	character;

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int
suibyte(addr, byte)
	void	*addr;
	int	byte;
{
	char	character;

	character = (char)byte;
	return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int
fubyte(addr)
	void	*addr;
{
	unsigned char	byte;

	if (copyin(addr, (void *) &byte, sizeof(char)))
		return (-1);
	return (byte);
}

int
fuibyte(addr)
	void	*addr;
{
	unsigned char	byte;

	if (copyin(addr, (void *) &(byte), sizeof(char)))
		return (-1);
	return (byte);
}

int
suword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long
fuword(addr)
	void	*addr;
{
	long	word;

	if (copyin(addr, (void *) &word, sizeof(int)))
		return (-1);
	return (word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

int
suiword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long
fuiword(addr)
	void	*addr;
{
	long	word;

	if (copyin(addr, (void *) &word, sizeof(int)))
		return (-1);
	return (word);
}
#endif	/* defined(sun) || BALANCE || defined(m88k) */
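/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build with #if 0): how a kernel consumer might pair fuword()/suword()
 * to read, modify and write back a user-space word.  "uaddr" is a
 * hypothetical user address, assumed already validated with useracc().
 * Note the ambiguity inherited from the primitives: -1 doubles as both
 * a legitimate value and the fault indication.
 */
#if 0
	long word;

	word = fuword(uaddr);		/* fetch; returns -1 on fault */
	if (word == -1)
		return (EFAULT);
	if (suword(uaddr, word | 0x1))	/* store; nonzero means fault */
		return (EFAULT);
#endif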
kern_return_t
pid_for_task(t, x)
	mach_port_t	t;
	int		*x;
{
	struct proc	*p;
	task_t		t1;
	extern task_t	port_name_to_task(mach_port_t t);
	int		pid = -1;
	kern_return_t	err = KERN_SUCCESS;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid = p->p_pid;
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
		task_deallocate(t1);
	}
	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
	thread_funnel_set(kernel_flock, funnel_state);
	return (err);
}
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;
	int		pid;
	mach_port_t	*t;
{
	struct proc	*p;
	struct proc	*p1;
	task_t		t1;
	mach_port_t	tret;
	extern task_t	port_name_to_task(mach_port_t tp);
	void		*sright;
	int		error = 0;
	boolean_t	funnel_state;

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
		return (KERN_FAILURE);
	}

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

restart:
	p1 = get_bsdtask_info(t1);
	if (
		((p = pfind(pid)) != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
		((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
		|| !(suser(p1->p_ucred, &p1->p_acflag)))
		&& (p->p_stat != SZOMB)
		) {
		if (p->task != TASK_NULL) {
			if (!task_reference_try(p->task)) {
				mutex_pause(); /* temp loss of funnel */
				goto restart;
			}
			sright = (void *)convert_task_to_port(p->task);
			tret = (mach_port_t)ipc_port_copyout_send(sright,
				get_task_ipcspace(current_task()));
		} else
			tret = MACH_PORT_NULL;
		(void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
		task_deallocate(t1);
		error = KERN_SUCCESS;
		goto tfpout;
	}
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
	error = KERN_FAILURE;
tfpout:
	thread_funnel_set(kernel_flock, funnel_state);
	return (error);
}
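/*
 * Illustrative user-space sketch (not kernel code, kept out of the
 * build with #if 0): the calling convention the routine above serves.
 * A caller passes its own task port as target_tport and, assuming it
 * is privileged or owns the target pid per the comment above, receives
 * the target's task port through the out parameter.
 */
#if 0
	mach_port_t	task;
	kern_return_t	kr;

	kr = task_for_pid(mach_task_self(), pid, &task);
	if (kr != KERN_SUCCESS)
		fprintf(stderr, "no access to pid %d\n", pid);
#endif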
struct load_shared_file_args {
	char		*filename;
	caddr_t		mfa;
	u_long		mfs;
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
	int		*flags;
};

int
load_shared_file(
	struct proc			*p,
	struct load_shared_file_args	*uap,
	int				*retval)
{
	caddr_t		mapped_file_addr = uap->mfa;
	u_long		mapped_file_size = uap->mfs;
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	char		*filename = uap->filename;
	int		*flags = uap->flags;
	struct vnode	*vp = 0;
	struct nameidata nd, *ndp;
	char		*filename_str;
	register int	error;
	kern_return_t	kr;

	struct vattr	vattr;
	memory_object_control_t file_control;
	sf_mapping_t	*map_list;
	caddr_t		local_base;
	int		local_flags;
	int		caller_flags;
	int		i;
	int		default_regions = 0;
	vm_size_t	dummy;
	kern_return_t	kret;

	shared_region_mapping_t shared_region;
	struct shared_region_task_mappings task_mapping_info;
	shared_region_mapping_t next;

	ndp = &nd;
	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto lsf_bailout;
	}
	if (error = copyin(flags, &local_flags, sizeof (int))) {
		goto lsf_bailout;
	}

	if(local_flags & QUERY_IS_SYSTEM_REGION) {
		vm_get_shared_region(current_task(), &shared_region);
		if (shared_region == system_shared_region) {
			local_flags = SYSTEM_REGION_BACKED;
		} else {
			local_flags = 0;
		}
		error = copyout(&local_flags, flags, sizeof (int));
		goto lsf_bailout;
	}

	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto lsf_bailout;
	}
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
				(vm_size_t)(MAXPATHLEN));
		error = ENOMEM;
		goto lsf_bailout;
	}
	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;
	}
	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, (size_t *)&dummy)) {
		goto lsf_bailout_free;
	}
	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
			filename_str, p);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;
	}

	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;
	}

	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}
	vm_get_shared_region(current_task(), &shared_region);
	if(shared_region == system_shared_region) {
		default_regions = 1;
	}
	if(((vp->v_mount != rootvnode->v_mount)
			&& (shared_region == system_shared_region))
			&& (lsf_mapping_pool_gauge() < 75)) {
		/* We don't want to run out of shared memory        */
		/* map entries by starting too many private versions */
		/* of the shared library structures                  */
		if(p->p_flag & P_NOSHLIB) {
			error = clone_system_shared_regions(FALSE);
		} else {
			error = clone_system_shared_regions(TRUE);
		}
		if (error) {
			goto lsf_bailout_free_vput;
		}
		local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
		vm_get_shared_region(current_task(), &shared_region);
	}
	if(vattr.va_size != mapped_file_size) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	if(p->p_flag & P_NOSHLIB) {
		p->p_flag = p->p_flag & ~P_NOSHLIB;
	}

	/* load alternate regions if the caller has requested. */
	/* Note: the new regions are "clean slates"            */
	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
		error = clone_system_shared_regions(FALSE);
		if (error) {
			goto lsf_bailout_free_vput;
		}
		vm_get_shared_region(current_task(), &shared_region);
	}
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.flags), &next);
	/* This is a work-around to allow executables which have been  */
	/* built without knowledge of the proper shared segment to     */
	/* load.  Because this code is architected as a shared region  */
	/* handler, knowing where the regions are loaded is a problem  */
	/* for the extension of shared regions: it will not be easy    */
	/* to know what region an item should go into.  The code       */
	/* below, however, gets around a short-term problem with       */
	/* executables which believe they are loading at zero.         */
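	/*
	 * Worked example with illustrative values (not taken from this
	 * file): assume client_base is 0x90000000 and text_size is
	 * 0x10000000.  A caller that believes it is loading at
	 * local_base 0x00003000 fails the mask test below, so with
	 * ALTERNATE_LOAD_SITE set the offset within the segment
	 * (0x00003000) is kept and rebased onto client_base, giving an
	 * effective local_base of 0x90003000.
	 */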
	if (((unsigned int)local_base &
		(~(task_mapping_info.text_size - 1))) !=
		task_mapping_info.client_base) {
		if(local_flags & ALTERNATE_LOAD_SITE) {
			local_base = (caddr_t)(
				(unsigned int)local_base &
				(task_mapping_info.text_size - 1));
			local_base = (caddr_t)((unsigned int)local_base
				| task_mapping_info.client_base);
		} else {
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
	}
	if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			mapped_file_size,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_control,
			&task_mapping_info, &local_flags))) {
		switch (kr) {
		case KERN_FAILURE:
			error = EINVAL;
			break;
		case KERN_INVALID_ARGUMENT:
			error = EINVAL;
			break;
		case KERN_INVALID_ADDRESS:
			error = EACCES;
			break;
		case KERN_PROTECTION_FAILURE:
			/* save EAUTH for authentication in this */
			/* routine */
			error = EPERM;
			break;
		case KERN_NO_SPACE:
			error = ENOMEM;
			break;
		default:
			error = EINVAL;
		}
		if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
			for(i=0; i<map_cnt; i++) {
				printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
					, i, map_list[i].mapping_offset,
					map_list[i].size,
					map_list[i].file_offset,
					map_list[i].protection);
			}
		}
	} else {
		if(default_regions)
			local_flags |= SYSTEM_REGION_BACKED;
		if(!(error = copyout(&local_flags, flags, sizeof (int)))) {
			error = copyout(&local_base,
				base_address, sizeof (caddr_t));
		}
	}
lsf_bailout_free_vput:
	vput(vp);

lsf_bailout_free:
	kmem_free(kernel_map, (vm_offset_t)filename_str,
			(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
	return error;
}
struct reset_shared_file_args {
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
};

int
reset_shared_file(
	struct proc			*p,
	struct reset_shared_file_args	*uap,
	int				*retval)
{
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	register int	error;
	kern_return_t	kret;

	sf_mapping_t	*map_list;
	caddr_t		local_base;
	vm_offset_t	map_address;
	int		i;
	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto rsf_bailout;
	}

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
			!= GLOBAL_SHARED_TEXT_SEGMENT) {
		error = EINVAL;
		goto rsf_bailout;
	}

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto rsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		kmem_free(kernel_map, (vm_offset_t)map_list,
				(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		goto rsf_bailout;
	}
	for (i = 0; i < map_cnt; i++) {
		if((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),
				map_address,
				map_list[i].size);
			vm_map(current_map(), &map_address,
				map_list[i].size, 0, SHARED_LIB_ALIAS,
				shared_data_region_handle,
				((unsigned int)local_base
				   & SHARED_DATA_REGION_MASK) +
				(map_list[i].mapping_offset
				   & SHARED_DATA_REGION_MASK),
				TRUE, VM_PROT_READ,
				VM_PROT_READ, VM_INHERIT_SHARE);
		}
	}

	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	error = 0;

rsf_bailout:
	return error;
}
struct new_system_shared_regions_args {
	int	dummy;
};

int
new_system_shared_regions(
	struct proc				*p,
	struct new_system_shared_regions_args	*uap,
	int					*retval)
{
	shared_region_mapping_t	regions;
	shared_region_mapping_t	new_regions;

	if(!(is_suser())) {
		*retval = EINVAL;
		return EINVAL;
	}

	/* get current shared region info for  */
	/* restoration after new system shared */
	/* regions are in place                */
	vm_get_shared_region(current_task(), &regions);

	/* usually only called at boot time;   */
	/* shared_file_boot_time_init creates  */
	/* a new set of system shared regions  */
	/* and places them as the system       */
	/* shared regions.                     */
	shared_file_boot_time_init();

	/* set current task back to its        */
	/* original regions.                   */
	vm_get_shared_region(current_task(), &new_regions);
	shared_region_mapping_dealloc(new_regions);

	vm_set_shared_region(current_task(), regions);

	*retval = 0;
	return 0;
}
int
clone_system_shared_regions(shared_regions_active)
	int	shared_regions_active;
{
	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	if (shared_file_create_system_region(&new_shared_region))
		return (ENOMEM);
	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
			&(old_info.text_region),
			&(old_info.text_size),
			&(old_info.data_region),
			&(old_info.data_size),
			&(old_info.region_mappings),
			&(old_info.client_base),
			&(old_info.alternate_base),
			&(old_info.alternate_next),
			&(old_info.flags), &next);
	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
			&(new_info.text_region),
			&(new_info.text_size),
			&(new_info.data_region),
			&(new_info.data_size),
			&(new_info.region_mappings),
			&(new_info.client_base),
			&(new_info.alternate_base),
			&(new_info.alternate_next),
			&(new_info.flags), &next);
	if(shared_regions_active) {
		if(vm_region_clone(old_info.text_region, new_info.text_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);
			return (EINVAL);
		}
		if (vm_region_clone(old_info.data_region, new_info.data_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 2");
			shared_region_mapping_dealloc(new_shared_region);
			return (EINVAL);
		}
		shared_region_object_chain_attach(
				new_shared_region, old_shared_region);
	}
	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base+old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	if(vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,
			old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	vm_set_shared_region(current_task(), new_shared_region);

	/* consume the reference which wasn't accounted for in object */
	/* chain attach                                               */
	if(!shared_regions_active)
		shared_region_mapping_dealloc(old_shared_region);

	return (0);
}
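/*
 * Usage note (taken from the call sites in load_shared_file above):
 * the argument says whether the existing region contents should carry
 * over.  With TRUE, the current text and data regions are cloned and
 * the new region is chained to the old one; with FALSE, the new region
 * starts as a clean slate and the old region is simply released.  In
 * both cases the caller's map ends up pointing at the new region.
 */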
extern vm_map_t bsd_pageable_map;

/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file     */

struct profile_names_header {
	unsigned int	number_of_profiles;
	unsigned int	user_id;
	unsigned int	version;
	off_t		element_array;
	unsigned int	spare1;
	unsigned int	spare2;
	unsigned int	spare3;
};

struct profile_element {
	off_t		addr;
	vm_size_t	size;
	unsigned int	mod_date;
	unsigned int	inode;
	char		name[12];
};
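/*
 * On-disk layout sketch, derived from the structures above and their
 * use below: the names file begins with one profile_names_header,
 * immediately followed by number_of_profiles profile_element records.
 * Each element records the offset (addr) and size of one profile blob
 * appended to the companion data file:
 *
 *	names file:  [header][element 0][element 1]...[element N-1]
 *	data file:   [blob 0][blob 1]...[blob N-1]
 */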
struct global_profile {
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	buf_ptr;
	unsigned int	user;
	unsigned int	age;
	unsigned int	busy;
};

struct global_profile_cache {
	int			max_ele;
	unsigned int		age;
	struct global_profile	profiles[3];
};

struct global_profile_cache global_user_profile_cache =
	{3, 0, NULL, NULL, NULL, 0, 0, 0,
	      NULL, NULL, NULL, 0, 0, 0,
	      NULL, NULL, NULL, 0, 0, 0 };
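/* The initializer above sets max_ele = 3 and age = 0, then fills the  */
/* three cache slots; each slot starts out as { names_vp = NULL,       */
/* data_vp = NULL, buf_ptr = 0, user = 0, age = 0, busy = 0 }.         */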
/* BSD_OPEN_PAGE_CACHE_FILES:                                  */
/* Caller provides a user id.  This id was used in             */
/* prepare_profile_database to create two unique absolute      */
/* file paths to the associated profile files.  These files    */
/* are either opened or bsd_open_page_cache_files returns an   */
/* error.  The header of the names file is then consulted.     */
/* The header and the vnodes for the names and data files are  */
/* returned.                                                   */
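/*
 * Example of the derived paths (illustrative, for uid 502 = 0x1f6):
 *	/var/vm/app_profile/1f6_names
 *	/var/vm/app_profile/1f6_data
 * The uid is formatted with "%x" below, so the names are hexadecimal.
 */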
int
bsd_open_page_cache_files(
	unsigned int		user,
	struct global_profile	**profile)
{
	char		*cache_path = "/var/vm/app_profile/";
	struct proc	*p;
	int		error;
	int		resid;
	off_t		resid_off;
	unsigned int	lru;
	vm_size_t	size;

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	char		*substring;

	struct vattr	vattr;

	kern_return_t	ret;
	struct profile_names_header *profile_header;

	struct nameidata nd_names;
	struct nameidata nd_data;

	int		i;

	p = current_proc();

restart:
	for(i = 0; i < global_user_profile_cache.max_ele; i++) {
		if((global_user_profile_cache.profiles[i].user == user)
			&& (global_user_profile_cache.profiles[i].data_vp
							!= NULL)) {
			*profile = &global_user_profile_cache.profiles[i];
			/* already in cache, we're done */
			if ((*profile)->busy) {
				/*
				 * drop funnel and wait
				 */
				(void)tsleep((void *)
					*profile,
					PRIBIO, "app_profile", 0);
				goto restart;
			}
			(*profile)->busy = 1;
			(*profile)->age = global_user_profile_cache.age;
			global_user_profile_cache.age += 1;
			return 0;
		}
	}
	lru = global_user_profile_cache.age;
	for(i = 0; i < global_user_profile_cache.max_ele; i++) {
		if(global_user_profile_cache.profiles[i].data_vp == NULL) {
			*profile = &global_user_profile_cache.profiles[i];
			(*profile)->age = global_user_profile_cache.age;
			global_user_profile_cache.age += 1;
			break;
		}
		if(global_user_profile_cache.profiles[i].age < lru) {
			lru = global_user_profile_cache.profiles[i].age;
			*profile = &global_user_profile_cache.profiles[i];
		}
	}

	if ((*profile)->busy) {
		/*
		 * drop funnel and wait
		 */
		(void)tsleep((void *)
			&(global_user_profile_cache),
			PRIBIO, "app_profile", 0);
		goto restart;
	}
	(*profile)->busy = 1;
	(*profile)->user = user;
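	/*
	 * Recap of the selection above: the slot chosen is either the
	 * first empty one or the one with the smallest age stamp, i.e.
	 * the least recently used.  Age stamps grow monotonically, bumped
	 * on every hit or refill.  A busy slot is never stolen; the
	 * caller sleeps on the cache and retries from the top instead.
	 */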
	if((*profile)->data_vp != NULL) {
		kmem_free(kernel_map,
			(*profile)->buf_ptr, 4 * PAGE_SIZE);
		if ((*profile)->names_vp) {
			vrele((*profile)->names_vp);
			(*profile)->names_vp = NULL;
		}
		if ((*profile)->data_vp) {
			vrele((*profile)->data_vp);
			(*profile)->data_vp = NULL;
		}
	}

	/* put dummy value in for now to get  */
	/* competing request to wait above    */
	/* until we are finished              */
	(*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
	/* Try to open the appropriate users profile files    */
	/* If neither file is present, try to create them     */
	/* If one file is present and the other not, fail.    */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present                */

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);
	if(ret) {
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return ENOMEM;
	}
	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names                               */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
			= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);

	/* We now have the absolute file names */
	ret = kmem_alloc(kernel_map,
			(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
	if(ret) {
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return ENOMEM;
	}
	NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
			UIO_SYSSPACE, profile_names_string, p);
	NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
			UIO_SYSSPACE, profile_data_string, p);

	if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
		printf("bsd_open_page_cache_files: CacheData file not found %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}

	data_vp = nd_data.ni_vp;
	VOP_UNLOCK(data_vp, 0, p);

	if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
		printf("bsd_open_page_cache_files: CacheNames file not found %s\n",
			profile_names_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		vrele(data_vp);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}
	names_vp = nd_names.ni_vp;
	if(error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
		printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		vput(names_vp);
		vrele(data_vp);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}
	size = vattr.va_size;
	if(size > 4 * PAGE_SIZE)
		size = 4 * PAGE_SIZE;
	buf_ptr = names_buf;
	resid_off = 0;

	while(size) {
		error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
			size, resid_off,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
		if((error) || (size == resid)) {
			if(!error) {
				error = EINVAL;
			}
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string, PATH_MAX);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
			vput(names_vp);
			vrele(data_vp);
			(*profile)->data_vp = NULL;
			(*profile)->busy = 0;
			wakeup(*profile);
			return error;
		}
		buf_ptr += size-resid;
		resid_off += size-resid;
		size = resid;
	}
	VOP_UNLOCK(names_vp, 0, p);
	kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
	(*profile)->names_vp = names_vp;
	(*profile)->data_vp = data_vp;
	(*profile)->buf_ptr = names_buf;
	return 0;
}
void
bsd_close_page_cache_files(
	struct global_profile	*profile)
{
	profile->busy = 0;
	wakeup(profile);
}
int
bsd_read_page_cache_file(
	unsigned int	user,
	int		*fid,
	int		*mod,
	char		*app_name,
	struct vnode	*app_vp,
	vm_offset_t	*buffer,
	vm_offset_t	*buf_size)
{
	boolean_t	funnel_state;

	struct proc	*p;
	int		error;
	int		resid;
	off_t		resid_off;

	off_t		profile;
	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vattr	vattr;

	kern_return_t	ret;

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	struct vnode	*vp1;
	struct vnode	*vp2;

	struct global_profile *uid_files;
	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* Try to open the appropriate users profile files    */
	/* If neither file is present, try to create them     */
	/* If one file is present and the other not, fail.    */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present                */

	error = bsd_open_page_cache_files(user, &uid_files);
	if(error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;
	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */
	if((unsigned int)names_vp < (unsigned int)data_vp) {
		vp1 = names_vp;
		vp2 = data_vp;
	} else {
		vp1 = data_vp;
		vp2 = names_vp;
	}
	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
	if(error) {
		printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}
	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
	if(error) {
		printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}
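	/*
	 * Design note: taking both vn_lock() calls in ascending vnode
	 * address order gives every caller the same acquisition order
	 * for this pair of vnodes.  That is the standard way to avoid
	 * an AB/BA deadlock between two threads locking the same two
	 * files; bsd_write_page_cache_file below uses the same rule.
	 */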
	if(error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	*fid = vattr.va_fileid;
	*mod = vattr.va_mtime.tv_sec;
	if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
			(unsigned int) vattr.va_mtime.tv_sec,
			vattr.va_fileid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found       */

		if(profile_size == 0) {
			*buffer = NULL;
			*buf_size = 0;
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return 0;
		}
		ret = kmem_alloc(kernel_map, buffer, profile_size);
		if(ret) {
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return ENOMEM;
		}
		*buf_size = profile_size;
		resid_off = 0;
		while(profile_size) {
			error = vn_rdwr(UIO_READ, data_vp,
				(caddr_t)(*buffer + resid_off), profile_size,
				profile, UIO_SYSSPACE, IO_NODELOCKED,
				p->p_ucred, &resid, p);
			if((error) || (profile_size == resid)) {
				VOP_UNLOCK(names_vp, 0, p);
				VOP_UNLOCK(data_vp, 0, p);
				bsd_close_page_cache_files(uid_files);
				kmem_free(kernel_map, (vm_offset_t)*buffer, *buf_size);
				thread_funnel_set(kernel_flock, funnel_state);
				return EINVAL;
			}
			profile += profile_size - resid;
			resid_off += profile_size - resid;
			profile_size = resid;
		}
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}
}
int
bsd_search_page_cache_data_base(
	struct vnode			*vp,
	struct profile_names_header	*database,
	char				*app_name,
	unsigned int			mod_date,
	unsigned int			inode,
	off_t				*profile,
	unsigned int			*profile_size)
{
	struct proc	*p;

	unsigned int	i;
	struct profile_element	*element;
	unsigned int	ele_total;
	unsigned int	extended_list = 0;
	off_t		file_off = 0;
	unsigned int	size;
	off_t		resid_off;
	int		resid;
	vm_offset_t	local_buf = NULL;

	int		error;
	kern_return_t	ret;

	p = current_proc();
	if(((vm_offset_t)database->element_array) !=
				sizeof(struct profile_names_header)) {
		return EINVAL;
	}
	element = (struct profile_element *)(
			(vm_offset_t)database->element_array +
			(vm_offset_t)database);

	ele_total = database->number_of_profiles;

	*profile = 0;
	*profile_size = 0;
	while(ele_total) {
		/* note: code assumes header + n*ele comes out on a page boundary */
		if(((local_buf == 0) && (sizeof(struct profile_names_header) +
			(ele_total * sizeof(struct profile_element)))
			> (PAGE_SIZE * 4)) ||
			((local_buf != 0) &&
			(ele_total * sizeof(struct profile_element))
			> (PAGE_SIZE * 4))) {
			extended_list = ele_total;
			if(element == (struct profile_element *)
				((vm_offset_t)database->element_array +
				(vm_offset_t)database)) {
				ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
			} else {
				ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
			}
			extended_list -= ele_total;
		}
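		/*
		 * Capacity arithmetic behind the two ele_total values
		 * (illustrative, assuming a 4096-byte PAGE_SIZE and a
		 * 32-byte profile_element, with the header also packing
		 * to 32 bytes so that header + n elements page-aligns):
		 * a 4-page buffer holds 16384/32 = 512 records, so the
		 * first pass scans 511 elements (the header occupies one
		 * record's worth) and each later buffer scans 512.
		 */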
		for (i=0; i<ele_total; i++) {
			if((mod_date == element[i].mod_date)
					&& (inode == element[i].inode)) {
				if(strncmp(element[i].name, app_name, 12) == 0) {
					*profile = element[i].addr;
					*profile_size = element[i].size;
					if(local_buf != NULL) {
						kmem_free(kernel_map,
							(vm_offset_t)local_buf, 4 * PAGE_SIZE);
					}
					return 0;
				}
			}
		}
		if(extended_list == 0)
			break;
		if(local_buf == NULL) {
			ret = kmem_alloc(kernel_map,
				(vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
			if(ret != KERN_SUCCESS) {
				return ENOMEM;
			}
		}
		element = (struct profile_element *)local_buf;
		ele_total = extended_list;
		extended_list = 0;
		file_off += 4 * PAGE_SIZE;
		if((ele_total * sizeof(struct profile_element)) >
						(PAGE_SIZE * 4)) {
			size = PAGE_SIZE * 4;
		} else {
			size = ele_total * sizeof(struct profile_element);
		}
		resid_off = 0;
		while(size) {
			error = vn_rdwr(UIO_READ, vp,
				(caddr_t)(local_buf + resid_off),
				size, file_off + resid_off, UIO_SYSSPACE,
				IO_NODELOCKED, p->p_ucred, &resid, p);
			if((error) || (size == resid)) {
				if(local_buf != NULL) {
					kmem_free(kernel_map,
						(vm_offset_t)local_buf,
						4 * PAGE_SIZE);
				}
				return EINVAL;
			}
			resid_off += size-resid;
			size = resid;
		}
	}
	if(local_buf != NULL) {
		kmem_free(kernel_map,
			(vm_offset_t)local_buf, 4 * PAGE_SIZE);
	}
	return 0;
}
int
bsd_write_page_cache_file(
	unsigned int	user,
	char		*file_name,
	caddr_t		buffer,
	vm_size_t	size,
	int		mod,
	int		fid)
{
	struct proc		*p;
	struct nameidata	nd;
	struct vnode		*vp = 0;
	int			resid;
	off_t			resid_off;
	int			error;
	boolean_t		funnel_state;
	struct vattr		data_vattr;

	off_t		profile;
	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	struct vnode	*vp1;
	struct vnode	*vp2;

	struct profile_names_header *profile_header;
	off_t		name_offset;

	struct global_profile *uid_files;
	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	error = bsd_open_page_cache_files(user, &uid_files);
	if(error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */
	if((unsigned int)names_vp < (unsigned int)data_vp) {
		vp1 = names_vp;
		vp2 = data_vp;
	} else {
		vp1 = data_vp;
		vp2 = names_vp;
	}

	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
	if(error) {
		printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}
	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
	if(error) {
		printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}
	/* Stat data file for size */
	if(error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	if (bsd_search_page_cache_data_base(names_vp,
			(struct profile_names_header *)names_buf,
			file_name, (unsigned int) mod,
			fid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found       */

		if(profile_size == 0) {
			unsigned int	header_size;
			vm_offset_t	buf_ptr;

			/* Our write case */

			/* read header for last entry */
			profile_header =
				(struct profile_names_header *)names_buf;
			name_offset = sizeof(struct profile_names_header) +
				(sizeof(struct profile_element)
					* profile_header->number_of_profiles);
			profile_header->number_of_profiles += 1;

			if(name_offset < PAGE_SIZE * 4) {
				struct profile_element *name;
				/* write new entry */
				name = (struct profile_element *)
					(names_buf + (vm_offset_t)name_offset);
				name->addr = data_vattr.va_size;
				name->size = size;
				name->mod_date = mod;
				name->inode = fid;
				strncpy (name->name, file_name, 12);
			} else {
				unsigned int	ele_size;
				struct profile_element	name;
				/* write new entry */
				name.addr = data_vattr.va_size;
				name.size = size;
				name.mod_date = mod;
				name.inode = fid;
				strncpy (name.name, file_name, 12);
				/* write element out separately */
				ele_size = sizeof(struct profile_element);
				buf_ptr = (vm_offset_t)&name;
				resid_off = name_offset;

				while(ele_size) {
					error = vn_rdwr(UIO_WRITE, names_vp,
						(caddr_t)buf_ptr,
						ele_size, resid_off,
						UIO_SYSSPACE, IO_NODELOCKED,
						p->p_ucred, &resid, p);
					if(error) {
						printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
						VOP_UNLOCK(names_vp, 0, p);
						VOP_UNLOCK(data_vp, 0, p);
						bsd_close_page_cache_files(
							uid_files);
						thread_funnel_set(
							kernel_flock,
							funnel_state);
						return error;
					}
					buf_ptr += (vm_offset_t)
							(ele_size-resid);
					resid_off += ele_size-resid;
					ele_size = resid;
				}
			}
			if(name_offset < PAGE_SIZE * 4) {
				header_size = name_offset +
					sizeof(struct profile_element);
			} else {
				header_size =
					sizeof(struct profile_names_header);
			}
			buf_ptr = (vm_offset_t)profile_header;
			resid_off = 0;
			/* write names file header */
			while(header_size) {
				error = vn_rdwr(UIO_WRITE, names_vp,
						(caddr_t)buf_ptr,
						header_size, resid_off,
						UIO_SYSSPACE, IO_NODELOCKED,
						p->p_ucred, &resid, p);
				if(error) {
					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buf_ptr += (vm_offset_t)(header_size-resid);
				resid_off += header_size-resid;
				header_size = resid;
			}
			/* write profile to data file */
			resid_off = data_vattr.va_size;
			while(size) {
				error = vn_rdwr(UIO_WRITE, data_vp,
						(caddr_t)buffer, size, resid_off,
						UIO_SYSSPACE, IO_NODELOCKED,
						p->p_ucred, &resid, p);
				if(error) {
					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write profile data %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buffer += size-resid;
				resid_off += size-resid;
				size = resid;
			}
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return 0;
		}
		/* Someone else wrote a twin profile before us */
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}
}
int
prepare_profile_database(int user)
{
	char		*cache_path = "/var/vm/app_profile/";
	struct proc	*p;
	int		error;
	int		resid;
	off_t		resid_off;
	vm_size_t	size;

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	char		*substring;

	struct vattr	vattr;

	kern_return_t	ret;
	struct profile_names_header *profile_header;

	struct nameidata nd_names;
	struct nameidata nd_data;

	p = current_proc();

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);
	if(ret) {
		return ENOMEM;
	}
	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names                               */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
			= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);
1650 ret
= kmem_alloc(kernel_map
,
1651 (vm_offset_t
*)&names_buf
, 4 * PAGE_SIZE
);
1653 kmem_free(kernel_map
,
1654 (vm_offset_t
)profile_data_string
, PATH_MAX
);
1658 NDINIT(&nd_names
, LOOKUP
, FOLLOW
,
1659 UIO_SYSSPACE
, profile_names_string
, p
);
1660 NDINIT(&nd_data
, LOOKUP
, FOLLOW
,
1661 UIO_SYSSPACE
, profile_data_string
, p
);
1663 if (error
= vn_open(&nd_data
,
1664 O_CREAT
| O_EXCL
| FWRITE
, S_IRUSR
|S_IWUSR
)) {
1665 kmem_free(kernel_map
,
1666 (vm_offset_t
)names_buf
, 4 * PAGE_SIZE
);
1667 kmem_free(kernel_map
,
1668 (vm_offset_t
)profile_data_string
, PATH_MAX
);
1672 data_vp
= nd_data
.ni_vp
;
1673 VOP_UNLOCK(data_vp
, 0, p
);
1675 if (error
= vn_open(&nd_names
,
1676 O_CREAT
| O_EXCL
| FWRITE
, S_IRUSR
|S_IWUSR
)) {
1677 printf("prepare_profile_database: Can't create CacheNames %s\n",
1678 profile_data_string
);
1679 kmem_free(kernel_map
,
1680 (vm_offset_t
)names_buf
, 4 * PAGE_SIZE
);
1681 kmem_free(kernel_map
,
1682 (vm_offset_t
)profile_data_string
, PATH_MAX
);
1687 names_vp
= nd_names
.ni_vp
;
1690 /* Write Header for new names file */
1692 profile_header
= (struct profile_names_header
*)names_buf
;
1694 profile_header
->number_of_profiles
= 0;
1695 profile_header
->user_id
= user
;
1696 profile_header
->version
= 1;
1697 profile_header
->element_array
=
1698 sizeof(struct profile_names_header
);
1699 profile_header
->spare1
= 0;
1700 profile_header
->spare2
= 0;
1701 profile_header
->spare3
= 0;
1703 size
= sizeof(struct profile_names_header
);
1704 buf_ptr
= (vm_offset_t
)profile_header
;
1708 error
= vn_rdwr(UIO_WRITE
, names_vp
,
1709 (caddr_t
)buf_ptr
, size
, resid_off
,
1710 UIO_SYSSPACE
, IO_NODELOCKED
,
1711 p
->p_ucred
, &resid
, p
);
1713 printf("prepare_profile_database: Can't write header %s\n", profile_names_string
);
1714 kmem_free(kernel_map
,
1715 (vm_offset_t
)names_buf
, 4 * PAGE_SIZE
);
1716 kmem_free(kernel_map
,
1717 (vm_offset_t
)profile_data_string
,
1723 buf_ptr
+= size
-resid
;
1724 resid_off
+= size
-resid
;
1729 vattr
.va_uid
= user
;
1730 error
= VOP_SETATTR(names_vp
, &vattr
, p
->p_cred
->pc_ucred
, p
);
1732 printf("prepare_profile_database: "
1733 "Can't set user %s\n", profile_names_string
);
1737 error
= vn_lock(data_vp
, LK_EXCLUSIVE
| LK_RETRY
, p
);
1740 printf("prepare_profile_database: cannot lock data file %s\n",
1741 profile_data_string
);
1742 kmem_free(kernel_map
,
1743 (vm_offset_t
)profile_data_string
, PATH_MAX
);
1744 kmem_free(kernel_map
,
1745 (vm_offset_t
)names_buf
, 4 * PAGE_SIZE
);
1748 vattr
.va_uid
= user
;
1749 error
= VOP_SETATTR(data_vp
, &vattr
, p
->p_cred
->pc_ucred
, p
);
1751 printf("prepare_profile_database: "
1752 "Can't set user %s\n", profile_data_string
);
1756 kmem_free(kernel_map
,
1757 (vm_offset_t
)profile_data_string
, PATH_MAX
);
1758 kmem_free(kernel_map
,
1759 (vm_offset_t
)names_buf
, 4 * PAGE_SIZE
);