/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
extern zone_t lsf_zone;

useracc(addr, len, prot)

	return (vm_map_check_protection(
			trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
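/*
 * A minimal illustrative sketch (not taken from this file) of how a
 * kernel caller typically uses useracc() before touching a user buffer;
 * the buffer pointer and length names here are hypothetical:
 *
 *	if (!useracc(user_buf, user_len, B_READ))
 *		return (EFAULT);
 *
 * useracc() simply maps the B_READ/B_WRITE request onto a
 * vm_map_check_protection() call over the page-rounded range, as above.
 */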
	kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr),
			round_page_32((unsigned int)(addr+len)),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);

	case KERN_INVALID_ADDRESS:

	case KERN_PROTECTION_FAILURE:
vsunlock(addr, len, dirtied)

	vm_offset_t	vaddr, paddr;

		pmap = get_task_pmap(current_task());
		for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len));
				vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);

	kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)),
				round_page_32((unsigned int)(addr+len)), FALSE);

	case KERN_INVALID_ADDRESS:

	case KERN_PROTECTION_FAILURE:
#if	defined(sun) || BALANCE || defined(m88k)
#else	/*defined(sun) || BALANCE || defined(m88k)*/

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

	character = (char)byte;
	return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1);

	if (copyin(addr, (void *) &byte, sizeof(char)))

	if (copyin(addr, (void *) &(byte), sizeof(char)))

	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

	if (copyin(addr, (void *) &word, sizeof(int)))

/* suiword and fuiword are the same as suword and fuword, respectively */

	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

	if (copyin(addr, (void *) &word, sizeof(int)))

#endif	/* defined(sun) || BALANCE || defined(m88k) || defined(i386) */
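/*
 * Illustrative sketch of the assumed calling convention of these
 * user-memory primitives, matching the copyout()/copyin() wrappers
 * above: suword() stores one int at a user address and returns 0 on
 * success or -1 on failure; fuword() is the fetch counterpart (only the
 * copyin() check is visible in the fragments above).  A hypothetical
 * caller:
 *
 *	if (suword(user_addr, value) == -1)
 *		return (EFAULT);
 */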
	extern task_t port_name_to_task(mach_port_t t);
	kern_return_t	err = KERN_SUCCESS;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {

	p = get_bsdtask_info(t1);

	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
	thread_funnel_set(kernel_flock, funnel_state);
/*
 *	Routine:	task_for_pid
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 */
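/*
 * A minimal user-space sketch of the corresponding trap, assuming the
 * standard Mach interface (the pid value here is hypothetical):
 *
 *	mach_port_t	task;
 *	kern_return_t	kr;
 *
 *	kr = task_for_pid(mach_task_self(), pid, &task);
 *	if (kr == KERN_SUCCESS) {
 *		... use the returned send right to the task's port ...
 *	}
 *
 * The call succeeds only for privileged callers or callers whose user
 * ID matches the target's, as the checks below enforce.
 */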
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;
	extern task_t port_name_to_task(mach_port_t tp);
	boolean_t	funnel_state;

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
		return(KERN_FAILURE);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	p1 = get_bsdtask_info(t1);
		((p = pfind(pid)) != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
		((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
		|| !(suser(p1->p_ucred, &p1->p_acflag)))
		&& (p->p_stat != SZOMB)

		if (p->task != TASK_NULL) {
			if (!task_reference_try(p->task)) {
				mutex_pause(); /* temp loss of funnel */

			sright = (void *)convert_task_to_port(p->task);
				ipc_port_copyout_send(sright,
					get_task_ipcspace(current_task()));

			tret = MACH_PORT_NULL;
			(void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));

			error = KERN_SUCCESS;

		tret = MACH_PORT_NULL;
		(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
		error = KERN_FAILURE;

	thread_funnel_set(kernel_flock, funnel_state);
struct load_shared_file_args {
	sf_mapping_t	*mappings;

	struct load_shared_file_args *uap,

	caddr_t		mapped_file_addr = uap->mfa;
	u_long		mapped_file_size = uap->mfs;
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	char		*filename = uap->filename;
	int		*flags = uap->flags;
	struct vnode	*vp = 0;
	struct nameidata nd, *ndp;

	memory_object_control_t file_control;
	sf_mapping_t	*map_list;

	int		default_regions = 0;

	shared_region_mapping_t shared_region;
	struct shared_region_task_mappings task_mapping_info;
	shared_region_mapping_t next;
	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {

	if (error = copyin(flags, &local_flags, sizeof (int))) {

	if(local_flags & QUERY_IS_SYSTEM_REGION) {
		shared_region_mapping_t default_shared_region;
		vm_get_shared_region(current_task(), &shared_region);
		task_mapping_info.self = (vm_offset_t)shared_region;

		shared_region_mapping_info(shared_region,
				&(task_mapping_info.text_region),
				&(task_mapping_info.text_size),
				&(task_mapping_info.data_region),
				&(task_mapping_info.data_size),
				&(task_mapping_info.region_mappings),
				&(task_mapping_info.client_base),
				&(task_mapping_info.alternate_base),
				&(task_mapping_info.alternate_next),
				&(task_mapping_info.fs_base),
				&(task_mapping_info.system),
				&(task_mapping_info.flags), &next);

		default_shared_region =
			lookup_default_shared_region(
				task_mapping_info.system);
		if (shared_region == default_shared_region) {
			local_flags = SYSTEM_REGION_BACKED;

		shared_region_mapping_dealloc(default_shared_region);

		error = copyout(&local_flags, flags, sizeof (int));

	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
				(vm_size_t)(MAXPATHLEN));

		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;

	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, (size_t *)&dummy)) {
		goto lsf_bailout_free;
	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;

	if (vp->v_type != VREG) {
		goto lsf_bailout_free_vput;

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;

	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		goto lsf_bailout_free_vput;

	if(vattr.va_size != mapped_file_size) {
		goto lsf_bailout_free_vput;

	if(p->p_flag & P_NOSHLIB) {
		p->p_flag = p->p_flag & ~P_NOSHLIB;
	/* load alternate regions if the caller has requested.  */
	/* Note: the new regions are "clean slates" */
	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
		error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
			goto lsf_bailout_free_vput;

	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.fs_base),
			&(task_mapping_info.system),
			&(task_mapping_info.flags), &next);
		shared_region_mapping_t default_shared_region;
		default_shared_region =
			lookup_default_shared_region(
				task_mapping_info.system);
		if(shared_region == default_shared_region) {

		shared_region_mapping_dealloc(default_shared_region);

	/* If we are running on a removable file system we must not */
	/* be in a set of shared regions or the file system will not */
	if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
		&& (lsf_mapping_pool_gauge() < 75)) {
		/* We don't want to run out of shared memory */
		/* map entries by starting too many private versions */
		/* of the shared library structures */
		if(p->p_flag & P_NOSHLIB) {
			error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);

			error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);

			goto lsf_bailout_free_vput;

		local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
		vm_get_shared_region(current_task(), &shared_region);
		shared_region_mapping_info(shared_region,
				&(task_mapping_info.text_region),
				&(task_mapping_info.text_size),
				&(task_mapping_info.data_region),
				&(task_mapping_info.data_size),
				&(task_mapping_info.region_mappings),
				&(task_mapping_info.client_base),
				&(task_mapping_info.alternate_base),
				&(task_mapping_info.alternate_next),
				&(task_mapping_info.fs_base),
				&(task_mapping_info.system),
				&(task_mapping_info.flags), &next);
	/* This is a work-around to allow executables which have been */
	/* built without knowledge of the proper shared segment to */
	/* load.  This code has been architected as a shared region */
	/* handler, the knowledge of where the regions are loaded is */
	/* problematic for the extension of shared regions as it will */
	/* not be easy to know what region an item should go into. */
	/* The code below however will get around a short term problem */
	/* with executables which believe they are loading at zero. */
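	/*
	 * For illustration (the numbers are hypothetical): with
	 * text_size 0x10000000 and client_base 0x90000000, a
	 * caller-supplied base of 0x00021000 keeps only its offset bits
	 * (0x00021000 & 0x0fffffff) and is then rebased to 0x90021000 by
	 * OR-ing in client_base, exactly as the two assignments below do.
	 */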
	if (((unsigned int)local_base &
		(~(task_mapping_info.text_size - 1))) !=
		task_mapping_info.client_base) {
		if(local_flags & ALTERNATE_LOAD_SITE) {
			local_base = (caddr_t)(
				(unsigned int)local_base &
				(task_mapping_info.text_size - 1));
			local_base = (caddr_t)((unsigned int)local_base
				| task_mapping_info.client_base);

			goto lsf_bailout_free_vput;
	if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_control,
			&task_mapping_info, &local_flags))) {

		case KERN_INVALID_ARGUMENT:

		case KERN_INVALID_ADDRESS:

		case KERN_PROTECTION_FAILURE:
			/* save EAUTH for authentication in this */

		if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
			for(i=0; i<map_cnt; i++) {
				printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
					, i, map_list[i].mapping_offset,
					map_list[i].file_offset,
					map_list[i].protection);
		local_flags |= SYSTEM_REGION_BACKED;
		if(!(error = copyout(&local_flags, flags, sizeof (int)))) {
			error = copyout(&local_base,
				base_address, sizeof (caddr_t));

lsf_bailout_free_vput:

	kmem_free(kernel_map, (vm_offset_t)filename_str,
		(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
struct reset_shared_file_args {
	sf_mapping_t	*mappings;

	struct reset_shared_file_args *uap,

	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;

	sf_mapping_t	*map_list;

	vm_offset_t	map_address;
	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
		!= GLOBAL_SHARED_TEXT_SEGMENT) {

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {

		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	for (i = 0; i<map_cnt; i++) {
		if((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),

			vm_map(current_map(), &map_address,
				map_list[i].size, 0, SHARED_LIB_ALIAS,
				shared_data_region_handle,
				((unsigned int)local_base
					& SHARED_DATA_REGION_MASK) +
				(map_list[i].mapping_offset
					& SHARED_DATA_REGION_MASK),

				VM_PROT_READ, VM_INHERIT_SHARE);

	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
struct new_system_shared_regions_args {

new_system_shared_regions(
	struct new_system_shared_regions_args *uap,

	shared_region_mapping_t	regions;
	shared_region_mapping_t	new_regions;

	/* clear all of our existing defaults */
	remove_all_shared_regions();
clone_system_shared_regions(shared_regions_active, base_vnode)

	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
			&(old_info.text_region),
			&(old_info.text_size),
			&(old_info.data_region),
			&(old_info.data_size),
			&(old_info.region_mappings),
			&(old_info.client_base),
			&(old_info.alternate_base),
			&(old_info.alternate_next),
			&(old_info.flags), &next);
	if ((shared_regions_active) ||
		(base_vnode == ENV_DEFAULT_ROOT)) {
		if (shared_file_create_system_region(&new_shared_region))

			lookup_default_shared_region(
				base_vnode, old_info.system);
		if(new_shared_region == NULL) {
			shared_file_boot_time_init(
				base_vnode, old_info.system);
			vm_get_shared_region(current_task(), &new_shared_region);

	vm_set_shared_region(current_task(), new_shared_region);

	if(old_shared_region)
		shared_region_mapping_dealloc(old_shared_region);

	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
			&(new_info.text_region),
			&(new_info.text_size),
			&(new_info.data_region),
			&(new_info.data_size),
			&(new_info.region_mappings),
			&(new_info.client_base),
			&(new_info.alternate_base),
			&(new_info.alternate_next),
			&(new_info.flags), &next);
	if(shared_regions_active) {
		if(vm_region_clone(old_info.text_region, new_info.text_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);

		if (vm_region_clone(old_info.data_region, new_info.data_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 2");
			shared_region_mapping_dealloc(new_shared_region);

		shared_region_object_chain_attach(
			new_shared_region, old_shared_region);

	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base+old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);

	if(vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,

			+ old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);

	vm_set_shared_region(current_task(), new_shared_region);

	/* consume the reference which wasn't accounted for in object */

	if(!shared_regions_active)
		shared_region_mapping_dealloc(old_shared_region);
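/*
 * Sketch of how this routine is invoked elsewhere in this file (see the
 * NEW_LOCAL_SHARED_REGIONS and removable-file-system paths above):
 *
 *	error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
 *
 * A FALSE first argument asks for fresh ("clean slate") regions, while
 * TRUE additionally copies the current regions' contents via the
 * vm_region_clone() calls above.
 */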
extern vm_map_t bsd_pageable_map;

/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file */

struct profile_names_header {
	unsigned int	number_of_profiles;
	unsigned int	user_id;
	unsigned int	version;

struct profile_element {
	unsigned int	mod_date;

struct global_profile {
	struct vnode	*names_vp;
	struct vnode	*data_vp;

struct global_profile_cache {
	struct global_profile	profiles[3];

struct global_profile_cache global_user_profile_cache =
	{3, 0, NULL, NULL, NULL, 0, 0, 0,
		NULL, NULL, NULL, 0, 0, 0,
		NULL, NULL, NULL, 0, 0, 0 };
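/*
 * Rough picture of the profile "names" file layout that the code below
 * relies on (field order inside the structs is abbreviated and partly
 * assumed from the fragments above):
 *
 *	+-------------------------------+  offset 0
 *	| struct profile_names_header	|  number_of_profiles, user_id,
 *	|				|  version, element_array, spares
 *	+-------------------------------+  offset element_array
 *	| struct profile_element[0]	|  addr/size into the data file,
 *	| struct profile_element[1]	|  mod_date, inode, 12-byte name
 *	| ...				|
 *	+-------------------------------+
 *
 * The three-entry global_user_profile_cache above keeps the vnodes and
 * the first 4 * PAGE_SIZE bytes of this names file in memory per user.
 */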
/* BSD_OPEN_PAGE_CACHE_FILES: */
/* Caller provides a user id.  This id was used in */
/* prepare_profile_database to create two unique absolute */
/* file paths to the associated profile files.  These files */
/* are either opened or bsd_open_page_cache_files returns an */
/* error.  The header of the names file is then consulted. */
/* The header and the vnodes for the names and data files are */
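/*
 * Sketch of the calling pattern used by bsd_read_page_cache_file() and
 * bsd_write_page_cache_file() below:
 *
 *	struct global_profile *uid_files;
 *
 *	error = bsd_open_page_cache_files(user, &uid_files);
 *	if (error)
 *		return (error);
 *	... use uid_files->names_vp, ->data_vp, ->buf_ptr ...
 *	bsd_close_page_cache_files(uid_files);
 */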
bsd_open_page_cache_files(
	struct global_profile **profile)

	char		*cache_path = "/var/vm/app_profile/";

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;

	struct profile_names_header *profile_header;

	struct nameidata nd_names;
	struct nameidata nd_data;
	for(i = 0; i<global_user_profile_cache.max_ele; i++) {
		if((global_user_profile_cache.profiles[i].user == user)
			&& (global_user_profile_cache.profiles[i].data_vp

			*profile = &global_user_profile_cache.profiles[i];
			/* already in cache, we're done */
			if ((*profile)->busy) {
				/* drop funnel and wait */
				(void)tsleep((void *)
					PRIBIO, "app_profile", 0);

			(*profile)->busy = 1;
			(*profile)->age = global_user_profile_cache.age;
			global_user_profile_cache.age+=1;
	lru = global_user_profile_cache.age;
	for(i = 0; i<global_user_profile_cache.max_ele; i++) {
		/* Skip entry if it is in the process of being reused */
		if(global_user_profile_cache.profiles[i].data_vp ==
						(struct vnode *)0xFFFFFFFF)
		/* Otherwise grab the first empty entry */
		if(global_user_profile_cache.profiles[i].data_vp == NULL) {
			*profile = &global_user_profile_cache.profiles[i];
			(*profile)->age = global_user_profile_cache.age;
		/* Otherwise grab the oldest entry */
		if(global_user_profile_cache.profiles[i].age < lru) {
			lru = global_user_profile_cache.profiles[i].age;
			*profile = &global_user_profile_cache.profiles[i];

	if (*profile == NULL) {
		/*
		 * No entries are available; this can only happen if all
		 * of them are currently in the process of being reused;
		 * if this happens, we sleep on the address of the first
		 * element, and restart.  This is less than ideal, but we
		 * know it will work because we know that there will be a
		 * wakeup on any entry currently in the process of being
		 *
		 * XXX Recommend a two handed clock and more than 3 total
		 * XXX cache entries at some point in the future.
		 */
		/* drop funnel and wait */
		(void)tsleep((void *)
			&global_user_profile_cache.profiles[0],
			PRIBIO, "app_profile", 0);
	/*
	 * If it's currently busy, we've picked the one at the end of the
	 * LRU list, but it's currently being actively used.  We sleep on
	 * its address and restart.
	 */
	if ((*profile)->busy) {
		/* drop funnel and wait */
		(void)tsleep((void *)
			PRIBIO, "app_profile", 0);

	(*profile)->busy = 1;
	(*profile)->user = user;

	/*
	 * put dummy value in for now to get competing request to wait
	 * above until we are finished
	 *
	 * Save the data_vp before setting it, so we can set it before
	 * we kmem_free() or vrele().  If we don't do this, then we
	 * have a potential funnel race condition we have to deal with.
	 */
	data_vp = (*profile)->data_vp;
	(*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

	/*
	 * Age the cache here in all cases; this guarantees that we won't
	 * be reusing only one entry over and over, once the system reaches
	 */
	global_user_profile_cache.age+=1;
	if(data_vp != NULL) {
		kmem_free(kernel_map,
			(*profile)->buf_ptr, 4 * PAGE_SIZE);
		if ((*profile)->names_vp) {
			vrele((*profile)->names_vp);
			(*profile)->names_vp = NULL;
	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them  */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);

		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for */
	/* both file path names */
	profile_names_string = profile_data_string + (PATH_MAX/2);
	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
		= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);
	/* We now have the absolute file names */
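	/*
	 * For example (hypothetical uid): user 501 (0x1f5) yields
	 * "/var/vm/app_profile/1f5_data" and
	 * "/var/vm/app_profile/1f5_names", since the uid is formatted
	 * with "%x" onto cache_path above.
	 */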
	ret = kmem_alloc(kernel_map,
			(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
	NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
			UIO_SYSSPACE, profile_names_string, p);
	NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
			UIO_SYSSPACE, profile_data_string, p);

	if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
		printf("bsd_open_page_cache_files: CacheData file not found %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
	data_vp = nd_data.ni_vp;
	VOP_UNLOCK(data_vp, 0, p);

	if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
		printf("bsd_open_page_cache_files: NamesData file not found %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	names_vp = nd_names.ni_vp;
	if(error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
		printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);

		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
	size = vattr.va_size;
	if(size > 4 * PAGE_SIZE)
		size = 4 * PAGE_SIZE;
	buf_ptr = names_buf;

		error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
		if((error) || (size == resid)) {

			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string, PATH_MAX);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);

			(*profile)->data_vp = NULL;
			(*profile)->busy = 0;

		buf_ptr += size-resid;
		resid_off += size-resid;
	VOP_UNLOCK(names_vp, 0, p);
	kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
	(*profile)->names_vp = names_vp;
	(*profile)->data_vp = data_vp;
	(*profile)->buf_ptr = names_buf;
bsd_close_page_cache_files(
	struct global_profile *profile)

bsd_read_page_cache_file(
	struct vnode	*app_vp,
	vm_offset_t	*buffer,
	vm_offset_t	*buf_size)

	boolean_t	funnel_state;

	unsigned int	profile_size;

	vm_offset_t	names_buf;

	struct vnode	*names_vp;
	struct vnode	*data_vp;

	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them  */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	error = bsd_open_page_cache_files(user, &uid_files);

		thread_funnel_set(kernel_flock, funnel_state);

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */
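	/*
	 * Sketch of the address-ordered locking used here and in
	 * bsd_write_page_cache_file(); vp1/vp2 are the lower- and
	 * higher-addressed vnodes respectively:
	 *
	 *	if ((unsigned int)names_vp < (unsigned int)data_vp) {
	 *		vp1 = names_vp;
	 *		vp2 = data_vp;
	 *	} else {
	 *		vp1 = data_vp;
	 *		vp2 = names_vp;
	 *	}
	 *	vn_lock(vp1, ...);  then  vn_lock(vp2, ...);
	 *
	 * Taking the two locks in a fixed (address) order keeps concurrent
	 * readers and writers of the same profile files from deadlocking.
	 */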
	if((unsigned int)names_vp < (unsigned int)data_vp) {

	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
		printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
		printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
	if(error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	*fid = vattr.va_fileid;
	*mod = vattr.va_mtime.tv_sec;
	if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
			(unsigned int) vattr.va_mtime.tv_sec,
			vattr.va_fileid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if(profile_size == 0) {
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
		ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size));

			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);

		*buf_size = profile_size;
		while(profile_size) {
			error = vn_rdwr(UIO_READ, data_vp,
				(caddr_t) *buffer, profile_size,
				profile, UIO_SYSSPACE, IO_NODELOCKED,
				p->p_ucred, &resid, p);
			if((error) || (profile_size == resid)) {
				VOP_UNLOCK(names_vp, 0, p);
				VOP_UNLOCK(data_vp, 0, p);
				bsd_close_page_cache_files(uid_files);
				kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
				thread_funnel_set(kernel_flock, funnel_state);

			profile += profile_size - resid;
			profile_size = resid;

		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	VOP_UNLOCK(names_vp, 0, p);
	VOP_UNLOCK(data_vp, 0, p);
	bsd_close_page_cache_files(uid_files);
	thread_funnel_set(kernel_flock, funnel_state);
bsd_search_page_cache_data_base(
	struct profile_names_header *database,
	unsigned int	mod_date,
	unsigned int	*profile_size)

	struct profile_element *element;
	unsigned int	ele_total;
	unsigned int	extended_list = 0;

	vm_offset_t	local_buf = NULL;
	if(((vm_offset_t)database->element_array) !=
				sizeof(struct profile_names_header)) {

	element = (struct profile_element *)(
			(vm_offset_t)database->element_array +
						(vm_offset_t)database);

	ele_total = database->number_of_profiles;

		/* note: code assumes header + n*ele comes out on a page boundary */
		if(((local_buf == 0) && (sizeof(struct profile_names_header) +
			(ele_total * sizeof(struct profile_element)))
				> (PAGE_SIZE * 4)) ||
			((local_buf != 0) &&
				(ele_total * sizeof(struct profile_element))
					> (PAGE_SIZE * 4))) {
			extended_list = ele_total;
			if(element == (struct profile_element *)
				((vm_offset_t)database->element_array +
						(vm_offset_t)database)) {
				ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;

				ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);

			extended_list -= ele_total;
		for (i=0; i<ele_total; i++) {
			if((mod_date == element[i].mod_date)
					&& (inode == element[i].inode)) {
				if(strncmp(element[i].name, app_name, 12) == 0) {
					*profile = element[i].addr;
					*profile_size = element[i].size;
					if(local_buf != NULL) {
						kmem_free(kernel_map,
							(vm_offset_t)local_buf, 4 * PAGE_SIZE);
		if(extended_list == 0)

		if(local_buf == NULL) {
			ret = kmem_alloc(kernel_map,
				(vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
			if(ret != KERN_SUCCESS) {

		element = (struct profile_element *)local_buf;
		ele_total = extended_list;

		file_off += 4 * PAGE_SIZE;
		if((ele_total * sizeof(struct profile_element)) >

			size = PAGE_SIZE * 4;

			size = ele_total * sizeof(struct profile_element);
			error = vn_rdwr(UIO_READ, vp,
				CAST_DOWN(caddr_t, (local_buf + resid_off)),
				size, file_off + resid_off, UIO_SYSSPACE,
				IO_NODELOCKED, p->p_ucred, &resid, p);
			if((error) || (size == resid)) {
				if(local_buf != NULL) {
					kmem_free(kernel_map,
						(vm_offset_t)local_buf,

			resid_off += size-resid;

	if(local_buf != NULL) {
		kmem_free(kernel_map,
			(vm_offset_t)local_buf, 4 * PAGE_SIZE);
bsd_write_page_cache_file(

	struct nameidata nd;
	struct vnode	*vp = 0;

	boolean_t	funnel_state;

	struct vattr	data_vattr;

	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vnode	*names_vp;
	struct vnode	*data_vp;

	struct profile_names_header *profile_header;

	struct global_profile *uid_files;
	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	error = bsd_open_page_cache_files(user, &uid_files);

		thread_funnel_set(kernel_flock, funnel_state);

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;
	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */
	if((unsigned int)names_vp < (unsigned int)data_vp) {

	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
		printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
		printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
	/* Stat data file for size */

	if(error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
	if (bsd_search_page_cache_data_base(names_vp,
			(struct profile_names_header *)names_buf,
			file_name, (unsigned int) mod,
			fid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if(profile_size == 0) {
			unsigned int	header_size;
			vm_offset_t	buf_ptr;
			/* Our Write case */

			/* read header for last entry */
				(struct profile_names_header *)names_buf;
			name_offset = sizeof(struct profile_names_header) +
				(sizeof(struct profile_element)
					* profile_header->number_of_profiles);
			profile_header->number_of_profiles += 1;
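			/*
			 * Worked example with hypothetical sizes: if the
			 * header were 32 bytes and each profile_element 28
			 * bytes, a third entry (number_of_profiles == 2
			 * before the increment) would be appended at
			 * name_offset = 32 + 28 * 2 = 88 bytes into the
			 * names file.
			 */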
			if(name_offset < PAGE_SIZE * 4) {
				struct profile_element	*name;
				/* write new entry */
				name = (struct profile_element *)
					(names_buf + (vm_offset_t)name_offset);
				name->addr = data_vattr.va_size;
				name->mod_date = mod;
				strncpy (name->name, file_name, 12);
				unsigned int	ele_size;
				struct profile_element	name;
				/* write new entry */
				name.addr = data_vattr.va_size;
				name.mod_date = mod;
				strncpy (name.name, file_name, 12);
				/* write element out separately */
				ele_size = sizeof(struct profile_element);
				buf_ptr = (vm_offset_t)&name;
				resid_off = name_offset;
					error = vn_rdwr(UIO_WRITE, names_vp,
						ele_size, resid_off,
						UIO_SYSSPACE, IO_NODELOCKED,
						p->p_ucred, &resid, p);
						printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
						VOP_UNLOCK(names_vp, 0, p);
						VOP_UNLOCK(data_vp, 0, p);
						bsd_close_page_cache_files(

					buf_ptr += (vm_offset_t)
					resid_off += ele_size-resid;
			if(name_offset < PAGE_SIZE * 4) {
				header_size = name_offset +
					sizeof(struct profile_element);

					sizeof(struct profile_names_header);

			buf_ptr = (vm_offset_t)profile_header;
			/* write names file header */
			while(header_size) {
				error = vn_rdwr(UIO_WRITE, names_vp,
					header_size, resid_off,
					UIO_SYSSPACE, IO_NODELOCKED,
					p->p_ucred, &resid, p);
					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						kernel_flock, funnel_state);

				buf_ptr += (vm_offset_t)header_size-resid;
				resid_off += header_size-resid;
				header_size = resid;
			/* write profile to data file */
			resid_off = data_vattr.va_size;
				error = vn_rdwr(UIO_WRITE, data_vp,
					(caddr_t)buffer, size, resid_off,
					UIO_SYSSPACE, IO_NODELOCKED,
					p->p_ucred, &resid, p);
					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						kernel_flock, funnel_state);

				buffer += size-resid;
				resid_off += size-resid;
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);

			/* Someone else wrote a twin profile before us */
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);

		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
prepare_profile_database(int user)

	char		*cache_path = "/var/vm/app_profile/";

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;

	struct profile_names_header *profile_header;

	struct nameidata nd_names;
	struct nameidata nd_data;
	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for */
	/* both file path names */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
		= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);
	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
			(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
	NDINIT(&nd_names, LOOKUP, FOLLOW,
			UIO_SYSSPACE, profile_names_string, p);
	NDINIT(&nd_data, LOOKUP, FOLLOW,
			UIO_SYSSPACE, profile_data_string, p);

	if (error = vn_open(&nd_data,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

	data_vp = nd_data.ni_vp;
	VOP_UNLOCK(data_vp, 0, p);
	if (error = vn_open(&nd_names,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
		printf("prepare_profile_database: Can't create CacheNames %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

	names_vp = nd_names.ni_vp;
	/* Write Header for new names file */

	profile_header = (struct profile_names_header *)names_buf;

	profile_header->number_of_profiles = 0;
	profile_header->user_id = user;
	profile_header->version = 1;
	profile_header->element_array =
				sizeof(struct profile_names_header);
	profile_header->spare1 = 0;
	profile_header->spare2 = 0;
	profile_header->spare3 = 0;

	size = sizeof(struct profile_names_header);
	buf_ptr = (vm_offset_t)profile_header;
		error = vn_rdwr(UIO_WRITE, names_vp,
				(caddr_t)buf_ptr, size, resid_off,
				UIO_SYSSPACE, IO_NODELOCKED,
				p->p_ucred, &resid, p);
			printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string,

		buf_ptr += size-resid;
		resid_off += size-resid;
	vattr.va_uid = user;
	error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_names_string);
	error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);
		printf("prepare_profile_database: cannot lock data file %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
	vattr.va_uid = user;
	error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_data_string);

	kmem_free(kernel_map,
		(vm_offset_t)profile_data_string, PATH_MAX);
	kmem_free(kernel_map,
		(vm_offset_t)names_buf, 4 * PAGE_SIZE);