/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
extern zone_t lsf_zone;
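/*
 * useracc: report whether the user range [addr, addr+len) is accessible for
 * the requested operation (B_READ or write) by checking the protections
 * recorded in the current task's VM map.
 */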
useracc(addr, len, prot)
{
	return (vm_map_check_protection(
			current_map(),
			trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
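/*
 * vslock-style wiring: pin the user range [addr, addr+len) into physical
 * memory, readable and writable, so the kernel can touch it without
 * faulting; vm_map_unwire() in vsunlock() below releases it again.
 */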
	kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr),
			round_page_32((unsigned int)(addr+len)),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);

	case KERN_INVALID_ADDRESS:
	case KERN_PROTECTION_FAILURE:
vsunlock(addr, len, dirtied)

	vm_offset_t	vaddr, paddr;

	pmap = get_task_pmap(current_task());
	for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len));
			vaddr += PAGE_SIZE) {
		paddr = pmap_extract(pmap, vaddr);
		pg = PHYS_TO_VM_PAGE(paddr);
		vm_page_set_modified(pg);
	}

	kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)),
			round_page_32((unsigned int)(addr+len)), FALSE);

	case KERN_INVALID_ADDRESS:
	case KERN_PROTECTION_FAILURE:
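/*
 * subyte/suibyte/fubyte/fuibyte and suword/fuword (plus their instruction-
 * space twins) store or fetch a single byte or word at a user address via
 * copyout()/copyin(), returning -1 if the transfer faults.
 */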
#if	defined(sun) || BALANCE || defined(m88k)
#else	/*defined(sun) || BALANCE || defined(m88k)*/

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

	character = (char)byte;
	return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1);

	if (copyin(addr, (void *) &byte, sizeof(char)))

	if (copyin(addr, (void *) &(byte), sizeof(char)))

	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

	if (copyin(addr, (void *) &word, sizeof(int)))

/* suiword and fuiword are the same as suword and fuword, respectively */

	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

	if (copyin(addr, (void *) &word, sizeof(int)))

#endif	/* defined(sun) || BALANCE || defined(m88k) || defined(i386) */
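/*
 * pid_for_task (audited as AUE_PIDFORTASK): translate a task port name into
 * the owning BSD process id and copy the pid out to the caller; the lookup
 * runs under the kernel funnel.
 */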
	extern task_t port_name_to_task(mach_port_t t);

	kern_return_t	err = KERN_SUCCESS;
	boolean_t funnel_state;

	AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
	AUDIT_ARG(mach_port1, t);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);
	if (t1 == TASK_NULL) {

	p = get_bsdtask_info(t1);

	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
	thread_funnel_set(kernel_flock, funnel_state);
	AUDIT_MACH_SYSCALL_EXIT(err);
/*
 * Routine:	task_for_pid
 * Purpose:
 *	Get the task port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.
 */
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;

	extern task_t port_name_to_task(mach_port_t tp);

	boolean_t funnel_state;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(mach_port1, target_tport);

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	p1 = get_bsdtask_info(t1);
	AUDIT_ARG(process, p);
	if (
	    (p != (struct proc *) 0)
	    && (p1 != (struct proc *) 0)
	    && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
	      ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
	    || !(suser(p1->p_ucred, &p1->p_acflag)))
	    && (p->p_stat != SZOMB)
	    ) {
		if (p->task != TASK_NULL) {
			if (!task_reference_try(p->task)) {
				mutex_pause();	/* temp loss of funnel */

			sright = (void *)convert_task_to_port(p->task);
			ipc_port_copyout_send(sright,
				get_task_ipcspace(current_task()));

		tret = MACH_PORT_NULL;
		AUDIT_ARG(mach_port2, tret);
		(void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));

		error = KERN_SUCCESS;

		tret = MACH_PORT_NULL;
		(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
		error = KERN_FAILURE;

	thread_funnel_set(kernel_flock, funnel_state);
	AUDIT_MACH_SYSCALL_EXIT(error);
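/*
 * load_shared_file: map a file into the system shared regions on behalf of
 * the dynamic linker.  The caller supplies the file name, an array of
 * sf_mapping_t descriptors, a preferred base address and flag bits; the
 * routine validates the vnode, optionally gives the task private cloned
 * regions, and hands the mappings to copyin_shared_file().
 */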
struct load_shared_file_args {

	sf_mapping_t	*mappings;

	struct load_shared_file_args *uap,

	caddr_t		mapped_file_addr = uap->mfa;
	u_long		mapped_file_size = uap->mfs;
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	char		*filename = uap->filename;
	int		*flags = uap->flags;
	struct vnode	*vp = 0;
	struct nameidata	nd, *ndp;

	memory_object_control_t	file_control;
	sf_mapping_t	*map_list;

	int		default_regions = 0;

	shared_region_mapping_t	shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t	next;
	AUDIT_ARG(addr, base_address);
	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {

	if (error = copyin(flags, &local_flags, sizeof (int))) {

	if (local_flags & QUERY_IS_SYSTEM_REGION) {
			shared_region_mapping_t	default_shared_region;
			vm_get_shared_region(current_task(), &shared_region);
			task_mapping_info.self = (vm_offset_t)shared_region;

			shared_region_mapping_info(shared_region,
				&(task_mapping_info.text_region),
				&(task_mapping_info.text_size),
				&(task_mapping_info.data_region),
				&(task_mapping_info.data_size),
				&(task_mapping_info.region_mappings),
				&(task_mapping_info.client_base),
				&(task_mapping_info.alternate_base),
				&(task_mapping_info.alternate_next),
				&(task_mapping_info.fs_base),
				&(task_mapping_info.system),
				&(task_mapping_info.flags), &next);

			default_shared_region =
				lookup_default_shared_region(
					task_mapping_info.system);
			if (shared_region == default_shared_region) {
				local_flags = SYSTEM_REGION_BACKED;

			shared_region_mapping_dealloc(default_shared_region);

			error = copyout(&local_flags, flags, sizeof (int));
	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
				(vm_size_t)(MAXPATHLEN));

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;

	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, (size_t *)&dummy)) {
		goto lsf_bailout_free;
	/* Get a vnode for the target file */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE,
	    filename_str, p);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;

	if (vp->v_type != VREG) {
		goto lsf_bailout_free_vput;

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;

	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		goto lsf_bailout_free_vput;

	if (vattr.va_size != mapped_file_size) {
		goto lsf_bailout_free_vput;

	if (p->p_flag & P_NOSHLIB) {
		p->p_flag = p->p_flag & ~P_NOSHLIB;

	/* load alternate regions if the caller has requested. */
	/* Note: the new regions are "clean slates" */
	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
		error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
			goto lsf_bailout_free_vput;
	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.fs_base),
			&(task_mapping_info.system),
			&(task_mapping_info.flags), &next);

		shared_region_mapping_t	default_shared_region;
		default_shared_region =
			lookup_default_shared_region(
				task_mapping_info.system);
		if (shared_region == default_shared_region) {

		shared_region_mapping_dealloc(default_shared_region);

	/* If we are running on a removable file system we must not */
	/* be in a set of shared regions or the file system will not */
	if (((vp->v_mount != rootvnode->v_mount) && (default_regions))
		&& (lsf_mapping_pool_gauge() < 75)) {
		/* We don't want to run out of shared memory */
		/* map entries by starting too many private versions */
		/* of the shared library structures */

		if (p->p_flag & P_NOSHLIB) {
			error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);

			error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);

			goto lsf_bailout_free_vput;

		local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
		vm_get_shared_region(current_task(), &shared_region);
		shared_region_mapping_info(shared_region,
				&(task_mapping_info.text_region),
				&(task_mapping_info.text_size),
				&(task_mapping_info.data_region),
				&(task_mapping_info.data_size),
				&(task_mapping_info.region_mappings),
				&(task_mapping_info.client_base),
				&(task_mapping_info.alternate_base),
				&(task_mapping_info.alternate_next),
				&(task_mapping_info.fs_base),
				&(task_mapping_info.system),
				&(task_mapping_info.flags), &next);
	/* This is a work-around to allow executables which have been */
	/* built without knowledge of the proper shared segment to */
	/* load.  This code has been architected as a shared region */
	/* handler, the knowledge of where the regions are loaded is */
	/* problematic for the extension of shared regions as it will */
	/* not be easy to know what region an item should go into. */
	/* The code below however will get around a short term problem */
	/* with executables which believe they are loading at zero. */

	if (((unsigned int)local_base &
		(~(task_mapping_info.text_size - 1))) !=
		task_mapping_info.client_base) {
		if (local_flags & ALTERNATE_LOAD_SITE) {
			local_base = (caddr_t)(
				(unsigned int)local_base &
				(task_mapping_info.text_size - 1));
			local_base = (caddr_t)((unsigned int)local_base
				| task_mapping_info.client_base);

			goto lsf_bailout_free_vput;
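	/*
	 * Illustrative example only (the region size and base used here are
	 * assumptions, not values taken from this file): with a
	 * 0x10000000-byte text region whose client_base is 0x90000000, a
	 * caller-requested base of 0x00001000 is masked to 0x00001000 and
	 * OR'd with the client base, giving an effective load address of
	 * 0x90001000.
	 */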
	if ((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			mapped_file_size,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_control,
			&task_mapping_info, &local_flags))) {

		case KERN_INVALID_ARGUMENT:
		case KERN_INVALID_ADDRESS:
		case KERN_PROTECTION_FAILURE:
			/* save EAUTH for authentication in this */

		if ((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
			for (i = 0; i < map_cnt; i++) {
				printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
					, i, map_list[i].mapping_offset,
					map_list[i].size,
					map_list[i].file_offset,
					map_list[i].protection);

	local_flags |= SYSTEM_REGION_BACKED;
	if (!(error = copyout(&local_flags, flags, sizeof (int)))) {
		error = copyout(&local_base,
			base_address, sizeof (caddr_t));

lsf_bailout_free_vput:

	kmem_free(kernel_map, (vm_offset_t)filename_str,
		(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
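/*
 * reset_shared_file: undo data-segment mappings previously established by
 * load_shared_file.  For each supplied sf_mapping_t that falls in the
 * shared data segment, the range is deallocated from the task map and
 * replaced with a fresh read-only, inherit-share mapping of the global
 * shared data region.
 */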
struct reset_shared_file_args {

	sf_mapping_t	*mappings;

	struct reset_shared_file_args *uap,

	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;

	sf_mapping_t	*map_list;

	vm_offset_t	map_address;

	AUDIT_ARG(addr, base_address);
	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
		!= GLOBAL_SHARED_TEXT_SEGMENT) {

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

	for (i = 0; i < map_cnt; i++) {
		if ((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),

			vm_map(current_map(), &map_address,
				map_list[i].size, 0, SHARED_LIB_ALIAS,
				shared_data_region_handle,
				((unsigned int)local_base
					& SHARED_DATA_REGION_MASK) +
				(map_list[i].mapping_offset
					& SHARED_DATA_REGION_MASK),

				VM_PROT_READ, VM_INHERIT_SHARE);

	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
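/*
 * new_system_shared_regions: discard the current system-wide default shared
 * regions so that newly built regions will be used by processes launched
 * from this point on.
 */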
struct new_system_shared_regions_args {

new_system_shared_regions(
	struct new_system_shared_regions_args *uap,

	shared_region_mapping_t	regions;
	shared_region_mapping_t	new_regions;

	/* clear all of our existing defaults */
	remove_all_shared_regions();
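/*
 * clone_system_shared_regions: give the calling task its own copy of the
 * shared text/data regions.  With shared_regions_active the old contents
 * are cloned and spliced into the task's map via vm_map_region_replace();
 * otherwise a default region chain for base_vnode is looked up or built.
 */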
clone_system_shared_regions(shared_regions_active, base_vnode)

	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
		&(old_info.text_region),
		&(old_info.text_size),
		&(old_info.data_region),
		&(old_info.data_size),
		&(old_info.region_mappings),
		&(old_info.client_base),
		&(old_info.alternate_base),
		&(old_info.alternate_next),
		&(old_info.flags), &next);
	if ((shared_regions_active) ||
		(base_vnode == ENV_DEFAULT_ROOT)) {
		if (shared_file_create_system_region(&new_shared_region))

			lookup_default_shared_region(
				base_vnode, old_info.system);
		if (new_shared_region == NULL) {
			shared_file_boot_time_init(
				base_vnode, old_info.system);
			vm_get_shared_region(current_task(), &new_shared_region);

			vm_set_shared_region(current_task(), new_shared_region);

		if (old_shared_region)
			shared_region_mapping_dealloc(old_shared_region);

	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
		&(new_info.text_region),
		&(new_info.text_size),
		&(new_info.data_region),
		&(new_info.data_size),
		&(new_info.region_mappings),
		&(new_info.client_base),
		&(new_info.alternate_base),
		&(new_info.alternate_next),
		&(new_info.flags), &next);
	if (shared_regions_active) {
		if (vm_region_clone(old_info.text_region, new_info.text_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);

		if (vm_region_clone(old_info.data_region, new_info.data_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 2");
			shared_region_mapping_dealloc(new_shared_region);

		shared_region_object_chain_attach(
			new_shared_region, old_shared_region);

		if (vm_map_region_replace(current_map(), old_info.text_region,
				new_info.text_region, old_info.client_base,
				old_info.client_base+old_info.text_size)) {
			panic("clone_system_shared_regions: shared region mis-alignment 3");
			shared_region_mapping_dealloc(new_shared_region);

		if (vm_map_region_replace(current_map(), old_info.data_region,
				new_info.data_region,
				old_info.client_base + old_info.text_size,

				+ old_info.text_size + old_info.data_size)) {
			panic("clone_system_shared_regions: shared region mis-alignment 4");
			shared_region_mapping_dealloc(new_shared_region);

	vm_set_shared_region(current_task(), new_shared_region);

	/* consume the reference which wasn't accounted for in object */
	if (!shared_regions_active)
		shared_region_mapping_dealloc(old_shared_region);
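/*
 * Per-user paging profile support.  Each user id gets a pair of files under
 * /var/vm/app_profile/: a "names" file indexing the profiled applications
 * and a "data" file holding the profiles themselves.  A small cache of open
 * file pairs is kept in global_user_profile_cache below.
 */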
extern vm_map_t bsd_pageable_map;

/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file */

struct profile_names_header {
	unsigned int	number_of_profiles;
	unsigned int	user_id;
	unsigned int	version;

struct profile_element {
	unsigned int	mod_date;

struct global_profile {
	struct vnode	*names_vp;
	struct vnode	*data_vp;

struct global_profile_cache {
	struct global_profile	profiles[3];

struct global_profile_cache global_user_profile_cache =
	{3, 0, NULL, NULL, NULL, 0, 0, 0,
	       NULL, NULL, NULL, 0, 0, 0,
	       NULL, NULL, NULL, 0, 0, 0 };
/* BSD_OPEN_PAGE_CACHE_FILES: */
/* Caller provides a user id.  This id was used in */
/* prepare_profile_database to create two unique absolute */
/* file paths to the associated profile files.  These files */
/* are either opened or bsd_open_page_cache_files returns an */
/* error.  The header of the names file is then consulted. */
/* The header and the vnodes for the names and data files are */

bsd_open_page_cache_files(
	struct global_profile **profile)

	char		*cache_path = "/var/vm/app_profile/";

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;

	struct profile_names_header *profile_header;

	struct nameidata nd_names;
	struct nameidata nd_data;
	for (i = 0; i < global_user_profile_cache.max_ele; i++) {
		if ((global_user_profile_cache.profiles[i].user == user)
			&& (global_user_profile_cache.profiles[i].data_vp

			*profile = &global_user_profile_cache.profiles[i];
			/* already in cache, we're done */
			if ((*profile)->busy) {
				/*
				 * drop funnel and wait
				 */
				(void)tsleep((void *)
					PRIBIO, "app_profile", 0);

			(*profile)->busy = 1;
			(*profile)->age = global_user_profile_cache.age;
			global_user_profile_cache.age += 1;

	lru = global_user_profile_cache.age;

	for (i = 0; i < global_user_profile_cache.max_ele; i++) {
		/* Skip entry if it is in the process of being reused */
		if (global_user_profile_cache.profiles[i].data_vp ==
			(struct vnode *)0xFFFFFFFF)

		/* Otherwise grab the first empty entry */
		if (global_user_profile_cache.profiles[i].data_vp == NULL) {
			*profile = &global_user_profile_cache.profiles[i];
			(*profile)->age = global_user_profile_cache.age;

		/* Otherwise grab the oldest entry */
		if (global_user_profile_cache.profiles[i].age < lru) {
			lru = global_user_profile_cache.profiles[i].age;
			*profile = &global_user_profile_cache.profiles[i];

	if (*profile == NULL) {
		/*
		 * No entries are available; this can only happen if all
		 * of them are currently in the process of being reused;
		 * if this happens, we sleep on the address of the first
		 * element, and restart.  This is less than ideal, but we
		 * know it will work because we know that there will be a
		 * wakeup on any entry currently in the process of being
		 *
		 * XXX Recommend a two handed clock and more than 3 total
		 * XXX cache entries at some point in the future.
		 */
		/*
		 * drop funnel and wait
		 */
		(void)tsleep((void *)
			&global_user_profile_cache.profiles[0],
			PRIBIO, "app_profile", 0);

	/*
	 * If it's currently busy, we've picked the one at the end of the
	 * LRU list, but it's currently being actively used.  We sleep on
	 * its address and restart.
	 */
	if ((*profile)->busy) {
		/*
		 * drop funnel and wait
		 */
		(void)tsleep((void *)
			PRIBIO, "app_profile", 0);

	(*profile)->busy = 1;
	(*profile)->user = user;

	/*
	 * put dummy value in for now to get competing request to wait
	 * above until we are finished
	 *
	 * Save the data_vp before setting it, so we can set it before
	 * we kmem_free() or vrele().  If we don't do this, then we
	 * have a potential funnel race condition we have to deal with.
	 */
	data_vp = (*profile)->data_vp;
	(*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

	/*
	 * Age the cache here in all cases; this guarantees that we won't
	 * be reusing only one entry over and over, once the system reaches
	 */
	global_user_profile_cache.age += 1;

	if (data_vp != NULL) {
		kmem_free(kernel_map,
			(*profile)->buf_ptr, 4 * PAGE_SIZE);
		if ((*profile)->names_vp) {
			vrele((*profile)->names_vp);
			(*profile)->names_vp = NULL;
	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them  */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);

		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names                                */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
		= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);
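	/*
	 * Example (illustrative only): for user id 501 the paths built above
	 * are "/var/vm/app_profile/1f5_data" and
	 * "/var/vm/app_profile/1f5_names", since the uid is formatted in hex
	 * by the "%x" conversions.
	 */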
	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
		UIO_SYSSPACE, profile_names_string, p);
	NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
		UIO_SYSSPACE, profile_data_string, p);

	if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
		printf("bsd_open_page_cache_files: CacheData file not found %s\n",
			profile_data_string);

		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	data_vp = nd_data.ni_vp;
	VOP_UNLOCK(data_vp, 0, p);

	if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
		printf("bsd_open_page_cache_files: NamesData file not found %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	names_vp = nd_names.ni_vp;

	if (error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
		printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);

		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;

	size = vattr.va_size;
	if (size > 4 * PAGE_SIZE)
		size = 4 * PAGE_SIZE;
	buf_ptr = names_buf;

		error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
		if ((error) || (size == resid)) {
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string, PATH_MAX);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);

			(*profile)->data_vp = NULL;
			(*profile)->busy = 0;

		buf_ptr += size-resid;
		resid_off += size-resid;

	VOP_UNLOCK(names_vp, 0, p);
	kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
	(*profile)->names_vp = names_vp;
	(*profile)->data_vp = data_vp;
	(*profile)->buf_ptr = names_buf;
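/*
 * bsd_close_page_cache_files: drop the busy hold on a profile cache entry
 * obtained from bsd_open_page_cache_files so other callers can reuse it.
 */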
bsd_close_page_cache_files(
	struct global_profile *profile)
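/*
 * bsd_read_page_cache_file: look up the paging profile recorded for the
 * given application vnode under this user id and, if found, allocate a
 * kernel buffer and read the profile in from the per-user data file.
 */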
bsd_read_page_cache_file(
	struct vnode	*app_vp,
	vm_offset_t	*buffer,
	vm_offset_t	*buf_size)

	boolean_t	funnel_state;

	unsigned int	profile_size;

	vm_offset_t	names_buf;

	struct vnode	*names_vp;
	struct vnode	*data_vp;

	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them  */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	error = bsd_open_page_cache_files(user, &uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */
	if ((unsigned int)names_vp < (unsigned int)data_vp) {

	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
		printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
		printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	if (error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	*fid = vattr.va_fileid;
	*mod = vattr.va_mtime.tv_sec;

	if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
			(unsigned int) vattr.va_mtime.tv_sec,
			vattr.va_fileid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if (profile_size == 0) {
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);

		ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size));
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);

		*buf_size = profile_size;
		while (profile_size) {
			error = vn_rdwr(UIO_READ, data_vp,
				(caddr_t) *buffer, profile_size,
				profile, UIO_SYSSPACE, IO_NODELOCKED,
				p->p_ucred, &resid, p);
			if ((error) || (profile_size == resid)) {
				VOP_UNLOCK(names_vp, 0, p);
				VOP_UNLOCK(data_vp, 0, p);
				bsd_close_page_cache_files(uid_files);
				kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
				thread_funnel_set(kernel_flock, funnel_state);

			profile += profile_size - resid;
			profile_size = resid;

		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	VOP_UNLOCK(names_vp, 0, p);
	VOP_UNLOCK(data_vp, 0, p);
	bsd_close_page_cache_files(uid_files);
	thread_funnel_set(kernel_flock, funnel_state);
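/*
 * bsd_search_page_cache_data_base: scan the names file for an element whose
 * name, modification date and inode match the application being looked up;
 * entries beyond the cached header pages are read in 4 * PAGE_SIZE chunks.
 */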
bsd_search_page_cache_data_base(
	struct profile_names_header *database,
	unsigned int	mod_date,
	unsigned int	*profile_size)

	struct profile_element *element;
	unsigned int	ele_total;
	unsigned int	extended_list = 0;

	vm_offset_t	local_buf = NULL;

	if (((vm_offset_t)database->element_array) !=
			sizeof(struct profile_names_header)) {

	element = (struct profile_element *)(
			(vm_offset_t)database->element_array +
			(vm_offset_t)database);

	ele_total = database->number_of_profiles;

	/* note: code assumes header + n*ele comes out on a page boundary */
	if (((local_buf == 0) && (sizeof(struct profile_names_header) +
		(ele_total * sizeof(struct profile_element)))
		> (PAGE_SIZE * 4)) ||
		((local_buf != 0) &&
		(ele_total * sizeof(struct profile_element))
		> (PAGE_SIZE * 4))) {
		extended_list = ele_total;
		if (element == (struct profile_element *)
			((vm_offset_t)database->element_array +
			(vm_offset_t)database)) {
			ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;

			ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);

		extended_list -= ele_total;

	for (i = 0; i < ele_total; i++) {
		if ((mod_date == element[i].mod_date)
			&& (inode == element[i].inode)) {
			if (strncmp(element[i].name, app_name, 12) == 0) {
				*profile = element[i].addr;
				*profile_size = element[i].size;
				if (local_buf != NULL) {
					kmem_free(kernel_map,
						(vm_offset_t)local_buf, 4 * PAGE_SIZE);

	if (extended_list == 0)

	if (local_buf == NULL) {
		ret = kmem_alloc(kernel_map,
			(vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
		if (ret != KERN_SUCCESS) {

	element = (struct profile_element *)local_buf;
	ele_total = extended_list;

	file_off += 4 * PAGE_SIZE;
	if ((ele_total * sizeof(struct profile_element)) >

		size = PAGE_SIZE * 4;

		size = ele_total * sizeof(struct profile_element);

		error = vn_rdwr(UIO_READ, vp,
			CAST_DOWN(caddr_t, (local_buf + resid_off)),
			size, file_off + resid_off, UIO_SYSSPACE,
			IO_NODELOCKED, p->p_ucred, &resid, p);
		if ((error) || (size == resid)) {
			if (local_buf != NULL) {
				kmem_free(kernel_map,
					(vm_offset_t)local_buf,

		resid_off += size-resid;

	if (local_buf != NULL) {
		kmem_free(kernel_map,
			(vm_offset_t)local_buf, 4 * PAGE_SIZE);
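/*
 * bsd_write_page_cache_file: append a newly collected profile to the
 * per-user data file and add a matching element to the names file, unless
 * an entry with the same name, mod date and inode already exists.
 */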
bsd_write_page_cache_file(

	struct nameidata nd;
	struct vnode	*vp = 0;

	boolean_t	funnel_state;

	struct vattr	data_vattr;

	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vnode	*names_vp;
	struct vnode	*data_vp;

	struct profile_names_header *profile_header;

	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	error = bsd_open_page_cache_files(user, &uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */
	if ((unsigned int)names_vp < (unsigned int)data_vp) {

	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
		printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
		printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	/* Stat data file for size */

	if (error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	if (bsd_search_page_cache_data_base(names_vp,
			(struct profile_names_header *)names_buf,
			file_name, (unsigned int) mod,
			fid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if (profile_size == 0) {
			unsigned int	header_size;
			vm_offset_t	buf_ptr;

			/* Our Write case */

			/* read header for last entry */
			profile_header =
				(struct profile_names_header *)names_buf;
			name_offset = sizeof(struct profile_names_header) +
				(sizeof(struct profile_element)
					* profile_header->number_of_profiles);
			profile_header->number_of_profiles += 1;

			if (name_offset < PAGE_SIZE * 4) {
				struct profile_element	*name;
				/* write new entry */
				name = (struct profile_element *)
					(names_buf + (vm_offset_t)name_offset);
				name->addr = data_vattr.va_size;
				name->mod_date = mod;
				strncpy (name->name, file_name, 12);

				unsigned int	ele_size;
				struct profile_element	name;
				/* write new entry */
				name.addr = data_vattr.va_size;
				name.mod_date = mod;
				strncpy (name.name, file_name, 12);
				/* write element out separately */
				ele_size = sizeof(struct profile_element);
				buf_ptr = (vm_offset_t)&name;
				resid_off = name_offset;

					error = vn_rdwr(UIO_WRITE, names_vp,
						ele_size, resid_off,
						UIO_SYSSPACE, IO_NODELOCKED,
						p->p_ucred, &resid, p);

						printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
						VOP_UNLOCK(names_vp, 0, p);
						VOP_UNLOCK(data_vp, 0, p);
						bsd_close_page_cache_files(

					buf_ptr += (vm_offset_t)
					resid_off += ele_size-resid;

			if (name_offset < PAGE_SIZE * 4) {
				header_size = name_offset +
					sizeof(struct profile_element);

					sizeof(struct profile_names_header);

			buf_ptr = (vm_offset_t)profile_header;

			/* write names file header */
			while (header_size) {
				error = vn_rdwr(UIO_WRITE, names_vp,
					header_size, resid_off,
					UIO_SYSSPACE, IO_NODELOCKED,
					p->p_ucred, &resid, p);

					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);

				buf_ptr += (vm_offset_t)header_size-resid;
				resid_off += header_size-resid;
				header_size = resid;

			/* write profile to data file */
			resid_off = data_vattr.va_size;

				error = vn_rdwr(UIO_WRITE, data_vp,
					(caddr_t)buffer, size, resid_off,
					UIO_SYSSPACE, IO_NODELOCKED,
					p->p_ucred, &resid, p);

					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);

				buffer += size-resid;
				resid_off += size-resid;

			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);

		/* Someone else wrote a twin profile before us */
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);

	VOP_UNLOCK(names_vp, 0, p);
	VOP_UNLOCK(data_vp, 0, p);
	bsd_close_page_cache_files(uid_files);
	thread_funnel_set(kernel_flock, funnel_state);
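/*
 * prepare_profile_database: create the per-user "<uid>_names" and
 * "<uid>_data" files under /var/vm/app_profile/ with O_CREAT | O_EXCL,
 * write an empty names-file header, and chown both files to the target
 * user.
 */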
prepare_profile_database(int user)

	char		*cache_path = "/var/vm/app_profile/";

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;

	struct profile_names_header *profile_header;

	struct nameidata nd_names;
	struct nameidata nd_data;

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names                                */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
		= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);

	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

	NDINIT(&nd_names, LOOKUP, FOLLOW,
		UIO_SYSSPACE, profile_names_string, p);
	NDINIT(&nd_data, LOOKUP, FOLLOW,
		UIO_SYSSPACE, profile_data_string, p);

	if (error = vn_open(&nd_data,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

	data_vp = nd_data.ni_vp;
	VOP_UNLOCK(data_vp, 0, p);

	if (error = vn_open(&nd_names,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
		printf("prepare_profile_database: Can't create CacheNames %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

	names_vp = nd_names.ni_vp;

	/* Write Header for new names file */

	profile_header = (struct profile_names_header *)names_buf;

	profile_header->number_of_profiles = 0;
	profile_header->user_id = user;
	profile_header->version = 1;
	profile_header->element_array =
		sizeof(struct profile_names_header);
	profile_header->spare1 = 0;
	profile_header->spare2 = 0;
	profile_header->spare3 = 0;

	size = sizeof(struct profile_names_header);
	buf_ptr = (vm_offset_t)profile_header;

		error = vn_rdwr(UIO_WRITE, names_vp,
			(caddr_t)buf_ptr, size, resid_off,
			UIO_SYSSPACE, IO_NODELOCKED,
			p->p_ucred, &resid, p);

			printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string,

		buf_ptr += size-resid;
		resid_off += size-resid;

	vattr.va_uid = user;
	error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);

		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_names_string);

	error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);

		printf("prepare_profile_database: cannot lock data file %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);

	vattr.va_uid = user;
	error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);

		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_data_string);

	kmem_free(kernel_map,
		(vm_offset_t)profile_data_string, PATH_MAX);
	kmem_free(kernel_map,
		(vm_offset_t)names_buf, 4 * PAGE_SIZE);