/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/mach_traps.h>
#include <mach/time_value.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/file_internal.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/vnode_internal.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc_internal.h>
#include <sys/sysproto.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

#include <vm/vm_protos.h>
	/* useracc(): the target map argument was lost in extraction; current_map() restored */
	return (vm_map_check_protection(
			current_map(),
			vm_map_trunc_page(addr), vm_map_round_page(addr+len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
	kret = vm_map_wire(current_map(), vm_map_trunc_page(addr),
			vm_map_round_page(addr+len),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);

	case KERN_INVALID_ADDRESS:

	case KERN_PROTECTION_FAILURE:
	__unused int dirtied)

	vm_map_offset_t vaddr;

		pmap = get_task_pmap(current_task());
		for (vaddr = vm_map_trunc_page(addr);
		     vaddr < vm_map_round_page(addr+len);
		     vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
	kret = vm_map_unwire(current_map(), vm_map_trunc_page(addr),
				vm_map_round_page(addr+len), FALSE);

	case KERN_INVALID_ADDRESS:

	case KERN_PROTECTION_FAILURE:
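/*
 * Minimal sketch, not part of the original file: how vslock()/vsunlock()
 * (which wrap the vm_map_wire()/vm_map_unwire() calls above) are typically
 * paired around an access that must not fault on the user buffer.
 * "example_pin_and_copy", its parameters, and the error handling are
 * hypothetical; vslock(), vsunlock() and copyin() are the helpers used in
 * this file, and the surrounding kernel headers are assumed.
 */
static int
example_pin_and_copy(user_addr_t uaddr, user_size_t len, void *kbuf)
{
	int error;

	/* wire the user pages so they stay resident for the duration */
	error = vslock(uaddr, len);
	if (error)
		return (error);

	/* safe to copy while the range is wired */
	error = copyin(uaddr, kbuf, len);

	/* always unwire the range when done */
	(void) vsunlock(uaddr, len, /* dirtied */ 0);
	return (error);
}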
	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

int fubyte(user_addr_t addr)

	if (copyin(addr, (void *) &byte, sizeof(char)))

int fuibyte(user_addr_t addr)

	if (copyin(addr, (void *) &(byte), sizeof(char)))

	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuword(user_addr_t addr)

	if (copyin(addr, (void *) &word, sizeof(int)))

/* suiword and fuiword are the same as suword and fuword, respectively */

	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

long fuiword(user_addr_t addr)

	if (copyin(addr, (void *) &word, sizeof(int)))
/*
 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
 * fetching and setting of process-sized size_t and pointer values.
 */
sulong(user_addr_t addr, int64_t word)

	if (IS_64BIT_PROCESS(current_proc())) {
		return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);

		return(suiword(addr, (long)word));

fulong(user_addr_t addr)

	if (IS_64BIT_PROCESS(current_proc())) {
		if (copyin(addr, (void *)&longword, sizeof(longword)) != 0)

		return((int64_t)fuiword(addr));

suulong(user_addr_t addr, uint64_t uword)

	if (IS_64BIT_PROCESS(current_proc())) {
		return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);

		return(suiword(addr, (u_long)uword));

fuulong(user_addr_t addr)

	if (IS_64BIT_PROCESS(current_proc())) {
		if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0)

		return((uint64_t)fuiword(addr));
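/*
 * Minimal sketch, not part of the original source: fulong()/sulong() let a
 * single kernel code path read and update a user-space "long" whose width
 * depends on whether the current process is 32- or 64-bit, as the comment
 * above describes.  "example_bump_user_counter" and "uaddr" are hypothetical.
 */
static int
example_bump_user_counter(user_addr_t uaddr)
{
	int64_t value;

	value = fulong(uaddr);		/* copies 4 or 8 bytes, per process ABI */
	if (value == -1)
		return (EFAULT);	/* note: -1 can also be a legitimate stored value */
	return (sulong(uaddr, value + 1) == 0 ? 0 : EFAULT);
}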
swapon(__unused struct proc *procp, __unused struct swapon_args *uap, __unused int *retval)
	struct pid_for_task_args *args)

	mach_port_name_t	t = args->t;
	user_addr_t		pid_addr  = args->pid;
	kern_return_t		err = KERN_SUCCESS;
	boolean_t		funnel_state;

	AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
	AUDIT_ARG(mach_port1, t);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {

		p = get_bsdtask_info(t1);

	(void) copyout((char *) &pid, pid_addr, sizeof(int));
	thread_funnel_set(kernel_flock, funnel_state);
	AUDIT_MACH_SYSCALL_EXIT(err);
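/*
 * Hedged user-space sketch, not part of this file: calling the pid_for_task()
 * trap handled above to ask which BSD process backs a task port.  Assumes the
 * user-level prototype from <mach/mach_traps.h>; error handling is minimal.
 */
#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_traps.h>

int
main(void)
{
	int pid = -1;

	/* look up the pid behind our own task port */
	pid_for_task(mach_task_self(), &pid);
	printf("pid_for_task(mach_task_self()) -> %d\n", pid);
	return 0;
}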
/*
 *	Routine:	task_for_pid
 *
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 *
 * XXX This should be a BSD system call, not a Mach trap!!!
 */
	struct task_for_pid_args *args)

	mach_port_name_t	target_tport = args->target_tport;
	user_addr_t		task_addr = args->t;
	struct uthread		*uthread;
	mach_port_name_t	tret;
	boolean_t		funnel_state;

	AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
	AUDIT_ARG(mach_port1, target_tport);

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
		AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
		return(KERN_FAILURE);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	p1 = get_bsdtask_info(t1);	/* XXX current proc */

	/*
	 * Delayed binding of thread credential to process credential, if we
	 * are not running with an explicitly set thread credential.
	 */
	uthread = get_bsdthread_info(current_thread());
	if (uthread->uu_ucred != p1->p_ucred &&
	    (uthread->uu_flag & UT_SETUID) == 0) {
		kauth_cred_t old = uthread->uu_ucred;
		uthread->uu_ucred = p1->p_ucred;
		kauth_cred_ref(uthread->uu_ucred);

			kauth_cred_rele(old);

	AUDIT_ARG(process, p);

		(p != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& (((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get())) &&
			((p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid)))
		|| !(suser(kauth_cred_get(), 0)))
		&& (p->p_stat != SZOMB)

			if (p->task != TASK_NULL) {
				task_reference(p->task);
				sright = (void *)convert_task_to_port(p->task);
				tret = ipc_port_copyout_send(
					sright,
					get_task_ipcspace(current_task()));

				tret  = MACH_PORT_NULL;

			AUDIT_ARG(mach_port2, tret);
			(void) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
			error = KERN_SUCCESS;

	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
	error = KERN_FAILURE;

	thread_funnel_set(kernel_flock, funnel_state);
	AUDIT_MACH_SYSCALL_EXIT(error);
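/*
 * Hedged user-space sketch, not part of this file: obtaining another
 * process's task port via task_for_pid(), which the comment above restricts
 * to privileged callers or callers with a matching user ID.  Assumes the
 * user-level prototype from <mach/mach_traps.h>; "target_pid" is a
 * hypothetical example value.
 */
#include <stdio.h>
#include <mach/mach.h>
#include <mach/mach_traps.h>

int
main(void)
{
	int target_pid = 1;		/* hypothetical target process */
	mach_port_name_t task = MACH_PORT_NULL;
	kern_return_t kr;

	kr = task_for_pid(mach_task_self(), target_pid, &task);
	if (kr != KERN_SUCCESS) {
		fprintf(stderr, "task_for_pid failed: %d\n", kr);
		return 1;
	}
	printf("task port for pid %d: 0x%x\n", target_pid, task);
	mach_port_deallocate(mach_task_self(), task);
	return 0;
}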
/*
 * shared_region_make_private_np:
 *
 * This system call is for "dyld" only.
 *
 * It creates a private copy of the current process's "shared region" for
 * split libraries.  "dyld" uses this when the shared region is full or
 * it needs to load a split library that conflicts with an already loaded one
 * that this process doesn't need.  "dyld" specifies a set of address ranges
 * that it wants to keep in the now-private "shared region".  These cover
 * the set of split libraries that the process needs so far.  The kernel needs
 * to deallocate the rest of the shared region, so that it's available for
 * more libraries for this process.
 */
shared_region_make_private_np(
	struct shared_region_make_private_np_args	*uap,
	__unused int					*retvalp)

	boolean_t			using_shared_regions;
	user_addr_t			user_ranges;
	unsigned int			range_count;
	struct shared_region_range_np	*ranges;
	shared_region_mapping_t		shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t		next;

	range_count = uap->rangeCount;
	user_ranges = uap->ranges;

	/* allocate kernel space for the "ranges" */
	if (range_count != 0) {
		kr = kmem_alloc(kernel_map,
				(vm_offset_t *) &ranges,
				(vm_size_t) (range_count * sizeof (ranges[0])));
		if (kr != KERN_SUCCESS) {

		/* copy "ranges" from user-space */
		error = copyin(user_ranges,
			       ranges,
			       (range_count * sizeof (ranges[0])));

	if (p->p_flag & P_NOSHLIB) {
		/* no split library has been mapped for this process so far */
		using_shared_regions = FALSE;

		/* this process has already mapped some split libraries */
		using_shared_regions = TRUE;

	/*
	 * Get a private copy of the current shared region.
	 * Do not chain it to the system-wide shared region, as we'll want
	 * to map other split libraries in place of the old ones.  We want
	 * to completely detach from the system-wide shared region and go our
	 * own way after this point, not sharing anything with other processes.
	 */
	error = clone_system_shared_regions(using_shared_regions,
					    FALSE, /* chain_regions */

	/* get info on the newly allocated shared region */
	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t) shared_region;
	shared_region_mapping_info(shared_region,
				   &(task_mapping_info.text_region),
				   &(task_mapping_info.text_size),
				   &(task_mapping_info.data_region),
				   &(task_mapping_info.data_size),
				   &(task_mapping_info.region_mappings),
				   &(task_mapping_info.client_base),
				   &(task_mapping_info.alternate_base),
				   &(task_mapping_info.alternate_next),
				   &(task_mapping_info.fs_base),
				   &(task_mapping_info.system),
				   &(task_mapping_info.flags),
				   &next);

	/*
	 * We now have our private copy of the shared region, as it was before
	 * the call to clone_system_shared_regions().  We now need to clean it
	 * up and keep only the memory areas described by the "ranges" array.
	 */
	kr = shared_region_cleanup(range_count, ranges, &task_mapping_info);

	if (ranges != NULL) {
		kmem_free(kernel_map,
			  (vm_offset_t) ranges,
			  range_count * sizeof (ranges[0]));
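/*
 * Minimal sketch, not part of the original file, of the pattern used above
 * for the "ranges" argument: size a kernel buffer from the user-supplied
 * count, kmem_alloc() it, copyin() the array, and kmem_free() it on every
 * exit path.  "example_copyin_ranges" is hypothetical; the types and helpers
 * are the ones used in this file.
 */
static int
example_copyin_ranges(user_addr_t uranges, unsigned int count,
		      struct shared_region_range_np **out)
{
	struct shared_region_range_np *ranges = NULL;
	kern_return_t kr;
	int error;

	if (count == 0) {
		*out = NULL;			/* nothing to copy */
		return (0);
	}
	kr = kmem_alloc(kernel_map, (vm_offset_t *)&ranges,
			(vm_size_t)(count * sizeof (ranges[0])));
	if (kr != KERN_SUCCESS)
		return (ENOMEM);

	error = copyin(uranges, ranges, count * sizeof (ranges[0]));
	if (error) {
		/* the caller never sees the buffer, so free it here */
		kmem_free(kernel_map, (vm_offset_t)ranges,
			  count * sizeof (ranges[0]));
		return (error);
	}
	*out = ranges;				/* caller must kmem_free() later */
	return (0);
}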
/*
 * shared_region_map_file_np:
 *
 * This system call is for "dyld" only.
 *
 * "dyld" wants to map parts of a split library in the shared region.
 * We get a file descriptor on the split library to be mapped and a set
 * of mapping instructions, describing which parts of the file to map in
 * which areas of the shared segment and with what protection.
 * The "shared region" is split in 2 areas:
 * 0x90000000 - 0xa0000000 : read-only area (for TEXT and LINKEDIT sections),
 * 0xa0000000 - 0xb0000000 : writable area (for DATA sections).
 */
shared_region_map_file_np(
	struct shared_region_map_file_np_args	*uap,
	__unused int				*retvalp)

	unsigned int				mapping_count;
	user_addr_t				user_mappings;	/* 64-bit */
	user_addr_t				user_slide_p;	/* 64-bit */
	struct shared_file_mapping_np		*mappings;
	mach_vm_offset_t			slide;
	struct vfs_context			context;
	memory_object_control_t			file_control;
	memory_object_size_t			file_size;
	shared_region_mapping_t			shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t			next;
	shared_region_mapping_t			default_shared_region;
	boolean_t				using_default_region;
	mach_vm_offset_t			base_offset, end_offset;
	mach_vm_offset_t			original_base_offset;
	boolean_t				mappings_in_segment;
#define SFM_MAX_STACK	6
	struct shared_file_mapping_np		stack_mappings[SFM_MAX_STACK];

	/* get file descriptor for split library from arguments */

	/* get file structure from file descriptor */
	error = fp_lookup(p, fd, &fp, 0);

	/* make sure we're attempting to map a vnode */
	if (fp->f_fglob->fg_type != DTYPE_VNODE) {

	/* we need at least read permission on the file */
	if (! (fp->f_fglob->fg_flag & FREAD)) {

	/* get vnode from file structure */
	error = vnode_getwithref((vnode_t)fp->f_fglob->fg_data);

	vp = (struct vnode *) fp->f_fglob->fg_data;

	/* make sure the vnode is a regular file */
	if (vp->v_type != VREG) {

		context.vc_ucred = kauth_cred_get();
		if ((error = vnode_size(vp, &fs, &context)) != 0)

	/*
	 * Get the list of mappings the caller wants us to establish.
	 */
	mapping_count = uap->mappingCount;	/* the number of mappings */
	if (mapping_count == 0) {
		error = 0;	/* no mappings: we're done ! */

	} else if (mapping_count <= SFM_MAX_STACK) {
		mappings = &stack_mappings[0];

		kr = kmem_alloc(kernel_map,
				(vm_offset_t *) &mappings,
				(vm_size_t) (mapping_count *
					     sizeof (mappings[0])));
		if (kr != KERN_SUCCESS) {

	user_mappings = uap->mappings;	/* the mappings, in user space */
	error = copyin(user_mappings,
		       mappings,
		       (mapping_count * sizeof (mappings[0])));

	/*
	 * If the caller provides a "slide" pointer, it means they're OK
	 * with us moving the mappings around to make them fit.
	 */
	user_slide_p = uap->slide_p;

	/*
	 * Make each mapping address relative to the beginning of the
	 * shared region.  Check that all mappings are in the shared region.
	 * Compute the maximum set of protections required to tell the
	 * buffer cache how we mapped the file (see call to ubc_map() below).
	 */
	max_prot = VM_PROT_NONE;

	mappings_in_segment = TRUE;
	for (j = 0; j < mapping_count; j++) {
		mach_vm_offset_t segment;
		segment = (mappings[j].sfm_address &
			   GLOBAL_SHARED_SEGMENT_MASK);
		if (segment != GLOBAL_SHARED_TEXT_SEGMENT &&
		    segment != GLOBAL_SHARED_DATA_SEGMENT) {
			/* this mapping is not in the shared region... */
			if (user_slide_p == NULL) {
				/* ... and we can't slide it in: fail */

				/* expect all mappings to be outside */
				mappings_in_segment = FALSE;
			} else if (mappings_in_segment != FALSE) {
				/* other mappings were not outside: fail */

			/* we'll try and slide that mapping in the segments */

				/* expect all mappings to be inside */
				mappings_in_segment = TRUE;
			} else if (mappings_in_segment != TRUE) {
				/* other mappings were not inside: fail */

			/* get a relative offset inside the shared segments */
			mappings[j].sfm_address -= GLOBAL_SHARED_TEXT_SEGMENT;

		if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK)

			base_offset = (mappings[j].sfm_address &
				       SHARED_TEXT_REGION_MASK);

		if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK) +
		    mappings[j].sfm_size > end_offset) {
			end_offset =
				(mappings[j].sfm_address &
				 SHARED_TEXT_REGION_MASK) +
				mappings[j].sfm_size;

		max_prot |= mappings[j].sfm_max_prot;

	/* Make all mappings relative to the base_offset */
	base_offset = vm_map_trunc_page(base_offset);
	end_offset = vm_map_round_page(end_offset);
	for (j = 0; j < mapping_count; j++) {
		mappings[j].sfm_address -= base_offset;

	original_base_offset = base_offset;
	if (mappings_in_segment == FALSE) {
		/*
		 * We're trying to map a library that was not pre-bound to
		 * be in the shared segments.  We want to try and slide it
		 * back into the shared segments but as far back as possible,
		 * so that it doesn't clash with pre-bound libraries.  Set
		 * the base_offset to the end of the region, so that it can't
		 * possibly fit there and will have to be slid.
		 */
		base_offset = SHARED_TEXT_REGION_SIZE - end_offset;

	/* get the file's memory object handle */
	UBCINFOCHECK("shared_region_map_file_np", vp);
	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {

	/*
	 * Get info about the current process's shared region.
	 * This might change if we decide we need to clone the shared region.
	 */
	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t) shared_region;
	shared_region_mapping_info(shared_region,
				   &(task_mapping_info.text_region),
				   &(task_mapping_info.text_size),
				   &(task_mapping_info.data_region),
				   &(task_mapping_info.data_size),
				   &(task_mapping_info.region_mappings),
				   &(task_mapping_info.client_base),
				   &(task_mapping_info.alternate_base),
				   &(task_mapping_info.alternate_next),
				   &(task_mapping_info.fs_base),
				   &(task_mapping_info.system),
				   &(task_mapping_info.flags),
				   &next);

	/*
	 * Are we using the system's current shared region
	 * for this environment ?
	 */
	default_shared_region =
		lookup_default_shared_region(ENV_DEFAULT_ROOT,
					     task_mapping_info.system);
	if (shared_region == default_shared_region) {
		using_default_region = TRUE;

		using_default_region = FALSE;

	shared_region_mapping_dealloc(default_shared_region);

	if (vp->v_mount != rootvnode->v_mount &&
	    using_default_region) {
		/*
		 * The split library is not on the root filesystem.  We don't
		 * want to pollute the system-wide ("default") shared region
		 * Reject the mapping.  The caller (dyld) should "privatize"
		 * (via shared_region_make_private()) the shared region and
		 * try to establish the mapping privately for this process.
		 */

	/*
	 * Map the split library.
	 */
	kr = map_shared_file(mapping_count,
			     (user_slide_p) ? &slide : NULL);

		/*
		 * The mapping was successful.  Let the buffer cache know
		 * that we've mapped that file with these protections.  This
		 * prevents the vnode from getting recycled while it's mapped.
		 */
		(void) ubc_map(vp, max_prot);

	case KERN_INVALID_ADDRESS:

	case KERN_PROTECTION_FAILURE:

	case KERN_INVALID_ARGUMENT:

	if (p->p_flag & P_NOSHLIB) {
		/* signal that this process is now using split libraries */
		p->p_flag &= ~P_NOSHLIB;

		/*
		 * The caller provided a pointer to a "slide" offset.  Let
		 * them know by how much we slid the mappings.
		 */
		if (mappings_in_segment == FALSE) {
			/*
			 * We faked the base_offset earlier, so undo that
			 * and take into account the real base_offset.
			 */
			slide += SHARED_TEXT_REGION_SIZE - end_offset;
			slide -= original_base_offset;

			/*
			 * The mappings were slid into the shared segments
			 * and "slide" is relative to the beginning of the
			 * shared segments.  Adjust it to be absolute.
			 */
			slide += GLOBAL_SHARED_TEXT_SEGMENT;

		error = copyout(&slide,

		/*
		 * release the vnode...
		 * ubc_map() still holds it for us in the non-error case
		 */
		(void) vnode_put(vp);

		/* release the file descriptor */
		fp_drop(p, fd, fp, 0);

	if (mappings != NULL &&
	    mappings != &stack_mappings[0]) {
		kmem_free(kernel_map,
			  (vm_offset_t) mappings,
			  mapping_count * sizeof (mappings[0]));
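/*
 * Minimal sketch, not part of the original file, of the buffer-selection
 * pattern used above: mapping lists with at most SFM_MAX_STACK entries are
 * held in an on-stack array, larger ones are kmem_alloc()ed and must be
 * kmem_free()d before returning.  "example_pick_mapping_buffer" and its
 * parameters are hypothetical; SFM_MAX_STACK and
 * struct shared_file_mapping_np are the names used above.
 */
static int
example_pick_mapping_buffer(unsigned int mapping_count,
			    struct shared_file_mapping_np *stack_buf,
			    struct shared_file_mapping_np **mappings_p)
{
	if (mapping_count == 0) {
		*mappings_p = NULL;			/* nothing to map */
	} else if (mapping_count <= SFM_MAX_STACK) {
		*mappings_p = &stack_buf[0];		/* small list: use the stack */
	} else {
		kern_return_t kr;

		kr = kmem_alloc(kernel_map, (vm_offset_t *)mappings_p,
				(vm_size_t)(mapping_count *
					    sizeof ((*mappings_p)[0])));
		if (kr != KERN_SUCCESS)
			return (ENOMEM);		/* caller must kmem_free() on success */
	}
	return (0);
}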
load_shared_file(struct proc *p, struct load_shared_file_args *uap,
					__unused int *retval)

	caddr_t		mapped_file_addr=uap->mfa;
	u_long		mapped_file_size=uap->mfs;
	caddr_t		*base_address=uap->ba;
	int		map_cnt=uap->map_cnt;
	sf_mapping_t	*mappings=uap->mappings;
	char		*filename=uap->filename;
	int		*flags=uap->flags;
	struct vnode		*vp = 0;
	struct nameidata	nd, *ndp;
	struct vfs_context	context;
	memory_object_control_t	file_control;
	sf_mapping_t	*map_list;
	int		default_regions = 0;
	shared_region_mapping_t shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t next;

	context.vc_ucred = kauth_cred_get();

	AUDIT_ARG(addr, CAST_USER_ADDR_T(base_address));
	/* Retrieve the base address */
	if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {

	if ( (error = copyin(CAST_USER_ADDR_T(flags), &local_flags, sizeof (int))) ) {

	if(local_flags & QUERY_IS_SYSTEM_REGION) {
			shared_region_mapping_t	default_shared_region;
			vm_get_shared_region(current_task(), &shared_region);
			task_mapping_info.self = (vm_offset_t)shared_region;

			shared_region_mapping_info(shared_region,
					&(task_mapping_info.text_region),
					&(task_mapping_info.text_size),
					&(task_mapping_info.data_region),
					&(task_mapping_info.data_size),
					&(task_mapping_info.region_mappings),
					&(task_mapping_info.client_base),
					&(task_mapping_info.alternate_base),
					&(task_mapping_info.alternate_next),
					&(task_mapping_info.fs_base),
					&(task_mapping_info.system),
					&(task_mapping_info.flags), &next);

			default_shared_region =
				lookup_default_shared_region(
					task_mapping_info.system);
			if (shared_region == default_shared_region) {
				local_flags = SYSTEM_REGION_BACKED;

			shared_region_mapping_dealloc(default_shared_region);

			error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int));

	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
		if (kret != KERN_SUCCESS) {

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		if (kret != KERN_SUCCESS) {
			kmem_free(kernel_map, (vm_offset_t)filename_str,
				(vm_size_t)(MAXPATHLEN));

	if ( (error = copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
		goto lsf_bailout_free;

	if ( (error = copyinstr(CAST_USER_ADDR_T(filename), filename_str,
							MAXPATHLEN, (size_t *)&dummy)) ) {
		goto lsf_bailout_free;

	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE32,
	    CAST_USER_ADDR_T(filename_str), &context);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;

	if (vp->v_type != VREG) {
		goto lsf_bailout_free_vput;

	UBCINFOCHECK("load_shared_file", vp);

	if ((error = vnode_size(vp, &file_size, &context)) != 0)
		goto lsf_bailout_free_vput;

	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		goto lsf_bailout_free_vput;

	if(file_size != mapped_file_size) {
		goto lsf_bailout_free_vput;

	if(p->p_flag & P_NOSHLIB) {
		p->p_flag = p->p_flag & ~P_NOSHLIB;

	/* load alternate regions if the caller has requested.  */
	/* Note: the new regions are "clean slates" */
	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
		error = clone_system_shared_regions(FALSE,
						    TRUE, /* chain_regions */
			goto lsf_bailout_free_vput;

	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.fs_base),
			&(task_mapping_info.system),
			&(task_mapping_info.flags), &next);

		shared_region_mapping_t default_shared_region;
		default_shared_region =
			lookup_default_shared_region(
				task_mapping_info.system);
		if(shared_region == default_shared_region) {
			default_regions = 1;

		shared_region_mapping_dealloc(default_shared_region);

	/* If we are running on a removable file system we must not */
	/* be in a set of shared regions or the file system will not */
	if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
		&& (lsf_mapping_pool_gauge() < 75)) {
			/* We don't want to run out of shared memory */
			/* map entries by starting too many private versions */
			/* of the shared library structures */

		error2 = clone_system_shared_regions(!(p->p_flag & P_NOSHLIB),
						     TRUE, /* chain_regions */
			goto lsf_bailout_free_vput;

		local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
		vm_get_shared_region(current_task(), &shared_region);
		shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.fs_base),
			&(task_mapping_info.system),
			&(task_mapping_info.flags), &next);

	/*  This is a work-around to allow executables which have been */
	/*  built without knowledge of the proper shared segment to    */
	/*  load.  This code has been architected as a shared region   */
	/*  handler, the knowledge of where the regions are loaded is  */
	/*  problematic for the extension of shared regions as it will */
	/*  not be easy to know what region an item should go into.    */
	/*  The code below however will get around a short term problem */
	/*  with executables which believe they are loading at zero.   */

		if (((unsigned int)local_base &
			(~(task_mapping_info.text_size - 1))) !=
			task_mapping_info.client_base) {
			if(local_flags & ALTERNATE_LOAD_SITE) {
				local_base = (caddr_t)(
					(unsigned int)local_base &
					   (task_mapping_info.text_size - 1));
				local_base = (caddr_t)((unsigned int)local_base
					   | task_mapping_info.client_base);

				goto lsf_bailout_free_vput;

	if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_control,
			&task_mapping_info, &local_flags))) {

			case KERN_INVALID_ARGUMENT:

			case KERN_INVALID_ADDRESS:

			case KERN_PROTECTION_FAILURE:
				/* save EAUTH for authentication in this */

		if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file:  Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
			for(i=0; i<map_cnt; i++) {
				printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
					, i, map_list[i].mapping_offset,
					map_list[i].file_offset,
					map_list[i].protection);

			local_flags |= SYSTEM_REGION_BACKED;

		if(!(error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int)))) {
			error = copyout(&local_base,
				CAST_USER_ADDR_T(base_address), sizeof (caddr_t));

lsf_bailout_free_vput:

	kmem_free(kernel_map, (vm_offset_t)filename_str,
				(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
				(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
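/*
 * Minimal sketch, not part of the original file, of the base-address fix-up
 * that the work-around comments above describe: when the caller's base is
 * not inside the shared client region and ALTERNATE_LOAD_SITE is requested,
 * keep only its offset within the text region and rebase it on
 * task_mapping_info.client_base.  "example_rebase" is hypothetical and the
 * field types are assumed from their use above.
 */
static caddr_t
example_rebase(caddr_t local_base, struct shared_region_task_mappings *info)
{
	unsigned int base = (unsigned int)local_base;

	if ((base & ~(info->text_size - 1)) != info->client_base) {
		base &= (info->text_size - 1);	/* offset within the text region */
		base |= info->client_base;	/* rebase onto the shared region */
	}
	return ((caddr_t)base);
}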
reset_shared_file(__unused struct proc *p, struct reset_shared_file_args *uap,
					__unused register int *retval)

	caddr_t			*base_address=uap->ba;
	int			map_cnt=uap->map_cnt;
	sf_mapping_t		*mappings=uap->mappings;
	sf_mapping_t		*map_list;
	vm_offset_t		map_address;

	AUDIT_ARG(addr, CAST_DOWN(user_addr_t, base_address));
	/* Retrieve the base address */
	if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
					!= GLOBAL_SHARED_TEXT_SEGMENT) {

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		if (kret != KERN_SUCCESS) {

		  copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
		kmem_free(kernel_map, (vm_offset_t)map_list,
				(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

	for (i = 0; i<map_cnt; i++) {
		if((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),

			vm_map(current_map(), &map_address,
				map_list[i].size, 0,
				SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
				shared_data_region_handle,
				((unsigned int)local_base
				   & SHARED_DATA_REGION_MASK) +
					(map_list[i].mapping_offset
					& SHARED_DATA_REGION_MASK),
				VM_PROT_READ, VM_INHERIT_SHARE);

	kmem_free(kernel_map, (vm_offset_t)map_list,
				(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
new_system_shared_regions(__unused struct proc *p,
			  __unused struct new_system_shared_regions_args *uap,
			  register int *retval)

	/* clear all of our existing defaults */
	remove_all_shared_regions();
clone_system_shared_regions(
	int		shared_regions_active,

	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
		&(old_info.text_region),
		&(old_info.text_size),
		&(old_info.data_region),
		&(old_info.data_size),
		&(old_info.region_mappings),
		&(old_info.client_base),
		&(old_info.alternate_base),
		&(old_info.alternate_next),
		&(old_info.fs_base),
		&(old_info.flags), &next);

	if ((shared_regions_active) ||
		(base_vnode == ENV_DEFAULT_ROOT)) {
	   if (shared_file_create_system_region(&new_shared_region))

		lookup_default_shared_region(
			base_vnode, old_info.system);
	   if(new_shared_region == NULL) {
		shared_file_boot_time_init(
			base_vnode, old_info.system);
		vm_get_shared_region(current_task(), &new_shared_region);

		vm_set_shared_region(current_task(), new_shared_region);

	   if(old_shared_region)
		shared_region_mapping_dealloc(old_shared_region);

	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
		&(new_info.text_region),
		&(new_info.text_size),
		&(new_info.data_region),
		&(new_info.data_size),
		&(new_info.region_mappings),
		&(new_info.client_base),
		&(new_info.alternate_base),
		&(new_info.alternate_next),
		&(new_info.fs_base),
		&(new_info.flags), &next);

	if(shared_regions_active) {
	   if(vm_region_clone(old_info.text_region, new_info.text_region)) {
	   panic("clone_system_shared_regions: shared region mis-alignment 1");
		shared_region_mapping_dealloc(new_shared_region);

	   if (vm_region_clone(old_info.data_region, new_info.data_region)) {
	   panic("clone_system_shared_regions: shared region mis-alignment 2");
		shared_region_mapping_dealloc(new_shared_region);

	   if (chain_regions) {
		   /*
		    * We want a "shadowed" clone, a private superset of the old
		    * shared region.  The info about the old mappings is still
		    */
		   shared_region_object_chain_attach(
			   new_shared_region, old_shared_region);

		   /*
		    * We want a completely detached clone with no link to
		    * the old shared region.  We'll be removing some mappings
		    * in our private, cloned, shared region, so the old mappings
		    * will become irrelevant to us.  Since we have a private
		    * "shared region" now, it isn't going to be shared with
		    * anyone else and we won't need to maintain mappings info.
		    */
		   shared_region_object_chain_detached(new_shared_region);

	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base+old_info.text_size)) {
	panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);

	if(vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,
			old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
	panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);

	vm_set_shared_region(current_task(), new_shared_region);

	/* consume the reference which wasn't accounted for in object */
	if (!shared_regions_active || !chain_regions)
		shared_region_mapping_dealloc(old_shared_region);
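/*
 * Illustrative sketch, not part of the original file: the two callers in
 * this file pick the clone_system_shared_regions() flags differently, as the
 * comments above explain.  The wrapper names are hypothetical, and the third
 * argument (the base vnode for the environment) is assumed here to be
 * ENV_DEFAULT_ROOT, matching its use elsewhere in this file.
 */
static int
example_private_clone(int shared_regions_active)
{
	/* detached clone: keep nothing chained to the system-wide region,
	 * as shared_region_make_private_np() does above */
	return clone_system_shared_regions(shared_regions_active,
					   FALSE,	/* chain_regions */
					   ENV_DEFAULT_ROOT);
}

static int
example_fresh_clone(void)
{
	/* "clean slate" clone chained to the old region, as used by
	 * load_shared_file() when NEW_LOCAL_SHARED_REGIONS is requested */
	return clone_system_shared_regions(FALSE,
					   TRUE,	/* chain_regions */
					   ENV_DEFAULT_ROOT);
}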
/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file     */

struct profile_names_header {
	unsigned int	number_of_profiles;
	unsigned int	user_id;
	unsigned int	version;
	off_t		element_array;
	unsigned int	spare1;
	unsigned int	spare2;
	unsigned int	spare3;

struct profile_element {
	unsigned int	mod_date;

struct global_profile {
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	buf_ptr;

struct global_profile_cache {
	struct global_profile	profiles[3];

/* forward declarations */
int bsd_open_page_cache_files(unsigned int user,
			      struct global_profile **profile);
void bsd_close_page_cache_files(struct global_profile *profile);
int bsd_search_page_cache_data_base(
	struct profile_names_header	*database,
	unsigned int			mod_date,
	unsigned int			*profile_size);

struct global_profile_cache global_user_profile_cache =
	{3, 0, {{NULL, NULL, 0, 0, 0, 0},
		{NULL, NULL, 0, 0, 0, 0},
		{NULL, NULL, 0, 0, 0, 0}} };
/* BSD_OPEN_PAGE_CACHE_FILES:                                  */
/* Caller provides a user id.  This id was used in            */
/* prepare_profile_database to create two unique absolute     */
/* file paths to the associated profile files.  These files   */
/* are either opened or bsd_open_page_cache_files returns an  */
/* error.  The header of the names file is then consulted.    */
/* The header and the vnodes for the names and data files are */
1469 bsd_open_page_cache_files( 
1471         struct global_profile 
**profile
) 
1473         const char *cache_path 
= "/var/vm/app_profile/"; 
1481         struct  vnode   
*names_vp
; 
1482         struct  vnode   
*data_vp
; 
1483         vm_offset_t     names_buf
; 
1484         vm_offset_t     buf_ptr
; 
1486         int             profile_names_length
; 
1487         int             profile_data_length
; 
1488         char            *profile_data_string
; 
1489         char            *profile_names_string
; 
1493         struct vfs_context  context
; 
1497         struct nameidata nd_names
; 
1498         struct nameidata nd_data
; 
1504         context
.vc_proc 
= p
; 
1505         context
.vc_ucred 
= kauth_cred_get(); 
1508         for(i 
= 0; i
<global_user_profile_cache
.max_ele
; i
++) { 
1509                 if((global_user_profile_cache
.profiles
[i
].user 
== user
)  
1510                         &&  (global_user_profile_cache
.profiles
[i
].data_vp 
 
1512                         *profile 
= &global_user_profile_cache
.profiles
[i
]; 
1513                         /* already in cache, we're done */ 
1514                         if ((*profile
)->busy
) { 
1516                                 * drop funnel and wait  
1518                                 (void)tsleep((void *) 
1520                                         PRIBIO
, "app_profile", 0); 
1523                         (*profile
)->busy 
= 1; 
1524                         (*profile
)->age 
= global_user_profile_cache
.age
; 
1527                          * entries in cache are held with a valid 
1528                          * usecount... take an iocount which will 
1529                          * be dropped in "bsd_close_page_cache_files" 
1530                          * which is called after the read or writes to 
1531                          * these files are done 
1533                         if ( (vnode_getwithref((*profile
)->data_vp
)) ) { 
1535                                 vnode_rele((*profile
)->data_vp
); 
1536                                 vnode_rele((*profile
)->names_vp
); 
1538                                 (*profile
)->data_vp 
= NULL
; 
1539                                 (*profile
)->busy 
= 0; 
1544                         if ( (vnode_getwithref((*profile
)->names_vp
)) ) { 
1546                                 vnode_put((*profile
)->data_vp
); 
1547                                 vnode_rele((*profile
)->data_vp
); 
1548                                 vnode_rele((*profile
)->names_vp
); 
1550                                 (*profile
)->data_vp 
= NULL
; 
1551                                 (*profile
)->busy 
= 0; 
1556                         global_user_profile_cache
.age
+=1; 
1561         lru 
= global_user_profile_cache
.age
; 
1563         for(i 
= 0; i
<global_user_profile_cache
.max_ele
; i
++) { 
1564                 /* Skip entry if it is in the process of being reused */ 
1565                 if(global_user_profile_cache
.profiles
[i
].data_vp 
== 
1566                                                 (struct vnode 
*)0xFFFFFFFF) 
1568                 /* Otherwise grab the first empty entry */ 
1569                 if(global_user_profile_cache
.profiles
[i
].data_vp 
== NULL
) { 
1570                         *profile 
= &global_user_profile_cache
.profiles
[i
]; 
1571                         (*profile
)->age 
= global_user_profile_cache
.age
; 
1574                 /* Otherwise grab the oldest entry */ 
1575                 if(global_user_profile_cache
.profiles
[i
].age 
< lru
) { 
1576                         lru 
= global_user_profile_cache
.profiles
[i
].age
; 
1577                         *profile 
= &global_user_profile_cache
.profiles
[i
]; 
1581         /* Did we set it? */ 
1582         if (*profile 
== NULL
) { 
1584                  * No entries are available; this can only happen if all 
1585                  * of them are currently in the process of being reused; 
1586                  * if this happens, we sleep on the address of the first 
1587                  * element, and restart.  This is less than ideal, but we 
1588                  * know it will work because we know that there will be a 
1589                  * wakeup on any entry currently in the process of being 
1592                  * XXX Reccomend a two handed clock and more than 3 total 
1593                  * XXX cache entries at some point in the future. 
1596                 * drop funnel and wait  
1598                 (void)tsleep((void *) 
1599                  &global_user_profile_cache
.profiles
[0], 
1600                         PRIBIO
, "app_profile", 0); 
1605          * If it's currently busy, we've picked the one at the end of the 
1606          * LRU list, but it's currently being actively used.  We sleep on 
1607          * its address and restart. 
1609         if ((*profile
)->busy
) { 
1611                 * drop funnel and wait  
1613                 (void)tsleep((void *) 
1615                         PRIBIO
, "app_profile", 0); 
1618         (*profile
)->busy 
= 1; 
1619         (*profile
)->user 
= user
; 
1622          * put dummy value in for now to get competing request to wait 
1623          * above until we are finished 
1625          * Save the data_vp before setting it, so we can set it before 
1626          * we kmem_free() or vrele().  If we don't do this, then we 
1627          * have a potential funnel race condition we have to deal with. 
1629         data_vp 
= (*profile
)->data_vp
; 
1630         (*profile
)->data_vp 
= (struct vnode 
*)0xFFFFFFFF; 
1633          * Age the cache here in all cases; this guarantees that we won't 
1634          * be reusing only one entry over and over, once the system reaches 
1637         global_user_profile_cache
.age
+=1; 
1639         if(data_vp 
!= NULL
) { 
1640                 kmem_free(kernel_map
,  
1641                                 (*profile
)->buf_ptr
, 4 * PAGE_SIZE
); 
1642                 if ((*profile
)->names_vp
) { 
1643                         vnode_rele((*profile
)->names_vp
); 
1644                         (*profile
)->names_vp 
= NULL
; 
1646                 vnode_rele(data_vp
); 
1649         /* Try to open the appropriate users profile files */ 
1650         /* If neither file is present, try to create them  */ 
1651         /* If one file is present and the other not, fail. */ 
1652         /* If the files do exist, check them for the app_file */ 
1653         /* requested and read it in if present */ 
1655         ret 
= kmem_alloc(kernel_map
, 
1656                 (vm_offset_t 
*)&profile_data_string
, PATH_MAX
); 
1659                 (*profile
)->data_vp 
= NULL
; 
1660                 (*profile
)->busy 
= 0; 
1665         /* Split the buffer in half since we know the size of */ 
1666         /* our file path and our allocation is adequate for   */ 
1667         /* both file path names */ 
1668         profile_names_string 
= profile_data_string 
+ (PATH_MAX
/2); 
1671         strcpy(profile_data_string
, cache_path
); 
1672         strcpy(profile_names_string
, cache_path
); 
1673         profile_names_length 
= profile_data_length 
 
1674                         = strlen(profile_data_string
); 
1675         substring 
= profile_data_string 
+ profile_data_length
; 
1676         sprintf(substring
, "%x_data", user
); 
1677         substring 
= profile_names_string 
+ profile_names_length
; 
1678         sprintf(substring
, "%x_names", user
); 
1680         /* We now have the absolute file names */ 
1682         ret 
= kmem_alloc(kernel_map
, 
1683                         (vm_offset_t 
*)&names_buf
, 4 * PAGE_SIZE
); 
1685                 kmem_free(kernel_map
,  
1686                                 (vm_offset_t
)profile_data_string
, PATH_MAX
); 
1687                 (*profile
)->data_vp 
= NULL
; 
1688                 (*profile
)->busy 
= 0; 
1693         NDINIT(&nd_names
, LOOKUP
, FOLLOW 
| LOCKLEAF
,  
1694                         UIO_SYSSPACE32
, CAST_USER_ADDR_T(profile_names_string
), &context
); 
1695         NDINIT(&nd_data
, LOOKUP
, FOLLOW 
| LOCKLEAF
,  
1696                         UIO_SYSSPACE32
, CAST_USER_ADDR_T(profile_data_string
), &context
); 
1698         if ( (error 
= vn_open(&nd_data
, FREAD 
| FWRITE
, 0)) ) { 
1700                 printf("bsd_open_page_cache_files: CacheData file not found %s\n", 
1701                         profile_data_string
); 
1703                 kmem_free(kernel_map
,  
1704                                 (vm_offset_t
)names_buf
, 4 * PAGE_SIZE
); 
1705                 kmem_free(kernel_map
,  
1706                         (vm_offset_t
)profile_data_string
, PATH_MAX
); 
1707                 (*profile
)->data_vp 
= NULL
; 
1708                 (*profile
)->busy 
= 0; 
1712         data_vp 
= nd_data
.ni_vp
; 
	if ((error = vn_open(&nd_names, FREAD | FWRITE, 0))) {
		printf("bsd_open_page_cache_files: NamesData file not found %s\n",
			profile_names_string);
		kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

		vnode_rele(data_vp);
		vnode_put(data_vp);

		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}

	names_vp = nd_names.ni_vp;
	if ((error = vnode_size(names_vp, &file_size, &context)) != 0) {
		printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);

		vnode_rele(names_vp);
		vnode_put(names_vp);
		vnode_rele(data_vp);
		vnode_put(data_vp);

		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}
	size = file_size;
	if (size > 4 * PAGE_SIZE)
		size = 4 * PAGE_SIZE;
	buf_ptr = names_buf;
	resid_off = 0;

	while (size) {
		error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
			size, resid_off,
			UIO_SYSSPACE32, IO_NODELOCKED, kauth_cred_get(), &resid, p);
		if ((error) || (size == resid)) {
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string, PATH_MAX);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);

			vnode_rele(names_vp);
			vnode_put(names_vp);
			vnode_rele(data_vp);
			vnode_put(data_vp);

			(*profile)->data_vp = NULL;
			(*profile)->busy = 0;
			wakeup(*profile);
			return EINVAL;
		}
		buf_ptr += size - resid;
		resid_off += size - resid;
		size = resid;
	}

	kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);

	(*profile)->names_vp = names_vp;
	(*profile)->data_vp = data_vp;
	(*profile)->buf_ptr = names_buf;
	/*
	 * At this point both the names_vp and the data_vp have
	 * a valid usecount and an iocount held.
	 */
	return 0;
}


static void
bsd_close_page_cache_files(
	struct global_profile *profile)
{
	vnode_put(profile->data_vp);
	vnode_put(profile->names_vp);
}
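/*
 * bsd_read_page_cache_file: look up app_name (keyed by its inode number and
 * modification time) in the per-uid names file and, if a matching profile
 * entry exists, allocate a kernel buffer and read the profile bytes out of
 * the per-uid data file into it.  On success *buffer/*bufsize describe a
 * kmem_alloc'd region that the caller is expected to kmem_free.
 */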
int
bsd_read_page_cache_file(
	unsigned int	user,
	int		*fid,
	int		*mod,
	char		*app_name,
	struct vnode	*app_vp,
	vm_offset_t	*buffer,
	vm_offset_t	*bufsize)
{

	boolean_t	funnel_state;

	struct proc	*p;
	int		error;
	int		resid;

	off_t		profile;
	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vnode_attr	va;
	struct vfs_context	context;

	kern_return_t	ret;

	struct	vnode	*names_vp;
	struct	vnode	*data_vp;

	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them  */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	error = bsd_open_page_cache_files(user, &uid_files);
	if (error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	VATTR_INIT(&va);
	VATTR_WANTED(&va, va_fileid);
	VATTR_WANTED(&va, va_modify_time);

	if ((error = vnode_getattr(app_vp, &va, &context))) {
		printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	*fid = (u_long)va.va_fileid;
	*mod = va.va_modify_time.tv_sec;

	if (bsd_search_page_cache_data_base(
		    names_vp,
		    (struct profile_names_header *)names_buf,
		    app_name,
		    (unsigned int) va.va_modify_time.tv_sec,
		    (u_long)va.va_fileid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if (profile_size == 0) {
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return EINVAL;
		}
		ret = kmem_alloc(kernel_map, buffer, profile_size);
		if (ret) {
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return ENOMEM;
		}
		*bufsize = profile_size;
		while (profile_size) {
			error = vn_rdwr(UIO_READ, data_vp,
				(caddr_t) *buffer, profile_size,
				profile, UIO_SYSSPACE32, IO_NODELOCKED,
				kauth_cred_get(), &resid, p);
			if ((error) || (profile_size == resid)) {
				bsd_close_page_cache_files(uid_files);
				kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
				thread_funnel_set(kernel_flock, funnel_state);
				return EINVAL;
			}
			profile += profile_size - resid;
			profile_size = resid;
		}
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}
}
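/*
 * bsd_search_page_cache_data_base: scan the per-uid names file for an entry
 * matching (app_name, mod_date, inode).  The names file begins with a
 * struct profile_names_header followed immediately by an array of
 * struct profile_element records; on a match the element's addr and size
 * (an offset/length pair into the per-uid data file) are returned through
 * *profile and *profile_size.  Only the first four pages of the names file
 * are resident in the names buffer; any further elements are paged in from
 * vp below.
 */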
int
bsd_search_page_cache_data_base(
	struct	vnode			*vp,
	struct profile_names_header	*database,
	char				*app_name,
	unsigned int			mod_date,
	unsigned int			inode,
	off_t				*profile,
	unsigned int			*profile_size)
{
	struct proc		*p;

	unsigned int		i;
	struct profile_element	*element;
	unsigned int		ele_total;
	unsigned int		extended_list = 0;
	off_t			file_off = 0;
	unsigned int		size;
	off_t			resid_off;
	int			resid;
	vm_offset_t		local_buf = 0;

	int			error;
	kern_return_t		ret;

	p = current_proc();

	if (((vm_offset_t)database->element_array) !=
				sizeof(struct profile_names_header)) {
		return EINVAL;
	}
	element = (struct profile_element *)(
			(vm_offset_t)database->element_array +
						(vm_offset_t)database);

	ele_total = database->number_of_profiles;

	*profile = 0;
	*profile_size = 0;
	while (ele_total) {
		/* note: code assumes header + n*ele comes out on a page boundary */
		if (((local_buf == 0) && (sizeof(struct profile_names_header) +
			(ele_total * sizeof(struct profile_element)))
					> (PAGE_SIZE * 4)) ||
			((local_buf != 0) &&
				(ele_total * sizeof(struct profile_element))
					 > (PAGE_SIZE * 4))) {
			extended_list = ele_total;
			if (element == (struct profile_element *)
				((vm_offset_t)database->element_array +
						(vm_offset_t)database)) {
				ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
			} else {
				ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
			}
			extended_list -= ele_total;
		}
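		/*
		 * If the element array does not fit in the resident 4-page
		 * buffer (or, on later passes, in the 4-page local_buf), only
		 * as many elements as fit are scanned now; the remainder is
		 * remembered in extended_list and the tail of the loop reads
		 * the next 4-page chunk of elements from the names file into
		 * local_buf before continuing the scan.
		 */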
		for (i = 0; i < ele_total; i++) {
			if ((mod_date == element[i].mod_date)
					&& (inode == element[i].inode)) {
				if (strncmp(element[i].name, app_name, 12) == 0) {
					*profile = element[i].addr;
					*profile_size = element[i].size;
					if (local_buf != 0) {
						kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
					}
					return 0;
				}
			}
		}
		if (extended_list == 0)
			break;
		if (local_buf == 0) {
			ret = kmem_alloc(kernel_map, &local_buf, 4 * PAGE_SIZE);
			if (ret != KERN_SUCCESS) {
				return ENOMEM;
			}
		}
		element = (struct profile_element *)local_buf;
		ele_total = extended_list;
		extended_list = 0;
		file_off += 4 * PAGE_SIZE;
		if ((ele_total * sizeof(struct profile_element)) >
							(PAGE_SIZE * 4)) {
			size = PAGE_SIZE * 4;
		} else {
			size = ele_total * sizeof(struct profile_element);
		}
		resid_off = 0;
		while (size) {
			error = vn_rdwr(UIO_READ, vp,
				CAST_DOWN(caddr_t, (local_buf + resid_off)),
				size, file_off + resid_off, UIO_SYSSPACE32,
				IO_NODELOCKED, kauth_cred_get(), &resid, p);
			if ((error) || (size == resid)) {
				if (local_buf != 0) {
					kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
				}
				return EINVAL;
			}
			resid_off += size - resid;
			size = resid;
		}
	}
	if (local_buf != 0) {
		kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
	}
	return 0;
}
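/*
 * bsd_write_page_cache_file: record a newly collected profile for file_name
 * in the per-uid database.  If the search finds an existing entry for the
 * same (name, mod date, inode), the write is skipped ("twin profile").
 * Otherwise the new element is written first (into the resident names buffer
 * when it falls within the first four pages, or directly to the names file
 * at its offset otherwise), the header/element area is then written back to
 * the names file, and finally the profile payload is appended to the data
 * file at its previous end-of-file.
 */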
int
bsd_write_page_cache_file(
	unsigned int	user,
	char		*file_name,
	caddr_t		buffer,
	vm_size_t	size,
	int		mod,
	int		fid)
{
	struct proc		*p;
	int			resid;
	off_t			resid_off;
	int			error;

	boolean_t		funnel_state;

	struct vfs_context	context;

	off_t			file_size;
	off_t			profile;
	unsigned int		profile_size;

	vm_offset_t		names_buf;
	struct	vnode		*names_vp;
	struct	vnode		*data_vp;
	struct	profile_names_header *profile_header;
	off_t			name_offset;

	struct global_profile	*uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	error = bsd_open_page_cache_files(user, &uid_files);
	if (error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/* Stat data file for size */

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	if ((error = vnode_size(data_vp, &file_size, &context)) != 0) {
		printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}
	if (bsd_search_page_cache_data_base(names_vp,
			(struct profile_names_header *)names_buf,
			file_name, (unsigned int) mod,
			fid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if (profile_size == 0) {
			unsigned int	header_size;
			vm_offset_t	buf_ptr;

			/* Our Write case */

			/* read header for last entry */
			profile_header =
				(struct profile_names_header *)names_buf;
			name_offset = sizeof(struct profile_names_header) +
				(sizeof(struct profile_element)
					* profile_header->number_of_profiles);
			profile_header->number_of_profiles += 1;

			if (name_offset < PAGE_SIZE * 4) {
				struct profile_element	*name;
				/* write new entry */
				name = (struct profile_element *)
					(names_buf + (vm_offset_t)name_offset);
				name->addr = file_size;
				name->size = size;
				name->mod_date = mod;
				name->inode = fid;
				strncpy(name->name, file_name, 12);
			} else {
				unsigned int	ele_size;
				struct profile_element	name;
				/* write new entry */
				name.addr = file_size;
				name.size = size;
				name.mod_date = mod;
				name.inode = fid;
				strncpy(name.name, file_name, 12);
				/* write element out separately */
				ele_size = sizeof(struct profile_element);
				buf_ptr = (vm_offset_t)&name;
				resid_off = name_offset;

				while (ele_size) {
					error = vn_rdwr(UIO_WRITE, names_vp,
						(caddr_t)buf_ptr,
						ele_size, resid_off,
						UIO_SYSSPACE32, IO_NODELOCKED,
						kauth_cred_get(), &resid, p);
					if (error) {
						printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
						bsd_close_page_cache_files(
							uid_files);
						thread_funnel_set(
							kernel_flock,
							funnel_state);
						return error;
					}
					buf_ptr += (vm_offset_t)
							ele_size - resid;
					resid_off += ele_size - resid;
					ele_size = resid;
				}
			}
			if (name_offset < PAGE_SIZE * 4) {
				header_size = name_offset +
					sizeof(struct profile_element);
			} else {
				header_size =
					sizeof(struct profile_names_header);
			}
			buf_ptr = (vm_offset_t)profile_header;
			resid_off = 0;

			/* write names file header */
			while (header_size) {
				error = vn_rdwr(UIO_WRITE, names_vp,
					(caddr_t)buf_ptr,
					header_size, resid_off,
					UIO_SYSSPACE32, IO_NODELOCKED,
					kauth_cred_get(), &resid, p);
				if (error) {
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buf_ptr += (vm_offset_t)header_size - resid;
				resid_off += header_size - resid;
				header_size = resid;
			}

			/* write profile to data file */
			resid_off = file_size;
			while (size) {
				error = vn_rdwr(UIO_WRITE, data_vp,
					(caddr_t)buffer, size, resid_off,
					UIO_SYSSPACE32, IO_NODELOCKED,
					kauth_cred_get(), &resid, p);
				if (error) {
					printf("bsd_write_page_cache_file: Can't write profile data %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buffer += size - resid;
				resid_off += size - resid;
				size = resid;
			}
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return 0;
		}
		/* Someone else wrote a twin profile before us */
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}
}
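/*
 * prepare_profile_database: create the per-uid <uid>_data and <uid>_names
 * files under cache_path ("/var/vm/app_profile/") with O_CREAT | O_EXCL, so
 * an existing database is left untouched; write an initial, empty
 * profile_names_header into the new names file; and set the owner of both
 * files to the given uid.
 */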
int
prepare_profile_database(int user)
{
	const char *cache_path = "/var/vm/app_profile/";

	struct proc	*p;
	int		error;
	int		resid;
	off_t		resid_off;
	unsigned int	size;

	struct	vnode	*names_vp;
	struct	vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	char		*substring;

	struct vnode_attr va;
	struct vfs_context context;

	struct	profile_names_header *profile_header;
	kern_return_t	ret;

	struct nameidata nd_names;
	struct nameidata nd_data;

	p = current_proc();

	context.vc_proc = p;
	context.vc_ucred = kauth_cred_get();

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);
	if (ret) {
		return ENOMEM;
	}

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
			= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);

	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
			(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
	if (ret) {
		kmem_free(kernel_map,
				(vm_offset_t)profile_data_string, PATH_MAX);
		return ENOMEM;
	}

	NDINIT(&nd_names, LOOKUP, FOLLOW,
			UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
	NDINIT(&nd_data, LOOKUP, FOLLOW,
			UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);

	if ((error = vn_open(&nd_data,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR))) {
		kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		return 0;
	}
	data_vp = nd_data.ni_vp;

	if ((error = vn_open(&nd_names,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR))) {
		printf("prepare_profile_database: Can't create CacheNames %s\n",
			profile_names_string);
		kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);

		vnode_rele(data_vp);
		vnode_put(data_vp);
		return error;
	}
	names_vp = nd_names.ni_vp;
	/* Write Header for new names file */

	profile_header = (struct profile_names_header *)names_buf;

	profile_header->number_of_profiles = 0;
	profile_header->user_id = user;
	profile_header->version = 1;
	profile_header->element_array =
				sizeof(struct profile_names_header);
	profile_header->spare1 = 0;
	profile_header->spare2 = 0;
	profile_header->spare3 = 0;

	size = sizeof(struct profile_names_header);
	buf_ptr = (vm_offset_t)profile_header;
	resid_off = 0;

	while (size) {
		error = vn_rdwr(UIO_WRITE, names_vp,
				(caddr_t)buf_ptr, size, resid_off,
				UIO_SYSSPACE32, IO_NODELOCKED,
				kauth_cred_get(), &resid, p);
		if (error) {
			printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string,
				PATH_MAX);

			vnode_rele(names_vp);
			vnode_put(names_vp);
			vnode_rele(data_vp);
			vnode_put(data_vp);
			return error;
		}
		buf_ptr += size - resid;
		resid_off += size - resid;
		size = resid;
	}

	VATTR_INIT(&va);
	VATTR_SET(&va, va_uid, user);

	error = vnode_setattr(names_vp, &va, &context);
	if (error) {
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_names_string);
	}
	vnode_rele(names_vp);
	vnode_put(names_vp);

	VATTR_INIT(&va);
	VATTR_SET(&va, va_uid, user);
	error = vnode_setattr(data_vp, &va, &context);
	if (error) {
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_data_string);
	}
	vnode_rele(data_vp);
	vnode_put(data_vp);

	kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
	kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
	return 0;
}