/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>
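/*
 * useracc() reports whether the user address range [addr, addr+len)
 * in the caller's address map can be accessed with the requested
 * protection: read access when prot is B_READ, write access otherwise.
 */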
extern zone_t lsf_zone;

useracc(addr, len, prot)

        return (vm_map_check_protection(
                        trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)),
                        prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
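/*
 * The wiring code below (presumably the body of vslock(), the counterpart
 * of vsunlock() further down) pins the page-aligned user range into
 * physical memory with vm_map_wire(); the case labels that follow handle
 * the error returns from the wire attempt.
 */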
        kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr),
                        round_page_32((unsigned int)(addr+len)),
                        VM_PROT_READ | VM_PROT_WRITE, FALSE);

        case KERN_INVALID_ADDRESS:

        case KERN_PROTECTION_FAILURE:
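/*
 * vsunlock() undoes the wiring: if the pages were dirtied, the loop below
 * walks the range, translates each virtual page through the task's pmap,
 * and marks the backing vm_page modified; the range is then unwired with
 * vm_map_unwire().
 */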
vsunlock(addr, len, dirtied)

        vm_offset_t     vaddr, paddr;

                pmap = get_task_pmap(current_task());
                for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len));
                                vaddr += PAGE_SIZE) {
                        paddr = pmap_extract(pmap, vaddr);
                        pg = PHYS_TO_VM_PAGE(paddr);
                        vm_page_set_modified(pg);

        kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)),
                                round_page_32((unsigned int)(addr+len)), FALSE);

        case KERN_INVALID_ADDRESS:

        case KERN_PROTECTION_FAILURE:
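/*
 * subyte()/suibyte() and suword()/suiword() store a byte or word into user
 * space with copyout(), returning 0 on success and -1 if the copy fails;
 * fubyte()/fuibyte() and fuword()/fuiword() fetch one with copyin().
 */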
#if     defined(sun) || BALANCE || defined(m88k)
#else   /*defined(sun) || BALANCE || defined(m88k)*/

        character = (char)byte;
        return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);

        character = (char)byte;
        return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1);

        if (copyin(addr, (void *) &byte, sizeof(char)))

        if (copyin(addr, (void *) &(byte), sizeof(char)))

        return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

        if (copyin(addr, (void *) &word, sizeof(int)))

/* suiword and fuiword are the same as suword and fuword, respectively */

        return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);

        if (copyin(addr, (void *) &word, sizeof(int)))

#endif  /* defined(sun) || BALANCE || defined(m88k) || defined(i386) */
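/*
 * pid_for_task(): under the kernel funnel, convert the given port name to
 * a task, look up its BSD proc with get_bsdtask_info(), and copy the
 * resulting pid out to the caller-supplied user address.
 */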
        extern task_t port_name_to_task(mach_port_t t);

        kern_return_t   err = KERN_SUCCESS;
        boolean_t funnel_state;

        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        t1 = port_name_to_task(t);

        if (t1 == TASK_NULL) {

                p = get_bsdtask_info(t1);

        (void) copyout((char *) &pid, (char *) x, sizeof(*x));
        thread_funnel_set(kernel_flock, funnel_state);
/*
 *      Routine:        task_for_pid
 *
 *              Get the task port for another "process", named by its
 *              process ID on the same host as "target_task".
 *
 *              Only permitted to privileged processes, or processes
 *              with the same user ID.
 */
task_for_pid(target_tport, pid, t)
        mach_port_t     target_tport;

        extern task_t port_name_to_task(mach_port_t tp);

        boolean_t funnel_state;

        t1 = port_name_to_task(target_tport);
        if (t1 == TASK_NULL) {
                (void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
                return(KERN_FAILURE);

        funnel_state = thread_funnel_set(kernel_flock, TRUE);

        p1 = get_bsdtask_info(t1);

                ((p = pfind(pid)) != (struct proc *) 0)
                && (p1 != (struct proc *) 0)
                && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
                        ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
                || !(suser(p1->p_ucred, &p1->p_acflag)))
                && (p->p_stat != SZOMB)

                        if (p->task != TASK_NULL) {
                                if (!task_reference_try(p->task)) {
                                        mutex_pause(); /* temp loss of funnel */

                                sright = (void *)convert_task_to_port(p->task);

                                        ipc_port_copyout_send(sright,
                                           get_task_ipcspace(current_task()));

                                tret = MACH_PORT_NULL;
                        (void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));

                        error = KERN_SUCCESS;

        tret = MACH_PORT_NULL;
        (void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
        error = KERN_FAILURE;

        thread_funnel_set(kernel_flock, funnel_state);
struct load_shared_file_args {
                sf_mapping_t    *mappings;

        struct load_shared_file_args *uap,

        caddr_t         mapped_file_addr=uap->mfa;
        u_long          mapped_file_size=uap->mfs;
        caddr_t         *base_address=uap->ba;
        int             map_cnt=uap->map_cnt;
        sf_mapping_t    *mappings=uap->mappings;
        char            *filename=uap->filename;
        int             *flags=uap->flags;
        struct vnode            *vp = 0;
        struct nameidata        nd, *ndp;

        memory_object_control_t file_control;
        sf_mapping_t    *map_list;

        int             default_regions = 0;

        shared_region_mapping_t shared_region;
        struct shared_region_task_mappings      task_mapping_info;
        shared_region_mapping_t next;
        /* Retrieve the base address */
        if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {

        if (error = copyin(flags, &local_flags, sizeof (int))) {

        if(local_flags & QUERY_IS_SYSTEM_REGION) {
                        shared_region_mapping_t default_shared_region;
                        vm_get_shared_region(current_task(), &shared_region);
                        task_mapping_info.self = (vm_offset_t)shared_region;

                        shared_region_mapping_info(shared_region,
                                        &(task_mapping_info.text_region),
                                        &(task_mapping_info.text_size),
                                        &(task_mapping_info.data_region),
                                        &(task_mapping_info.data_size),
                                        &(task_mapping_info.region_mappings),
                                        &(task_mapping_info.client_base),
                                        &(task_mapping_info.alternate_base),
                                        &(task_mapping_info.alternate_next),
                                        &(task_mapping_info.fs_base),
                                        &(task_mapping_info.system),
                                        &(task_mapping_info.flags), &next);

                        default_shared_region =
                                lookup_default_shared_region(
                                        task_mapping_info.system);
                        if (shared_region == default_shared_region) {
                                local_flags = SYSTEM_REGION_BACKED;

                        shared_region_mapping_dealloc(default_shared_region);

                        error = copyout(&local_flags, flags, sizeof (int));

        caller_flags = local_flags;
        kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
                        (vm_size_t)(MAXPATHLEN));
                if (kret != KERN_SUCCESS) {

        kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
                        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
                if (kret != KERN_SUCCESS) {
                        kmem_free(kernel_map, (vm_offset_t)filename_str,
                                (vm_size_t)(MAXPATHLEN));

                copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
                goto lsf_bailout_free;

        if (error = copyinstr(filename,
                        filename_str, MAXPATHLEN, (size_t *)&dummy)) {
                goto lsf_bailout_free;
        /*
         * Get a vnode for the target file
         */
        NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,

        if ((error = namei(ndp))) {
                goto lsf_bailout_free;

        if (vp->v_type != VREG) {
                goto lsf_bailout_free_vput;

        UBCINFOCHECK("load_shared_file", vp);

        if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
                goto lsf_bailout_free_vput;

        file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
        if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
                goto lsf_bailout_free_vput;

        if(vattr.va_size != mapped_file_size) {
                goto lsf_bailout_free_vput;

        if(p->p_flag & P_NOSHLIB) {
                p->p_flag = p->p_flag & ~P_NOSHLIB;
        /* load alternate regions if the caller has requested.  */
        /* Note: the new regions are "clean slates" */
        if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
                error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
                        goto lsf_bailout_free_vput;

        vm_get_shared_region(current_task(), &shared_region);
        task_mapping_info.self = (vm_offset_t)shared_region;

        shared_region_mapping_info(shared_region,
                        &(task_mapping_info.text_region),
                        &(task_mapping_info.text_size),
                        &(task_mapping_info.data_region),
                        &(task_mapping_info.data_size),
                        &(task_mapping_info.region_mappings),
                        &(task_mapping_info.client_base),
                        &(task_mapping_info.alternate_base),
                        &(task_mapping_info.alternate_next),
                        &(task_mapping_info.fs_base),
                        &(task_mapping_info.system),
                        &(task_mapping_info.flags), &next);

                shared_region_mapping_t default_shared_region;
                default_shared_region =
                        lookup_default_shared_region(
                                task_mapping_info.system);
                if(shared_region == default_shared_region) {

                shared_region_mapping_dealloc(default_shared_region);

        /* If we are running on a removable file system we must not */
        /* be in a set of shared regions or the file system will not */
        if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
                && (lsf_mapping_pool_gauge() < 75)) {
                                /* We don't want to run out of shared memory */
                                /* map entries by starting too many private versions */
                                /* of the shared library structures */
                if(p->p_flag & P_NOSHLIB) {
                                error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
                                error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);
                        goto lsf_bailout_free_vput;

                local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
                vm_get_shared_region(current_task(), &shared_region);
                shared_region_mapping_info(shared_region,
                        &(task_mapping_info.text_region),
                        &(task_mapping_info.text_size),
                        &(task_mapping_info.data_region),
                        &(task_mapping_info.data_size),
                        &(task_mapping_info.region_mappings),
                        &(task_mapping_info.client_base),
                        &(task_mapping_info.alternate_base),
                        &(task_mapping_info.alternate_next),
                        &(task_mapping_info.fs_base),
                        &(task_mapping_info.system),
                        &(task_mapping_info.flags), &next);
        /*  This is a work-around to allow executables which have been */
        /*  built without knowledge of the proper shared segment to    */
        /*  load.  This code has been architected as a shared region   */
        /*  handler, the knowledge of where the regions are loaded is  */
        /*  problematic for the extension of shared regions as it will */
        /*  not be easy to know what region an item should go into.    */
        /*  The code below however will get around a short term problem */
        /*  with executables which believe they are loading at zero.   */

                if (((unsigned int)local_base &
                        (~(task_mapping_info.text_size - 1))) !=
                        task_mapping_info.client_base) {
                        if(local_flags & ALTERNATE_LOAD_SITE) {
                                local_base = (caddr_t)(
                                        (unsigned int)local_base &
                                           (task_mapping_info.text_size - 1));
                                local_base = (caddr_t)((unsigned int)local_base
                                           | task_mapping_info.client_base);

                                goto lsf_bailout_free_vput;
        if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
                        (vm_offset_t *)&local_base,
                        map_cnt, map_list, file_control,
                        &task_mapping_info, &local_flags))) {

                        case KERN_INVALID_ARGUMENT:

                        case KERN_INVALID_ADDRESS:

                        case KERN_PROTECTION_FAILURE:
                                /* save EAUTH for authentication in this */

                if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
                        printf("load_shared_file:  Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
                        for(i=0; i<map_cnt; i++) {
                                printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
                                        , i, map_list[i].mapping_offset,
                                        map_list[i].file_offset,
                                        map_list[i].protection);

                        local_flags |= SYSTEM_REGION_BACKED;
                if(!(error = copyout(&local_flags, flags, sizeof (int)))) {
                        error = copyout(&local_base,
                                base_address, sizeof (caddr_t));
lsf_bailout_free_vput:

        kmem_free(kernel_map, (vm_offset_t)filename_str,
                                (vm_size_t)(MAXPATHLEN));
        kmem_free(kernel_map, (vm_offset_t)map_list,
                                (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
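/*
 * reset_shared_file(): for each supplied mapping whose offset selects the
 * shared data segment, discard the task's private copy with
 * vm_deallocate() and re-map the corresponding range of
 * shared_data_region_handle read-only with VM_INHERIT_SHARE, so the range
 * is backed by the shared data region again.
 */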
struct reset_shared_file_args {
                sf_mapping_t    *mappings;

        struct reset_shared_file_args *uap,

        caddr_t         *base_address=uap->ba;
        int             map_cnt=uap->map_cnt;
        sf_mapping_t    *mappings=uap->mappings;

        sf_mapping_t    *map_list;

        vm_offset_t     map_address;

        /* Retrieve the base address */
        if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {

        if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
                                        != GLOBAL_SHARED_TEXT_SEGMENT) {

        kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
                        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
                if (kret != KERN_SUCCESS) {

                copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

                kmem_free(kernel_map, (vm_offset_t)map_list,
                                (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
        for (i = 0; i<map_cnt; i++) {
                if((map_list[i].mapping_offset
                                & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
                        map_address = (vm_offset_t)
                                (local_base + map_list[i].mapping_offset);
                        vm_deallocate(current_map(),

                        vm_map(current_map(), &map_address,
                                map_list[i].size, 0, SHARED_LIB_ALIAS,
                                shared_data_region_handle,
                                ((unsigned int)local_base
                                   & SHARED_DATA_REGION_MASK) +
                                        (map_list[i].mapping_offset
                                        & SHARED_DATA_REGION_MASK),
                                VM_PROT_READ, VM_INHERIT_SHARE);

        kmem_free(kernel_map, (vm_offset_t)map_list,
                                (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
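/*
 * new_system_shared_regions(): clear the current default shared regions
 * with remove_all_shared_regions() so that later tasks pick up a fresh
 * set.
 */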
struct new_system_shared_regions_args {

new_system_shared_regions(
        struct new_system_shared_regions_args *uap,

        shared_region_mapping_t regions;
        shared_region_mapping_t new_regions;

        /* clear all of our existing defaults */
        remove_all_shared_regions();
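/*
 * clone_system_shared_regions(): obtain a shared region for the current
 * task (creating one, or looking up the default region for base_vnode),
 * clone the old text and data regions into it when shared regions are
 * active, chain it to the old region, splice the new regions into the
 * task's map with vm_map_region_replace(), and make it the task's shared
 * region.
 */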
clone_system_shared_regions(shared_regions_active, base_vnode)

        shared_region_mapping_t new_shared_region;
        shared_region_mapping_t next;
        shared_region_mapping_t old_shared_region;
        struct shared_region_task_mappings old_info;
        struct shared_region_task_mappings new_info;

        vm_get_shared_region(current_task(), &old_shared_region);
        old_info.self = (vm_offset_t)old_shared_region;
        shared_region_mapping_info(old_shared_region,
                &(old_info.text_region),
                &(old_info.text_size),
                &(old_info.data_region),
                &(old_info.data_size),
                &(old_info.region_mappings),
                &(old_info.client_base),
                &(old_info.alternate_base),
                &(old_info.alternate_next),
                &(old_info.flags), &next);
        if ((shared_regions_active) ||
                (base_vnode == ENV_DEFAULT_ROOT)) {
           if (shared_file_create_system_region(&new_shared_region))

                lookup_default_shared_region(
                        base_vnode, old_info.system);
           if(new_shared_region == NULL) {
                shared_file_boot_time_init(
                        base_vnode, old_info.system);
                vm_get_shared_region(current_task(), &new_shared_region);

                vm_set_shared_region(current_task(), new_shared_region);

           if(old_shared_region)
                shared_region_mapping_dealloc(old_shared_region);

        new_info.self = (vm_offset_t)new_shared_region;
        shared_region_mapping_info(new_shared_region,
                &(new_info.text_region),
                &(new_info.text_size),
                &(new_info.data_region),
                &(new_info.data_size),
                &(new_info.region_mappings),
                &(new_info.client_base),
                &(new_info.alternate_base),
                &(new_info.alternate_next),
                &(new_info.flags), &next);
        if(shared_regions_active) {
           if(vm_region_clone(old_info.text_region, new_info.text_region)) {
                panic("clone_system_shared_regions: shared region mis-alignment 1");
                shared_region_mapping_dealloc(new_shared_region);

           if (vm_region_clone(old_info.data_region, new_info.data_region)) {
                panic("clone_system_shared_regions: shared region mis-alignment 2");
                shared_region_mapping_dealloc(new_shared_region);

           shared_region_object_chain_attach(
                                new_shared_region, old_shared_region);

        if (vm_map_region_replace(current_map(), old_info.text_region,
                        new_info.text_region, old_info.client_base,
                        old_info.client_base+old_info.text_size)) {
                panic("clone_system_shared_regions: shared region mis-alignment 3");
                shared_region_mapping_dealloc(new_shared_region);

        if(vm_map_region_replace(current_map(), old_info.data_region,
                        new_info.data_region,
                        old_info.client_base + old_info.text_size,
                                + old_info.text_size + old_info.data_size)) {
                panic("clone_system_shared_regions: shared region mis-alignment 4");
                shared_region_mapping_dealloc(new_shared_region);

        vm_set_shared_region(current_task(), new_shared_region);

        /* consume the reference which wasn't accounted for in object */

        if(!shared_regions_active)
                shared_region_mapping_dealloc(old_shared_region);
extern vm_map_t bsd_pageable_map;

/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file     */
struct profile_names_header {
        unsigned int    number_of_profiles;
        unsigned int    user_id;
        unsigned int    version;

struct profile_element {
        unsigned int    mod_date;

struct global_profile {
        struct vnode    *names_vp;
        struct vnode    *data_vp;

struct global_profile_cache {
        struct global_profile   profiles[3];

struct global_profile_cache global_user_profile_cache =
        {3, 0, NULL, NULL, NULL, 0, 0, 0,
                NULL, NULL, NULL, 0, 0, 0,
                NULL, NULL, NULL, 0, 0, 0 };
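/*
 * global_user_profile_cache holds up to three per-user profile file pairs
 * (names and data vnodes plus a names buffer); entries are aged on each
 * use and the least recently used slot is recycled when a new user's
 * files must be opened.
 */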
/* BSD_OPEN_PAGE_CACHE_FILES:                                  */
/* Caller provides a user id.  This id was used in             */
/* prepare_profile_database to create two unique absolute      */
/* file paths to the associated profile files.  These files    */
/* are either opened or bsd_open_page_cache_files returns an   */
/* error.  The header of the names file is then consulted.     */
/* The header and the vnodes for the names and data files are  */

bsd_open_page_cache_files(
        struct global_profile **profile)

        char            *cache_path = "/var/vm/app_profile/";

        struct  vnode   *names_vp;
        struct  vnode   *data_vp;
        vm_offset_t     names_buf;

        int             profile_names_length;
        int             profile_data_length;
        char            *profile_data_string;
        char            *profile_names_string;

        struct  profile_names_header *profile_header;

        struct nameidata nd_names;
        struct nameidata nd_data;
        for(i = 0; i<global_user_profile_cache.max_ele; i++) {
                if((global_user_profile_cache.profiles[i].user == user)
                        &&  (global_user_profile_cache.profiles[i].data_vp

                        *profile = &global_user_profile_cache.profiles[i];
                        /* already in cache, we're done */
                        if ((*profile)->busy) {
                                /*
                                 * drop funnel and wait
                                 */
                                (void)tsleep((void *)
                                        PRIBIO, "app_profile", 0);

                        (*profile)->busy = 1;
                        (*profile)->age = global_user_profile_cache.age;
                        global_user_profile_cache.age+=1;

        lru = global_user_profile_cache.age;
        for(i = 0; i<global_user_profile_cache.max_ele; i++) {
                if(global_user_profile_cache.profiles[i].data_vp == NULL) {
                        *profile = &global_user_profile_cache.profiles[i];
                        (*profile)->age = global_user_profile_cache.age;
                        global_user_profile_cache.age+=1;

                if(global_user_profile_cache.profiles[i].age < lru) {
                        lru = global_user_profile_cache.profiles[i].age;
                        *profile = &global_user_profile_cache.profiles[i];

        if ((*profile)->busy) {
                /*
                 * drop funnel and wait
                 */
                (void)tsleep((void *)
                        &(global_user_profile_cache),
                        PRIBIO, "app_profile", 0);

        (*profile)->busy = 1;
        (*profile)->user = user;

        if((*profile)->data_vp != NULL) {
                kmem_free(kernel_map,
                                (*profile)->buf_ptr, 4 * PAGE_SIZE);
                if ((*profile)->names_vp) {
                        vrele((*profile)->names_vp);
                        (*profile)->names_vp = NULL;
                if ((*profile)->data_vp) {
                        vrele((*profile)->data_vp);
                        (*profile)->data_vp = NULL;

        /* put dummy value in for now to get */
        /* competing request to wait above   */
        /* until we are finished */
        (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
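/*
 * The two profile file paths are built below by splitting one PATH_MAX
 * buffer in half: "<cache_path><uid in hex>_data" and
 * "<cache_path><uid in hex>_names".
 */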
        /* Try to open the appropriate user's profile files */
        /* If neither file is present, try to create them  */
        /* If one file is present and the other not, fail. */
        /* If the files do exist, check them for the app_file */
        /* requested and read it in if present */

        ret = kmem_alloc(kernel_map,
                (vm_offset_t *)&profile_data_string, PATH_MAX);

                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

        /* Split the buffer in half since we know the size of */
        /* our file path and our allocation is adequate for   */
        /* both file path names */
        profile_names_string = profile_data_string + (PATH_MAX/2);

        strcpy(profile_data_string, cache_path);
        strcpy(profile_names_string, cache_path);
        profile_names_length = profile_data_length
                        = strlen(profile_data_string);
        substring = profile_data_string + profile_data_length;
        sprintf(substring, "%x_data", user);
        substring = profile_names_string + profile_names_length;
        sprintf(substring, "%x_names", user);

        /* We now have the absolute file names */

        ret = kmem_alloc(kernel_map,
                        (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

                kmem_free(kernel_map,
                                (vm_offset_t)profile_data_string, PATH_MAX);
                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;
        NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
                        UIO_SYSSPACE, profile_names_string, p);
        NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
                        UIO_SYSSPACE, profile_data_string, p);
        if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
                printf("bsd_open_page_cache_files: CacheData file not found %s\n",
                        profile_data_string);
                kmem_free(kernel_map,
                                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                kmem_free(kernel_map,
                        (vm_offset_t)profile_data_string, PATH_MAX);
                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

        data_vp = nd_data.ni_vp;
        VOP_UNLOCK(data_vp, 0, p);

        if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
                printf("bsd_open_page_cache_files: NamesData file not found %s\n",
                        profile_data_string);
                kmem_free(kernel_map,
                                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                kmem_free(kernel_map,
                        (vm_offset_t)profile_data_string, PATH_MAX);
                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

        names_vp = nd_names.ni_vp;

        if(error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
                printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
                kmem_free(kernel_map,
                        (vm_offset_t)profile_data_string, PATH_MAX);
                kmem_free(kernel_map,
                        (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                (*profile)->data_vp = NULL;
                (*profile)->busy = 0;

        size = vattr.va_size;
        if(size > 4 * PAGE_SIZE)
                size = 4 * PAGE_SIZE;
        buf_ptr = names_buf;

                error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
                        UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
                if((error) || (size == resid)) {
                        kmem_free(kernel_map,
                                (vm_offset_t)profile_data_string, PATH_MAX);
                        kmem_free(kernel_map,
                                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                        (*profile)->data_vp = NULL;
                        (*profile)->busy = 0;

                buf_ptr += size-resid;
                resid_off += size-resid;

        VOP_UNLOCK(names_vp, 0, p);
        kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->names_vp = names_vp;
        (*profile)->data_vp = data_vp;
        (*profile)->buf_ptr = names_buf;
bsd_close_page_cache_files(
        struct global_profile *profile)

bsd_read_page_cache_file(

        struct vnode    *app_vp,
        vm_offset_t     *buffer,
        vm_offset_t     *buf_size)
        boolean_t               funnel_state;

        unsigned int    profile_size;

        vm_offset_t     names_buf;

        struct  vnode   *names_vp;
        struct  vnode   *data_vp;

        struct global_profile *uid_files;

        funnel_state = thread_funnel_set(kernel_flock, TRUE);

        /* Try to open the appropriate user's profile files */
        /* If neither file is present, try to create them  */
        /* If one file is present and the other not, fail. */
        /* If the files do exist, check them for the app_file */
        /* requested and read it in if present */

        error = bsd_open_page_cache_files(user, &uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

        names_vp = uid_files->names_vp;
        data_vp = uid_files->data_vp;
        names_buf = uid_files->buf_ptr;
        /*
         * Get locks on both files, get the vnode with the lowest address first
         */
        if((unsigned int)names_vp < (unsigned int)data_vp) {

        error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
                printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

        error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
                printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
                VOP_UNLOCK(vp1, 0, p);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

        if(error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

        *fid = vattr.va_fileid;
        *mod = vattr.va_mtime.tv_sec;

        if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
                        (unsigned int) vattr.va_mtime.tv_sec,
                        vattr.va_fileid, &profile, &profile_size) == 0) {
                /* profile is an offset in the profile data base */
                /* It is zero if no profile data was found */

                if(profile_size == 0) {
                        VOP_UNLOCK(names_vp, 0, p);
                        VOP_UNLOCK(data_vp, 0, p);
                        bsd_close_page_cache_files(uid_files);
                        thread_funnel_set(kernel_flock, funnel_state);

                ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size));
                        VOP_UNLOCK(names_vp, 0, p);
                        VOP_UNLOCK(data_vp, 0, p);
                        bsd_close_page_cache_files(uid_files);
                        thread_funnel_set(kernel_flock, funnel_state);

                *buf_size = profile_size;
                while(profile_size) {
                        error = vn_rdwr(UIO_READ, data_vp,
                                (caddr_t) *buffer, profile_size,
                                profile, UIO_SYSSPACE, IO_NODELOCKED,
                                p->p_ucred, &resid, p);
                        if((error) || (profile_size == resid)) {
                                VOP_UNLOCK(names_vp, 0, p);
                                VOP_UNLOCK(data_vp, 0, p);
                                bsd_close_page_cache_files(uid_files);
                                kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
                                thread_funnel_set(kernel_flock, funnel_state);

                        profile += profile_size - resid;
                        profile_size = resid;

                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);
bsd_search_page_cache_data_base(

        struct profile_names_header     *database,
        unsigned int                    mod_date,
        unsigned int                    *profile_size)

        struct profile_element  *element;
        unsigned int            ele_total;
        unsigned int            extended_list = 0;

        vm_offset_t             local_buf = NULL;

        if(((vm_offset_t)database->element_array) !=
                                sizeof(struct profile_names_header)) {

        element = (struct profile_element *)(
                        (vm_offset_t)database->element_array +
                                                (vm_offset_t)database);

        ele_total = database->number_of_profiles;
                /* note: code assumes header + n*ele comes out on a page boundary */
                if(((local_buf == 0) && (sizeof(struct profile_names_header) +
                        (ele_total * sizeof(struct profile_element)))
                                        > (PAGE_SIZE * 4)) ||
                        ((local_buf != 0) &&
                                (ele_total * sizeof(struct profile_element))
                                         > (PAGE_SIZE * 4))) {
                        extended_list = ele_total;
                        if(element == (struct profile_element *)
                                ((vm_offset_t)database->element_array +
                                                (vm_offset_t)database)) {
                                ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
                                ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
                        extended_list -= ele_total;

                for (i=0; i<ele_total; i++) {
                        if((mod_date == element[i].mod_date)
                                        && (inode == element[i].inode)) {
                                if(strncmp(element[i].name, app_name, 12) == 0) {
                                        *profile = element[i].addr;
                                        *profile_size = element[i].size;
                                        if(local_buf != NULL) {
                                                kmem_free(kernel_map,
                                                        (vm_offset_t)local_buf, 4 * PAGE_SIZE);

                if(extended_list == 0)

                if(local_buf == NULL) {
                        ret = kmem_alloc(kernel_map,
                                (vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
                        if(ret != KERN_SUCCESS) {

                element = (struct profile_element *)local_buf;
                ele_total = extended_list;

                file_off += 4 * PAGE_SIZE;
                if((ele_total * sizeof(struct profile_element)) >
                        size = PAGE_SIZE * 4;
                        size = ele_total * sizeof(struct profile_element);

                        error = vn_rdwr(UIO_READ, vp,
                                (caddr_t)(local_buf + resid_off),
                                size, file_off + resid_off, UIO_SYSSPACE,
                                IO_NODELOCKED, p->p_ucred, &resid, p);
                        if((error) || (size == resid)) {
                                if(local_buf != NULL) {
                                        kmem_free(kernel_map,
                                                (vm_offset_t)local_buf,

                        resid_off += size-resid;

        if(local_buf != NULL) {
                kmem_free(kernel_map,
                        (vm_offset_t)local_buf, 4 * PAGE_SIZE);
bsd_write_page_cache_file(

        struct nameidata        nd;
        struct vnode            *vp = 0;

        boolean_t               funnel_state;

        struct vattr            data_vattr;

        unsigned int                    profile_size;

        vm_offset_t     names_buf;
        struct  vnode   *names_vp;
        struct  vnode   *data_vp;

        struct  profile_names_header *profile_header;

        struct global_profile *uid_files;

        funnel_state = thread_funnel_set(kernel_flock, TRUE);

        error = bsd_open_page_cache_files(user, &uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

        names_vp = uid_files->names_vp;
        data_vp = uid_files->data_vp;
        names_buf = uid_files->buf_ptr;
        /*
         * Get locks on both files, get the vnode with the lowest address first
         */
        if((unsigned int)names_vp < (unsigned int)data_vp) {

        error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
                printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

        error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
                printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
                VOP_UNLOCK(vp1, 0, p);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

        /* Stat data file for size */

        if(error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

        if (bsd_search_page_cache_data_base(names_vp,
                        (struct profile_names_header *)names_buf,
                        file_name, (unsigned int) mod,
                        fid, &profile, &profile_size) == 0) {
                /* profile is an offset in the profile data base */
                /* It is zero if no profile data was found */

                if(profile_size == 0) {
                        unsigned int    header_size;
                        vm_offset_t     buf_ptr;

                        /* Our Write case */

                        /* read header for last entry */
                                (struct profile_names_header *)names_buf;
                        name_offset = sizeof(struct profile_names_header) +
                                (sizeof(struct profile_element)
                                        * profile_header->number_of_profiles);
                        profile_header->number_of_profiles += 1;

                        if(name_offset < PAGE_SIZE * 4) {
                                struct profile_element  *name;
                                /* write new entry */
                                name = (struct profile_element *)
                                        (names_buf + (vm_offset_t)name_offset);
                                name->addr = data_vattr.va_size;
                                name->mod_date = mod;
                                strncpy (name->name, file_name, 12);

                                unsigned int    ele_size;
                                struct profile_element  name;
                                /* write new entry */
                                name.addr = data_vattr.va_size;
                                name.mod_date = mod;
                                strncpy (name.name, file_name, 12);
                                /* write element out separately */
                                ele_size = sizeof(struct profile_element);
                                buf_ptr = (vm_offset_t)&name;
                                resid_off = name_offset;

                                        error = vn_rdwr(UIO_WRITE, names_vp,
                                                ele_size, resid_off,
                                                UIO_SYSSPACE, IO_NODELOCKED,
                                                p->p_ucred, &resid, p);

                                                printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
                                                VOP_UNLOCK(names_vp, 0, p);
                                                VOP_UNLOCK(data_vp, 0, p);
                                                bsd_close_page_cache_files(

                                        buf_ptr += (vm_offset_t)
                                        resid_off += ele_size-resid;

                        if(name_offset < PAGE_SIZE * 4) {
                                header_size = name_offset +
                                        sizeof(struct profile_element);

                                        sizeof(struct profile_names_header);

                        buf_ptr = (vm_offset_t)profile_header;

                        /* write names file header */
                        while(header_size) {
                                error = vn_rdwr(UIO_WRITE, names_vp,
                                        header_size, resid_off,
                                        UIO_SYSSPACE, IO_NODELOCKED,
                                        p->p_ucred, &resid, p);

                                        VOP_UNLOCK(names_vp, 0, p);
                                        VOP_UNLOCK(data_vp, 0, p);
                                        printf("bsd_write_page_cache_file: Can't write header %x\n", user);
                                        bsd_close_page_cache_files(
                                                kernel_flock, funnel_state);

                                buf_ptr += (vm_offset_t)header_size-resid;
                                resid_off += header_size-resid;
                                header_size = resid;

                        /* write profile to data file */
                        resid_off = data_vattr.va_size;

                                error = vn_rdwr(UIO_WRITE, data_vp,
                                        (caddr_t)buffer, size, resid_off,
                                        UIO_SYSSPACE, IO_NODELOCKED,
                                        p->p_ucred, &resid, p);

                                        VOP_UNLOCK(names_vp, 0, p);
                                        VOP_UNLOCK(data_vp, 0, p);
                                        printf("bsd_write_page_cache_file: Can't write header %x\n", user);
                                        bsd_close_page_cache_files(
                                                kernel_flock, funnel_state);

                                buffer += size-resid;
                                resid_off += size-resid;

                        VOP_UNLOCK(names_vp, 0, p);
                        VOP_UNLOCK(data_vp, 0, p);
                        bsd_close_page_cache_files(uid_files);
                        thread_funnel_set(kernel_flock, funnel_state);

                /* Someone else wrote a twin profile before us */
                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);

                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                bsd_close_page_cache_files(uid_files);
                thread_funnel_set(kernel_flock, funnel_state);
prepare_profile_database(int user)

        char            *cache_path = "/var/vm/app_profile/";

        struct  vnode   *names_vp;
        struct  vnode   *data_vp;
        vm_offset_t     names_buf;
        vm_offset_t     buf_ptr;

        int             profile_names_length;
        int             profile_data_length;
        char            *profile_data_string;
        char            *profile_names_string;

        struct  profile_names_header *profile_header;

        struct nameidata nd_names;
        struct nameidata nd_data;

        ret = kmem_alloc(kernel_map,
                (vm_offset_t *)&profile_data_string, PATH_MAX);
        /* Split the buffer in half since we know the size of */
        /* our file path and our allocation is adequate for   */
        /* both file path names */
        profile_names_string = profile_data_string + (PATH_MAX/2);

        strcpy(profile_data_string, cache_path);
        strcpy(profile_names_string, cache_path);
        profile_names_length = profile_data_length
                        = strlen(profile_data_string);
        substring = profile_data_string + profile_data_length;
        sprintf(substring, "%x_data", user);
        substring = profile_names_string + profile_names_length;
        sprintf(substring, "%x_names", user);

        /* We now have the absolute file names */

        ret = kmem_alloc(kernel_map,
                        (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);

                kmem_free(kernel_map,
                                (vm_offset_t)profile_data_string, PATH_MAX);

        NDINIT(&nd_names, LOOKUP, FOLLOW,
                        UIO_SYSSPACE, profile_names_string, p);
        NDINIT(&nd_data, LOOKUP, FOLLOW,
                        UIO_SYSSPACE, profile_data_string, p);

        if (error = vn_open(&nd_data,
                        O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
                        kmem_free(kernel_map,
                                        (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                        kmem_free(kernel_map,
                                (vm_offset_t)profile_data_string, PATH_MAX);

        data_vp = nd_data.ni_vp;
        VOP_UNLOCK(data_vp, 0, p);

        if (error = vn_open(&nd_names,
                        O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
                        printf("prepare_profile_database: Can't create CacheNames %s\n",
                                profile_data_string);
                        kmem_free(kernel_map,
                                        (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                        kmem_free(kernel_map,
                                (vm_offset_t)profile_data_string, PATH_MAX);
        names_vp = nd_names.ni_vp;

        /* Write Header for new names file */

        profile_header = (struct profile_names_header *)names_buf;

        profile_header->number_of_profiles = 0;
        profile_header->user_id = user;
        profile_header->version = 1;
        profile_header->element_array =
                                sizeof(struct profile_names_header);
        profile_header->spare1 = 0;
        profile_header->spare2 = 0;
        profile_header->spare3 = 0;

        size = sizeof(struct profile_names_header);
        buf_ptr = (vm_offset_t)profile_header;

                error = vn_rdwr(UIO_WRITE, names_vp,
                                (caddr_t)buf_ptr, size, resid_off,
                                UIO_SYSSPACE, IO_NODELOCKED,
                                p->p_ucred, &resid, p);

                        printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
                        kmem_free(kernel_map,
                                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
                        kmem_free(kernel_map,
                                (vm_offset_t)profile_data_string,

                buf_ptr += size-resid;
                resid_off += size-resid;
        vattr.va_uid = user;
        error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);
                printf("prepare_profile_database: "
                        "Can't set user %s\n", profile_names_string);

        error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);
                printf("prepare_profile_database: cannot lock data file %s\n",
                        profile_data_string);
                kmem_free(kernel_map,
                        (vm_offset_t)profile_data_string, PATH_MAX);
                kmem_free(kernel_map,
                        (vm_offset_t)names_buf, 4 * PAGE_SIZE);

        vattr.va_uid = user;
        error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);
                printf("prepare_profile_database: "
                        "Can't set user %s\n", profile_data_string);

        kmem_free(kernel_map,
                        (vm_offset_t)profile_data_string, PATH_MAX);
        kmem_free(kernel_map,
                        (vm_offset_t)names_buf, 4 * PAGE_SIZE);