1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1987 Carnegie-Mellon University
28 * All rights reserved. The CMU software License Agreement specifies
29 * the terms and conditions for use and redistribution.
30 */
31
32 /*
33 */
34
35
36 #include <meta_features.h>
37
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/debug.h>
41 #include <kern/lock.h>
42 #include <mach/time_value.h>
43 #include <mach/vm_param.h>
44 #include <mach/vm_prot.h>
45 #include <mach/port.h>
46
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/dir.h>
50 #include <sys/namei.h>
51 #include <sys/proc.h>
52 #include <sys/vm.h>
53 #include <sys/file.h>
54 #include <sys/vnode.h>
55 #include <sys/buf.h>
56 #include <sys/mount.h>
57 #include <sys/trace.h>
58 #include <sys/kernel.h>
59 #include <sys/ubc.h>
60 #include <sys/stat.h>
61
62 #include <kern/kalloc.h>
63 #include <vm/vm_map.h>
64 #include <vm/vm_kern.h>
65
66 #include <machine/spl.h>
67
68 #include <mach/shared_memory_server.h>
69 #include <vm/vm_shared_memory_server.h>
70
71
72 extern shared_region_mapping_t system_shared_region;
73 extern zone_t lsf_zone;
74
75 useracc(addr, len, prot)
76 caddr_t addr;
77 u_int len;
78 int prot;
79 {
80 return (vm_map_check_protection(
81 current_map(),
82 trunc_page(addr), round_page(addr+len),
83 prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
84 }
85
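/*
 * Editor's note (descriptive, derived from the code below): vslock() and
 * vsunlock() wire and unwire a range of the current task's address space,
 * e.g. around a physio-style transfer.  The switch statements simply
 * translate the Mach kern_return_t codes returned by vm_map_wire() and
 * vm_map_unwire() into BSD errno values.
 */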
86 vslock(addr, len)
87 caddr_t addr;
88 int len;
89 {
90 kern_return_t kret;
91 kret = vm_map_wire(current_map(), trunc_page(addr),
92 round_page(addr+len),
93 VM_PROT_READ | VM_PROT_WRITE ,FALSE);
94
95 switch (kret) {
96 case KERN_SUCCESS:
97 return (0);
98 case KERN_INVALID_ADDRESS:
99 case KERN_NO_SPACE:
100 return (ENOMEM);
101 case KERN_PROTECTION_FAILURE:
102 return (EACCES);
103 default:
104 return (EINVAL);
105 }
106 }
107
108 vsunlock(addr, len, dirtied)
109 caddr_t addr;
110 int len;
111 int dirtied;
112 {
113 pmap_t pmap;
114 #if FIXME /* [ */
115 vm_page_t pg;
116 #endif /* FIXME ] */
117 vm_offset_t vaddr, paddr;
118 kern_return_t kret;
119
120 #if FIXME /* [ */
121 if (dirtied) {
122 pmap = get_task_pmap(current_task());
123 for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
124 vaddr += PAGE_SIZE) {
125 paddr = pmap_extract(pmap, vaddr);
126 pg = PHYS_TO_VM_PAGE(paddr);
127 vm_page_set_modified(pg);
128 }
129 }
130 #endif /* FIXME ] */
131 #ifdef lint
132 dirtied++;
133 #endif /* lint */
134 kret = vm_map_unwire(current_map(), trunc_page(addr),
135 round_page(addr+len), FALSE);
136 switch (kret) {
137 case KERN_SUCCESS:
138 return (0);
139 case KERN_INVALID_ADDRESS:
140 case KERN_NO_SPACE:
141 return (ENOMEM);
142 case KERN_PROTECTION_FAILURE:
143 return (EACCES);
144 default:
145 return (EINVAL);
146 }
147 }
148
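/*
 * Editor's note (descriptive, derived from the code below): subyte/suibyte
 * and suword/suiword store a byte or word into user space, while
 * fubyte/fuibyte and fuword/fuiword fetch one; all of them return -1 when
 * the user address cannot be accessed.  These are the traditional BSD
 * user-memory primitives, implemented here as thin wrappers around
 * copyout() and copyin().
 */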
149 #if defined(sun) || BALANCE || defined(m88k)
150 #else /*defined(sun) || BALANCE || defined(m88k)*/
151 subyte(addr, byte)
152 void * addr;
153 int byte;
154 {
155 char character;
156
157 character = (char)byte;
158 return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
159 }
160
161 suibyte(addr, byte)
162 void * addr;
163 int byte;
164 {
165 char character;
166
167 character = (char)byte;
168 return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1);
169 }
170
171 int fubyte(addr)
172 void * addr;
173 {
174 unsigned char byte;
175
176 if (copyin(addr, (void *) &byte, sizeof(char)))
177 return(-1);
178 return(byte);
179 }
180
181 int fuibyte(addr)
182 void * addr;
183 {
184 unsigned char byte;
185
186 if (copyin(addr, (void *) &(byte), sizeof(char)))
187 return(-1);
188 return(byte);
189 }
190
191 suword(addr, word)
192 void * addr;
193 long word;
194 {
195 return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
196 }
197
198 long fuword(addr)
199 void * addr;
200 {
201 long word;
202
203 if (copyin(addr, (void *) &word, sizeof(int)))
204 return(-1);
205 return(word);
206 }
207
208 /* suiword and fuiword are the same as suword and fuword, respectively */
209
210 suiword(addr, word)
211 void * addr;
212 long word;
213 {
214 return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
215 }
216
217 long fuiword(addr)
218 void * addr;
219 {
220 long word;
221
222 if (copyin(addr, (void *) &word, sizeof(int)))
223 return(-1);
224 return(word);
225 }
226 #endif /* defined(sun) || BALANCE || defined(m88k) */
227
228 int
229 swapon()
230 {
231 return(EOPNOTSUPP);
232 }
233
234
235 kern_return_t
236 pid_for_task(t, x)
237 mach_port_t t;
238 int *x;
239 {
240 struct proc * p;
241 task_t t1;
242 extern task_t port_name_to_task(mach_port_t t);
243 int pid = -1;
244 kern_return_t err = KERN_SUCCESS;
245 boolean_t funnel_state;
246
247 funnel_state = thread_funnel_set(kernel_flock, TRUE);
248 t1 = port_name_to_task(t);
249
250 if (t1 == TASK_NULL) {
251 err = KERN_FAILURE;
252 goto pftout;
253 } else {
254 p = get_bsdtask_info(t1);
255 if (p) {
256 pid = p->p_pid;
257 err = KERN_SUCCESS;
258 } else {
259 err = KERN_FAILURE;
260 }
261 }
262 task_deallocate(t1);
263 pftout:
264 (void) copyout((char *) &pid, (char *) x, sizeof(*x));
265 thread_funnel_set(kernel_flock, funnel_state);
266 return(err);
267 }
268
269 /*
270 * Routine: task_for_pid
271 * Purpose:
272 * Get the task port for another "process", named by its
273 * process ID on the same host as "target_task".
274 *
275 * Only permitted to privileged processes, or processes
276 * with the same user ID.
277 */
278 kern_return_t
279 task_for_pid(target_tport, pid, t)
280 mach_port_t target_tport;
281 int pid;
282 mach_port_t *t;
283 {
284 struct proc *p;
285 struct proc *p1;
286 task_t t1;
287 mach_port_t tret;
288 extern task_t port_name_to_task(mach_port_t tp);
289 void * sright;
290 int error = 0;
291 boolean_t funnel_state;
292
293 t1 = port_name_to_task(target_tport);
294 if (t1 == TASK_NULL) {
295 (void ) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
296 return(KERN_FAILURE);
297 }
298
299 funnel_state = thread_funnel_set(kernel_flock, TRUE);
300
301 restart:
302 p1 = get_bsdtask_info(t1);
303 if (
304 ((p = pfind(pid)) != (struct proc *) 0)
305 && (p1 != (struct proc *) 0)
306 && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
307 ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
308 || !(suser(p1->p_ucred, &p1->p_acflag)))
309 && (p->p_stat != SZOMB)
310 ) {
311 if (p->task != TASK_NULL) {
312 if (!task_reference_try(p->task)) {
313 mutex_pause(); /* temp loss of funnel */
314 goto restart;
315 }
316 sright = (void *)convert_task_to_port(p->task);
317 tret = (void *)
318 ipc_port_copyout_send(sright,
319 get_task_ipcspace(current_task()));
320 } else
321 tret = MACH_PORT_NULL;
322 (void ) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
323 task_deallocate(t1);
324 error = KERN_SUCCESS;
325 goto tfpout;
326 }
327 task_deallocate(t1);
328 tret = MACH_PORT_NULL;
329 (void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
330 error = KERN_FAILURE;
331 tfpout:
332 thread_funnel_set(kernel_flock, funnel_state);
333 return(error);
334 }
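/*
 * Illustrative user-space sketch (not part of this file; assumes the
 * user-level wrappers and prototypes for these routines are available to
 * the caller, and abbreviates error handling):
 *
 *	#include <mach/mach.h>
 *
 *	mach_port_t task;
 *	int pid = 1;			/* target process id (example value) */
 *	int pid_back;
 *
 *	if (task_for_pid(mach_task_self(), pid, &task) == KERN_SUCCESS) {
 *		(void) pid_for_task(task, &pid_back);
 *		(void) mach_port_deallocate(mach_task_self(), task);
 *	}
 *
 * As the Purpose comment above notes, task_for_pid succeeds only for
 * privileged callers or for processes with matching user IDs.
 */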
335
336
337 struct load_shared_file_args {
338 char *filename;
339 caddr_t mfa;
340 u_long mfs;
341 caddr_t *ba;
342 int map_cnt;
343 sf_mapping_t *mappings;
344 int *flags;
345 };
346
347 int ws_disabled = 1;
348
349 int
350 load_shared_file(
351 struct proc *p,
352 struct load_shared_file_args *uap,
353 register *retval)
354 {
355 caddr_t mapped_file_addr=uap->mfa;
356 u_long mapped_file_size=uap->mfs;
357 caddr_t *base_address=uap->ba;
358 int map_cnt=uap->map_cnt;
359 sf_mapping_t *mappings=uap->mappings;
360 char *filename=uap->filename;
361 int *flags=uap->flags;
362 struct vnode *vp = 0;
363 struct nameidata nd, *ndp;
364 char *filename_str;
365 register int error;
366 kern_return_t kr;
367
368 struct vattr vattr;
369 memory_object_control_t file_control;
370 sf_mapping_t *map_list;
371 caddr_t local_base;
372 int local_flags;
373 int caller_flags;
374 int i;
375 int default_regions = 0;
376 vm_size_t dummy;
377 kern_return_t kret;
378
379 shared_region_mapping_t shared_region;
380 struct shared_region_task_mappings task_mapping_info;
381 shared_region_mapping_t next;
382
383 ndp = &nd;
384
385
386 /* Retrieve the base address */
387 if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
388 goto lsf_bailout;
389 }
390 if (error = copyin(flags, &local_flags, sizeof (int))) {
391 goto lsf_bailout;
392 }
393
394 if(local_flags & QUERY_IS_SYSTEM_REGION) {
395 vm_get_shared_region(current_task(), &shared_region);
396 if (shared_region == system_shared_region) {
397 local_flags = SYSTEM_REGION_BACKED;
398 } else {
399 local_flags = 0;
400 }
401 error = 0;
402 error = copyout(&local_flags, flags, sizeof (int));
403 goto lsf_bailout;
404 }
405 caller_flags = local_flags;
406 kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
407 (vm_size_t)(MAXPATHLEN));
408 if (kret != KERN_SUCCESS) {
409 error = ENOMEM;
410 goto lsf_bailout;
411 }
412 kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
413 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
414 if (kret != KERN_SUCCESS) {
415 kmem_free(kernel_map, (vm_offset_t)filename_str,
416 (vm_size_t)(MAXPATHLEN));
417 error = ENOMEM;
418 goto lsf_bailout;
419 }
420
421 if (error =
422 copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
423 goto lsf_bailout_free;
424 }
425
426 if (error = copyinstr(filename,
427 filename_str, MAXPATHLEN, (size_t *)&dummy)) {
428 goto lsf_bailout_free;
429 }
430
431 /*
432 * Get a vnode for the target file
433 */
434 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
435 filename_str, p);
436
437 if ((error = namei(ndp))) {
438 goto lsf_bailout_free;
439 }
440
441 vp = ndp->ni_vp;
442
443 if (vp->v_type != VREG) {
444 error = EINVAL;
445 goto lsf_bailout_free_vput;
446 }
447
448 UBCINFOCHECK("load_shared_file", vp);
449
450 if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
451 goto lsf_bailout_free_vput;
452 }
453
454
455 file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
456 if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
457 error = EINVAL;
458 goto lsf_bailout_free_vput;
459 }
460
461 vm_get_shared_region(current_task(), &shared_region);
462 if(shared_region == system_shared_region) {
463 default_regions = 1;
464 }
465 if(((vp->v_mount != rootvnode->v_mount)
466 && (shared_region == system_shared_region))
467 && (lsf_mapping_pool_gauge() < 75)) {
468 /* We don't want to run out of shared memory */
469 /* map entries by starting too many private versions */
470 /* of the shared library structures */
471 int error;
472 if(p->p_flag & P_NOSHLIB) {
473 error = clone_system_shared_regions(FALSE);
474 } else {
475 error = clone_system_shared_regions(TRUE);
476 }
477 if (error) {
478 goto lsf_bailout_free_vput;
479 }
480 local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
481 vm_get_shared_region(current_task(), &shared_region);
482 }
483 #ifdef notdef
484 if(vattr.va_size != mapped_file_size) {
485 error = EINVAL;
486 goto lsf_bailout_free_vput;
487 }
488 #endif
489 if(p->p_flag & P_NOSHLIB) {
490 p->p_flag = p->p_flag & ~P_NOSHLIB;
491 }
492
493 /* load alternate regions if the caller has requested them. */
494 /* Note: the new regions are "clean slates" */
495 if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
496 error = clone_system_shared_regions(FALSE);
497 if (error) {
498 goto lsf_bailout_free_vput;
499 }
500 vm_get_shared_region(current_task(), &shared_region);
501 }
502
503 task_mapping_info.self = (vm_offset_t)shared_region;
504
505 shared_region_mapping_info(shared_region,
506 &(task_mapping_info.text_region),
507 &(task_mapping_info.text_size),
508 &(task_mapping_info.data_region),
509 &(task_mapping_info.data_size),
510 &(task_mapping_info.region_mappings),
511 &(task_mapping_info.client_base),
512 &(task_mapping_info.alternate_base),
513 &(task_mapping_info.alternate_next),
514 &(task_mapping_info.flags), &next);
515
516 /* This is a work-around to allow executables which have been */
517 /* built without knowledge of the proper shared segment to */
518 /* load. This code has been architected as a shared region */
519 /* handler; knowing where the regions are loaded is problematic */
520 /* for the extension of shared regions, since it will not be */
521 /* easy to know which region an item should go into. The code */
522 /* below, however, works around a short-term problem with */
523 /* executables which believe they are loading at zero. */
524
525 {
526 if (((unsigned int)local_base &
527 (~(task_mapping_info.text_size - 1))) !=
528 task_mapping_info.client_base) {
529 if(local_flags & ALTERNATE_LOAD_SITE) {
530 local_base = (caddr_t)(
531 (unsigned int)local_base &
532 (task_mapping_info.text_size - 1));
533 local_base = (caddr_t)((unsigned int)local_base
534 | task_mapping_info.client_base);
535 } else {
536 error = EINVAL;
537 goto lsf_bailout_free_vput;
538 }
539 }
540 }
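/*
 * Worked example (illustrative numbers only, not taken from this file):
 * if task_mapping_info.text_size were 0x10000000 and client_base were
 * 0x90000000, a caller-supplied local_base of 0x00003000 fails the check
 * above; with ALTERNATE_LOAD_SITE set it is rebased to
 * (0x00003000 & 0x0fffffff) | 0x90000000 == 0x90003000, otherwise the
 * request is rejected with EINVAL.
 */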
541
542
543 if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
544 mapped_file_size,
545 (vm_offset_t *)&local_base,
546 map_cnt, map_list, file_control,
547 &task_mapping_info, &local_flags))) {
548 switch (kr) {
549 case KERN_FAILURE:
550 error = EINVAL;
551 break;
552 case KERN_INVALID_ARGUMENT:
553 error = EINVAL;
554 break;
555 case KERN_INVALID_ADDRESS:
556 error = EACCES;
557 break;
558 case KERN_PROTECTION_FAILURE:
559 /* save EAUTH for authentication in this */
560 /* routine */
561 error = EPERM;
562 break;
563 case KERN_NO_SPACE:
564 error = ENOMEM;
565 break;
566 default:
567 error = EINVAL;
568 };
569 if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
570 printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
571 for(i=0; i<map_cnt; i++) {
572 printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
573 , i, map_list[i].mapping_offset,
574 map_list[i].size,
575 map_list[i].file_offset,
576 map_list[i].protection);
577 }
578 }
579 } else {
580 if(default_regions)
581 local_flags |= SYSTEM_REGION_BACKED;
582 if(!(error = copyout(&local_flags, flags, sizeof (int)))) {
583 error = copyout(&local_base,
584 base_address, sizeof (caddr_t));
585 }
586 }
587
588 lsf_bailout_free_vput:
589 vput(vp);
590
591 lsf_bailout_free:
592 kmem_free(kernel_map, (vm_offset_t)filename_str,
593 (vm_size_t)(MAXPATHLEN));
594 kmem_free(kernel_map, (vm_offset_t)map_list,
595 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
596
597 lsf_bailout:
598 return error;
599 }
600
601 struct reset_shared_file_args {
602 caddr_t *ba;
603 int map_cnt;
604 sf_mapping_t *mappings;
605 };
606
607 int
608 reset_shared_file(
609 struct proc *p,
610 struct reset_shared_file_args *uap,
611 register *retval)
612 {
613 caddr_t *base_address=uap->ba;
614 int map_cnt=uap->map_cnt;
615 sf_mapping_t *mappings=uap->mappings;
616 register int error;
617 kern_return_t kr;
618
619 sf_mapping_t *map_list;
620 caddr_t local_base;
621 vm_offset_t map_address;
622 int i;
623 kern_return_t kret;
624
625 /* Retrieve the base address */
626 if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
627 goto rsf_bailout;
628 }
629
630 if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
631 != GLOBAL_SHARED_TEXT_SEGMENT) {
632 error = EINVAL;
633 goto rsf_bailout;
634 }
635
636 kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
637 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
638 if (kret != KERN_SUCCESS) {
639 error = ENOMEM;
640 goto rsf_bailout;
641 }
642
643 if (error =
644 copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
645
646 kmem_free(kernel_map, (vm_offset_t)map_list,
647 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
648 goto rsf_bailout;
649 }
650 for (i = 0; i<map_cnt; i++) {
651 if((map_list[i].mapping_offset
652 & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
653 map_address = (vm_offset_t)
654 (local_base + map_list[i].mapping_offset);
655 vm_deallocate(current_map(),
656 map_address,
657 map_list[i].size);
658 vm_map(current_map(), &map_address,
659 map_list[i].size, 0, SHARED_LIB_ALIAS,
660 shared_data_region_handle,
661 ((unsigned int)local_base
662 & SHARED_DATA_REGION_MASK) +
663 (map_list[i].mapping_offset
664 & SHARED_DATA_REGION_MASK),
665 TRUE, VM_PROT_READ,
666 VM_PROT_READ, VM_INHERIT_SHARE);
667 }
668 }
669
670 kmem_free(kernel_map, (vm_offset_t)map_list,
671 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
672
673 rsf_bailout:
674 return error;
675 }
676
677 struct new_system_shared_regions_args {
678 int dummy;
679 };
680
681 int
682 new_system_shared_regions(
683 struct proc *p,
684 struct new_system_shared_regions_args *uap,
685 register *retval)
686 {
687 shared_region_mapping_t regions;
688 shared_region_mapping_t new_regions;
689
690 if(!(is_suser())) {
691 *retval = EINVAL;
692 return EINVAL;
693 }
694
695 /* get current shared region info for */
696 /* restoration after new system shared */
697 /* regions are in place */
698 vm_get_shared_region(current_task(), &regions);
699
700 /* usually only called at boot time */
701 /* shared_file_boot_time_init creates */
702 /* a new set of system shared regions */
703 /* and places them as the system */
704 /* shared regions. */
705 shared_file_boot_time_init();
706
707 /* set current task back to its */
708 /* original regions. */
709 vm_get_shared_region(current_task(), &new_regions);
710 shared_region_mapping_dealloc(new_regions);
711
712 vm_set_shared_region(current_task(), regions);
713
714 *retval = 0;
715 return 0;
716 }
717
718
719
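/*
 * Editor's note (descriptive, derived from the code below):
 * clone_system_shared_regions() gives the current task a private copy of
 * the shared text and data regions.  When shared_regions_active is TRUE,
 * the contents of the old regions are cloned into the new ones and the
 * new regions are chained to the old; in either case the task's map is
 * switched over to the new regions via vm_map_region_replace() and
 * vm_set_shared_region().
 */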
720 int
721 clone_system_shared_regions(shared_regions_active)
722 {
723 shared_region_mapping_t new_shared_region;
724 shared_region_mapping_t next;
725 shared_region_mapping_t old_shared_region;
726 struct shared_region_task_mappings old_info;
727 struct shared_region_task_mappings new_info;
728
729 struct proc *p;
730
731 if (shared_file_create_system_region(&new_shared_region))
732 return (ENOMEM);
733 vm_get_shared_region(current_task(), &old_shared_region);
734 old_info.self = (vm_offset_t)old_shared_region;
735 shared_region_mapping_info(old_shared_region,
736 &(old_info.text_region),
737 &(old_info.text_size),
738 &(old_info.data_region),
739 &(old_info.data_size),
740 &(old_info.region_mappings),
741 &(old_info.client_base),
742 &(old_info.alternate_base),
743 &(old_info.alternate_next),
744 &(old_info.flags), &next);
745 new_info.self = (vm_offset_t)new_shared_region;
746 shared_region_mapping_info(new_shared_region,
747 &(new_info.text_region),
748 &(new_info.text_size),
749 &(new_info.data_region),
750 &(new_info.data_size),
751 &(new_info.region_mappings),
752 &(new_info.client_base),
753 &(new_info.alternate_base),
754 &(new_info.alternate_next),
755 &(new_info.flags), &next);
756 if(shared_regions_active) {
757 if(vm_region_clone(old_info.text_region, new_info.text_region)) {
758 panic("clone_system_shared_regions: shared region mis-alignment 1");
759 shared_region_mapping_dealloc(new_shared_region);
760 return(EINVAL);
761 }
762 if (vm_region_clone(old_info.data_region, new_info.data_region)) {
763 panic("clone_system_shared_regions: shared region mis-alignment 2");
764 shared_region_mapping_dealloc(new_shared_region);
765 return(EINVAL);
766 }
767 shared_region_object_chain_attach(
768 new_shared_region, old_shared_region);
769 }
770 if (vm_map_region_replace(current_map(), old_info.text_region,
771 new_info.text_region, old_info.client_base,
772 old_info.client_base+old_info.text_size)) {
773 panic("clone_system_shared_regions: shared region mis-alignment 3");
774 shared_region_mapping_dealloc(new_shared_region);
775 return(EINVAL);
776 }
777 if(vm_map_region_replace(current_map(), old_info.data_region,
778 new_info.data_region,
779 old_info.client_base + old_info.text_size,
780 old_info.client_base
781 + old_info.text_size + old_info.data_size)) {
782 panic("clone_system_shared_regions: shared region mis-alignment 4");
783 shared_region_mapping_dealloc(new_shared_region);
784 return(EINVAL);
785 }
786 vm_set_shared_region(current_task(), new_shared_region);
787
788 /* consume the reference which wasn't accounted for in object */
789 /* chain attach */
790 if(!shared_regions_active)
791 shared_region_mapping_dealloc(old_shared_region);
792
793 return(0);
794
795 }
796
797 extern vm_map_t bsd_pageable_map;
798
799 /* header for the profile name file. The profiled app info is held */
800 /* in the data file and pointed to by elements in the name file */
801
802 struct profile_names_header {
803 unsigned int number_of_profiles;
804 unsigned int user_id;
805 unsigned int version;
806 off_t element_array;
807 unsigned int spare1;
808 unsigned int spare2;
809 unsigned int spare3;
810 };
811
812 struct profile_element {
813 off_t addr;
814 vm_size_t size;
815 unsigned int mod_date;
816 unsigned int inode;
817 char name[12];
818 };
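/*
 * Layout implied by the code below: the names file starts with a
 * struct profile_names_header whose element_array field gives the byte
 * offset of a packed array of struct profile_element; each element's
 * addr and size locate that application's profile inside the companion
 * data file.
 */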
819
820 struct global_profile {
821 struct vnode *names_vp;
822 struct vnode *data_vp;
823 vm_offset_t buf_ptr;
824 unsigned int user;
825 unsigned int age;
826 unsigned int busy;
827 };
828
829 struct global_profile_cache {
830 int max_ele;
831 unsigned int age;
832 struct global_profile profiles[3];
833 };
834
835 struct global_profile_cache global_user_profile_cache =
836 {3, 0, NULL, NULL, NULL, 0, 0, 0,
837 NULL, NULL, NULL, 0, 0, 0,
838 NULL, NULL, NULL, 0, 0, 0 };
839
840 /* BSD_OPEN_PAGE_CACHE_FILES: */
841 /* Caller provides a user id. This id was used in */
842 /* prepare_profile_database to create two unique absolute */
843 /* file paths to the associated profile files. These files */
844 /* are either opened or bsd_open_page_cache_files returns an */
845 /* error. The header of the names file is then consulted. */
846 /* The header and the vnodes for the names and data files are */
847 /* returned. */
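/* For example, user id 501 (0x1f5) resolves to the file pair */
/* /var/vm/app_profile/1f5_names and /var/vm/app_profile/1f5_data, */
/* matching the "%x_names" / "%x_data" sprintf() construction below. */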
848
849 int
850 bsd_open_page_cache_files(
851 unsigned int user,
852 struct global_profile **profile)
853 {
854 char *cache_path = "/var/vm/app_profile/";
855 struct proc *p;
856 int error;
857 int resid;
858 off_t resid_off;
859 unsigned int lru;
860 vm_size_t size;
861
862 struct vnode *names_vp;
863 struct vnode *data_vp;
864 vm_offset_t names_buf;
865 vm_offset_t buf_ptr;
866
867 int profile_names_length;
868 int profile_data_length;
869 char *profile_data_string;
870 char *profile_names_string;
871 char *substring;
872
873 struct vattr vattr;
874
875 struct profile_names_header *profile_header;
876 kern_return_t ret;
877
878 struct nameidata nd_names;
879 struct nameidata nd_data;
880
881 int i;
882
883
884 p = current_proc();
885
886 restart:
887 for(i = 0; i<global_user_profile_cache.max_ele; i++) {
888 if((global_user_profile_cache.profiles[i].user == user)
889 && (global_user_profile_cache.profiles[i].data_vp
890 != NULL)) {
891 *profile = &global_user_profile_cache.profiles[i];
892 /* already in cache, we're done */
893 if ((*profile)->busy) {
894 /*
895 * drop funnel and wait
896 */
897 (void)tsleep((void *)
898 *profile,
899 PRIBIO, "app_profile", 0);
900 goto restart;
901 }
902 (*profile)->busy = 1;
903 (*profile)->age = global_user_profile_cache.age;
904 global_user_profile_cache.age+=1;
905 return 0;
906 }
907 }
908
909 lru = global_user_profile_cache.age;
910 for(i = 0; i<global_user_profile_cache.max_ele; i++) {
911 if(global_user_profile_cache.profiles[i].data_vp == NULL) {
912 *profile = &global_user_profile_cache.profiles[i];
913 (*profile)->age = global_user_profile_cache.age;
914 global_user_profile_cache.age+=1;
915 break;
916 }
917 if(global_user_profile_cache.profiles[i].age < lru) {
918 lru = global_user_profile_cache.profiles[i].age;
919 *profile = &global_user_profile_cache.profiles[i];
920 }
921 }
922
923 if ((*profile)->busy) {
924 /*
925 * drop funnel and wait
926 */
927 (void)tsleep((void *)
928 &(global_user_profile_cache),
929 PRIBIO, "app_profile", 0);
930 goto restart;
931 }
932 (*profile)->busy = 1;
933 (*profile)->user = user;
934
935 if((*profile)->data_vp != NULL) {
936 kmem_free(kernel_map,
937 (*profile)->buf_ptr, 4 * PAGE_SIZE);
938 if ((*profile)->names_vp) {
939 vrele((*profile)->names_vp);
940 (*profile)->names_vp = NULL;
941 }
942 if ((*profile)->data_vp) {
943 vrele((*profile)->data_vp);
944 (*profile)->data_vp = NULL;
945 }
946 }
947
948 /* put dummy value in for now to get */
949 /* competing request to wait above */
950 /* until we are finished */
951 (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
952
953 /* Try to open the appropriate user's profile files */
954 /* If neither file is present, try to create them */
955 /* If one file is present and the other not, fail. */
956 /* If the files do exist, check them for the app_file */
957 /* requested and read it in if present */
958
959
960 ret = kmem_alloc(kernel_map,
961 (vm_offset_t *)&profile_data_string, PATH_MAX);
962
963 if(ret) {
964 (*profile)->data_vp = NULL;
965 (*profile)->busy = 0;
966 wakeup(*profile);
967 return ENOMEM;
968 }
969
970 /* Split the buffer in half since we know the size of */
971 /* our file path and our allocation is adequate for */
972 /* both file path names */
973 profile_names_string = profile_data_string + (PATH_MAX/2);
974
975
976 strcpy(profile_data_string, cache_path);
977 strcpy(profile_names_string, cache_path);
978 profile_names_length = profile_data_length
979 = strlen(profile_data_string);
980 substring = profile_data_string + profile_data_length;
981 sprintf(substring, "%x_data", user);
982 substring = profile_names_string + profile_names_length;
983 sprintf(substring, "%x_names", user);
984
985 /* We now have the absolute file names */
986
987 ret = kmem_alloc(kernel_map,
988 (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
989 if(ret) {
990 kmem_free(kernel_map,
991 (vm_offset_t)profile_data_string, PATH_MAX);
992 (*profile)->data_vp = NULL;
993 (*profile)->busy = 0;
994 wakeup(*profile);
995 return ENOMEM;
996 }
997
998 NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
999 UIO_SYSSPACE, profile_names_string, p);
1000 NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
1001 UIO_SYSSPACE, profile_data_string, p);
1002 if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
1003 #ifdef notdef
1004 printf("bsd_open_page_cache_files: CacheData file not found %s\n",
1005 profile_data_string);
1006 #endif
1007 kmem_free(kernel_map,
1008 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1009 kmem_free(kernel_map,
1010 (vm_offset_t)profile_data_string, PATH_MAX);
1011 (*profile)->data_vp = NULL;
1012 (*profile)->busy = 0;
1013 wakeup(*profile);
1014 return error;
1015 }
1016
1017 data_vp = nd_data.ni_vp;
1018 VOP_UNLOCK(data_vp, 0, p);
1019
1020 if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
1021 printf("bsd_open_page_cache_files: NamesData file not found %s\n",
1022 profile_data_string);
1023 kmem_free(kernel_map,
1024 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1025 kmem_free(kernel_map,
1026 (vm_offset_t)profile_data_string, PATH_MAX);
1027 vrele(data_vp);
1028 (*profile)->data_vp = NULL;
1029 (*profile)->busy = 0;
1030 wakeup(*profile);
1031 return error;
1032 }
1033 names_vp = nd_names.ni_vp;
1034
1035 if(error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
1036 printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
1037 kmem_free(kernel_map,
1038 (vm_offset_t)profile_data_string, PATH_MAX);
1039 kmem_free(kernel_map,
1040 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1041 vput(names_vp);
1042 vrele(data_vp);
1043 (*profile)->data_vp = NULL;
1044 (*profile)->busy = 0;
1045 wakeup(*profile);
1046 return error;
1047 }
1048
1049 size = vattr.va_size;
1050 if(size > 4 * PAGE_SIZE)
1051 size = 4 * PAGE_SIZE;
1052 buf_ptr = names_buf;
1053 resid_off = 0;
1054
1055 while(size) {
1056 error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
1057 size, resid_off,
1058 UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
1059 if((error) || (size == resid)) {
1060 if(!error) {
1061 error = EINVAL;
1062 }
1063 kmem_free(kernel_map,
1064 (vm_offset_t)profile_data_string, PATH_MAX);
1065 kmem_free(kernel_map,
1066 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1067 vput(names_vp);
1068 vrele(data_vp);
1069 (*profile)->data_vp = NULL;
1070 (*profile)->busy = 0;
1071 wakeup(*profile);
1072 return error;
1073 }
1074 buf_ptr += size-resid;
1075 resid_off += size-resid;
1076 size = resid;
1077 }
1078
1079 VOP_UNLOCK(names_vp, 0, p);
1080 kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
1081 (*profile)->names_vp = names_vp;
1082 (*profile)->data_vp = data_vp;
1083 (*profile)->buf_ptr = names_buf;
1084 return 0;
1085
1086 }
1087
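/*
 * Editor's note (descriptive, derived from the code below):
 * bsd_close_page_cache_files() releases the per-user cache slot claimed
 * by bsd_open_page_cache_files(): it clears the busy flag and wakes any
 * thread sleeping in the open path above.
 */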
1088 void
1089 bsd_close_page_cache_files(
1090 struct global_profile *profile)
1091 {
1092 profile->busy = 0;
1093 wakeup(profile);
1094 }
1095
1096 int
1097 bsd_read_page_cache_file(
1098 unsigned int user,
1099 int *fid,
1100 int *mod,
1101 char *app_name,
1102 struct vnode *app_vp,
1103 vm_offset_t *buffer,
1104 vm_offset_t *buf_size)
1105 {
1106
1107 boolean_t funnel_state;
1108
1109 struct proc *p;
1110 int error;
1111 int resid;
1112 vm_size_t size;
1113
1114 off_t profile;
1115 unsigned int profile_size;
1116
1117 vm_offset_t names_buf;
1118 struct vattr vattr;
1119
1120 kern_return_t ret;
1121
1122 struct vnode *names_vp;
1123 struct vnode *data_vp;
1124 struct vnode *vp1;
1125 struct vnode *vp2;
1126
1127 struct global_profile *uid_files;
1128
1129 funnel_state = thread_funnel_set(kernel_flock, TRUE);
1130
1131 /* Try to open the appropriate user's profile files */
1132 /* If neither file is present, try to create them */
1133 /* If one file is present and the other not, fail. */
1134 /* If the files do exist, check them for the app_file */
1135 /* requested and read it in if present */
1136
1137
1138 error = bsd_open_page_cache_files(user, &uid_files);
1139 if(error) {
1140 thread_funnel_set(kernel_flock, funnel_state);
1141 return EINVAL;
1142 }
1143
1144 p = current_proc();
1145
1146 names_vp = uid_files->names_vp;
1147 data_vp = uid_files->data_vp;
1148 names_buf = uid_files->buf_ptr;
1149
1150
1151 /*
1152 * Get locks on both files, get the vnode with the lowest address first
1153 */
1154
1155 if((unsigned int)names_vp < (unsigned int)data_vp) {
1156 vp1 = names_vp;
1157 vp2 = data_vp;
1158 } else {
1159 vp1 = data_vp;
1160 vp2 = names_vp;
1161 }
1162 error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
1163 if(error) {
1164 printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
1165 bsd_close_page_cache_files(uid_files);
1166 thread_funnel_set(kernel_flock, funnel_state);
1167 return error;
1168 }
1169 error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
1170 if(error) {
1171 printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
1172 VOP_UNLOCK(vp1, 0, p);
1173 bsd_close_page_cache_files(uid_files);
1174 thread_funnel_set(kernel_flock, funnel_state);
1175 return error;
1176 }
1177
1178 if(error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
1179 VOP_UNLOCK(names_vp, 0, p);
1180 VOP_UNLOCK(data_vp, 0, p);
1181 printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
1182 bsd_close_page_cache_files(uid_files);
1183 thread_funnel_set(kernel_flock, funnel_state);
1184 return error;
1185 }
1186
1187 *fid = vattr.va_fileid;
1188 *mod = vattr.va_mtime.tv_sec;
1189
1190
1191 if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
1192 (unsigned int) vattr.va_mtime.tv_sec,
1193 vattr.va_fileid, &profile, &profile_size) == 0) {
1194 /* profile is an offset in the profile data base */
1195 /* It is zero if no profile data was found */
1196
1197 if(profile_size == 0) {
1198 *buffer = NULL;
1199 *buf_size = 0;
1200 VOP_UNLOCK(names_vp, 0, p);
1201 VOP_UNLOCK(data_vp, 0, p);
1202 bsd_close_page_cache_files(uid_files);
1203 thread_funnel_set(kernel_flock, funnel_state);
1204 return 0;
1205 }
1206 ret = kmem_alloc(kernel_map, buffer, profile_size);
1207 if(ret) {
1208 VOP_UNLOCK(names_vp, 0, p);
1209 VOP_UNLOCK(data_vp, 0, p);
1210 bsd_close_page_cache_files(uid_files);
1211 thread_funnel_set(kernel_flock, funnel_state);
1212 return ENOMEM;
1213 }
1214 *buf_size = profile_size;
1215 while(profile_size) {
1216 error = vn_rdwr(UIO_READ, data_vp,
1217 (caddr_t) *buffer, profile_size,
1218 profile, UIO_SYSSPACE, IO_NODELOCKED,
1219 p->p_ucred, &resid, p);
1220 if((error) || (profile_size == resid)) {
1221 VOP_UNLOCK(names_vp, 0, p);
1222 VOP_UNLOCK(data_vp, 0, p);
1223 bsd_close_page_cache_files(uid_files);
1224 kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
1225 thread_funnel_set(kernel_flock, funnel_state);
1226 return EINVAL;
1227 }
1228 profile += profile_size - resid;
1229 profile_size = resid;
1230 }
1231 VOP_UNLOCK(names_vp, 0, p);
1232 VOP_UNLOCK(data_vp, 0, p);
1233 bsd_close_page_cache_files(uid_files);
1234 thread_funnel_set(kernel_flock, funnel_state);
1235 return 0;
1236 } else {
1237 VOP_UNLOCK(names_vp, 0, p);
1238 VOP_UNLOCK(data_vp, 0, p);
1239 bsd_close_page_cache_files(uid_files);
1240 thread_funnel_set(kernel_flock, funnel_state);
1241 return EINVAL;
1242 }
1243
1244 }
1245
1246 int
1247 bsd_search_page_cache_data_base(
1248 struct vnode *vp,
1249 struct profile_names_header *database,
1250 char *app_name,
1251 unsigned int mod_date,
1252 unsigned int inode,
1253 off_t *profile,
1254 unsigned int *profile_size)
1255 {
1256
1257 struct proc *p;
1258
1259 unsigned int i;
1260 struct profile_element *element;
1261 unsigned int ele_total;
1262 unsigned int extended_list = 0;
1263 off_t file_off = 0;
1264 unsigned int size;
1265 off_t resid_off;
1266 int resid;
1267 vm_offset_t local_buf = NULL;
1268
1269 int error;
1270 kern_return_t ret;
1271
1272 p = current_proc();
1273
1274 if(((vm_offset_t)database->element_array) !=
1275 sizeof(struct profile_names_header)) {
1276 return EINVAL;
1277 }
1278 element = (struct profile_element *)(
1279 (vm_offset_t)database->element_array +
1280 (vm_offset_t)database);
1281
1282 ele_total = database->number_of_profiles;
1283
1284 *profile = 0;
1285 *profile_size = 0;
1286 while(ele_total) {
1287 /* note: code assumes header + n*ele comes out on a page boundary */
1288 if(((local_buf == 0) && (sizeof(struct profile_names_header) +
1289 (ele_total * sizeof(struct profile_element)))
1290 > (PAGE_SIZE * 4)) ||
1291 ((local_buf != 0) &&
1292 (ele_total * sizeof(struct profile_element))
1293 > (PAGE_SIZE * 4))) {
1294 extended_list = ele_total;
1295 if(element == (struct profile_element *)
1296 ((vm_offset_t)database->element_array +
1297 (vm_offset_t)database)) {
1298 ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
1299 } else {
1300 ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
1301 }
1302 extended_list -= ele_total;
1303 }
1304 for (i=0; i<ele_total; i++) {
1305 if((mod_date == element[i].mod_date)
1306 && (inode == element[i].inode)) {
1307 if(strncmp(element[i].name, app_name, 12) == 0) {
1308 *profile = element[i].addr;
1309 *profile_size = element[i].size;
1310 if(local_buf != NULL) {
1311 kmem_free(kernel_map,
1312 (vm_offset_t)local_buf, 4 * PAGE_SIZE);
1313 }
1314 return 0;
1315 }
1316 }
1317 }
1318 if(extended_list == 0)
1319 break;
1320 if(local_buf == NULL) {
1321 ret = kmem_alloc(kernel_map,
1322 (vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
1323 if(ret != KERN_SUCCESS) {
1324 return ENOMEM;
1325 }
1326 }
1327 element = (struct profile_element *)local_buf;
1328 ele_total = extended_list;
1329 extended_list = 0;
1330 file_off += 4 * PAGE_SIZE;
1331 if((ele_total * sizeof(struct profile_element)) >
1332 (PAGE_SIZE * 4)) {
1333 size = PAGE_SIZE * 4;
1334 } else {
1335 size = ele_total * sizeof(struct profile_element);
1336 }
1337 resid_off = 0;
1338 while(size) {
1339 error = vn_rdwr(UIO_READ, vp,
1340 (caddr_t)(local_buf + resid_off),
1341 size, file_off + resid_off, UIO_SYSSPACE,
1342 IO_NODELOCKED, p->p_ucred, &resid, p);
1343 if((error) || (size == resid)) {
1344 if(local_buf != NULL) {
1345 kmem_free(kernel_map,
1346 (vm_offset_t)local_buf,
1347 4 * PAGE_SIZE);
1348 }
1349 return EINVAL;
1350 }
1351 resid_off += size-resid;
1352 size = resid;
1353 }
1354 }
1355 if(local_buf != NULL) {
1356 kmem_free(kernel_map,
1357 (vm_offset_t)local_buf, 4 * PAGE_SIZE);
1358 }
1359 return 0;
1360 }
1361
1362 int
1363 bsd_write_page_cache_file(
1364 unsigned int user,
1365 char *file_name,
1366 caddr_t buffer,
1367 vm_size_t size,
1368 int mod,
1369 int fid)
1370 {
1371 struct proc *p;
1372 struct nameidata nd;
1373 struct vnode *vp = 0;
1374 int resid;
1375 off_t resid_off;
1376 int error;
1377 boolean_t funnel_state;
1378 struct vattr vattr;
1379 struct vattr data_vattr;
1380
1381 off_t profile;
1382 unsigned int profile_size;
1383
1384 vm_offset_t names_buf;
1385 struct vnode *names_vp;
1386 struct vnode *data_vp;
1387 struct vnode *vp1;
1388 struct vnode *vp2;
1389
1390 struct profile_names_header *profile_header;
1391 off_t name_offset;
1392
1393 struct global_profile *uid_files;
1394
1395
1396 funnel_state = thread_funnel_set(kernel_flock, TRUE);
1397
1398
1399
1400 error = bsd_open_page_cache_files(user, &uid_files);
1401 if(error) {
1402 thread_funnel_set(kernel_flock, funnel_state);
1403 return EINVAL;
1404 }
1405
1406 p = current_proc();
1407
1408 names_vp = uid_files->names_vp;
1409 data_vp = uid_files->data_vp;
1410 names_buf = uid_files->buf_ptr;
1411
1412 /*
1413 * Get locks on both files, get the vnode with the lowest address first
1414 */
1415
1416 if((unsigned int)names_vp < (unsigned int)data_vp) {
1417 vp1 = names_vp;
1418 vp2 = data_vp;
1419 } else {
1420 vp1 = data_vp;
1421 vp2 = names_vp;
1422 }
1423
1424 error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
1425 if(error) {
1426 printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
1427 bsd_close_page_cache_files(uid_files);
1428 thread_funnel_set(kernel_flock, funnel_state);
1429 return error;
1430 }
1431 error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
1432 if(error) {
1433 printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
1434 VOP_UNLOCK(vp1, 0, p);
1435 bsd_close_page_cache_files(uid_files);
1436 thread_funnel_set(kernel_flock, funnel_state);
1437 return error;
1438 }
1439
1440 /* Stat data file for size */
1441
1442 if(error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
1443 VOP_UNLOCK(names_vp, 0, p);
1444 VOP_UNLOCK(data_vp, 0, p);
1445 printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
1446 bsd_close_page_cache_files(uid_files);
1447 thread_funnel_set(kernel_flock, funnel_state);
1448 return error;
1449 }
1450
1451 if (bsd_search_page_cache_data_base(names_vp,
1452 (struct profile_names_header *)names_buf,
1453 file_name, (unsigned int) mod,
1454 fid, &profile, &profile_size) == 0) {
1455 /* profile is an offset in the profile data base */
1456 /* It is zero if no profile data was found */
1457
1458 if(profile_size == 0) {
1459 unsigned int header_size;
1460 vm_offset_t buf_ptr;
1461
1462 /* Our Write case */
1463
1464 /* read header for last entry */
1465 profile_header =
1466 (struct profile_names_header *)names_buf;
1467 name_offset = sizeof(struct profile_names_header) +
1468 (sizeof(struct profile_element)
1469 * profile_header->number_of_profiles);
1470 profile_header->number_of_profiles += 1;
1471
1472 if(name_offset < PAGE_SIZE * 4) {
1473 struct profile_element *name;
1474 /* write new entry */
1475 name = (struct profile_element *)
1476 (names_buf + (vm_offset_t)name_offset);
1477 name->addr = data_vattr.va_size;
1478 name->size = size;
1479 name->mod_date = mod;
1480 name->inode = fid;
1481 strncpy (name->name, file_name, 12);
1482 } else {
1483 unsigned int ele_size;
1484 struct profile_element name;
1485 /* write new entry */
1486 name.addr = data_vattr.va_size;
1487 name.size = size;
1488 name.mod_date = mod;
1489 name.inode = fid;
1490 strncpy (name.name, file_name, 12);
1491 /* write element out separately */
1492 ele_size = sizeof(struct profile_element);
1493 buf_ptr = (vm_offset_t)&name;
1494 resid_off = name_offset;
1495
1496 while(ele_size) {
1497 error = vn_rdwr(UIO_WRITE, names_vp,
1498 (caddr_t)buf_ptr,
1499 ele_size, resid_off,
1500 UIO_SYSSPACE, IO_NODELOCKED,
1501 p->p_ucred, &resid, p);
1502 if(error) {
1503 printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
1504 VOP_UNLOCK(names_vp, 0, p);
1505 VOP_UNLOCK(data_vp, 0, p);
1506 bsd_close_page_cache_files(
1507 uid_files);
1508 thread_funnel_set(
1509 kernel_flock,
1510 funnel_state);
1511 return error;
1512 }
1513 buf_ptr += (vm_offset_t)
1514 ele_size-resid;
1515 resid_off += ele_size-resid;
1516 ele_size = resid;
1517 }
1518 }
1519
1520 if(name_offset < PAGE_SIZE * 4) {
1521 header_size = name_offset +
1522 sizeof(struct profile_element);
1523
1524 } else {
1525 header_size =
1526 sizeof(struct profile_names_header);
1527 }
1528 buf_ptr = (vm_offset_t)profile_header;
1529 resid_off = 0;
1530
1531 /* write names file header */
1532 while(header_size) {
1533 error = vn_rdwr(UIO_WRITE, names_vp,
1534 (caddr_t)buf_ptr,
1535 header_size, resid_off,
1536 UIO_SYSSPACE, IO_NODELOCKED,
1537 p->p_ucred, &resid, p);
1538 if(error) {
1539 VOP_UNLOCK(names_vp, 0, p);
1540 VOP_UNLOCK(data_vp, 0, p);
1541 printf("bsd_write_page_cache_file: Can't write header %x\n", user);
1542 bsd_close_page_cache_files(
1543 uid_files);
1544 thread_funnel_set(
1545 kernel_flock, funnel_state);
1546 return error;
1547 }
1548 buf_ptr += (vm_offset_t)header_size-resid;
1549 resid_off += header_size-resid;
1550 header_size = resid;
1551 }
1552 /* write profile to data file */
1553 resid_off = data_vattr.va_size;
1554 while(size) {
1555 error = vn_rdwr(UIO_WRITE, data_vp,
1556 (caddr_t)buffer, size, resid_off,
1557 UIO_SYSSPACE, IO_NODELOCKED,
1558 p->p_ucred, &resid, p);
1559 if(error) {
1560 VOP_UNLOCK(names_vp, 0, p);
1561 VOP_UNLOCK(data_vp, 0, p);
1562 printf("bsd_write_page_cache_file: Can't write header %x\n", user);
1563 bsd_close_page_cache_files(
1564 uid_files);
1565 thread_funnel_set(
1566 kernel_flock, funnel_state);
1567 return error;
1568 }
1569 buffer += size-resid;
1570 resid_off += size-resid;
1571 size = resid;
1572 }
1573 VOP_UNLOCK(names_vp, 0, p);
1574 VOP_UNLOCK(data_vp, 0, p);
1575 bsd_close_page_cache_files(uid_files);
1576 thread_funnel_set(kernel_flock, funnel_state);
1577 return 0;
1578 }
1579 /* Someone else wrote a twin profile before us */
1580 VOP_UNLOCK(names_vp, 0, p);
1581 VOP_UNLOCK(data_vp, 0, p);
1582 bsd_close_page_cache_files(uid_files);
1583 thread_funnel_set(kernel_flock, funnel_state);
1584 return 0;
1585 } else {
1586 VOP_UNLOCK(names_vp, 0, p);
1587 VOP_UNLOCK(data_vp, 0, p);
1588 bsd_close_page_cache_files(uid_files);
1589 thread_funnel_set(kernel_flock, funnel_state);
1590 return EINVAL;
1591 }
1592
1593 }
1594
1595 int
1596 prepare_profile_database(int user)
1597 {
1598 char *cache_path = "/var/vm/app_profile/";
1599 struct proc *p;
1600 int error;
1601 int resid;
1602 off_t resid_off;
1603 unsigned int lru;
1604 vm_size_t size;
1605
1606 struct vnode *names_vp;
1607 struct vnode *data_vp;
1608 vm_offset_t names_buf;
1609 vm_offset_t buf_ptr;
1610
1611 int profile_names_length;
1612 int profile_data_length;
1613 char *profile_data_string;
1614 char *profile_names_string;
1615 char *substring;
1616
1617 struct vattr vattr;
1618
1619 struct profile_names_header *profile_header;
1620 kern_return_t ret;
1621
1622 struct nameidata nd_names;
1623 struct nameidata nd_data;
1624
1625 int i;
1626
1627 p = current_proc();
1628
1629 ret = kmem_alloc(kernel_map,
1630 (vm_offset_t *)&profile_data_string, PATH_MAX);
1631
1632 if(ret) {
1633 return ENOMEM;
1634 }
1635
1636 /* Split the buffer in half since we know the size of */
1637 /* our file path and our allocation is adequate for */
1638 /* both file path names */
1639 profile_names_string = profile_data_string + (PATH_MAX/2);
1640
1641
1642 strcpy(profile_data_string, cache_path);
1643 strcpy(profile_names_string, cache_path);
1644 profile_names_length = profile_data_length
1645 = strlen(profile_data_string);
1646 substring = profile_data_string + profile_data_length;
1647 sprintf(substring, "%x_data", user);
1648 substring = profile_names_string + profile_names_length;
1649 sprintf(substring, "%x_names", user);
1650
1651 /* We now have the absolute file names */
1652
1653 ret = kmem_alloc(kernel_map,
1654 (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
1655 if(ret) {
1656 kmem_free(kernel_map,
1657 (vm_offset_t)profile_data_string, PATH_MAX);
1658 return ENOMEM;
1659 }
1660
1661 NDINIT(&nd_names, LOOKUP, FOLLOW,
1662 UIO_SYSSPACE, profile_names_string, p);
1663 NDINIT(&nd_data, LOOKUP, FOLLOW,
1664 UIO_SYSSPACE, profile_data_string, p);
1665
1666 if (error = vn_open(&nd_data,
1667 O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
1668 kmem_free(kernel_map,
1669 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1670 kmem_free(kernel_map,
1671 (vm_offset_t)profile_data_string, PATH_MAX);
1672 return 0;
1673 }
1674
1675 data_vp = nd_data.ni_vp;
1676 VOP_UNLOCK(data_vp, 0, p);
1677
1678 if (error = vn_open(&nd_names,
1679 O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
1680 printf("prepare_profile_database: Can't create CacheNames %s\n",
1681 profile_names_string);
1682 kmem_free(kernel_map,
1683 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1684 kmem_free(kernel_map,
1685 (vm_offset_t)profile_data_string, PATH_MAX);
1686 vrele(data_vp);
1687 return error;
1688 }
1689
1690 names_vp = nd_names.ni_vp;
1691
1692
1693 /* Write Header for new names file */
1694
1695 profile_header = (struct profile_names_header *)names_buf;
1696
1697 profile_header->number_of_profiles = 0;
1698 profile_header->user_id = user;
1699 profile_header->version = 1;
1700 profile_header->element_array =
1701 sizeof(struct profile_names_header);
1702 profile_header->spare1 = 0;
1703 profile_header->spare2 = 0;
1704 profile_header->spare3 = 0;
1705
1706 size = sizeof(struct profile_names_header);
1707 buf_ptr = (vm_offset_t)profile_header;
1708 resid_off = 0;
1709
1710 while(size) {
1711 error = vn_rdwr(UIO_WRITE, names_vp,
1712 (caddr_t)buf_ptr, size, resid_off,
1713 UIO_SYSSPACE, IO_NODELOCKED,
1714 p->p_ucred, &resid, p);
1715 if(error) {
1716 printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
1717 kmem_free(kernel_map,
1718 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1719 kmem_free(kernel_map,
1720 (vm_offset_t)profile_data_string,
1721 PATH_MAX);
1722 vput(names_vp);
1723 vrele(data_vp);
1724 return error;
1725 }
1726 buf_ptr += size-resid;
1727 resid_off += size-resid;
1728 size = resid;
1729 }
1730
1731 VATTR_NULL(&vattr);
1732 vattr.va_uid = user;
1733 error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);
1734 if(error) {
1735 printf("prepare_profile_database: "
1736 "Can't set user %s\n", profile_names_string);
1737 }
1738 vput(names_vp);
1739
1740 error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);
1741 if(error) {
1742 vrele(data_vp);
1743 printf("prepare_profile_database: cannot lock data file %s\n",
1744 profile_data_string);
1745 kmem_free(kernel_map,
1746 (vm_offset_t)profile_data_string, PATH_MAX);
1747 kmem_free(kernel_map,
1748 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1749 return error;
}
1750 VATTR_NULL(&vattr);
1751 vattr.va_uid = user;
1752 error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);
1753 if(error) {
1754 printf("prepare_profile_database: "
1755 "Can't set user %s\n", profile_data_string);
1756 }
1757
1758 vput(data_vp);
1759 kmem_free(kernel_map,
1760 (vm_offset_t)profile_data_string, PATH_MAX);
1761 kmem_free(kernel_map,
1762 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1763 return 0;
1764
1765 }