/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 */


#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dir.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>
#include <sys/stat.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>


extern zone_t lsf_zone;

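/*
 * useracc: check whether the current task's address space permits the
 * requested access over the page-rounded range [addr, addr+len).
 * B_READ maps to VM_PROT_READ; anything else is treated as a write check.
 */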
useracc(addr, len, prot)
	caddr_t	addr;
	u_int	len;
	int	prot;
{
	return (vm_map_check_protection(
			current_map(),
			trunc_page_32((unsigned int)addr), round_page_32((unsigned int)(addr+len)),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}

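/*
 * vslock: wire the page-rounded user range [addr, addr+len) into physical
 * memory, read/write.  The Mach kern_return_t is translated to the errno
 * conventions (0, ENOMEM, EACCES, EINVAL) expected by BSD callers.
 */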
vslock(addr, len)
	caddr_t	addr;
	int	len;
{
	kern_return_t kret;
	kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr),
			round_page_32((unsigned int)(addr+len)),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);

	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

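/*
 * vsunlock: undo a vslock.  The path that would mark the underlying
 * pages modified when "dirtied" is set is compiled out (FIXME below);
 * only the vm_map_unwire of the page-rounded range is performed.
 */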
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	int	len;
	int	dirtied;
{
	pmap_t		pmap;
#if FIXME  /* [ */
	vm_page_t	pg;
#endif  /* FIXME ] */
	vm_offset_t	vaddr, paddr;
	kern_return_t	kret;

#if FIXME  /* [ */
	if (dirtied) {
		pmap = get_task_pmap(current_task());
		for (vaddr = trunc_page((unsigned int)(addr)); vaddr < round_page((unsigned int)(addr+len));
				vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
#endif  /* FIXME ] */
#ifdef	lint
	dirtied++;
#endif	/* lint */
	kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)),
				round_page_32((unsigned int)(addr+len)), FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

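/*
 * subyte/suibyte and suword/suiword store a byte or word into user space
 * via copyout; fubyte/fuibyte and fuword/fuiword fetch one via copyin.
 * Each returns -1 on fault.  The "i" (instruction-space) variants are
 * identical to the plain ones on this architecture.
 */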
#if	defined(sun) || BALANCE || defined(m88k)
#else	/* defined(sun) || BALANCE || defined(m88k) */
subyte(addr, byte)
	void * addr;
	int byte;
{
	char character;

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

suibyte(addr, byte)
	void * addr;
	int byte;
{
	char character;

	character = (char)byte;
	return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int fubyte(addr)
	void * addr;
{
	unsigned char byte;

	if (copyin(addr, (void *) &byte, sizeof(char)))
		return(-1);
	return(byte);
}

int fuibyte(addr)
	void * addr;
{
	unsigned char byte;

	if (copyin(addr, (void *) &(byte), sizeof(char)))
		return(-1);
	return(byte);
}

suword(addr, word)
	void * addr;
	long word;
{
	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuword(addr)
	void * addr;
{
	long word;

	if (copyin(addr, (void *) &word, sizeof(int)))
		return(-1);
	return(word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

suiword(addr, word)
	void * addr;
	long word;
{
	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuiword(addr)
	void * addr;
{
	long word;

	if (copyin(addr, (void *) &word, sizeof(int)))
		return(-1);
	return(word);
}
#endif	/* defined(sun) || BALANCE || defined(m88k) */

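/*
 * swapon(2) is not supported here; the Mach VM system manages its own
 * backing store, so the syscall simply fails with EOPNOTSUPP.
 */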
int
swapon()
{
	return(EOPNOTSUPP);
}


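/*
 *	Routine:	pid_for_task
 *	Purpose:
 *		Given a task port name, return the process ID of the BSD
 *		process attached to the task it names.  The pid (or -1 if
 *		the port does not name such a task) is copied out to *x.
 */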
kern_return_t
pid_for_task(t, x)
	mach_port_t	t;
	int		*x;
{
	struct proc	*p;
	task_t		t1;
	extern task_t port_name_to_task(mach_port_t t);
	int		pid = -1;
	kern_return_t	err = KERN_SUCCESS;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
		goto pftout;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid = p->p_pid;
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
	}
	task_deallocate(t1);
pftout:
	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
	thread_funnel_set(kernel_flock, funnel_state);
	return(err);
}

/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;
	int		pid;
	mach_port_t	*t;
{
	struct proc	*p;
	struct proc	*p1;
	task_t		t1;
	mach_port_t	tret;
	extern task_t port_name_to_task(mach_port_t tp);
	void		*sright;
	int		error = 0;
	boolean_t	funnel_state;

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
		return(KERN_FAILURE);
	}

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

restart:
	p1 = get_bsdtask_info(t1);
	if (
		((p = pfind(pid)) != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
		((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
		|| !(suser(p1->p_ucred, &p1->p_acflag)))
		&& (p->p_stat != SZOMB)
		) {
			if (p->task != TASK_NULL) {
				if (!task_reference_try(p->task)) {
					mutex_pause(); /* temp loss of funnel */
					goto restart;
				}
				sright = (void *)convert_task_to_port(p->task);
				tret = (void *)
					ipc_port_copyout_send(sright,
					       get_task_ipcspace(current_task()));
			} else
				tret = MACH_PORT_NULL;
			(void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
			task_deallocate(t1);
			error = KERN_SUCCESS;
			goto tfpout;
	}
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
	error = KERN_FAILURE;
tfpout:
	thread_funnel_set(kernel_flock, funnel_state);
	return(error);
}


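/*
 * load_shared_file maps a file into the task's global shared regions.
 * The caller passes the file name, an already-mapped copy of the file
 * (mfa/mfs), the desired base address, and an array of sf_mapping_t
 * entries describing the individual segment mappings.  The flags word
 * is both an input (QUERY_IS_SYSTEM_REGION, NEW_LOCAL_SHARED_REGIONS,
 * ALTERNATE_LOAD_SITE) and an output (SYSTEM_REGION_BACKED).
 */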
struct load_shared_file_args {
	char		*filename;
	caddr_t		mfa;
	u_long		mfs;
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
	int		*flags;
};

int	ws_disabled = 1;

int
load_shared_file(
	struct proc		*p,
	struct load_shared_file_args *uap,
	register		*retval)
{
	caddr_t		mapped_file_addr=uap->mfa;
	u_long		mapped_file_size=uap->mfs;
	caddr_t		*base_address=uap->ba;
	int		map_cnt=uap->map_cnt;
	sf_mapping_t	*mappings=uap->mappings;
	char		*filename=uap->filename;
	int		*flags=uap->flags;
	struct vnode		*vp = 0;
	struct nameidata	nd, *ndp;
	char			*filename_str;
	register int		error;
	kern_return_t		kr;

	struct vattr	vattr;
	memory_object_control_t file_control;
	sf_mapping_t	*map_list;
	caddr_t		local_base;
	int		local_flags;
	int		caller_flags;
	int		i;
	int		default_regions = 0;
	vm_size_t	dummy;
	kern_return_t	kret;

	shared_region_mapping_t shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t next;

	ndp = &nd;


	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto lsf_bailout;
	}
	if (error = copyin(flags, &local_flags, sizeof (int))) {
		goto lsf_bailout;
	}

	if(local_flags & QUERY_IS_SYSTEM_REGION) {
		shared_region_mapping_t	default_shared_region;
		vm_get_shared_region(current_task(), &shared_region);
		task_mapping_info.self = (vm_offset_t)shared_region;

		shared_region_mapping_info(shared_region,
				&(task_mapping_info.text_region),
				&(task_mapping_info.text_size),
				&(task_mapping_info.data_region),
				&(task_mapping_info.data_size),
				&(task_mapping_info.region_mappings),
				&(task_mapping_info.client_base),
				&(task_mapping_info.alternate_base),
				&(task_mapping_info.alternate_next),
				&(task_mapping_info.fs_base),
				&(task_mapping_info.system),
				&(task_mapping_info.flags), &next);

		default_shared_region =
			lookup_default_shared_region(
				ENV_DEFAULT_ROOT,
				task_mapping_info.system);
		if (shared_region == default_shared_region) {
			local_flags = SYSTEM_REGION_BACKED;
		} else {
			local_flags = 0;
		}
		shared_region_mapping_dealloc(default_shared_region);
		error = 0;
		error = copyout(&local_flags, flags, sizeof (int));
		goto lsf_bailout;
	}
	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto lsf_bailout;
	}
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
			(vm_size_t)(MAXPATHLEN));
		error = ENOMEM;
		goto lsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;
	}

	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, (size_t *)&dummy)) {
		goto lsf_bailout_free;
	}

	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    filename_str, p);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;
	}

	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;
	}


	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

#ifdef notdef
	if(vattr.va_size != mapped_file_size) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}
#endif
	if(p->p_flag & P_NOSHLIB) {
		p->p_flag = p->p_flag & ~P_NOSHLIB;
	}

	/* load alternate regions if the caller has requested.  */
	/* Note: the new regions are "clean slates" */
	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
		error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
		if (error) {
			goto lsf_bailout_free_vput;
		}
	}

	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.fs_base),
			&(task_mapping_info.system),
			&(task_mapping_info.flags), &next);

	{
		shared_region_mapping_t	default_shared_region;
		default_shared_region =
			lookup_default_shared_region(
				ENV_DEFAULT_ROOT,
				task_mapping_info.system);
		if(shared_region == default_shared_region) {
			default_regions = 1;
		}
		shared_region_mapping_dealloc(default_shared_region);
	}
	/* If we are running on a removable file system we must not */
	/* be in a set of shared regions or the file system will not */
	/* be removable. */
	if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
		&& (lsf_mapping_pool_gauge() < 75)) {
		/* We don't want to run out of shared memory */
		/* map entries by starting too many private versions */
		/* of the shared library structures */
		int	error;
		if(p->p_flag & P_NOSHLIB) {
			error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
		} else {
			error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);
		}
		if (error) {
			goto lsf_bailout_free_vput;
		}
		local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
		vm_get_shared_region(current_task(), &shared_region);
		shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.fs_base),
			&(task_mapping_info.system),
			&(task_mapping_info.flags), &next);
	}

	/*  This is a work-around to allow executables which have been */
	/*  built without knowledge of the proper shared segment to */
	/*  load.  This code has been architected as a shared region */
	/*  handler, the knowledge of where the regions are loaded is */
	/*  problematic for the extension of shared regions as it will */
	/*  not be easy to know what region an item should go into. */
	/*  The code below however will get around a short term problem */
	/*  with executables which believe they are loading at zero. */

	{
		if (((unsigned int)local_base &
			(~(task_mapping_info.text_size - 1))) !=
			task_mapping_info.client_base) {
			if(local_flags & ALTERNATE_LOAD_SITE) {
				local_base = (caddr_t)(
					(unsigned int)local_base &
					   (task_mapping_info.text_size - 1));
				local_base = (caddr_t)((unsigned int)local_base
					| task_mapping_info.client_base);
			} else {
				error = EINVAL;
				goto lsf_bailout_free_vput;
			}
		}
	}


	if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			mapped_file_size,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_control,
			&task_mapping_info, &local_flags))) {
		switch (kr) {
			case KERN_FAILURE:
				error = EINVAL;
				break;
			case KERN_INVALID_ARGUMENT:
				error = EINVAL;
				break;
			case KERN_INVALID_ADDRESS:
				error = EACCES;
				break;
			case KERN_PROTECTION_FAILURE:
				/* save EAUTH for authentication in this */
				/* routine */
				error = EPERM;
				break;
			case KERN_NO_SPACE:
				error = ENOMEM;
				break;
			default:
				error = EINVAL;
		};
		if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
			for(i=0; i<map_cnt; i++) {
				printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
					, i, map_list[i].mapping_offset,
					map_list[i].size,
					map_list[i].file_offset,
					map_list[i].protection);
			}
		}
	} else {
		if(default_regions)
			local_flags |= SYSTEM_REGION_BACKED;
		if(!(error = copyout(&local_flags, flags, sizeof (int)))) {
			error = copyout(&local_base,
				base_address, sizeof (caddr_t));
		}
	}

lsf_bailout_free_vput:
	vput(vp);

lsf_bailout_free:
	kmem_free(kernel_map, (vm_offset_t)filename_str,
		(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
	return error;
}

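/*
 * reset_shared_file restores the read-only view of the global shared
 * data segment for the mappings the caller names: each data mapping is
 * deallocated from the task's map and re-mapped read-only from
 * shared_data_region_handle, so any private pages the task had in those
 * ranges are discarded.
 */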
struct reset_shared_file_args {
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
};

int
reset_shared_file(
	struct proc		*p,
	struct reset_shared_file_args *uap,
	register		*retval)
{
	caddr_t		*base_address=uap->ba;
	int		map_cnt=uap->map_cnt;
	sf_mapping_t	*mappings=uap->mappings;
	register int		error;
	kern_return_t		kr;

	sf_mapping_t	*map_list;
	caddr_t		local_base;
	vm_offset_t	map_address;
	int		i;
	kern_return_t	kret;

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto rsf_bailout;
	}

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
					!= GLOBAL_SHARED_TEXT_SEGMENT) {
		error = EINVAL;
		goto rsf_bailout;
	}

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto rsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

		kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		goto rsf_bailout;
	}
	for (i = 0; i<map_cnt; i++) {
		if((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),
				map_address,
				map_list[i].size);
			vm_map(current_map(), &map_address,
				map_list[i].size, 0, SHARED_LIB_ALIAS,
				shared_data_region_handle,
				((unsigned int)local_base
				   & SHARED_DATA_REGION_MASK) +
					(map_list[i].mapping_offset
					& SHARED_DATA_REGION_MASK),
				TRUE, VM_PROT_READ,
				VM_PROT_READ, VM_INHERIT_SHARE);
		}
	}

	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
	return error;
}

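/*
 * new_system_shared_regions (superuser only) clears all existing default
 * shared regions via remove_all_shared_regions, so fresh regions are
 * built for subsequent use.
 */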
struct new_system_shared_regions_args {
	int dummy;
};

int
new_system_shared_regions(
	struct proc		*p,
	struct new_system_shared_regions_args *uap,
	register		*retval)
{
	shared_region_mapping_t	regions;
	shared_region_mapping_t	new_regions;

	if(!(is_suser())) {
		*retval = EINVAL;
		return EINVAL;
	}

	/* clear all of our existing defaults */
	remove_all_shared_regions();

	*retval = 0;
	return 0;
}


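/*
 * clone_system_shared_regions gives the current task a private copy of
 * the system shared regions.  With shared_regions_active (or the default
 * root), a new region is created and chained to the old one so existing
 * text/data mappings stay visible; otherwise the per-filesystem default
 * region for base_vnode is looked up (or built at first use).  The live
 * text and data ranges in the task's map are then replaced with the new
 * regions.
 */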
int
clone_system_shared_regions(shared_regions_active, base_vnode)
{
	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	struct proc	*p;

	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
		&(old_info.text_region),
		&(old_info.text_size),
		&(old_info.data_region),
		&(old_info.data_size),
		&(old_info.region_mappings),
		&(old_info.client_base),
		&(old_info.alternate_base),
		&(old_info.alternate_next),
		&(old_info.fs_base),
		&(old_info.system),
		&(old_info.flags), &next);
	if ((shared_regions_active) ||
		(base_vnode == ENV_DEFAULT_ROOT)) {
	   if (shared_file_create_system_region(&new_shared_region))
		return (ENOMEM);
	} else {
	   new_shared_region =
		lookup_default_shared_region(
			base_vnode, old_info.system);
	   if(new_shared_region == NULL) {
		shared_file_boot_time_init(
			base_vnode, old_info.system);
		vm_get_shared_region(current_task(), &new_shared_region);
	   } else {
		vm_set_shared_region(current_task(), new_shared_region);
	   }
	   if(old_shared_region)
		shared_region_mapping_dealloc(old_shared_region);
	}
	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
		&(new_info.text_region),
		&(new_info.text_size),
		&(new_info.data_region),
		&(new_info.data_size),
		&(new_info.region_mappings),
		&(new_info.client_base),
		&(new_info.alternate_base),
		&(new_info.alternate_next),
		&(new_info.fs_base),
		&(new_info.system),
		&(new_info.flags), &next);
	if(shared_regions_active) {
	   if(vm_region_clone(old_info.text_region, new_info.text_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 1");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	   }
	   if (vm_region_clone(old_info.data_region, new_info.data_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 2");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	   }
	   shared_region_object_chain_attach(
		new_shared_region, old_shared_region);
	}
	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base+old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if(vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,
			old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	vm_set_shared_region(current_task(), new_shared_region);

	/* consume the reference which wasn't accounted for in object */
	/* chain attach */
	if(!shared_regions_active)
		shared_region_mapping_dealloc(old_shared_region);

	return(0);

}

extern vm_map_t bsd_pageable_map;

/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file     */

struct profile_names_header {
	unsigned int	number_of_profiles;
	unsigned int	user_id;
	unsigned int	version;
	off_t		element_array;
	unsigned int	spare1;
	unsigned int	spare2;
	unsigned int	spare3;
};

struct profile_element {
	off_t		addr;
	vm_size_t	size;
	unsigned int	mod_date;
	unsigned int	inode;
	char		name[12];
};

struct global_profile {
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	buf_ptr;
	unsigned int	user;
	unsigned int	age;
	unsigned int	busy;
};

struct global_profile_cache {
	int			max_ele;
	unsigned int		age;
	struct global_profile	profiles[3];
};

struct global_profile_cache global_user_profile_cache =
	{3, 0, NULL, NULL, NULL, 0, 0, 0,
		NULL, NULL, NULL, 0, 0, 0,
		NULL, NULL, NULL, 0, 0, 0 };

/* BSD_OPEN_PAGE_CACHE_FILES:                                 */
/* Caller provides a user id.  This id was used in            */
/* prepare_profile_database to create two unique absolute     */
/* file paths to the associated profile files.  These files   */
/* are either opened or bsd_open_page_cache_files returns an  */
/* error.  The header of the names file is then consulted.    */
/* The header and the vnodes for the names and data files are */
/* returned.                                                  */

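/*
 * Typical call pattern (a sketch, mirroring bsd_read_page_cache_file):
 *
 *	struct global_profile *profile;
 *
 *	if (bsd_open_page_cache_files(user, &profile) == 0) {
 *		... use profile->names_vp, profile->data_vp and
 *		    profile->buf_ptr (first 4 pages of the names file) ...
 *		bsd_close_page_cache_files(profile);
 *	}
 *
 * The entry stays busy until bsd_close_page_cache_files wakes any
 * waiters, so every successful open must be paired with a close.
 */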
int
bsd_open_page_cache_files(
	unsigned int	user,
	struct global_profile **profile)
{
	char		*cache_path = "/var/vm/app_profile/";
	struct proc	*p;
	int		error;
	int		resid;
	off_t		resid_off;
	unsigned int	lru;
	vm_size_t	size;

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	char		*substring;

	struct vattr	vattr;

	struct	profile_names_header *profile_header;
	kern_return_t	ret;

	struct nameidata nd_names;
	struct nameidata nd_data;

	int		i;


	p = current_proc();

restart:
	for(i = 0; i<global_user_profile_cache.max_ele; i++) {
		if((global_user_profile_cache.profiles[i].user == user)
			&& (global_user_profile_cache.profiles[i].data_vp
								!= NULL)) {
			*profile = &global_user_profile_cache.profiles[i];
			/* already in cache, we're done */
			if ((*profile)->busy) {
				/*
				 * drop funnel and wait
				 */
				(void)tsleep((void *)
					*profile,
					PRIBIO, "app_profile", 0);
				goto restart;
			}
			(*profile)->busy = 1;
			(*profile)->age = global_user_profile_cache.age;
			global_user_profile_cache.age+=1;
			return 0;
		}
	}

	lru = global_user_profile_cache.age;
	for(i = 0; i<global_user_profile_cache.max_ele; i++) {
		if(global_user_profile_cache.profiles[i].data_vp == NULL) {
			*profile = &global_user_profile_cache.profiles[i];
			(*profile)->age = global_user_profile_cache.age;
			global_user_profile_cache.age+=1;
			break;
		}
		if(global_user_profile_cache.profiles[i].age < lru) {
			lru = global_user_profile_cache.profiles[i].age;
			*profile = &global_user_profile_cache.profiles[i];
		}
	}

	if ((*profile)->busy) {
		/*
		 * drop funnel and wait
		 */
		(void)tsleep((void *)
			&(global_user_profile_cache),
			PRIBIO, "app_profile", 0);
		goto restart;
	}
	(*profile)->busy = 1;
	(*profile)->user = user;

	if((*profile)->data_vp != NULL) {
		kmem_free(kernel_map,
			(*profile)->buf_ptr, 4 * PAGE_SIZE);
		if ((*profile)->names_vp) {
			vrele((*profile)->names_vp);
			(*profile)->names_vp = NULL;
		}
		if ((*profile)->data_vp) {
			vrele((*profile)->data_vp);
			(*profile)->data_vp = NULL;
		}
	}

	/* put dummy value in for now to get */
	/* competing request to wait above */
	/* until we are finished */
	(*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them  */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */


	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);

	if(ret) {
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return ENOMEM;
	}

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names                               */
	profile_names_string = profile_data_string + (PATH_MAX/2);


	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
			= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);

	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
			(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
	if(ret) {
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return ENOMEM;
	}

	NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
			UIO_SYSSPACE, profile_names_string, p);
	NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
			UIO_SYSSPACE, profile_data_string, p);
	if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
#ifdef notdef
		printf("bsd_open_page_cache_files: CacheData file not found %s\n",
			profile_data_string);
#endif
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}

	data_vp = nd_data.ni_vp;
	VOP_UNLOCK(data_vp, 0, p);

	if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
		printf("bsd_open_page_cache_files: NamesData file not found %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		vrele(data_vp);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}
	names_vp = nd_names.ni_vp;

	if(error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
		printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		vput(names_vp);
		vrele(data_vp);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}

	size = vattr.va_size;
	if(size > 4 * PAGE_SIZE)
		size = 4 * PAGE_SIZE;
	buf_ptr = names_buf;
	resid_off = 0;

	while(size) {
		error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
			size, resid_off,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
		if((error) || (size == resid)) {
			if(!error) {
				error = EINVAL;
			}
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string, PATH_MAX);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
			vput(names_vp);
			vrele(data_vp);
			(*profile)->data_vp = NULL;
			(*profile)->busy = 0;
			wakeup(*profile);
			return error;
		}
		buf_ptr += size-resid;
		resid_off += size-resid;
		size = resid;
	}

	VOP_UNLOCK(names_vp, 0, p);
	kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
	(*profile)->names_vp = names_vp;
	(*profile)->data_vp = data_vp;
	(*profile)->buf_ptr = names_buf;
	return 0;

}

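/*
 * bsd_close_page_cache_files releases a profile entry claimed by
 * bsd_open_page_cache_files: it clears the busy flag and wakes any
 * thread sleeping on the entry.
 */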
void
bsd_close_page_cache_files(
	struct global_profile *profile)
{
	profile->busy = 0;
	wakeup(profile);
}

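/*
 * bsd_read_page_cache_file looks up the profile recorded for app_name
 * (keyed by the inode and modification date of app_vp) in the per-user
 * profile database and, if one is found, reads it into a freshly
 * allocated kernel buffer returned through *buffer and *buf_size.  The
 * application's file id and mod date are returned through fid and mod.
 */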
int
bsd_read_page_cache_file(
	unsigned int	user,
	int		*fid,
	int		*mod,
	char		*app_name,
	struct vnode	*app_vp,
	vm_offset_t	*buffer,
	vm_offset_t	*buf_size)
{

	boolean_t	funnel_state;

	struct proc	*p;
	int		error;
	int		resid;
	vm_size_t	size;

	off_t		profile;
	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vattr	vattr;

	kern_return_t	ret;

	struct	vnode	*names_vp;
	struct	vnode	*data_vp;
	struct	vnode	*vp1;
	struct	vnode	*vp2;

	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them  */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */


	error = bsd_open_page_cache_files(user, &uid_files);
	if(error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;


	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */

	if((unsigned int)names_vp < (unsigned int)data_vp) {
		vp1 = names_vp;
		vp2 = data_vp;
	} else {
		vp1 = data_vp;
		vp2 = names_vp;
	}
	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
	if(error) {
		printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}
	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
	if(error) {
		printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	if(error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	*fid = vattr.va_fileid;
	*mod = vattr.va_mtime.tv_sec;


	if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
			(unsigned int) vattr.va_mtime.tv_sec,
			vattr.va_fileid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if(profile_size == 0) {
			*buffer = NULL;
			*buf_size = 0;
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return 0;
		}
		ret = kmem_alloc(kernel_map, buffer, profile_size);
		if(ret) {
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return ENOMEM;
		}
		*buf_size = profile_size;
		while(profile_size) {
			error = vn_rdwr(UIO_READ, data_vp,
				(caddr_t) *buffer, profile_size,
				profile, UIO_SYSSPACE, IO_NODELOCKED,
				p->p_ucred, &resid, p);
			if((error) || (profile_size == resid)) {
				VOP_UNLOCK(names_vp, 0, p);
				VOP_UNLOCK(data_vp, 0, p);
				bsd_close_page_cache_files(uid_files);
				kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
				thread_funnel_set(kernel_flock, funnel_state);
				return EINVAL;
			}
			profile += profile_size - resid;
			profile_size = resid;
		}
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

}

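/*
 * bsd_search_page_cache_data_base scans the names file for an element
 * matching app_name, mod_date and inode.  The first four pages are
 * searched in the caller-supplied buffer; longer element arrays are
 * paged in from the vnode four pages at a time.  On a hit, the
 * profile's offset and size in the data file are returned;
 * *profile_size stays zero when no entry matches.
 */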
int
bsd_search_page_cache_data_base(
	struct	vnode			*vp,
	struct profile_names_header	*database,
	char				*app_name,
	unsigned int			mod_date,
	unsigned int			inode,
	off_t				*profile,
	unsigned int			*profile_size)
{

	struct proc	*p;

	unsigned int	i;
	struct profile_element	*element;
	unsigned int	ele_total;
	unsigned int	extended_list = 0;
	off_t		file_off = 0;
	unsigned int	size;
	off_t		resid_off;
	int		resid;
	vm_offset_t	local_buf = NULL;

	int		error;
	kern_return_t	ret;

	p = current_proc();

	if(((vm_offset_t)database->element_array) !=
				sizeof(struct profile_names_header)) {
		return EINVAL;
	}
	element = (struct profile_element *)(
			(vm_offset_t)database->element_array +
						(vm_offset_t)database);

	ele_total = database->number_of_profiles;

	*profile = 0;
	*profile_size = 0;
	while(ele_total) {
		/* note: code assumes header + n*ele comes out on a page boundary */
		if(((local_buf == 0) && (sizeof(struct profile_names_header) +
			(ele_total * sizeof(struct profile_element)))
					> (PAGE_SIZE * 4)) ||
			((local_buf != 0) &&
				(ele_total * sizeof(struct profile_element))
					> (PAGE_SIZE * 4))) {
			extended_list = ele_total;
			if(element == (struct profile_element *)
				((vm_offset_t)database->element_array +
						(vm_offset_t)database)) {
				ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
			} else {
				ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
			}
			extended_list -= ele_total;
		}
		for (i=0; i<ele_total; i++) {
			if((mod_date == element[i].mod_date)
					&& (inode == element[i].inode)) {
				if(strncmp(element[i].name, app_name, 12) == 0) {
					*profile = element[i].addr;
					*profile_size = element[i].size;
					if(local_buf != NULL) {
						kmem_free(kernel_map,
							(vm_offset_t)local_buf, 4 * PAGE_SIZE);
					}
					return 0;
				}
			}
		}
		if(extended_list == 0)
			break;
		if(local_buf == NULL) {
			ret = kmem_alloc(kernel_map,
				(vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
			if(ret != KERN_SUCCESS) {
				return ENOMEM;
			}
		}
		element = (struct profile_element *)local_buf;
		ele_total = extended_list;
		extended_list = 0;
		file_off += 4 * PAGE_SIZE;
		if((ele_total * sizeof(struct profile_element)) >
							(PAGE_SIZE * 4)) {
			size = PAGE_SIZE * 4;
		} else {
			size = ele_total * sizeof(struct profile_element);
		}
		resid_off = 0;
		while(size) {
			error = vn_rdwr(UIO_READ, vp,
				(caddr_t)(local_buf + resid_off),
				size, file_off + resid_off, UIO_SYSSPACE,
				IO_NODELOCKED, p->p_ucred, &resid, p);
			if((error) || (size == resid)) {
				if(local_buf != NULL) {
					kmem_free(kernel_map,
						(vm_offset_t)local_buf,
						4 * PAGE_SIZE);
				}
				return EINVAL;
			}
			resid_off += size-resid;
			size = resid;
		}
	}
	if(local_buf != NULL) {
		kmem_free(kernel_map,
			(vm_offset_t)local_buf, 4 * PAGE_SIZE);
	}
	return 0;
}

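/*
 * bsd_write_page_cache_file appends a new profile for file_name (keyed
 * by fid/mod) to the per-user database: the profile data is written at
 * the end of the data file and a matching profile_element is added to
 * the names file.  If a twin profile already exists, the write is
 * silently skipped.
 */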
int
bsd_write_page_cache_file(
	unsigned int	user,
	char		*file_name,
	caddr_t		buffer,
	vm_size_t	size,
	int		mod,
	int		fid)
{
	struct proc		*p;
	struct nameidata	nd;
	struct vnode		*vp = 0;
	int			resid;
	off_t			resid_off;
	int			error;
	boolean_t		funnel_state;
	struct vattr		vattr;
	struct vattr		data_vattr;

	off_t		profile;
	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	struct vnode	*vp1;
	struct vnode	*vp2;

	struct	profile_names_header *profile_header;
	off_t			name_offset;

	struct global_profile *uid_files;


	funnel_state = thread_funnel_set(kernel_flock, TRUE);



	error = bsd_open_page_cache_files(user, &uid_files);
	if(error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */

	if((unsigned int)names_vp < (unsigned int)data_vp) {
		vp1 = names_vp;
		vp2 = data_vp;
	} else {
		vp1 = data_vp;
		vp2 = names_vp;
	}

	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
	if(error) {
		printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}
	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
	if(error) {
		printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	/* Stat data file for size */

	if(error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	if (bsd_search_page_cache_data_base(names_vp,
			(struct profile_names_header *)names_buf,
			file_name, (unsigned int) mod,
			fid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if(profile_size == 0) {
			unsigned int	header_size;
			vm_offset_t	buf_ptr;

			/* Our Write case */

			/* read header for last entry */
			profile_header =
				(struct profile_names_header *)names_buf;
			name_offset = sizeof(struct profile_names_header) +
				(sizeof(struct profile_element)
					* profile_header->number_of_profiles);
			profile_header->number_of_profiles += 1;

			if(name_offset < PAGE_SIZE * 4) {
				struct profile_element	*name;
				/* write new entry */
				name = (struct profile_element *)
					(names_buf + (vm_offset_t)name_offset);
				name->addr = data_vattr.va_size;
				name->size = size;
				name->mod_date = mod;
				name->inode = fid;
				strncpy (name->name, file_name, 12);
			} else {
				unsigned int	ele_size;
				struct profile_element	name;
				/* write new entry */
				name.addr = data_vattr.va_size;
				name.size = size;
				name.mod_date = mod;
				name.inode = fid;
				strncpy (name.name, file_name, 12);
				/* write element out separately */
				ele_size = sizeof(struct profile_element);
				buf_ptr = (vm_offset_t)&name;
				resid_off = name_offset;

				while(ele_size) {
					error = vn_rdwr(UIO_WRITE, names_vp,
						(caddr_t)buf_ptr,
						ele_size, resid_off,
						UIO_SYSSPACE, IO_NODELOCKED,
						p->p_ucred, &resid, p);
					if(error) {
						printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
						VOP_UNLOCK(names_vp, 0, p);
						VOP_UNLOCK(data_vp, 0, p);
						bsd_close_page_cache_files(
							uid_files);
						thread_funnel_set(
							kernel_flock,
							funnel_state);
						return error;
					}
					buf_ptr += (vm_offset_t)
							ele_size-resid;
					resid_off += ele_size-resid;
					ele_size = resid;
				}
			}

			if(name_offset < PAGE_SIZE * 4) {
				header_size = name_offset +
					sizeof(struct profile_element);

			} else {
				header_size =
					sizeof(struct profile_names_header);
			}
			buf_ptr = (vm_offset_t)profile_header;
			resid_off = 0;

			/* write names file header */
			while(header_size) {
				error = vn_rdwr(UIO_WRITE, names_vp,
						(caddr_t)buf_ptr,
						header_size, resid_off,
						UIO_SYSSPACE, IO_NODELOCKED,
						p->p_ucred, &resid, p);
				if(error) {
					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buf_ptr += (vm_offset_t)header_size-resid;
				resid_off += header_size-resid;
				header_size = resid;
			}
			/* write profile to data file */
			resid_off = data_vattr.va_size;
			while(size) {
				error = vn_rdwr(UIO_WRITE, data_vp,
					(caddr_t)buffer, size, resid_off,
					UIO_SYSSPACE, IO_NODELOCKED,
					p->p_ucred, &resid, p);
				if(error) {
					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write profile data %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buffer += size-resid;
				resid_off += size-resid;
				size = resid;
			}
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return 0;
		}
		/* Someone else wrote a twin profile before us */
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

}

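/*
 * prepare_profile_database creates the per-user profile files
 * /var/vm/app_profile/<uid>_names and <uid>_data (uid in hex), writes
 * an empty names-file header, and chowns both files to the user.
 * Because the files are opened O_CREAT|O_EXCL, an existing database is
 * left untouched and the routine returns 0.
 */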
int
prepare_profile_database(int	user)
{
	char		*cache_path = "/var/vm/app_profile/";
	struct proc	*p;
	int		error;
	int		resid;
	off_t		resid_off;
	unsigned int	lru;
	vm_size_t	size;

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	char		*substring;

	struct vattr	vattr;

	struct	profile_names_header *profile_header;
	kern_return_t	ret;

	struct nameidata nd_names;
	struct nameidata nd_data;

	int		i;

	p = current_proc();

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);

	if(ret) {
		return ENOMEM;
	}

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for   */
	/* both file path names                               */
	profile_names_string = profile_data_string + (PATH_MAX/2);


	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
			= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);

	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
			(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
	if(ret) {
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		return ENOMEM;
	}

	NDINIT(&nd_names, LOOKUP, FOLLOW,
			UIO_SYSSPACE, profile_names_string, p);
	NDINIT(&nd_data, LOOKUP, FOLLOW,
			UIO_SYSSPACE, profile_data_string, p);

	if (error = vn_open(&nd_data,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
		/* data file exists (or cannot be created); */
		/* nothing to prepare */
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		return 0;
	}

	data_vp = nd_data.ni_vp;
	VOP_UNLOCK(data_vp, 0, p);

	if (error = vn_open(&nd_names,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
		printf("prepare_profile_database: Can't create CacheNames %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		vrele(data_vp);
		return error;
	}

	names_vp = nd_names.ni_vp;


	/* Write Header for new names file */

	profile_header = (struct profile_names_header *)names_buf;

	profile_header->number_of_profiles = 0;
	profile_header->user_id = user;
	profile_header->version = 1;
	profile_header->element_array =
				sizeof(struct profile_names_header);
	profile_header->spare1 = 0;
	profile_header->spare2 = 0;
	profile_header->spare3 = 0;

	size = sizeof(struct profile_names_header);
	buf_ptr = (vm_offset_t)profile_header;
	resid_off = 0;

	while(size) {
		error = vn_rdwr(UIO_WRITE, names_vp,
				(caddr_t)buf_ptr, size, resid_off,
				UIO_SYSSPACE, IO_NODELOCKED,
				p->p_ucred, &resid, p);
		if(error) {
			printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string,
				PATH_MAX);
			vput(names_vp);
			vrele(data_vp);
			return error;
		}
		buf_ptr += size-resid;
		resid_off += size-resid;
		size = resid;
	}

	VATTR_NULL(&vattr);
	vattr.va_uid = user;
	error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);
	if(error) {
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_names_string);
	}
	vput(names_vp);

	error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);
	if(error) {
		vrele(data_vp);
		printf("prepare_profile_database: cannot lock data file %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		/* bail out here: without the lock we must not touch */
		/* data_vp again, and the buffers above are already freed */
		return error;
	}
	VATTR_NULL(&vattr);
	vattr.va_uid = user;
	error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);
	if(error) {
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_data_string);
	}

	vput(data_vp);
	kmem_free(kernel_map,
		(vm_offset_t)profile_data_string, PATH_MAX);
	kmem_free(kernel_map,
		(vm_offset_t)names_buf, 4 * PAGE_SIZE);
	return 0;

}