/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dir.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>
#include <sys/stat.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>

extern shared_region_mapping_t system_shared_region;
extern zone_t lsf_zone;
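
/*
 * useracc: report whether the current task's address space permits the
 * requested access (B_READ checks for read permission, anything else for
 * write) over the page-rounded range [addr, addr+len).
 */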
int
useracc(addr, len, prot)
	caddr_t	addr;
	u_int	len;
	int	prot;
{
	return (vm_map_check_protection(
			current_map(),
			trunc_page(addr), round_page(addr+len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
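
/*
 * vslock: wire the pages covering [addr, addr+len) into memory,
 * translating the Mach vm_map_wire() result into a BSD errno.
 */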
int
vslock(addr, len)
	caddr_t	addr;
	int	len;
{
	kern_return_t	kret;

	kret = vm_map_wire(current_map(), trunc_page(addr),
			round_page(addr+len),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);

	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
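
/*
 * vsunlock: unwire the pages covering [addr, addr+len).  The "dirtied"
 * argument is meant to mark the pages modified first, but that path is
 * currently compiled out (FIXME).  Returns a BSD errno derived from
 * vm_map_unwire().
 */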
int
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	int	len;
	int	dirtied;
{
	pmap_t		pmap;
#if FIXME  /* [ */
	vm_page_t	pg;
#endif  /* FIXME ] */
	vm_offset_t	vaddr, paddr;
	kern_return_t	kret;

#if FIXME  /* [ */
	if (dirtied) {
		pmap = get_task_pmap(current_task());
		for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
				vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
#endif  /* FIXME ] */
#ifdef	lint
	dirtied++;
#endif	/* lint */
	kret = vm_map_unwire(current_map(), trunc_page(addr),
			round_page(addr+len), FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
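
/*
 * subyte/suibyte and suword/suiword store a byte or word at a user
 * address; fubyte/fuibyte and fuword/fuiword fetch one.  The store
 * routines return 0 on success and -1 on failure; the fetch routines
 * return the value read, or -1 on failure.
 */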
#if	defined(sun) || BALANCE || defined(m88k)
#else	/* defined(sun) || BALANCE || defined(m88k) */
int
subyte(addr, byte)
	void	*addr;
	int	byte;
{
	char	character;

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int
suibyte(addr, byte)
	void	*addr;
	int	byte;
{
	char	character;

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int fubyte(addr)
	void	*addr;
{
	unsigned char	byte;

	if (copyin(addr, (void *)&byte, sizeof(char)))
		return (-1);
	return (byte);
}

int fuibyte(addr)
	void	*addr;
{
	unsigned char	byte;

	if (copyin(addr, (void *)&(byte), sizeof(char)))
		return (-1);
	return (byte);
}

int
suword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuword(addr)
	void	*addr;
{
	long	word;

	if (copyin(addr, (void *)&word, sizeof(int)))
		return (-1);
	return (word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

int
suiword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuiword(addr)
	void	*addr;
{
	long	word;

	if (copyin(addr, (void *)&word, sizeof(int)))
		return (-1);
	return (word);
}
#endif	/* defined(sun) || BALANCE || defined(m88k) */
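
/*
 * swapon: not supported; always returns EOPNOTSUPP.
 */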
int
swapon()
{
	return (EOPNOTSUPP);
}
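
/*
 * pid_for_task: convert a task port name into the process ID of the
 * BSD process attached to that task, copying the result (or -1 on
 * failure) out to user address x.
 */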
kern_return_t
pid_for_task(t, x)
	mach_port_t	t;
	int		*x;
{
	struct proc	*p;
	task_t		t1;
	extern task_t	port_name_to_task(mach_port_t t);
	int		pid = -1;
	kern_return_t	err = KERN_SUCCESS;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
		goto pftout;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid = p->p_pid;
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
	}
	task_deallocate(t1);
pftout:
	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
	thread_funnel_set(kernel_flock, funnel_state);
	return (err);
}

/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;
	int		pid;
	mach_port_t	*t;
{
	struct proc	*p;
	struct proc	*p1;
	task_t		t1;
	mach_port_t	tret;
	extern task_t	port_name_to_task(mach_port_t tp);
	void		*sright;
	int		error = 0;
	boolean_t	funnel_state;

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
		return (KERN_FAILURE);
	}

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

restart:
	p1 = get_bsdtask_info(t1);
	if (
		((p = pfind(pid)) != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
			((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
		|| !(suser(p1->p_ucred, &p1->p_acflag)))
		&& (p->p_stat != SZOMB)
		) {
		if (p->task != TASK_NULL) {
			if (!task_reference_try(p->task)) {
				mutex_pause(); /* temp loss of funnel */
				goto restart;
			}
			sright = (void *)convert_task_to_port(p->task);
			tret = (mach_port_t)
				ipc_port_copyout_send(sright,
					get_task_ipcspace(current_task()));
		} else
			tret = MACH_PORT_NULL;
		(void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
		task_deallocate(t1);
		error = KERN_SUCCESS;
		goto tfpout;
	}
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
	error = KERN_FAILURE;
tfpout:
	thread_funnel_set(kernel_flock, funnel_state);
	return (error);
}
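
/*
 * load_shared_file: map a file into the task's shared region at
 * base_address according to the supplied sf_mapping_t list, cloning a
 * private copy of the system region first when the caller requests it
 * or policy demands it.  The syscall arguments are described by
 * load_shared_file_args below.
 */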
struct load_shared_file_args {
	char		*filename;
	caddr_t		mfa;
	u_long		mfs;
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
	int		*flags;
};

int ws_disabled = 1;

int
load_shared_file(
	struct proc		*p,
	struct load_shared_file_args *uap,
	register int		*retval)
{
	caddr_t		mapped_file_addr = uap->mfa;
	u_long		mapped_file_size = uap->mfs;
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	char		*filename = uap->filename;
	int		*flags = uap->flags;
	struct vnode	*vp = 0;
	struct nameidata nd, *ndp;
	char		*filename_str;
	register int	error;
	kern_return_t	kr;

	struct vattr	vattr;
	memory_object_control_t file_control;
	sf_mapping_t	*map_list;
	caddr_t		local_base;
	int		local_flags;
	int		caller_flags;
	int		i;
	int		default_regions = 0;
	vm_size_t	dummy;
	kern_return_t	kret;

	shared_region_mapping_t shared_region;
	struct shared_region_task_mappings task_mapping_info;
	shared_region_mapping_t next;

	ndp = &nd;

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto lsf_bailout;
	}
	if (error = copyin(flags, &local_flags, sizeof (int))) {
		goto lsf_bailout;
	}

	if (local_flags & QUERY_IS_SYSTEM_REGION) {
		vm_get_shared_region(current_task(), &shared_region);
		if (shared_region == system_shared_region) {
			local_flags = SYSTEM_REGION_BACKED;
		} else {
			local_flags = 0;
		}
		error = copyout(&local_flags, flags, sizeof (int));
		goto lsf_bailout;
	}
	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto lsf_bailout;
	}
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
				(vm_size_t)(MAXPATHLEN));
		error = ENOMEM;
		goto lsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;
	}

	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, (size_t *)&dummy)) {
		goto lsf_bailout_free;
	}

	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
		filename_str, p);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;
	}

	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;
	}

	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	vm_get_shared_region(current_task(), &shared_region);
	if (shared_region == system_shared_region) {
		default_regions = 1;
	}
	if (((vp->v_mount != rootvnode->v_mount)
		&& (shared_region == system_shared_region))
		&& (lsf_mapping_pool_gauge() < 75)) {
		/* We don't want to run out of shared memory */
		/* map entries by starting too many private versions */
		/* of the shared library structures */
		if (p->p_flag & P_NOSHLIB) {
			error = clone_system_shared_regions(FALSE);
		} else {
			error = clone_system_shared_regions(TRUE);
		}
		if (error) {
			goto lsf_bailout_free_vput;
		}
		local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
		vm_get_shared_region(current_task(), &shared_region);
	}
#ifdef notdef
	if (vattr.va_size != mapped_file_size) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}
#endif
	if (p->p_flag & P_NOSHLIB) {
		p->p_flag = p->p_flag & ~P_NOSHLIB;
	}

	/* load alternate regions if the caller has requested.  */
	/* Note: the new regions are "clean slates" */
	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
		error = clone_system_shared_regions(FALSE);
		if (error) {
			goto lsf_bailout_free_vput;
		}
		vm_get_shared_region(current_task(), &shared_region);
	}

	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.flags), &next);

	/* This is a work-around to allow executables which have been */
	/* built without knowledge of the proper shared segment to */
	/* load.  This code has been architected as a shared region */
	/* handler, the knowledge of where the regions are loaded is */
	/* problematic for the extension of shared regions as it will */
	/* not be easy to know what region an item should go into. */
	/* The code below however will get around a short term problem */
	/* with executables which believe they are loading at zero. */

	{
		if (((unsigned int)local_base &
			(~(task_mapping_info.text_size - 1))) !=
			task_mapping_info.client_base) {
			if (local_flags & ALTERNATE_LOAD_SITE) {
				local_base = (caddr_t)(
					(unsigned int)local_base &
					(task_mapping_info.text_size - 1));
				local_base = (caddr_t)((unsigned int)local_base
					| task_mapping_info.client_base);
			} else {
				error = EINVAL;
				goto lsf_bailout_free_vput;
			}
		}
	}

	if ((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			mapped_file_size,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_control,
			&task_mapping_info, &local_flags))) {
		switch (kr) {
		case KERN_FAILURE:
			error = EINVAL;
			break;
		case KERN_INVALID_ARGUMENT:
			error = EINVAL;
			break;
		case KERN_INVALID_ADDRESS:
			error = EACCES;
			break;
		case KERN_PROTECTION_FAILURE:
			/* save EAUTH for authentication in this */
			/* routine */
			error = EPERM;
			break;
		case KERN_NO_SPACE:
			error = ENOMEM;
			break;
		default:
			error = EINVAL;
		}
		if ((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
			for (i = 0; i < map_cnt; i++) {
				printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n",
					i, map_list[i].mapping_offset,
					map_list[i].size,
					map_list[i].file_offset,
					map_list[i].protection);
			}
		}
	} else {
		if (default_regions)
			local_flags |= SYSTEM_REGION_BACKED;
		if (!(error = copyout(&local_flags, flags, sizeof (int)))) {
			error = copyout(&local_base,
				base_address, sizeof (caddr_t));
		}
	}

lsf_bailout_free_vput:
	vput(vp);

lsf_bailout_free:
	kmem_free(kernel_map, (vm_offset_t)filename_str,
			(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
	return error;
}
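
/*
 * reset_shared_file: re-establish the read-only, globally shared mapping
 * of the data-segment pages named in the mapping list, discarding any
 * private copies the task may have written.
 */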
struct reset_shared_file_args {
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
};

int
reset_shared_file(
	struct proc		*p,
	struct reset_shared_file_args *uap,
	register int		*retval)
{
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	register int	error;
	kern_return_t	kr;

	sf_mapping_t	*map_list;
	caddr_t		local_base;
	vm_offset_t	map_address;
	int		i;
	kern_return_t	kret;

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto rsf_bailout;
	}

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
			!= GLOBAL_SHARED_TEXT_SEGMENT) {
		error = EINVAL;
		goto rsf_bailout;
	}

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto rsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

		kmem_free(kernel_map, (vm_offset_t)map_list,
				(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		goto rsf_bailout;
	}
	for (i = 0; i < map_cnt; i++) {
		if ((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),
					map_address,
					map_list[i].size);
			vm_map(current_map(), &map_address,
					map_list[i].size, 0, SHARED_LIB_ALIAS,
					shared_data_region_handle,
					((unsigned int)local_base
					   & SHARED_DATA_REGION_MASK) +
					(map_list[i].mapping_offset
					   & SHARED_DATA_REGION_MASK),
					TRUE, VM_PROT_READ,
					VM_PROT_READ, VM_INHERIT_SHARE);
		}
	}

	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
	return error;
}
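
/*
 * new_system_shared_regions: privileged call that discards the current
 * system shared regions and installs a fresh set, while leaving the
 * calling task attached to its original regions.
 */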
struct new_system_shared_regions_args {
	int dummy;
};

int
new_system_shared_regions(
	struct proc		*p,
	struct new_system_shared_regions_args *uap,
	register int		*retval)
{
	shared_region_mapping_t	regions;
	shared_region_mapping_t	new_regions;

	if (!(is_suser())) {
		*retval = EINVAL;
		return EINVAL;
	}

	/* get current shared region info for */
	/* restoration after new system shared */
	/* regions are in place */
	vm_get_shared_region(current_task(), &regions);

	/* usually only called at boot time */
	/* shared_file_boot_time_init creates */
	/* a new set of system shared regions */
	/* and places them as the system */
	/* shared regions. */
	shared_file_boot_time_init();

	/* set current task back to its */
	/* original regions. */
	vm_get_shared_region(current_task(), &new_regions);
	shared_region_mapping_dealloc(new_regions);

	vm_set_shared_region(current_task(), regions);

	*retval = 0;
	return 0;
}
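
/*
 * clone_system_shared_regions: give the current task its own copy of the
 * system shared regions.  With shared_regions_active the new regions are
 * cloned from, and chained to, the existing ones; otherwise the task gets
 * clean regions and the old reference is released.  Returns 0 or errno.
 */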
int
clone_system_shared_regions(shared_regions_active)
	int	shared_regions_active;
{
	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	struct proc	*p;

	if (shared_file_create_system_region(&new_shared_region))
		return (ENOMEM);
	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
		&(old_info.text_region),
		&(old_info.text_size),
		&(old_info.data_region),
		&(old_info.data_size),
		&(old_info.region_mappings),
		&(old_info.client_base),
		&(old_info.alternate_base),
		&(old_info.alternate_next),
		&(old_info.flags), &next);
	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
		&(new_info.text_region),
		&(new_info.text_size),
		&(new_info.data_region),
		&(new_info.data_size),
		&(new_info.region_mappings),
		&(new_info.client_base),
		&(new_info.alternate_base),
		&(new_info.alternate_next),
		&(new_info.flags), &next);
	if (shared_regions_active) {
		if (vm_region_clone(old_info.text_region, new_info.text_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);
			return (EINVAL);
		}
		if (vm_region_clone(old_info.data_region, new_info.data_region)) {
			panic("clone_system_shared_regions: shared region mis-alignment 2");
			shared_region_mapping_dealloc(new_shared_region);
			return (EINVAL);
		}
		shared_region_object_chain_attach(
			new_shared_region, old_shared_region);
	}
	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base+old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,
			old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	vm_set_shared_region(current_task(), new_shared_region);

	/* consume the reference which wasn't accounted for in object */
	/* chain attach */
	if (!shared_regions_active)
		shared_region_mapping_dealloc(old_shared_region);

	return (0);
}

extern vm_map_t bsd_pageable_map;

/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file */

struct profile_names_header {
	unsigned int	number_of_profiles;
	unsigned int	user_id;
	unsigned int	version;
	off_t		element_array;
	unsigned int	spare1;
	unsigned int	spare2;
	unsigned int	spare3;
};

struct profile_element {
	off_t		addr;
	vm_size_t	size;
	unsigned int	mod_date;
	unsigned int	inode;
	char		name[12];
};

struct global_profile {
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	buf_ptr;
	unsigned int	user;
	unsigned int	age;
	unsigned int	busy;
};

struct global_profile_cache {
	int			max_ele;
	unsigned int		age;
	struct global_profile	profiles[3];
};

struct global_profile_cache global_user_profile_cache =
	{3, 0, NULL, NULL, NULL, 0, 0, 0,
		NULL, NULL, NULL, 0, 0, 0,
		NULL, NULL, NULL, 0, 0, 0 };

/* BSD_OPEN_PAGE_CACHE_FILES: */
/* Caller provides a user id.  This id was used in */
/* prepare_profile_database to create two unique absolute */
/* file paths to the associated profile files.  These files */
/* are either opened or bsd_open_page_cache_files returns an */
/* error.  The header of the names file is then consulted. */
/* The header and the vnodes for the names and data files are */
/* returned. */

int
bsd_open_page_cache_files(
	unsigned int	user,
	struct global_profile **profile)
{
	char		*cache_path = "/var/vm/app_profile/";
	struct proc	*p;
	int		error;
	int		resid;
	off_t		resid_off;
	unsigned int	lru;
	vm_size_t	size;

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	char		*substring;

	struct vattr	vattr;

	struct profile_names_header *profile_header;
	kern_return_t	ret;

	struct nameidata nd_names;
	struct nameidata nd_data;

	int		i;

	p = current_proc();

restart:
	for (i = 0; i < global_user_profile_cache.max_ele; i++) {
		if ((global_user_profile_cache.profiles[i].user == user)
			&& (global_user_profile_cache.profiles[i].data_vp
				!= NULL)) {
			*profile = &global_user_profile_cache.profiles[i];
			/* already in cache, we're done */
			if ((*profile)->busy) {
				/*
				 * drop funnel and wait
				 */
				(void)tsleep((void *)
					*profile,
					PRIBIO, "app_profile", 0);
				goto restart;
			}
			(*profile)->busy = 1;
			(*profile)->age = global_user_profile_cache.age;
			global_user_profile_cache.age += 1;
			return 0;
		}
	}

	lru = global_user_profile_cache.age;
	for (i = 0; i < global_user_profile_cache.max_ele; i++) {
		if (global_user_profile_cache.profiles[i].data_vp == NULL) {
			*profile = &global_user_profile_cache.profiles[i];
			(*profile)->age = global_user_profile_cache.age;
			global_user_profile_cache.age += 1;
			break;
		}
		if (global_user_profile_cache.profiles[i].age < lru) {
			lru = global_user_profile_cache.profiles[i].age;
			*profile = &global_user_profile_cache.profiles[i];
		}
	}

	if ((*profile)->busy) {
		/*
		 * drop funnel and wait
		 */
		(void)tsleep((void *)
			&(global_user_profile_cache),
			PRIBIO, "app_profile", 0);
		goto restart;
	}
	(*profile)->busy = 1;
	(*profile)->user = user;

	if ((*profile)->data_vp != NULL) {
		kmem_free(kernel_map,
			(*profile)->buf_ptr, 4 * PAGE_SIZE);
		if ((*profile)->names_vp) {
			vrele((*profile)->names_vp);
			(*profile)->names_vp = NULL;
		}
		if ((*profile)->data_vp) {
			vrele((*profile)->data_vp);
			(*profile)->data_vp = NULL;
		}
	}

	/* put dummy value in for now to get */
	/* competing request to wait above */
	/* until we are finished */
	(*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);

	if (ret) {
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return ENOMEM;
	}

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for */
	/* both file path names */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
		= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);

	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
	if (ret) {
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return ENOMEM;
	}

	NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
		UIO_SYSSPACE, profile_names_string, p);
	NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
		UIO_SYSSPACE, profile_data_string, p);
	if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
#ifdef notdef
		printf("bsd_open_page_cache_files: CacheData file not found %s\n",
			profile_data_string);
#endif
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}

	data_vp = nd_data.ni_vp;
	VOP_UNLOCK(data_vp, 0, p);

	if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
		printf("bsd_open_page_cache_files: CacheNames file not found %s\n",
			profile_names_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		vrele(data_vp);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}
	names_vp = nd_names.ni_vp;

	if (error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
		printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		vput(names_vp);
		vrele(data_vp);
		(*profile)->data_vp = NULL;
		(*profile)->busy = 0;
		wakeup(*profile);
		return error;
	}

	size = vattr.va_size;
	if (size > 4 * PAGE_SIZE)
		size = 4 * PAGE_SIZE;
	buf_ptr = names_buf;
	resid_off = 0;

	while (size) {
		error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
			size, resid_off,
			UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
		if ((error) || (size == resid)) {
			if (!error) {
				error = EINVAL;
			}
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string, PATH_MAX);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
			vput(names_vp);
			vrele(data_vp);
			(*profile)->data_vp = NULL;
			(*profile)->busy = 0;
			wakeup(*profile);
			return error;
		}
		buf_ptr += size-resid;
		resid_off += size-resid;
		size = resid;
	}

	VOP_UNLOCK(names_vp, 0, p);
	kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
	(*profile)->names_vp = names_vp;
	(*profile)->data_vp = data_vp;
	(*profile)->buf_ptr = names_buf;
	return 0;
}
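
/*
 * bsd_close_page_cache_files: drop the busy hold on a profile cache
 * entry and wake up any waiters.
 */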
void
bsd_close_page_cache_files(
	struct global_profile *profile)
{
	profile->busy = 0;
	wakeup(profile);
}
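
/*
 * bsd_read_page_cache_file: look up the paging profile recorded for
 * app_name/app_vp under the given user id and, if one exists, read it
 * into a freshly allocated kernel buffer returned through buffer and
 * buf_size.  The file's inode number and modification time are copied
 * out through fid and mod.
 */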
int
bsd_read_page_cache_file(
	unsigned int	user,
	int		*fid,
	int		*mod,
	char		*app_name,
	struct vnode	*app_vp,
	vm_offset_t	*buffer,
	vm_offset_t	*buf_size)
{

	boolean_t	funnel_state;

	struct proc	*p;
	int		error;
	int		resid;
	vm_size_t	size;

	off_t		profile;
	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vattr	vattr;

	kern_return_t	ret;

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	struct vnode	*vp1;
	struct vnode	*vp2;

	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	/* Try to open the appropriate users profile files */
	/* If neither file is present, try to create them */
	/* If one file is present and the other not, fail. */
	/* If the files do exist, check them for the app_file */
	/* requested and read it in if present */

	error = bsd_open_page_cache_files(user, &uid_files);
	if (error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */

	if ((unsigned int)names_vp < (unsigned int)data_vp) {
		vp1 = names_vp;
		vp2 = data_vp;
	} else {
		vp1 = data_vp;
		vp2 = names_vp;
	}
	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
	if (error) {
		printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}
	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
	if (error) {
		printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	if (error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	*fid = vattr.va_fileid;
	*mod = vattr.va_mtime.tv_sec;

	if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
			(unsigned int) vattr.va_mtime.tv_sec,
			vattr.va_fileid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if (profile_size == 0) {
			*buffer = 0;
			*buf_size = 0;
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return 0;
		}
		ret = kmem_alloc(kernel_map, buffer, profile_size);
		if (ret) {
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return ENOMEM;
		}
		*buf_size = profile_size;
		while (profile_size) {
			error = vn_rdwr(UIO_READ, data_vp,
				(caddr_t)(*buffer + (*buf_size - profile_size)),
				profile_size,
				profile, UIO_SYSSPACE, IO_NODELOCKED,
				p->p_ucred, &resid, p);
			if (error) {
				VOP_UNLOCK(names_vp, 0, p);
				VOP_UNLOCK(data_vp, 0, p);
				bsd_close_page_cache_files(uid_files);
				kmem_free(kernel_map, (vm_offset_t)*buffer, *buf_size);
				thread_funnel_set(kernel_flock, funnel_state);
				return EINVAL;
			}
			profile += profile_size - resid;
			profile_size = resid;
		}
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}
}
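
/*
 * bsd_search_page_cache_data_base: scan the names database for an entry
 * matching app_name, mod_date and inode.  On success the profile's offset
 * and size within the data file are returned; a size of zero means no
 * profile was found.  Element lists that overflow the resident 4-page
 * header buffer are paged in from the names file as the scan proceeds.
 */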
int
bsd_search_page_cache_data_base(
	struct vnode			*vp,
	struct profile_names_header	*database,
	char				*app_name,
	unsigned int			mod_date,
	unsigned int			inode,
	off_t				*profile,
	unsigned int			*profile_size)
{

	struct proc	*p;

	unsigned int	i;
	struct profile_element	*element;
	unsigned int	ele_total;
	unsigned int	extended_list = 0;
	off_t		file_off = 0;
	unsigned int	size;
	off_t		resid_off;
	int		resid;
	vm_offset_t	local_buf = NULL;

	int		error;
	kern_return_t	ret;

	p = current_proc();

	if (((vm_offset_t)database->element_array) !=
			sizeof(struct profile_names_header)) {
		return EINVAL;
	}
	element = (struct profile_element *)(
		(vm_offset_t)database->element_array +
		(vm_offset_t)database);

	ele_total = database->number_of_profiles;

	*profile = 0;
	*profile_size = 0;
	while (ele_total) {
		/* note: code assumes header + n*ele comes out on a page boundary */
		if (((local_buf == 0) && (sizeof(struct profile_names_header) +
			(ele_total * sizeof(struct profile_element)))
				> (PAGE_SIZE * 4)) ||
			((local_buf != 0) &&
			(ele_total * sizeof(struct profile_element))
				> (PAGE_SIZE * 4))) {
			extended_list = ele_total;
			if (element == (struct profile_element *)
				((vm_offset_t)database->element_array +
				(vm_offset_t)database)) {
				ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
			} else {
				ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
			}
			extended_list -= ele_total;
		}
		for (i = 0; i < ele_total; i++) {
			if ((mod_date == element[i].mod_date)
				&& (inode == element[i].inode)) {
				if (strncmp(element[i].name, app_name, 12) == 0) {
					*profile = element[i].addr;
					*profile_size = element[i].size;
					if (local_buf != NULL) {
						kmem_free(kernel_map,
							(vm_offset_t)local_buf, 4 * PAGE_SIZE);
					}
					return 0;
				}
			}
		}
		if (extended_list == 0)
			break;
		if (local_buf == NULL) {
			ret = kmem_alloc(kernel_map,
				(vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
			if (ret != KERN_SUCCESS) {
				return ENOMEM;
			}
		}
		element = (struct profile_element *)local_buf;
		ele_total = extended_list;
		extended_list = 0;
		file_off += 4 * PAGE_SIZE;
		if ((ele_total * sizeof(struct profile_element)) >
				(PAGE_SIZE * 4)) {
			size = PAGE_SIZE * 4;
		} else {
			size = ele_total * sizeof(struct profile_element);
		}
		resid_off = 0;
		while (size) {
			error = vn_rdwr(UIO_READ, vp,
				(caddr_t)(local_buf + resid_off),
				size, file_off + resid_off, UIO_SYSSPACE,
				IO_NODELOCKED, p->p_ucred, &resid, p);
			if (error) {
				if (local_buf != NULL) {
					kmem_free(kernel_map,
						(vm_offset_t)local_buf,
						4 * PAGE_SIZE);
				}
				return EINVAL;
			}
			resid_off += size-resid;
			size = resid;
		}
	}
	if (local_buf != NULL) {
		kmem_free(kernel_map,
			(vm_offset_t)local_buf, 4 * PAGE_SIZE);
	}
	return 0;
}
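
/*
 * bsd_write_page_cache_file: append a new profile for file_name to the
 * user's profile data file and record it in the names database.  If a
 * matching ("twin") entry already exists, the write is skipped.
 */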
int
bsd_write_page_cache_file(
	unsigned int	user,
	char		*file_name,
	caddr_t		buffer,
	vm_size_t	size,
	int		mod,
	int		fid)
{
	struct proc	*p;
	struct nameidata nd;
	struct vnode	*vp = 0;
	int		resid;
	off_t		resid_off;
	int		error;
	boolean_t	funnel_state;
	struct vattr	vattr;
	struct vattr	data_vattr;

	off_t		profile;
	unsigned int	profile_size;

	vm_offset_t	names_buf;
	struct vnode	*names_vp;
	struct vnode	*data_vp;
	struct vnode	*vp1;
	struct vnode	*vp2;

	struct profile_names_header *profile_header;
	off_t		name_offset;

	struct global_profile *uid_files;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	error = bsd_open_page_cache_files(user, &uid_files);
	if (error) {
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}

	p = current_proc();

	names_vp = uid_files->names_vp;
	data_vp = uid_files->data_vp;
	names_buf = uid_files->buf_ptr;

	/*
	 * Get locks on both files, get the vnode with the lowest address first
	 */

	if ((unsigned int)names_vp < (unsigned int)data_vp) {
		vp1 = names_vp;
		vp2 = data_vp;
	} else {
		vp1 = data_vp;
		vp2 = names_vp;
	}

	error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
	if (error) {
		printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}
	error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
	if (error) {
		printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
		VOP_UNLOCK(vp1, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	/* Stat data file for size */

	if (error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return error;
	}

	if (bsd_search_page_cache_data_base(names_vp,
			(struct profile_names_header *)names_buf,
			file_name, (unsigned int) mod,
			fid, &profile, &profile_size) == 0) {
		/* profile is an offset in the profile data base */
		/* It is zero if no profile data was found */

		if (profile_size == 0) {
			unsigned int	header_size;
			vm_offset_t	buf_ptr;

			/* Our Write case */

			/* read header for last entry */
			profile_header =
				(struct profile_names_header *)names_buf;
			name_offset = sizeof(struct profile_names_header) +
				(sizeof(struct profile_element)
					* profile_header->number_of_profiles);
			profile_header->number_of_profiles += 1;

			if (name_offset < PAGE_SIZE * 4) {
				struct profile_element	*name;
				/* write new entry */
				name = (struct profile_element *)
					(names_buf + (vm_offset_t)name_offset);
				name->addr = data_vattr.va_size;
				name->size = size;
				name->mod_date = mod;
				name->inode = fid;
				strncpy(name->name, file_name, 12);
			} else {
				unsigned int	ele_size;
				struct profile_element	name;
				/* write new entry */
				name.addr = data_vattr.va_size;
				name.size = size;
				name.mod_date = mod;
				name.inode = fid;
				strncpy(name.name, file_name, 12);
				/* write element out separately */
				ele_size = sizeof(struct profile_element);
				buf_ptr = (vm_offset_t)&name;
				resid_off = name_offset;

				while (ele_size) {
					error = vn_rdwr(UIO_WRITE, names_vp,
						(caddr_t)buf_ptr,
						ele_size, resid_off,
						UIO_SYSSPACE, IO_NODELOCKED,
						p->p_ucred, &resid, p);
					if (error) {
						printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
						VOP_UNLOCK(names_vp, 0, p);
						VOP_UNLOCK(data_vp, 0, p);
						bsd_close_page_cache_files(
							uid_files);
						thread_funnel_set(
							kernel_flock,
							funnel_state);
						return error;
					}
					buf_ptr += (vm_offset_t)
						ele_size-resid;
					resid_off += ele_size-resid;
					ele_size = resid;
				}
			}

			if (name_offset < PAGE_SIZE * 4) {
				header_size = name_offset +
					sizeof(struct profile_element);
			} else {
				header_size =
					sizeof(struct profile_names_header);
			}
			buf_ptr = (vm_offset_t)profile_header;
			resid_off = 0;

			/* write names file header */
			while (header_size) {
				error = vn_rdwr(UIO_WRITE, names_vp,
					(caddr_t)buf_ptr,
					header_size, resid_off,
					UIO_SYSSPACE, IO_NODELOCKED,
					p->p_ucred, &resid, p);
				if (error) {
					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write header %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buf_ptr += (vm_offset_t)header_size-resid;
				resid_off += header_size-resid;
				header_size = resid;
			}
			/* write profile to data file */
			resid_off = data_vattr.va_size;
			while (size) {
				error = vn_rdwr(UIO_WRITE, data_vp,
					(caddr_t)buffer, size, resid_off,
					UIO_SYSSPACE, IO_NODELOCKED,
					p->p_ucred, &resid, p);
				if (error) {
					VOP_UNLOCK(names_vp, 0, p);
					VOP_UNLOCK(data_vp, 0, p);
					printf("bsd_write_page_cache_file: Can't write profile data %x\n", user);
					bsd_close_page_cache_files(
						uid_files);
					thread_funnel_set(
						kernel_flock, funnel_state);
					return error;
				}
				buffer += size-resid;
				resid_off += size-resid;
				size = resid;
			}
			VOP_UNLOCK(names_vp, 0, p);
			VOP_UNLOCK(data_vp, 0, p);
			bsd_close_page_cache_files(uid_files);
			thread_funnel_set(kernel_flock, funnel_state);
			return 0;
		}
		/* Someone else wrote a twin profile before us */
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return 0;
	} else {
		VOP_UNLOCK(names_vp, 0, p);
		VOP_UNLOCK(data_vp, 0, p);
		bsd_close_page_cache_files(uid_files);
		thread_funnel_set(kernel_flock, funnel_state);
		return EINVAL;
	}
}
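
/*
 * prepare_profile_database: create the per-user names and data profile
 * files under /var/vm/app_profile/, write an initial names-file header,
 * and assign ownership of both files to the given user.
 */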
int
prepare_profile_database(int user)
{
	char		*cache_path = "/var/vm/app_profile/";
	struct proc	*p;
	int		error;
	int		resid;
	off_t		resid_off;
	unsigned int	lru;
	vm_size_t	size;

	struct vnode	*names_vp;
	struct vnode	*data_vp;
	vm_offset_t	names_buf;
	vm_offset_t	buf_ptr;

	int		profile_names_length;
	int		profile_data_length;
	char		*profile_data_string;
	char		*profile_names_string;
	char		*substring;

	struct vattr	vattr;

	struct profile_names_header *profile_header;
	kern_return_t	ret;

	struct nameidata nd_names;
	struct nameidata nd_data;

	int		i;

	p = current_proc();

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&profile_data_string, PATH_MAX);

	if (ret) {
		return ENOMEM;
	}

	/* Split the buffer in half since we know the size of */
	/* our file path and our allocation is adequate for */
	/* both file path names */
	profile_names_string = profile_data_string + (PATH_MAX/2);

	strcpy(profile_data_string, cache_path);
	strcpy(profile_names_string, cache_path);
	profile_names_length = profile_data_length
		= strlen(profile_data_string);
	substring = profile_data_string + profile_data_length;
	sprintf(substring, "%x_data", user);
	substring = profile_names_string + profile_names_length;
	sprintf(substring, "%x_names", user);

	/* We now have the absolute file names */

	ret = kmem_alloc(kernel_map,
		(vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
	if (ret) {
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		return ENOMEM;
	}

	NDINIT(&nd_names, LOOKUP, FOLLOW,
		UIO_SYSSPACE, profile_names_string, p);
	NDINIT(&nd_data, LOOKUP, FOLLOW,
		UIO_SYSSPACE, profile_data_string, p);

	if (error = vn_open(&nd_data,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		return 0;
	}

	data_vp = nd_data.ni_vp;
	VOP_UNLOCK(data_vp, 0, p);

	if (error = vn_open(&nd_names,
			O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
		printf("prepare_profile_database: Can't create CacheNames %s\n",
			profile_names_string);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		vrele(data_vp);
		return error;
	}

	names_vp = nd_names.ni_vp;

	/* Write Header for new names file */

	profile_header = (struct profile_names_header *)names_buf;

	profile_header->number_of_profiles = 0;
	profile_header->user_id = user;
	profile_header->version = 1;
	profile_header->element_array =
		sizeof(struct profile_names_header);
	profile_header->spare1 = 0;
	profile_header->spare2 = 0;
	profile_header->spare3 = 0;

	size = sizeof(struct profile_names_header);
	buf_ptr = (vm_offset_t)profile_header;
	resid_off = 0;

	while (size) {
		error = vn_rdwr(UIO_WRITE, names_vp,
			(caddr_t)buf_ptr, size, resid_off,
			UIO_SYSSPACE, IO_NODELOCKED,
			p->p_ucred, &resid, p);
		if (error) {
			printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
			kmem_free(kernel_map,
				(vm_offset_t)names_buf, 4 * PAGE_SIZE);
			kmem_free(kernel_map,
				(vm_offset_t)profile_data_string,
				PATH_MAX);
			vput(names_vp);
			vrele(data_vp);
			return error;
		}
		buf_ptr += size-resid;
		resid_off += size-resid;
		size = resid;
	}

	VATTR_NULL(&vattr);
	vattr.va_uid = user;
	error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);
	if (error) {
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_names_string);
	}
	vput(names_vp);

	error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);
	if (error) {
		vrele(data_vp);
		printf("prepare_profile_database: cannot lock data file %s\n",
			profile_data_string);
		kmem_free(kernel_map,
			(vm_offset_t)profile_data_string, PATH_MAX);
		kmem_free(kernel_map,
			(vm_offset_t)names_buf, 4 * PAGE_SIZE);
		return error;
	}
	VATTR_NULL(&vattr);
	vattr.va_uid = user;
	error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);
	if (error) {
		printf("prepare_profile_database: "
			"Can't set user %s\n", profile_data_string);
	}

	vput(data_vp);
	kmem_free(kernel_map,
		(vm_offset_t)profile_data_string, PATH_MAX);
	kmem_free(kernel_map,
		(vm_offset_t)names_buf, 4 * PAGE_SIZE);
	return 0;
}