1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * Mach Operating System
25 * Copyright (c) 1987 Carnegie-Mellon University
26 * All rights reserved. The CMU software License Agreement specifies
27 * the terms and conditions for use and redistribution.
28 */
29
30 /*
31 */
32
33
34 #include <meta_features.h>
35
36 #include <kern/task.h>
37 #include <kern/thread.h>
38 #include <kern/debug.h>
39 #include <kern/lock.h>
40 #include <mach/mach_traps.h>
41 #include <mach/time_value.h>
42 #include <mach/vm_map.h>
43 #include <mach/vm_param.h>
44 #include <mach/vm_prot.h>
45 #include <mach/port.h>
46
47 #include <sys/file_internal.h>
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/dir.h>
51 #include <sys/namei.h>
52 #include <sys/proc_internal.h>
53 #include <sys/kauth.h>
54 #include <sys/vm.h>
55 #include <sys/file.h>
56 #include <sys/vnode_internal.h>
57 #include <sys/mount.h>
58 #include <sys/trace.h>
59 #include <sys/kernel.h>
60 #include <sys/ubc_internal.h>
61 #include <sys/user.h>
62 #include <sys/stat.h>
63 #include <sys/sysproto.h>
64 #include <sys/mman.h>
65
66 #include <bsm/audit_kernel.h>
67 #include <bsm/audit_kevents.h>
68
69 #include <kern/kalloc.h>
70 #include <vm/vm_map.h>
71 #include <vm/vm_kern.h>
72
73 #include <machine/spl.h>
74
75 #include <mach/shared_memory_server.h>
76 #include <vm/vm_shared_memory_server.h>
77
78 #include <vm/vm_protos.h>
79
80
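/*
 * useracc: report whether the current map allows the requested access
 * (read for B_READ, write otherwise) over the page-rounded range
 * [addr, addr + len).
 */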
81 int
82 useracc(
83 user_addr_t addr,
84 user_size_t len,
85 int prot)
86 {
87 return (vm_map_check_protection(
88 current_map(),
89 vm_map_trunc_page(addr), vm_map_round_page(addr+len),
90 prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
91 }
92
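/*
 * vslock / vsunlock: wire and unwire a range of user memory in the
 * current map, translating Mach kern_return_t codes into errno values.
 */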
93 int
94 vslock(
95 user_addr_t addr,
96 user_size_t len)
97 {
98 kern_return_t kret;
99 kret = vm_map_wire(current_map(), vm_map_trunc_page(addr),
100 vm_map_round_page(addr+len),
101 VM_PROT_READ | VM_PROT_WRITE ,FALSE);
102
103 switch (kret) {
104 case KERN_SUCCESS:
105 return (0);
106 case KERN_INVALID_ADDRESS:
107 case KERN_NO_SPACE:
108 return (ENOMEM);
109 case KERN_PROTECTION_FAILURE:
110 return (EACCES);
111 default:
112 return (EINVAL);
113 }
114 }
115
116 int
117 vsunlock(
118 user_addr_t addr,
119 user_size_t len,
120 __unused int dirtied)
121 {
122 #if FIXME /* [ */
123 pmap_t pmap;
124 vm_page_t pg;
125 vm_map_offset_t vaddr;
126 ppnum_t paddr;
127 #endif /* FIXME ] */
128 kern_return_t kret;
129
130 #if FIXME /* [ */
131 if (dirtied) {
132 pmap = get_task_pmap(current_task());
133 for (vaddr = vm_map_trunc_page(addr);
134 vaddr < vm_map_round_page(addr+len);
135 vaddr += PAGE_SIZE) {
136 paddr = pmap_extract(pmap, vaddr);
137 pg = PHYS_TO_VM_PAGE(paddr);
138 vm_page_set_modified(pg);
139 }
140 }
141 #endif /* FIXME ] */
142 #ifdef lint
143 dirtied++;
144 #endif /* lint */
145 kret = vm_map_unwire(current_map(), vm_map_trunc_page(addr),
146 vm_map_round_page(addr+len), FALSE);
147 switch (kret) {
148 case KERN_SUCCESS:
149 return (0);
150 case KERN_INVALID_ADDRESS:
151 case KERN_NO_SPACE:
152 return (ENOMEM);
153 case KERN_PROTECTION_FAILURE:
154 return (EACCES);
155 default:
156 return (EINVAL);
157 }
158 }
159
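/*
 * subyte/fubyte, suword/fuword and friends: minimal copyout/copyin
 * wrappers that store or fetch a single byte or word at a user
 * address, returning -1 on failure.
 */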
160 int
161 subyte(
162 user_addr_t addr,
163 int byte)
164 {
165 char character;
166
167 character = (char)byte;
168 return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
169 }
170
171 int
172 suibyte(
173 user_addr_t addr,
174 int byte)
175 {
176 char character;
177
178 character = (char)byte;
179 return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
180 }
181
182 int fubyte(user_addr_t addr)
183 {
184 unsigned char byte;
185
186 if (copyin(addr, (void *) &byte, sizeof(char)))
187 return(-1);
188 return(byte);
189 }
190
191 int fuibyte(user_addr_t addr)
192 {
193 unsigned char byte;
194
195 if (copyin(addr, (void *) &(byte), sizeof(char)))
196 return(-1);
197 return(byte);
198 }
199
200 int
201 suword(
202 user_addr_t addr,
203 long word)
204 {
205 return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
206 }
207
208 long fuword(user_addr_t addr)
209 {
210 long word;
211
212 if (copyin(addr, (void *) &word, sizeof(int)))
213 return(-1);
214 return(word);
215 }
216
217 /* suiword and fuiword are the same as suword and fuword, respectively */
218
219 int
220 suiword(
221 user_addr_t addr,
222 long word)
223 {
224 return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
225 }
226
227 long fuiword(user_addr_t addr)
228 {
229 long word;
230
231 if (copyin(addr, (void *) &word, sizeof(int)))
232 return(-1);
233 return(word);
234 }
235
236 /*
237 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
238 * fetching and setting of process-sized size_t and pointer values.
239 */
240 int
241 sulong(user_addr_t addr, int64_t word)
242 {
243
244 if (IS_64BIT_PROCESS(current_proc())) {
245 return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);
246 } else {
247 return(suiword(addr, (long)word));
248 }
249 }
250
251 int64_t
252 fulong(user_addr_t addr)
253 {
254 int64_t longword;
255
256 if (IS_64BIT_PROCESS(current_proc())) {
257 if (copyin(addr, (void *)&longword, sizeof(longword)) != 0)
258 return(-1);
259 return(longword);
260 } else {
261 return((int64_t)fuiword(addr));
262 }
263 }
264
265 int
266 suulong(user_addr_t addr, uint64_t uword)
267 {
268
269 if (IS_64BIT_PROCESS(current_proc())) {
270 return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);
271 } else {
272 return(suiword(addr, (u_long)uword));
273 }
274 }
275
276 uint64_t
277 fuulong(user_addr_t addr)
278 {
279 uint64_t ulongword;
280
281 if (IS_64BIT_PROCESS(current_proc())) {
282 if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0)
283 return(-1ULL);
284 return(ulongword);
285 } else {
286 return((uint64_t)fuiword(addr));
287 }
288 }
289
290 int
291 swapon(__unused struct proc *procp, __unused struct swapon_args *uap, __unused int *retval)
292 {
293 return(ENOTSUP);
294 }
295
296
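/*
 * pid_for_task: given a task port name, copy out the corresponding BSD
 * process ID (or -1 if the port does not name a task with a BSD proc).
 */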
297 kern_return_t
298 pid_for_task(
299 struct pid_for_task_args *args)
300 {
301 mach_port_name_t t = args->t;
302 user_addr_t pid_addr = args->pid;
303 struct proc * p;
304 task_t t1;
305 int pid = -1;
306 kern_return_t err = KERN_SUCCESS;
307 boolean_t funnel_state;
308
309 AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
310 AUDIT_ARG(mach_port1, t);
311
312 funnel_state = thread_funnel_set(kernel_flock, TRUE);
313 t1 = port_name_to_task(t);
314
315 if (t1 == TASK_NULL) {
316 err = KERN_FAILURE;
317 goto pftout;
318 } else {
319 p = get_bsdtask_info(t1);
320 if (p) {
321 pid = proc_pid(p);
322 err = KERN_SUCCESS;
323 } else {
324 err = KERN_FAILURE;
325 }
326 }
327 task_deallocate(t1);
328 pftout:
329 AUDIT_ARG(pid, pid);
330 (void) copyout((char *) &pid, pid_addr, sizeof(int));
331 thread_funnel_set(kernel_flock, funnel_state);
332 AUDIT_MACH_SYSCALL_EXIT(err);
333 return(err);
334 }
335
336 /*
337 * Routine: task_for_pid
338 * Purpose:
339 * Get the task port for another "process", named by its
340 * process ID on the same host as "target_task".
341 *
342 * Only permitted to privileged processes, or processes
343 * with the same user ID.
344 *
345 * XXX This should be a BSD system call, not a Mach trap!!!
346 */
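/*
 * Illustrative user-space call (a sketch only, not part of this file):
 *
 *	task_t task;
 *	kern_return_t kr = task_for_pid(mach_task_self(), pid, &task);
 *
 * On success the caller receives a send right to the target task's
 * kernel port in "task".
 */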
347 kern_return_t
348 task_for_pid(
349 struct task_for_pid_args *args)
350 {
351 mach_port_name_t target_tport = args->target_tport;
352 int pid = args->pid;
353 user_addr_t task_addr = args->t;
354 struct uthread *uthread;
355 struct proc *p;
356 struct proc *p1;
357 task_t t1;
358 mach_port_name_t tret;
359 void * sright;
360 int error = 0;
361 boolean_t funnel_state;
362
363 AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
364 AUDIT_ARG(pid, pid);
365 AUDIT_ARG(mach_port1, target_tport);
366
367 t1 = port_name_to_task(target_tport);
368 if (t1 == TASK_NULL) {
369 (void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
370 AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
371 return(KERN_FAILURE);
372 }
373
374 funnel_state = thread_funnel_set(kernel_flock, TRUE);
375
376 p1 = get_bsdtask_info(t1); /* XXX current proc */
377
378 /*
379 * Delayed binding of thread credential to process credential, if we
380 * are not running with an explicitly set thread credential.
381 */
382 uthread = get_bsdthread_info(current_thread());
383 if (uthread->uu_ucred != p1->p_ucred &&
384 (uthread->uu_flag & UT_SETUID) == 0) {
385 kauth_cred_t old = uthread->uu_ucred;
386 proc_lock(p1);
387 uthread->uu_ucred = p1->p_ucred;
388 kauth_cred_ref(uthread->uu_ucred);
389 proc_unlock(p1);
390 if (old != NOCRED)
391 kauth_cred_rele(old);
392 }
393
394 p = pfind(pid);
395 AUDIT_ARG(process, p);
396
397 if (
398 (p != (struct proc *) 0)
399 && (p1 != (struct proc *) 0)
400 && (((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get())) &&
401 ((p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid)))
402 || !(suser(kauth_cred_get(), 0)))
403 && (p->p_stat != SZOMB)
404 ) {
405 if (p->task != TASK_NULL) {
406 task_reference(p->task);
407 sright = (void *)convert_task_to_port(p->task);
408 tret = ipc_port_copyout_send(
409 sright,
410 get_task_ipcspace(current_task()));
411 } else
412 tret = MACH_PORT_NULL;
413 AUDIT_ARG(mach_port2, tret);
414 (void ) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
415 task_deallocate(t1);
416 error = KERN_SUCCESS;
417 goto tfpout;
418 }
419 task_deallocate(t1);
420 tret = MACH_PORT_NULL;
421 (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
422 error = KERN_FAILURE;
423 tfpout:
424 thread_funnel_set(kernel_flock, funnel_state);
425 AUDIT_MACH_SYSCALL_EXIT(error);
426 return(error);
427 }
428
429
430 /*
431 * shared_region_make_private_np:
432 *
433 * This system call is for "dyld" only.
434 *
435 * It creates a private copy of the current process's "shared region" for
436 * split libraries. "dyld" uses this when the shared region is full or
437 * it needs to load a split library that conflicts with an already loaded one
438 * that this process doesn't need. "dyld" specifies a set of address ranges
439 * that it wants to keep in the now-private "shared region". These cover
440 * the set of split libraries that the process needs so far. The kernel needs
441 * to deallocate the rest of the shared region, so that it's available for
442 * more libraries for this process.
443 */
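/*
 * Rough usage from user space (a sketch only; dyld's actual code may
 * differ): the caller builds an array of "rangeCount"
 * shared_region_range_np entries covering the address ranges it wants
 * to keep and passes that array and count to this call.  Everything
 * else in the (now private) shared region is deallocated by the kernel.
 */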
444 int
445 shared_region_make_private_np(
446 struct proc *p,
447 struct shared_region_make_private_np_args *uap,
448 __unused int *retvalp)
449 {
450 int error;
451 kern_return_t kr;
452 boolean_t using_shared_regions;
453 user_addr_t user_ranges;
454 unsigned int range_count;
455 struct shared_region_range_np *ranges;
456 shared_region_mapping_t shared_region;
457 struct shared_region_task_mappings task_mapping_info;
458 shared_region_mapping_t next;
459
460 ranges = NULL;
461
462 range_count = uap->rangeCount;
463 user_ranges = uap->ranges;
464
465 /* allocate kernel space for the "ranges" */
466 if (range_count != 0) {
467 kr = kmem_alloc(kernel_map,
468 (vm_offset_t *) &ranges,
469 (vm_size_t) (range_count * sizeof (ranges[0])));
470 if (kr != KERN_SUCCESS) {
471 error = ENOMEM;
472 goto done;
473 }
474
475 /* copy "ranges" from user-space */
476 error = copyin(user_ranges,
477 ranges,
478 (range_count * sizeof (ranges[0])));
479 if (error) {
480 goto done;
481 }
482 }
483
484 if (p->p_flag & P_NOSHLIB) {
485 /* no split library has been mapped for this process so far */
486 using_shared_regions = FALSE;
487 } else {
488 /* this process has already mapped some split libraries */
489 using_shared_regions = TRUE;
490 }
491
492 /*
493 * Get a private copy of the current shared region.
494 * Do not chain it to the system-wide shared region, as we'll want
495 * to map other split libraries in place of the old ones. We want
496 * to completely detach from the system-wide shared region and go our
497 * own way after this point, not sharing anything with other processes.
498 */
499 error = clone_system_shared_regions(using_shared_regions,
500 FALSE, /* chain_regions */
501 ENV_DEFAULT_ROOT);
502 if (error) {
503 goto done;
504 }
505
506 /* get info on the newly allocated shared region */
507 vm_get_shared_region(current_task(), &shared_region);
508 task_mapping_info.self = (vm_offset_t) shared_region;
509 shared_region_mapping_info(shared_region,
510 &(task_mapping_info.text_region),
511 &(task_mapping_info.text_size),
512 &(task_mapping_info.data_region),
513 &(task_mapping_info.data_size),
514 &(task_mapping_info.region_mappings),
515 &(task_mapping_info.client_base),
516 &(task_mapping_info.alternate_base),
517 &(task_mapping_info.alternate_next),
518 &(task_mapping_info.fs_base),
519 &(task_mapping_info.system),
520 &(task_mapping_info.flags),
521 &next);
522
523 /*
524 * We now have our private copy of the shared region, as it was before
525 * the call to clone_system_shared_regions(). We now need to clean it
526 * up and keep only the memory areas described by the "ranges" array.
527 */
528 kr = shared_region_cleanup(range_count, ranges, &task_mapping_info);
529 switch (kr) {
530 case KERN_SUCCESS:
531 error = 0;
532 break;
533 default:
534 error = EINVAL;
535 goto done;
536 }
537
538 done:
539 if (ranges != NULL) {
540 kmem_free(kernel_map,
541 (vm_offset_t) ranges,
542 range_count * sizeof (ranges[0]));
543 ranges = NULL;
544 }
545
546 return error;
547 }
548
549
550 /*
551 * shared_region_map_file_np:
552 *
553 * This system call is for "dyld" only.
554 *
555 * "dyld" wants to map parts of a split library in the shared region.
556 * We get a file descriptor on the split library to be mapped and a set
557  * of mapping instructions, describing which parts of the file to map in
558  * which areas of the shared segment and with what protection.
559  * The "shared region" is split into 2 areas:
560 * 0x90000000 - 0xa0000000 : read-only area (for TEXT and LINKEDIT sections),
561 * 0xa0000000 - 0xb0000000 : writable area (for DATA sections).
562 *
563 */
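/*
 * Each element of the "mappings" array is a shared_file_mapping_np
 * describing one region of the file.  A sketch of a single read-only
 * TEXT mapping (field names as used below; values purely illustrative):
 *
 *	mapping.sfm_address  = 0x90000000;
 *	mapping.sfm_size     = 0x1000;
 *	mapping.sfm_max_prot = VM_PROT_READ | VM_PROT_EXECUTE;
 *
 * Such an entry lands in the read-only area above; a DATA mapping
 * would use an address in the writable area.
 */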
564 int
565 shared_region_map_file_np(
566 struct proc *p,
567 struct shared_region_map_file_np_args *uap,
568 __unused int *retvalp)
569 {
570 int error;
571 kern_return_t kr;
572 int fd;
573 unsigned int mapping_count;
574 user_addr_t user_mappings; /* 64-bit */
575 user_addr_t user_slide_p; /* 64-bit */
576 struct shared_file_mapping_np *mappings;
577 struct fileproc *fp;
578 mach_vm_offset_t slide;
579 struct vnode *vp;
580 struct vfs_context context;
581 memory_object_control_t file_control;
582 memory_object_size_t file_size;
583 shared_region_mapping_t shared_region;
584 struct shared_region_task_mappings task_mapping_info;
585 shared_region_mapping_t next;
586 shared_region_mapping_t default_shared_region;
587 boolean_t using_default_region;
588 unsigned int j;
589 vm_prot_t max_prot;
590 mach_vm_offset_t base_offset, end_offset;
591 mach_vm_offset_t original_base_offset;
592 boolean_t mappings_in_segment;
593 #define SFM_MAX_STACK 6
594 struct shared_file_mapping_np stack_mappings[SFM_MAX_STACK];
595
596 mappings = NULL;
597 mapping_count = 0;
598 fp = NULL;
599 vp = NULL;
600
601 /* get file descriptor for split library from arguments */
602 fd = uap->fd;
603
604 /* get file structure from file descriptor */
605 error = fp_lookup(p, fd, &fp, 0);
606 if (error) {
607 goto done;
608 }
609
610 /* make sure we're attempting to map a vnode */
611 if (fp->f_fglob->fg_type != DTYPE_VNODE) {
612 error = EINVAL;
613 goto done;
614 }
615
616 /* we need at least read permission on the file */
617 if (! (fp->f_fglob->fg_flag & FREAD)) {
618 error = EPERM;
619 goto done;
620 }
621
622 /* get vnode from file structure */
623 error = vnode_getwithref((vnode_t)fp->f_fglob->fg_data);
624 if (error) {
625 goto done;
626 }
627 vp = (struct vnode *) fp->f_fglob->fg_data;
628
629 /* make sure the vnode is a regular file */
630 if (vp->v_type != VREG) {
631 error = EINVAL;
632 goto done;
633 }
634
635 /* get vnode size */
636 {
637 off_t fs;
638
639 context.vc_proc = p;
640 context.vc_ucred = kauth_cred_get();
641 if ((error = vnode_size(vp, &fs, &context)) != 0)
642 goto done;
643 file_size = fs;
644 }
645
646 /*
647 * Get the list of mappings the caller wants us to establish.
648 */
649 mapping_count = uap->mappingCount; /* the number of mappings */
650 if (mapping_count == 0) {
651 error = 0; /* no mappings: we're done ! */
652 goto done;
653 } else if (mapping_count <= SFM_MAX_STACK) {
654 mappings = &stack_mappings[0];
655 } else {
656 kr = kmem_alloc(kernel_map,
657 (vm_offset_t *) &mappings,
658 (vm_size_t) (mapping_count *
659 sizeof (mappings[0])));
660 if (kr != KERN_SUCCESS) {
661 error = ENOMEM;
662 goto done;
663 }
664 }
665
666 user_mappings = uap->mappings; /* the mappings, in user space */
667 error = copyin(user_mappings,
668 mappings,
669 (mapping_count * sizeof (mappings[0])));
670 if (error != 0) {
671 goto done;
672 }
673
674 /*
675 * If the caller provides a "slide" pointer, it means they're OK
676 * with us moving the mappings around to make them fit.
677 */
678 user_slide_p = uap->slide_p;
679
680 /*
681 * Make each mapping address relative to the beginning of the
682 * shared region. Check that all mappings are in the shared region.
683 * Compute the maximum set of protections required to tell the
684 * buffer cache how we mapped the file (see call to ubc_map() below).
685 */
686 max_prot = VM_PROT_NONE;
687 base_offset = -1LL;
688 end_offset = 0;
689 mappings_in_segment = TRUE;
690 for (j = 0; j < mapping_count; j++) {
691 mach_vm_offset_t segment;
692 segment = (mappings[j].sfm_address &
693 GLOBAL_SHARED_SEGMENT_MASK);
694 if (segment != GLOBAL_SHARED_TEXT_SEGMENT &&
695 segment != GLOBAL_SHARED_DATA_SEGMENT) {
696 /* this mapping is not in the shared region... */
697 if (user_slide_p == NULL) {
698 /* ... and we can't slide it in: fail */
699 error = EINVAL;
700 goto done;
701 }
702 if (j == 0) {
703 /* expect all mappings to be outside */
704 mappings_in_segment = FALSE;
705 } else if (mappings_in_segment != FALSE) {
706 /* other mappings were not outside: fail */
707 error = EINVAL;
708 goto done;
709 }
710 /* we'll try and slide that mapping in the segments */
711 } else {
712 if (j == 0) {
713 /* expect all mappings to be inside */
714 mappings_in_segment = TRUE;
715 } else if (mappings_in_segment != TRUE) {
716 /* other mappings were not inside: fail */
717 error = EINVAL;
718 goto done;
719 }
720 /* get a relative offset inside the shared segments */
721 mappings[j].sfm_address -= GLOBAL_SHARED_TEXT_SEGMENT;
722 }
723 if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK)
724 < base_offset) {
725 base_offset = (mappings[j].sfm_address &
726 SHARED_TEXT_REGION_MASK);
727 }
728 if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK) +
729 mappings[j].sfm_size > end_offset) {
730 end_offset =
731 (mappings[j].sfm_address &
732 SHARED_TEXT_REGION_MASK) +
733 mappings[j].sfm_size;
734 }
735 max_prot |= mappings[j].sfm_max_prot;
736 }
737 /* Make all mappings relative to the base_offset */
738 base_offset = vm_map_trunc_page(base_offset);
739 end_offset = vm_map_round_page(end_offset);
740 for (j = 0; j < mapping_count; j++) {
741 mappings[j].sfm_address -= base_offset;
742 }
743 original_base_offset = base_offset;
744 if (mappings_in_segment == FALSE) {
745 /*
746 * We're trying to map a library that was not pre-bound to
747 * be in the shared segments. We want to try and slide it
748 * back into the shared segments but as far back as possible,
749 * so that it doesn't clash with pre-bound libraries. Set
750 * the base_offset to the end of the region, so that it can't
751 * possibly fit there and will have to be slid.
752 */
753 base_offset = SHARED_TEXT_REGION_SIZE - end_offset;
754 }
755
756 /* get the file's memory object handle */
757 UBCINFOCHECK("shared_region_map_file_np", vp);
758 file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
759 if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
760 error = EINVAL;
761 goto done;
762 }
763
764 /*
765 * Get info about the current process's shared region.
766 * This might change if we decide we need to clone the shared region.
767 */
768 vm_get_shared_region(current_task(), &shared_region);
769 task_mapping_info.self = (vm_offset_t) shared_region;
770 shared_region_mapping_info(shared_region,
771 &(task_mapping_info.text_region),
772 &(task_mapping_info.text_size),
773 &(task_mapping_info.data_region),
774 &(task_mapping_info.data_size),
775 &(task_mapping_info.region_mappings),
776 &(task_mapping_info.client_base),
777 &(task_mapping_info.alternate_base),
778 &(task_mapping_info.alternate_next),
779 &(task_mapping_info.fs_base),
780 &(task_mapping_info.system),
781 &(task_mapping_info.flags),
782 &next);
783
784 /*
785 * Are we using the system's current shared region
786 * for this environment ?
787 */
788 default_shared_region =
789 lookup_default_shared_region(ENV_DEFAULT_ROOT,
790 task_mapping_info.system);
791 if (shared_region == default_shared_region) {
792 using_default_region = TRUE;
793 } else {
794 using_default_region = FALSE;
795 }
796 shared_region_mapping_dealloc(default_shared_region);
797
798 if (vp->v_mount != rootvnode->v_mount &&
799 using_default_region) {
800 /*
801 * The split library is not on the root filesystem. We don't
802 		 * want to pollute the system-wide ("default") shared region
803 * with it.
804 * Reject the mapping. The caller (dyld) should "privatize"
805 * (via shared_region_make_private()) the shared region and
806 * try to establish the mapping privately for this process.
807 */
808 error = EXDEV;
809 goto done;
810 }
811
812
813 /*
814 * Map the split library.
815 */
816 kr = map_shared_file(mapping_count,
817 mappings,
818 file_control,
819 file_size,
820 &task_mapping_info,
821 base_offset,
822 (user_slide_p) ? &slide : NULL);
823
824 switch (kr) {
825 case KERN_SUCCESS:
826 /*
827 * The mapping was successful. Let the buffer cache know
828 * that we've mapped that file with these protections. This
829 * prevents the vnode from getting recycled while it's mapped.
830 */
831 (void) ubc_map(vp, max_prot);
832 error = 0;
833 break;
834 case KERN_INVALID_ADDRESS:
835 error = EFAULT;
836 goto done;
837 case KERN_PROTECTION_FAILURE:
838 error = EPERM;
839 goto done;
840 case KERN_NO_SPACE:
841 error = ENOMEM;
842 goto done;
843 case KERN_FAILURE:
844 case KERN_INVALID_ARGUMENT:
845 default:
846 error = EINVAL;
847 goto done;
848 }
849
850 if (p->p_flag & P_NOSHLIB) {
851 /* signal that this process is now using split libraries */
852 p->p_flag &= ~P_NOSHLIB;
853 }
854
855 if (user_slide_p) {
856 /*
857 * The caller provided a pointer to a "slide" offset. Let
858 * them know by how much we slid the mappings.
859 */
860 if (mappings_in_segment == FALSE) {
861 /*
862 * We faked the base_offset earlier, so undo that
863 * and take into account the real base_offset.
864 */
865 slide += SHARED_TEXT_REGION_SIZE - end_offset;
866 slide -= original_base_offset;
867 /*
868 * The mappings were slid into the shared segments
869 * and "slide" is relative to the beginning of the
870 * shared segments. Adjust it to be absolute.
871 */
872 slide += GLOBAL_SHARED_TEXT_SEGMENT;
873 }
874 error = copyout(&slide,
875 user_slide_p,
876 sizeof (int64_t));
877 }
878
879 done:
880 if (vp != NULL) {
881 /*
882 * release the vnode...
883 * ubc_map() still holds it for us in the non-error case
884 */
885 (void) vnode_put(vp);
886 vp = NULL;
887 }
888 if (fp != NULL) {
889 /* release the file descriptor */
890 fp_drop(p, fd, fp, 0);
891 fp = NULL;
892 }
893 if (mappings != NULL &&
894 mappings != &stack_mappings[0]) {
895 kmem_free(kernel_map,
896 (vm_offset_t) mappings,
897 mapping_count * sizeof (mappings[0]));
898 }
899 mappings = NULL;
900
901 return error;
902 }
903
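/*
 * load_shared_file: map a split library, named by pathname, into the
 * shared region according to a list of sf_mapping_t entries.  The
 * "flags" word is both an input (e.g. NEW_LOCAL_SHARED_REGIONS,
 * ALTERNATE_LOAD_SITE, QUERY_IS_SYSTEM_REGION) and an output
 * (e.g. SYSTEM_REGION_BACKED is reported back to the caller).
 */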
904 int
905 load_shared_file(struct proc *p, struct load_shared_file_args *uap,
906 __unused int *retval)
907 {
908 caddr_t mapped_file_addr=uap->mfa;
909 u_long mapped_file_size=uap->mfs;
910 caddr_t *base_address=uap->ba;
911 int map_cnt=uap->map_cnt;
912 sf_mapping_t *mappings=uap->mappings;
913 char *filename=uap->filename;
914 int *flags=uap->flags;
915 struct vnode *vp = 0;
916 struct nameidata nd, *ndp;
917 char *filename_str;
918 register int error;
919 kern_return_t kr;
920
921 struct vfs_context context;
922 off_t file_size;
923 memory_object_control_t file_control;
924 sf_mapping_t *map_list;
925 caddr_t local_base;
926 int local_flags;
927 int caller_flags;
928 int i;
929 int default_regions = 0;
930 vm_size_t dummy;
931 kern_return_t kret;
932
933 shared_region_mapping_t shared_region;
934 struct shared_region_task_mappings task_mapping_info;
935 shared_region_mapping_t next;
936
937 context.vc_proc = p;
938 context.vc_ucred = kauth_cred_get();
939
940 ndp = &nd;
941
942 AUDIT_ARG(addr, CAST_USER_ADDR_T(base_address));
943 /* Retrieve the base address */
944 if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {
945 goto lsf_bailout;
946 }
947 if ( (error = copyin(CAST_USER_ADDR_T(flags), &local_flags, sizeof (int))) ) {
948 goto lsf_bailout;
949 }
950
951 if(local_flags & QUERY_IS_SYSTEM_REGION) {
952 shared_region_mapping_t default_shared_region;
953 vm_get_shared_region(current_task(), &shared_region);
954 task_mapping_info.self = (vm_offset_t)shared_region;
955
956 shared_region_mapping_info(shared_region,
957 &(task_mapping_info.text_region),
958 &(task_mapping_info.text_size),
959 &(task_mapping_info.data_region),
960 &(task_mapping_info.data_size),
961 &(task_mapping_info.region_mappings),
962 &(task_mapping_info.client_base),
963 &(task_mapping_info.alternate_base),
964 &(task_mapping_info.alternate_next),
965 &(task_mapping_info.fs_base),
966 &(task_mapping_info.system),
967 &(task_mapping_info.flags), &next);
968
969 default_shared_region =
970 lookup_default_shared_region(
971 ENV_DEFAULT_ROOT,
972 task_mapping_info.system);
973 if (shared_region == default_shared_region) {
974 local_flags = SYSTEM_REGION_BACKED;
975 } else {
976 local_flags = 0;
977 }
978 shared_region_mapping_dealloc(default_shared_region);
979 error = 0;
980 error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int));
981 goto lsf_bailout;
982 }
983 caller_flags = local_flags;
984 kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
985 (vm_size_t)(MAXPATHLEN));
986 if (kret != KERN_SUCCESS) {
987 error = ENOMEM;
988 goto lsf_bailout;
989 }
990 kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
991 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
992 if (kret != KERN_SUCCESS) {
993 kmem_free(kernel_map, (vm_offset_t)filename_str,
994 (vm_size_t)(MAXPATHLEN));
995 error = ENOMEM;
996 goto lsf_bailout;
997 }
998
999 if ( (error = copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
1000 goto lsf_bailout_free;
1001 }
1002
1003 if ( (error = copyinstr(CAST_USER_ADDR_T(filename), filename_str,
1004 MAXPATHLEN, (size_t *)&dummy)) ) {
1005 goto lsf_bailout_free;
1006 }
1007
1008 /*
1009 * Get a vnode for the target file
1010 */
1011 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE32,
1012 CAST_USER_ADDR_T(filename_str), &context);
1013
1014 if ((error = namei(ndp))) {
1015 goto lsf_bailout_free;
1016 }
1017 vp = ndp->ni_vp;
1018
1019 nameidone(ndp);
1020
1021 if (vp->v_type != VREG) {
1022 error = EINVAL;
1023 goto lsf_bailout_free_vput;
1024 }
1025
1026 UBCINFOCHECK("load_shared_file", vp);
1027
1028 if ((error = vnode_size(vp, &file_size, &context)) != 0)
1029 goto lsf_bailout_free_vput;
1030
1031 file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
1032 if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
1033 error = EINVAL;
1034 goto lsf_bailout_free_vput;
1035 }
1036
1037 #ifdef notdef
1038 if(file_size != mapped_file_size) {
1039 error = EINVAL;
1040 goto lsf_bailout_free_vput;
1041 }
1042 #endif
1043 if(p->p_flag & P_NOSHLIB) {
1044 p->p_flag = p->p_flag & ~P_NOSHLIB;
1045 }
1046
1047 	/* load alternate regions if the caller has requested them. */
1048 /* Note: the new regions are "clean slates" */
1049 if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
1050 error = clone_system_shared_regions(FALSE,
1051 TRUE, /* chain_regions */
1052 ENV_DEFAULT_ROOT);
1053 if (error) {
1054 goto lsf_bailout_free_vput;
1055 }
1056 }
1057
1058 vm_get_shared_region(current_task(), &shared_region);
1059 task_mapping_info.self = (vm_offset_t)shared_region;
1060
1061 shared_region_mapping_info(shared_region,
1062 &(task_mapping_info.text_region),
1063 &(task_mapping_info.text_size),
1064 &(task_mapping_info.data_region),
1065 &(task_mapping_info.data_size),
1066 &(task_mapping_info.region_mappings),
1067 &(task_mapping_info.client_base),
1068 &(task_mapping_info.alternate_base),
1069 &(task_mapping_info.alternate_next),
1070 &(task_mapping_info.fs_base),
1071 &(task_mapping_info.system),
1072 &(task_mapping_info.flags), &next);
1073
1074 {
1075 shared_region_mapping_t default_shared_region;
1076 default_shared_region =
1077 lookup_default_shared_region(
1078 ENV_DEFAULT_ROOT,
1079 task_mapping_info.system);
1080 if(shared_region == default_shared_region) {
1081 default_regions = 1;
1082 }
1083 shared_region_mapping_dealloc(default_shared_region);
1084 }
1085 /* If we are running on a removable file system we must not */
1086 /* be in a set of shared regions or the file system will not */
1087 /* be removable. */
1088 if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
1089 && (lsf_mapping_pool_gauge() < 75)) {
1090 /* We don't want to run out of shared memory */
1091 /* map entries by starting too many private versions */
1092 /* of the shared library structures */
1093 int error2;
1094
1095 error2 = clone_system_shared_regions(!(p->p_flag & P_NOSHLIB),
1096 TRUE, /* chain_regions */
1097 ENV_DEFAULT_ROOT);
1098 if (error2) {
1099 goto lsf_bailout_free_vput;
1100 }
1101 local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
1102 vm_get_shared_region(current_task(), &shared_region);
1103 shared_region_mapping_info(shared_region,
1104 &(task_mapping_info.text_region),
1105 &(task_mapping_info.text_size),
1106 &(task_mapping_info.data_region),
1107 &(task_mapping_info.data_size),
1108 &(task_mapping_info.region_mappings),
1109 &(task_mapping_info.client_base),
1110 &(task_mapping_info.alternate_base),
1111 &(task_mapping_info.alternate_next),
1112 &(task_mapping_info.fs_base),
1113 &(task_mapping_info.system),
1114 &(task_mapping_info.flags), &next);
1115 }
1116
1117 	/* This is a work-around to allow executables which have been */
1118 	/* built without knowledge of the proper shared segment to */
1119 	/* load.  This code has been architected as a shared region */
1120 	/* handler; the knowledge of where the regions are loaded is */
1121 	/* problematic for the extension of shared regions as it will */
1122 	/* not be easy to know what region an item should go into. */
1123 	/* The code below, however, works around a short-term problem */
1124 	/* with executables which believe they are loading at zero. */
1125
1126 {
1127 if (((unsigned int)local_base &
1128 (~(task_mapping_info.text_size - 1))) !=
1129 task_mapping_info.client_base) {
1130 if(local_flags & ALTERNATE_LOAD_SITE) {
1131 local_base = (caddr_t)(
1132 (unsigned int)local_base &
1133 (task_mapping_info.text_size - 1));
1134 local_base = (caddr_t)((unsigned int)local_base
1135 | task_mapping_info.client_base);
1136 } else {
1137 error = EINVAL;
1138 goto lsf_bailout_free_vput;
1139 }
1140 }
1141 }
1142
1143
1144 if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
1145 mapped_file_size,
1146 (vm_offset_t *)&local_base,
1147 map_cnt, map_list, file_control,
1148 &task_mapping_info, &local_flags))) {
1149 switch (kr) {
1150 case KERN_FAILURE:
1151 error = EINVAL;
1152 break;
1153 case KERN_INVALID_ARGUMENT:
1154 error = EINVAL;
1155 break;
1156 case KERN_INVALID_ADDRESS:
1157 error = EFAULT;
1158 break;
1159 case KERN_PROTECTION_FAILURE:
1160 /* save EAUTH for authentication in this */
1161 /* routine */
1162 error = EPERM;
1163 break;
1164 case KERN_NO_SPACE:
1165 error = ENOMEM;
1166 break;
1167 default:
1168 error = EINVAL;
1169 };
1170 if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
1171 printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
1172 for(i=0; i<map_cnt; i++) {
1173 printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
1174 , i, map_list[i].mapping_offset,
1175 map_list[i].size,
1176 map_list[i].file_offset,
1177 map_list[i].protection);
1178 }
1179 }
1180 } else {
1181 if(default_regions)
1182 local_flags |= SYSTEM_REGION_BACKED;
1183 if(!(error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int)))) {
1184 error = copyout(&local_base,
1185 CAST_USER_ADDR_T(base_address), sizeof (caddr_t));
1186 }
1187 }
1188
1189 lsf_bailout_free_vput:
1190 vnode_put(vp);
1191
1192 lsf_bailout_free:
1193 kmem_free(kernel_map, (vm_offset_t)filename_str,
1194 (vm_size_t)(MAXPATHLEN));
1195 kmem_free(kernel_map, (vm_offset_t)map_list,
1196 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
1197
1198 lsf_bailout:
1199 return error;
1200 }
1201
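/*
 * reset_shared_file: re-establish the original read-only shared DATA
 * mappings for a previously loaded split library, discarding any
 * private (copied) pages in those ranges.
 */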
1202 int
1203 reset_shared_file(__unused struct proc *p, struct reset_shared_file_args *uap,
1204 __unused register int *retval)
1205 {
1206 caddr_t *base_address=uap->ba;
1207 int map_cnt=uap->map_cnt;
1208 sf_mapping_t *mappings=uap->mappings;
1209 register int error;
1210
1211 sf_mapping_t *map_list;
1212 caddr_t local_base;
1213 vm_offset_t map_address;
1214 int i;
1215 kern_return_t kret;
1216
1217 AUDIT_ARG(addr, CAST_DOWN(user_addr_t, base_address));
1218 /* Retrieve the base address */
1219 if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {
1220 goto rsf_bailout;
1221 }
1222
1223 if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
1224 != GLOBAL_SHARED_TEXT_SEGMENT) {
1225 error = EINVAL;
1226 goto rsf_bailout;
1227 }
1228
1229 kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
1230 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
1231 if (kret != KERN_SUCCESS) {
1232 error = ENOMEM;
1233 goto rsf_bailout;
1234 }
1235
1236 if ( (error =
1237 copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
1238
1239 kmem_free(kernel_map, (vm_offset_t)map_list,
1240 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
1241 goto rsf_bailout;
1242 }
1243 for (i = 0; i<map_cnt; i++) {
1244 if((map_list[i].mapping_offset
1245 & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
1246 map_address = (vm_offset_t)
1247 (local_base + map_list[i].mapping_offset);
1248 vm_deallocate(current_map(),
1249 map_address,
1250 map_list[i].size);
1251 vm_map(current_map(), &map_address,
1252 map_list[i].size, 0,
1253 SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
1254 shared_data_region_handle,
1255 ((unsigned int)local_base
1256 & SHARED_DATA_REGION_MASK) +
1257 (map_list[i].mapping_offset
1258 & SHARED_DATA_REGION_MASK),
1259 TRUE, VM_PROT_READ,
1260 VM_PROT_READ, VM_INHERIT_SHARE);
1261 }
1262 }
1263
1264 kmem_free(kernel_map, (vm_offset_t)map_list,
1265 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
1266
1267 rsf_bailout:
1268 return error;
1269 }
1270
1271 int
1272 new_system_shared_regions(__unused struct proc *p,
1273 __unused struct new_system_shared_regions_args *uap,
1274 register int *retval)
1275 {
1276 if(!(is_suser())) {
1277 *retval = EINVAL;
1278 return EINVAL;
1279 }
1280
1281 /* clear all of our existing defaults */
1282 remove_all_shared_regions();
1283
1284 *retval = 0;
1285 return 0;
1286 }
1287
1288
1289
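/*
 * clone_system_shared_regions: give the current task its own shared
 * region.  A fresh system region is created when the old regions are
 * active or the default root is in use; otherwise the per-root default
 * region is looked up.  When the old regions were active, their text
 * and data areas are cloned into the new region, which is then either
 * chained to (shadows) the old region or completely detached,
 * depending on "chain_regions".
 */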
1290 int
1291 clone_system_shared_regions(
1292 int shared_regions_active,
1293 int chain_regions,
1294 int base_vnode)
1295 {
1296 shared_region_mapping_t new_shared_region;
1297 shared_region_mapping_t next;
1298 shared_region_mapping_t old_shared_region;
1299 struct shared_region_task_mappings old_info;
1300 struct shared_region_task_mappings new_info;
1301
1302 vm_get_shared_region(current_task(), &old_shared_region);
1303 old_info.self = (vm_offset_t)old_shared_region;
1304 shared_region_mapping_info(old_shared_region,
1305 &(old_info.text_region),
1306 &(old_info.text_size),
1307 &(old_info.data_region),
1308 &(old_info.data_size),
1309 &(old_info.region_mappings),
1310 &(old_info.client_base),
1311 &(old_info.alternate_base),
1312 &(old_info.alternate_next),
1313 &(old_info.fs_base),
1314 &(old_info.system),
1315 &(old_info.flags), &next);
1316 if ((shared_regions_active) ||
1317 (base_vnode == ENV_DEFAULT_ROOT)) {
1318 if (shared_file_create_system_region(&new_shared_region))
1319 return (ENOMEM);
1320 } else {
1321 new_shared_region =
1322 lookup_default_shared_region(
1323 base_vnode, old_info.system);
1324 if(new_shared_region == NULL) {
1325 shared_file_boot_time_init(
1326 base_vnode, old_info.system);
1327 vm_get_shared_region(current_task(), &new_shared_region);
1328 } else {
1329 vm_set_shared_region(current_task(), new_shared_region);
1330 }
1331 if(old_shared_region)
1332 shared_region_mapping_dealloc(old_shared_region);
1333 }
1334 new_info.self = (vm_offset_t)new_shared_region;
1335 shared_region_mapping_info(new_shared_region,
1336 &(new_info.text_region),
1337 &(new_info.text_size),
1338 &(new_info.data_region),
1339 &(new_info.data_size),
1340 &(new_info.region_mappings),
1341 &(new_info.client_base),
1342 &(new_info.alternate_base),
1343 &(new_info.alternate_next),
1344 &(new_info.fs_base),
1345 &(new_info.system),
1346 &(new_info.flags), &next);
1347 if(shared_regions_active) {
1348 if(vm_region_clone(old_info.text_region, new_info.text_region)) {
1349 panic("clone_system_shared_regions: shared region mis-alignment 1");
1350 shared_region_mapping_dealloc(new_shared_region);
1351 return(EINVAL);
1352 }
1353 if (vm_region_clone(old_info.data_region, new_info.data_region)) {
1354 panic("clone_system_shared_regions: shared region mis-alignment 2");
1355 shared_region_mapping_dealloc(new_shared_region);
1356 return(EINVAL);
1357 }
1358 if (chain_regions) {
1359 /*
1360 * We want a "shadowed" clone, a private superset of the old
1361 * shared region. The info about the old mappings is still
1362 * valid for us.
1363 */
1364 shared_region_object_chain_attach(
1365 new_shared_region, old_shared_region);
1366 } else {
1367 /*
1368 * We want a completely detached clone with no link to
1369 * the old shared region. We'll be removing some mappings
1370 * in our private, cloned, shared region, so the old mappings
1371 * will become irrelevant to us. Since we have a private
1372 * "shared region" now, it isn't going to be shared with
1373 * anyone else and we won't need to maintain mappings info.
1374 */
1375 shared_region_object_chain_detached(new_shared_region);
1376 }
1377 }
1378 if (vm_map_region_replace(current_map(), old_info.text_region,
1379 new_info.text_region, old_info.client_base,
1380 old_info.client_base+old_info.text_size)) {
1381 panic("clone_system_shared_regions: shared region mis-alignment 3");
1382 shared_region_mapping_dealloc(new_shared_region);
1383 return(EINVAL);
1384 }
1385 if(vm_map_region_replace(current_map(), old_info.data_region,
1386 new_info.data_region,
1387 old_info.client_base + old_info.text_size,
1388 old_info.client_base
1389 + old_info.text_size + old_info.data_size)) {
1390 panic("clone_system_shared_regions: shared region mis-alignment 4");
1391 shared_region_mapping_dealloc(new_shared_region);
1392 return(EINVAL);
1393 }
1394 vm_set_shared_region(current_task(), new_shared_region);
1395
1396 /* consume the reference which wasn't accounted for in object */
1397 /* chain attach */
1398 if (!shared_regions_active || !chain_regions)
1399 shared_region_mapping_dealloc(old_shared_region);
1400
1401 return(0);
1402
1403 }
1404
1405 /* header for the profile name file. The profiled app info is held */
1406 /* in the data file and pointed to by elements in the name file */
1407
1408 struct profile_names_header {
1409 unsigned int number_of_profiles;
1410 unsigned int user_id;
1411 unsigned int version;
1412 off_t element_array;
1413 unsigned int spare1;
1414 unsigned int spare2;
1415 unsigned int spare3;
1416 };
1417
1418 struct profile_element {
1419 off_t addr;
1420 vm_size_t size;
1421 unsigned int mod_date;
1422 unsigned int inode;
1423 char name[12];
1424 };
1425
1426 struct global_profile {
1427 struct vnode *names_vp;
1428 struct vnode *data_vp;
1429 vm_offset_t buf_ptr;
1430 unsigned int user;
1431 unsigned int age;
1432 unsigned int busy;
1433 };
1434
1435 struct global_profile_cache {
1436 int max_ele;
1437 unsigned int age;
1438 struct global_profile profiles[3];
1439 };
1440
1441 /* forward declarations */
1442 int bsd_open_page_cache_files(unsigned int user,
1443 struct global_profile **profile);
1444 void bsd_close_page_cache_files(struct global_profile *profile);
1445 int bsd_search_page_cache_data_base(
1446 struct vnode *vp,
1447 struct profile_names_header *database,
1448 char *app_name,
1449 unsigned int mod_date,
1450 unsigned int inode,
1451 off_t *profile,
1452 unsigned int *profile_size);
1453
1454 struct global_profile_cache global_user_profile_cache =
1455 {3, 0, {{NULL, NULL, 0, 0, 0, 0},
1456 {NULL, NULL, 0, 0, 0, 0},
1457 {NULL, NULL, 0, 0, 0, 0}} };
1458
1459 /* BSD_OPEN_PAGE_CACHE_FILES: */
1460 /* Caller provides a user id. This id was used in */
1461 /* prepare_profile_database to create two unique absolute */
1462 /* file paths to the associated profile files. These files */
1463 /* are either opened or bsd_open_page_cache_files returns an */
1464 /* error. The header of the names file is then consulted. */
1465 /* The header and the vnodes for the names and data files are */
1466 /* returned. */
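/* The files live under "/var/vm/app_profile/": "<uid-in-hex>_data" */
/* holds the profile data and "<uid-in-hex>_names" the name index. */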
1467
1468 int
1469 bsd_open_page_cache_files(
1470 unsigned int user,
1471 struct global_profile **profile)
1472 {
1473 const char *cache_path = "/var/vm/app_profile/";
1474 struct proc *p;
1475 int error;
1476 vm_size_t resid;
1477 off_t resid_off;
1478 unsigned int lru;
1479 vm_size_t size;
1480
1481 struct vnode *names_vp;
1482 struct vnode *data_vp;
1483 vm_offset_t names_buf;
1484 vm_offset_t buf_ptr;
1485
1486 int profile_names_length;
1487 int profile_data_length;
1488 char *profile_data_string;
1489 char *profile_names_string;
1490 char *substring;
1491
1492 off_t file_size;
1493 struct vfs_context context;
1494
1495 kern_return_t ret;
1496
1497 struct nameidata nd_names;
1498 struct nameidata nd_data;
1499 int i;
1500
1501
1502 p = current_proc();
1503
1504 context.vc_proc = p;
1505 context.vc_ucred = kauth_cred_get();
1506
1507 restart:
1508 for(i = 0; i<global_user_profile_cache.max_ele; i++) {
1509 if((global_user_profile_cache.profiles[i].user == user)
1510 && (global_user_profile_cache.profiles[i].data_vp
1511 != NULL)) {
1512 *profile = &global_user_profile_cache.profiles[i];
1513 /* already in cache, we're done */
1514 if ((*profile)->busy) {
1515 /*
1516 * drop funnel and wait
1517 */
1518 (void)tsleep((void *)
1519 *profile,
1520 PRIBIO, "app_profile", 0);
1521 goto restart;
1522 }
1523 (*profile)->busy = 1;
1524 (*profile)->age = global_user_profile_cache.age;
1525
1526 /*
1527 * entries in cache are held with a valid
1528 * usecount... take an iocount which will
1529 * be dropped in "bsd_close_page_cache_files"
1530 * which is called after the read or writes to
1531 * these files are done
1532 */
1533 if ( (vnode_getwithref((*profile)->data_vp)) ) {
1534
1535 vnode_rele((*profile)->data_vp);
1536 vnode_rele((*profile)->names_vp);
1537
1538 (*profile)->data_vp = NULL;
1539 (*profile)->busy = 0;
1540 wakeup(*profile);
1541
1542 goto restart;
1543 }
1544 if ( (vnode_getwithref((*profile)->names_vp)) ) {
1545
1546 vnode_put((*profile)->data_vp);
1547 vnode_rele((*profile)->data_vp);
1548 vnode_rele((*profile)->names_vp);
1549
1550 (*profile)->data_vp = NULL;
1551 (*profile)->busy = 0;
1552 wakeup(*profile);
1553
1554 goto restart;
1555 }
1556 global_user_profile_cache.age+=1;
1557 return 0;
1558 }
1559 }
1560
1561 lru = global_user_profile_cache.age;
1562 *profile = NULL;
1563 for(i = 0; i<global_user_profile_cache.max_ele; i++) {
1564 /* Skip entry if it is in the process of being reused */
1565 if(global_user_profile_cache.profiles[i].data_vp ==
1566 (struct vnode *)0xFFFFFFFF)
1567 continue;
1568 /* Otherwise grab the first empty entry */
1569 if(global_user_profile_cache.profiles[i].data_vp == NULL) {
1570 *profile = &global_user_profile_cache.profiles[i];
1571 (*profile)->age = global_user_profile_cache.age;
1572 break;
1573 }
1574 /* Otherwise grab the oldest entry */
1575 if(global_user_profile_cache.profiles[i].age < lru) {
1576 lru = global_user_profile_cache.profiles[i].age;
1577 *profile = &global_user_profile_cache.profiles[i];
1578 }
1579 }
1580
1581 /* Did we set it? */
1582 if (*profile == NULL) {
1583 /*
1584 * No entries are available; this can only happen if all
1585 * of them are currently in the process of being reused;
1586 * if this happens, we sleep on the address of the first
1587 * element, and restart. This is less than ideal, but we
1588 * know it will work because we know that there will be a
1589 * wakeup on any entry currently in the process of being
1590 * reused.
1591 *
1592 		 * XXX Recommend a two-handed clock and more than 3 total
1593 * XXX cache entries at some point in the future.
1594 */
1595 /*
1596 * drop funnel and wait
1597 */
1598 (void)tsleep((void *)
1599 &global_user_profile_cache.profiles[0],
1600 PRIBIO, "app_profile", 0);
1601 goto restart;
1602 }
1603
1604 /*
1605 * If it's currently busy, we've picked the one at the end of the
1606 * LRU list, but it's currently being actively used. We sleep on
1607 * its address and restart.
1608 */
1609 if ((*profile)->busy) {
1610 /*
1611 * drop funnel and wait
1612 */
1613 (void)tsleep((void *)
1614 *profile,
1615 PRIBIO, "app_profile", 0);
1616 goto restart;
1617 }
1618 (*profile)->busy = 1;
1619 (*profile)->user = user;
1620
1621 /*
1622 * put dummy value in for now to get competing request to wait
1623 * above until we are finished
1624 *
1625 * Save the data_vp before setting it, so we can set it before
1626 * we kmem_free() or vrele(). If we don't do this, then we
1627 * have a potential funnel race condition we have to deal with.
1628 */
1629 data_vp = (*profile)->data_vp;
1630 (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
1631
1632 /*
1633 * Age the cache here in all cases; this guarantees that we won't
1634 * be reusing only one entry over and over, once the system reaches
1635 * steady-state.
1636 */
1637 global_user_profile_cache.age+=1;
1638
1639 if(data_vp != NULL) {
1640 kmem_free(kernel_map,
1641 (*profile)->buf_ptr, 4 * PAGE_SIZE);
1642 if ((*profile)->names_vp) {
1643 vnode_rele((*profile)->names_vp);
1644 (*profile)->names_vp = NULL;
1645 }
1646 vnode_rele(data_vp);
1647 }
1648
1649 	/* Try to open the appropriate user's profile files */
1650 /* If neither file is present, try to create them */
1651 /* If one file is present and the other not, fail. */
1652 /* If the files do exist, check them for the app_file */
1653 /* requested and read it in if present */
1654
1655 ret = kmem_alloc(kernel_map,
1656 (vm_offset_t *)&profile_data_string, PATH_MAX);
1657
1658 if(ret) {
1659 (*profile)->data_vp = NULL;
1660 (*profile)->busy = 0;
1661 wakeup(*profile);
1662 return ENOMEM;
1663 }
1664
1665 /* Split the buffer in half since we know the size of */
1666 /* our file path and our allocation is adequate for */
1667 /* both file path names */
1668 profile_names_string = profile_data_string + (PATH_MAX/2);
1669
1670
1671 strcpy(profile_data_string, cache_path);
1672 strcpy(profile_names_string, cache_path);
1673 profile_names_length = profile_data_length
1674 = strlen(profile_data_string);
1675 substring = profile_data_string + profile_data_length;
1676 sprintf(substring, "%x_data", user);
1677 substring = profile_names_string + profile_names_length;
1678 sprintf(substring, "%x_names", user);
1679
1680 /* We now have the absolute file names */
1681
1682 ret = kmem_alloc(kernel_map,
1683 (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
1684 if(ret) {
1685 kmem_free(kernel_map,
1686 (vm_offset_t)profile_data_string, PATH_MAX);
1687 (*profile)->data_vp = NULL;
1688 (*profile)->busy = 0;
1689 wakeup(*profile);
1690 return ENOMEM;
1691 }
1692
1693 NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
1694 UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
1695 NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
1696 UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);
1697
1698 if ( (error = vn_open(&nd_data, FREAD | FWRITE, 0)) ) {
1699 #ifdef notdef
1700 printf("bsd_open_page_cache_files: CacheData file not found %s\n",
1701 profile_data_string);
1702 #endif
1703 kmem_free(kernel_map,
1704 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1705 kmem_free(kernel_map,
1706 (vm_offset_t)profile_data_string, PATH_MAX);
1707 (*profile)->data_vp = NULL;
1708 (*profile)->busy = 0;
1709 wakeup(*profile);
1710 return error;
1711 }
1712 data_vp = nd_data.ni_vp;
1713
1714 if ( (error = vn_open(&nd_names, FREAD | FWRITE, 0)) ) {
1715 printf("bsd_open_page_cache_files: NamesData file not found %s\n",
1716 				profile_names_string);
1717 kmem_free(kernel_map,
1718 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1719 kmem_free(kernel_map,
1720 (vm_offset_t)profile_data_string, PATH_MAX);
1721
1722 vnode_rele(data_vp);
1723 vnode_put(data_vp);
1724
1725 (*profile)->data_vp = NULL;
1726 (*profile)->busy = 0;
1727 wakeup(*profile);
1728 return error;
1729 }
1730 names_vp = nd_names.ni_vp;
1731
1732 if ((error = vnode_size(names_vp, &file_size, &context)) != 0) {
1733 printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
1734 kmem_free(kernel_map,
1735 (vm_offset_t)profile_data_string, PATH_MAX);
1736 kmem_free(kernel_map,
1737 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1738
1739 vnode_rele(names_vp);
1740 vnode_put(names_vp);
1741 vnode_rele(data_vp);
1742 vnode_put(data_vp);
1743
1744 (*profile)->data_vp = NULL;
1745 (*profile)->busy = 0;
1746 wakeup(*profile);
1747 return error;
1748 }
1749
1750 size = file_size;
1751 if(size > 4 * PAGE_SIZE)
1752 size = 4 * PAGE_SIZE;
1753 buf_ptr = names_buf;
1754 resid_off = 0;
1755
1756 while(size) {
1757 error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
1758 size, resid_off,
1759 UIO_SYSSPACE32, IO_NODELOCKED, kauth_cred_get(), &resid, p);
1760 if((error) || (size == resid)) {
1761 if(!error) {
1762 error = EINVAL;
1763 }
1764 kmem_free(kernel_map,
1765 (vm_offset_t)profile_data_string, PATH_MAX);
1766 kmem_free(kernel_map,
1767 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
1768
1769 vnode_rele(names_vp);
1770 vnode_put(names_vp);
1771 vnode_rele(data_vp);
1772 vnode_put(data_vp);
1773
1774 (*profile)->data_vp = NULL;
1775 (*profile)->busy = 0;
1776 wakeup(*profile);
1777 return error;
1778 }
1779 buf_ptr += size-resid;
1780 resid_off += size-resid;
1781 size = resid;
1782 }
1783 kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
1784
1785 (*profile)->names_vp = names_vp;
1786 (*profile)->data_vp = data_vp;
1787 (*profile)->buf_ptr = names_buf;
1788
1789 /*
1790 	 * at this point, both the names_vp and the data_vp have
1791 	 * a valid usecount and an iocount held
1792 */
1793 return 0;
1794
1795 }
1796
1797 void
1798 bsd_close_page_cache_files(
1799 struct global_profile *profile)
1800 {
1801 vnode_put(profile->data_vp);
1802 vnode_put(profile->names_vp);
1803
1804 profile->busy = 0;
1805 wakeup(profile);
1806 }
1807
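/*
 * bsd_read_page_cache_file: look up the page-in profile recorded for
 * app_name/app_vp in the per-user cache files and, if one exists,
 * return it in a freshly kmem_alloc'd buffer (with its size) along
 * with the file id and modification date used as the cache key.
 */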
1808 int
1809 bsd_read_page_cache_file(
1810 unsigned int user,
1811 int *fid,
1812 int *mod,
1813 char *app_name,
1814 struct vnode *app_vp,
1815 vm_offset_t *buffer,
1816 vm_offset_t *bufsize)
1817 {
1818
1819 boolean_t funnel_state;
1820
1821 struct proc *p;
1822 int error;
1823 unsigned int resid;
1824
1825 off_t profile;
1826 unsigned int profile_size;
1827
1828 vm_offset_t names_buf;
1829 struct vnode_attr va;
1830 struct vfs_context context;
1831
1832 kern_return_t ret;
1833
1834 struct vnode *names_vp;
1835 struct vnode *data_vp;
1836
1837 struct global_profile *uid_files;
1838
1839 funnel_state = thread_funnel_set(kernel_flock, TRUE);
1840
1841 	/* Try to open the appropriate user's profile files */
1842 /* If neither file is present, try to create them */
1843 /* If one file is present and the other not, fail. */
1844 /* If the files do exist, check them for the app_file */
1845 /* requested and read it in if present */
1846
1847
1848 error = bsd_open_page_cache_files(user, &uid_files);
1849 if(error) {
1850 thread_funnel_set(kernel_flock, funnel_state);
1851 return EINVAL;
1852 }
1853
1854 p = current_proc();
1855
1856 names_vp = uid_files->names_vp;
1857 data_vp = uid_files->data_vp;
1858 names_buf = uid_files->buf_ptr;
1859
1860 context.vc_proc = p;
1861 context.vc_ucred = kauth_cred_get();
1862
1863 VATTR_INIT(&va);
1864 VATTR_WANTED(&va, va_fileid);
1865 VATTR_WANTED(&va, va_modify_time);
1866
1867 if ((error = vnode_getattr(app_vp, &va, &context))) {
1868 printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
1869 bsd_close_page_cache_files(uid_files);
1870 thread_funnel_set(kernel_flock, funnel_state);
1871 return error;
1872 }
1873
1874 *fid = (u_long)va.va_fileid;
1875 *mod = va.va_modify_time.tv_sec;
1876
1877 if (bsd_search_page_cache_data_base(
1878 names_vp,
1879 (struct profile_names_header *)names_buf,
1880 app_name,
1881 (unsigned int) va.va_modify_time.tv_sec,
1882 (u_long)va.va_fileid, &profile, &profile_size) == 0) {
1883 /* profile is an offset in the profile data base */
1884 /* It is zero if no profile data was found */
1885
1886 if(profile_size == 0) {
1887 *buffer = 0;
1888 *bufsize = 0;
1889 bsd_close_page_cache_files(uid_files);
1890 thread_funnel_set(kernel_flock, funnel_state);
1891 return 0;
1892 }
1893 		ret = kmem_alloc(kernel_map, buffer, profile_size);
1894 if(ret) {
1895 bsd_close_page_cache_files(uid_files);
1896 thread_funnel_set(kernel_flock, funnel_state);
1897 return ENOMEM;
1898 }
1899 *bufsize = profile_size;
1900 while(profile_size) {
1901 error = vn_rdwr(UIO_READ, data_vp,
1902 (caddr_t) *buffer, profile_size,
1903 profile, UIO_SYSSPACE32, IO_NODELOCKED,
1904 kauth_cred_get(), &resid, p);
1905 if((error) || (profile_size == resid)) {
1906 bsd_close_page_cache_files(uid_files);
1907 kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
1908 thread_funnel_set(kernel_flock, funnel_state);
1909 return EINVAL;
1910 }
1911 profile += profile_size - resid;
1912 profile_size = resid;
1913 }
1914 bsd_close_page_cache_files(uid_files);
1915 thread_funnel_set(kernel_flock, funnel_state);
1916 return 0;
1917 } else {
1918 bsd_close_page_cache_files(uid_files);
1919 thread_funnel_set(kernel_flock, funnel_state);
1920 return EINVAL;
1921 }
1922
1923 }
1924
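/*
 * bsd_search_page_cache_data_base: scan the names file (header plus an
 * array of profile_element entries, read in 4-page chunks) for an
 * entry matching app_name, mod_date and inode; on a hit, return the
 * profile's offset and size within the data file.
 */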
1925 int
1926 bsd_search_page_cache_data_base(
1927 struct vnode *vp,
1928 struct profile_names_header *database,
1929 char *app_name,
1930 unsigned int mod_date,
1931 unsigned int inode,
1932 off_t *profile,
1933 unsigned int *profile_size)
1934 {
1935
1936 struct proc *p;
1937
1938 unsigned int i;
1939 struct profile_element *element;
1940 unsigned int ele_total;
1941 unsigned int extended_list = 0;
1942 off_t file_off = 0;
1943 unsigned int size;
1944 off_t resid_off;
1945 unsigned int resid;
1946 vm_offset_t local_buf = 0;
1947
1948 int error;
1949 kern_return_t ret;
1950
1951 p = current_proc();
1952
1953 if(((vm_offset_t)database->element_array) !=
1954 sizeof(struct profile_names_header)) {
1955 return EINVAL;
1956 }
1957 element = (struct profile_element *)(
1958 (vm_offset_t)database->element_array +
1959 (vm_offset_t)database);
1960
1961 ele_total = database->number_of_profiles;
1962
1963 *profile = 0;
1964 *profile_size = 0;
1965 while(ele_total) {
1966 /* note: code assumes header + n*ele comes out on a page boundary */
1967 if(((local_buf == 0) && (sizeof(struct profile_names_header) +
1968 (ele_total * sizeof(struct profile_element)))
1969 > (PAGE_SIZE * 4)) ||
1970 ((local_buf != 0) &&
1971 (ele_total * sizeof(struct profile_element))
1972 > (PAGE_SIZE * 4))) {
1973 extended_list = ele_total;
1974 if(element == (struct profile_element *)
1975 ((vm_offset_t)database->element_array +
1976 (vm_offset_t)database)) {
1977 ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
1978 } else {
1979 ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
1980 }
1981 extended_list -= ele_total;
1982 }
1983 for (i=0; i<ele_total; i++) {
1984 if((mod_date == element[i].mod_date)
1985 && (inode == element[i].inode)) {
1986 if(strncmp(element[i].name, app_name, 12) == 0) {
1987 *profile = element[i].addr;
1988 *profile_size = element[i].size;
1989 if(local_buf != 0) {
1990 kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
1991 }
1992 return 0;
1993 }
1994 }
1995 }
1996 if(extended_list == 0)
1997 break;
1998 if(local_buf == 0) {
1999 ret = kmem_alloc(kernel_map, &local_buf, 4 * PAGE_SIZE);
2000 if(ret != KERN_SUCCESS) {
2001 return ENOMEM;
2002 }
2003 }
2004 element = (struct profile_element *)local_buf;
2005 ele_total = extended_list;
2006 extended_list = 0;
2007 file_off += 4 * PAGE_SIZE;
2008 if((ele_total * sizeof(struct profile_element)) >
2009 (PAGE_SIZE * 4)) {
2010 size = PAGE_SIZE * 4;
2011 } else {
2012 size = ele_total * sizeof(struct profile_element);
2013 }
2014 resid_off = 0;
2015 while(size) {
2016 error = vn_rdwr(UIO_READ, vp,
2017 CAST_DOWN(caddr_t, (local_buf + resid_off)),
2018 size, file_off + resid_off, UIO_SYSSPACE32,
2019 IO_NODELOCKED, kauth_cred_get(), &resid, p);
2020 if((error) || (size == resid)) {
2021 if(local_buf != 0) {
2022 kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
2023 }
2024 return EINVAL;
2025 }
2026 resid_off += size-resid;
2027 size = resid;
2028 }
2029 }
2030 if(local_buf != 0) {
2031 kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
2032 }
2033 return 0;
2034 }
2035
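/*
 * bsd_write_page_cache_file:
 * Append a freshly collected profile for (file_name, mod, fid) to the
 * end of the per-user data file and record a matching entry in the
 * names file.  If an entry with the same name, modification date and
 * inode already exists (a "twin" written by someone else), the new
 * profile is silently dropped and 0 is returned.  EINVAL is returned
 * when the names database cannot be opened or searched.
 */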
2036 int
2037 bsd_write_page_cache_file(
2038 unsigned int user,
2039 char *file_name,
2040 caddr_t buffer,
2041 vm_size_t size,
2042 int mod,
2043 int fid)
2044 {
2045 struct proc *p;
2046 int resid;
2047 off_t resid_off;
2048 int error;
2049 boolean_t funnel_state;
2050 off_t file_size;
2051 struct vfs_context context;
2052 off_t profile;
2053 unsigned int profile_size;
2054
2055 vm_offset_t names_buf;
2056 struct vnode *names_vp;
2057 struct vnode *data_vp;
2058 struct profile_names_header *profile_header;
2059 off_t name_offset;
2060 struct global_profile *uid_files;
2061
2062
2063 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2064
2065
2066 error = bsd_open_page_cache_files(user, &uid_files);
2067 if(error) {
2068 thread_funnel_set(kernel_flock, funnel_state);
2069 return EINVAL;
2070 }
2071
2072 p = current_proc();
2073
2074 names_vp = uid_files->names_vp;
2075 data_vp = uid_files->data_vp;
2076 names_buf = uid_files->buf_ptr;
2077
2078 /* Stat data file for size */
2079
2080 context.vc_proc = p;
2081 context.vc_ucred = kauth_cred_get();
2082
2083 if ((error = vnode_size(data_vp, &file_size, &context)) != 0) {
2084 printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
2085 bsd_close_page_cache_files(uid_files);
2086 thread_funnel_set(kernel_flock, funnel_state);
2087 return error;
2088 }
2089
2090 if (bsd_search_page_cache_data_base(names_vp,
2091 (struct profile_names_header *)names_buf,
2092 file_name, (unsigned int) mod,
2093 fid, &profile, &profile_size) == 0) {
2094 /* profile is an offset in the profile data base */
2095 /* It is zero if no profile data was found */
2096
2097 if(profile_size == 0) {
2098 unsigned int header_size;
2099 vm_offset_t buf_ptr;
2100
2101 /* Our Write case */
2102
2103 /* read header for last entry */
2104 profile_header =
2105 (struct profile_names_header *)names_buf;
2106 name_offset = sizeof(struct profile_names_header) +
2107 (sizeof(struct profile_element)
2108 * profile_header->number_of_profiles);
2109 profile_header->number_of_profiles += 1;
2110
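/*
 * If the new element still falls inside the resident 4-page window it
 * is built directly in names_buf and flushed together with the header
 * below; otherwise the element is written straight to the names file
 * at name_offset and only the header is rewritten from names_buf.
 */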
2111 if(name_offset < PAGE_SIZE * 4) {
2112 struct profile_element *name;
2113 /* write new entry */
2114 name = (struct profile_element *)
2115 (names_buf + (vm_offset_t)name_offset);
2116 name->addr = file_size;
2117 name->size = size;
2118 name->mod_date = mod;
2119 name->inode = fid;
2120 strncpy (name->name, file_name, 12);
2121 } else {
2122 unsigned int ele_size;
2123 struct profile_element name;
2124 /* write new entry */
2125 name.addr = file_size;
2126 name.size = size;
2127 name.mod_date = mod;
2128 name.inode = fid;
2129 strncpy (name.name, file_name, 12);
2130 /* write element out separately */
2131 ele_size = sizeof(struct profile_element);
2132 buf_ptr = (vm_offset_t)&name;
2133 resid_off = name_offset;
2134
2135 while(ele_size) {
2136 error = vn_rdwr(UIO_WRITE, names_vp,
2137 (caddr_t)buf_ptr,
2138 ele_size, resid_off,
2139 UIO_SYSSPACE32, IO_NODELOCKED,
2140 kauth_cred_get(), &resid, p);
2141 if(error) {
2142 printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
2143 bsd_close_page_cache_files(
2144 uid_files);
2145 thread_funnel_set(
2146 kernel_flock,
2147 funnel_state);
2148 return error;
2149 }
2150 buf_ptr += (vm_offset_t)
2151 ele_size-resid;
2152 resid_off += ele_size-resid;
2153 ele_size = resid;
2154 }
2155 }
2156
2157 if(name_offset < PAGE_SIZE * 4) {
2158 header_size = name_offset +
2159 sizeof(struct profile_element);
2160
2161 } else {
2162 header_size =
2163 sizeof(struct profile_names_header);
2164 }
2165 buf_ptr = (vm_offset_t)profile_header;
2166 resid_off = 0;
2167
2168 /* write names file header */
2169 while(header_size) {
2170 error = vn_rdwr(UIO_WRITE, names_vp,
2171 (caddr_t)buf_ptr,
2172 header_size, resid_off,
2173 UIO_SYSSPACE32, IO_NODELOCKED,
2174 kauth_cred_get(), &resid, p);
2175 if(error) {
2176 printf("bsd_write_page_cache_file: Can't write header %x\n", user);
2177 bsd_close_page_cache_files(
2178 uid_files);
2179 thread_funnel_set(
2180 kernel_flock, funnel_state);
2181 return error;
2182 }
2183 buf_ptr += (vm_offset_t)header_size-resid;
2184 resid_off += header_size-resid;
2185 header_size = resid;
2186 }
2187 /* write profile to data file */
2188 resid_off = file_size;
2189 while(size) {
2190 error = vn_rdwr(UIO_WRITE, data_vp,
2191 (caddr_t)buffer, size, resid_off,
2192 UIO_SYSSPACE32, IO_NODELOCKED,
2193 kauth_cred_get(), &resid, p);
2194 if(error) {
2195 printf("bsd_write_page_cache_file: Can't write profile data %x\n", user);
2196 bsd_close_page_cache_files(
2197 uid_files);
2198 thread_funnel_set(
2199 kernel_flock, funnel_state);
2200 return error;
2201 }
2202 buffer += size-resid;
2203 resid_off += size-resid;
2204 size = resid;
2205 }
2206 bsd_close_page_cache_files(uid_files);
2207 thread_funnel_set(kernel_flock, funnel_state);
2208 return 0;
2209 }
2210 /* Someone else wrote a twin profile before us */
2211 bsd_close_page_cache_files(uid_files);
2212 thread_funnel_set(kernel_flock, funnel_state);
2213 return 0;
2214 } else {
2215 bsd_close_page_cache_files(uid_files);
2216 thread_funnel_set(kernel_flock, funnel_state);
2217 return EINVAL;
2218 }
2219
2220 }
2221
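/*
 * prepare_profile_database:
 * Create the per-user pair of profile files under /var/vm/app_profile
 * ("<uid>_names" and "<uid>_data", uid rendered in hex), write an
 * empty names-file header, and hand ownership of both files to the
 * user.  If the data file already exists, the database is assumed to
 * be prepared and 0 is returned.
 */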
2222 int
2223 prepare_profile_database(int user)
2224 {
2225 const char *cache_path = "/var/vm/app_profile/";
2226 struct proc *p;
2227 int error;
2228 int resid;
2229 off_t resid_off;
2230 vm_size_t size;
2231
2232 struct vnode *names_vp;
2233 struct vnode *data_vp;
2234 vm_offset_t names_buf;
2235 vm_offset_t buf_ptr;
2236
2237 int profile_names_length;
2238 int profile_data_length;
2239 char *profile_data_string;
2240 char *profile_names_string;
2241 char *substring;
2242
2243 struct vnode_attr va;
2244 struct vfs_context context;
2245
2246 struct profile_names_header *profile_header;
2247 kern_return_t ret;
2248
2249 struct nameidata nd_names;
2250 struct nameidata nd_data;
2251
2252 p = current_proc();
2253
2254 context.vc_proc = p;
2255 context.vc_ucred = kauth_cred_get();
2256
2257 ret = kmem_alloc(kernel_map,
2258 (vm_offset_t *)&profile_data_string, PATH_MAX);
2259
2260 if(ret) {
2261 return ENOMEM;
2262 }
2263
2264 /* Split the allocation in half: the cache path prefix is short, */
2265 /* so half of PATH_MAX is plenty for each of the two absolute */
2266 /* file path names built below. */
2267 profile_names_string = profile_data_string + (PATH_MAX/2);
2268
2269
2270 strcpy(profile_data_string, cache_path);
2271 strcpy(profile_names_string, cache_path);
2272 profile_names_length = profile_data_length
2273 = strlen(profile_data_string);
2274 substring = profile_data_string + profile_data_length;
2275 sprintf(substring, "%x_data", user);
2276 substring = profile_names_string + profile_names_length;
2277 sprintf(substring, "%x_names", user);
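/*
 * For example, for a hypothetical uid of 0x1f5 the two paths built
 * above would be "/var/vm/app_profile/1f5_data" and
 * "/var/vm/app_profile/1f5_names".
 */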
2278
2279 /* We now have the absolute file names */
2280
2281 ret = kmem_alloc(kernel_map,
2282 (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
2283 if(ret) {
2284 kmem_free(kernel_map,
2285 (vm_offset_t)profile_data_string, PATH_MAX);
2286 return ENOMEM;
2287 }
2288
2289 NDINIT(&nd_names, LOOKUP, FOLLOW,
2290 UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
2291 NDINIT(&nd_data, LOOKUP, FOLLOW,
2292 UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);
2293
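/*
 * Create the data file first.  vn_open() with O_CREAT|O_EXCL fails if
 * the file already exists; in that (typical) case the per-user
 * database is presumed to be set up already, so the error is
 * discarded and 0 is returned.
 */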
2294 if ( (error = vn_open(&nd_data,
2295 O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
2296 kmem_free(kernel_map,
2297 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2298 kmem_free(kernel_map,
2299 (vm_offset_t)profile_data_string, PATH_MAX);
2300
2301 return 0;
2302 }
2303 data_vp = nd_data.ni_vp;
2304
2305 if ( (error = vn_open(&nd_names,
2306 O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
2307 printf("prepare_profile_database: Can't create CacheNames %s\n",
2308 profile_names_string);
2309 kmem_free(kernel_map,
2310 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2311 kmem_free(kernel_map,
2312 (vm_offset_t)profile_data_string, PATH_MAX);
2313
2314 vnode_rele(data_vp);
2315 vnode_put(data_vp);
2316
2317 return error;
2318 }
2319 names_vp = nd_names.ni_vp;
2320
2321 /* Write Header for new names file */
2322
2323 profile_header = (struct profile_names_header *)names_buf;
2324
2325 profile_header->number_of_profiles = 0;
2326 profile_header->user_id = user;
2327 profile_header->version = 1;
2328 profile_header->element_array =
2329 sizeof(struct profile_names_header);
2330 profile_header->spare1 = 0;
2331 profile_header->spare2 = 0;
2332 profile_header->spare3 = 0;
2333
2334 size = sizeof(struct profile_names_header);
2335 buf_ptr = (vm_offset_t)profile_header;
2336 resid_off = 0;
2337
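/*
 * Write the header out, looping on partial writes: buf_ptr and
 * resid_off advance by the bytes actually written (size - resid)
 * until the entire header has been transferred.
 */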
2338 while(size) {
2339 error = vn_rdwr(UIO_WRITE, names_vp,
2340 (caddr_t)buf_ptr, size, resid_off,
2341 UIO_SYSSPACE32, IO_NODELOCKED,
2342 kauth_cred_get(), &resid, p);
2343 if(error) {
2344 printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
2345 kmem_free(kernel_map,
2346 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2347 kmem_free(kernel_map,
2348 (vm_offset_t)profile_data_string,
2349 PATH_MAX);
2350
2351 vnode_rele(names_vp);
2352 vnode_put(names_vp);
2353 vnode_rele(data_vp);
2354 vnode_put(data_vp);
2355
2356 return error;
2357 }
2358 buf_ptr += size-resid;
2359 resid_off += size-resid;
2360 size = resid;
2361 }
2362 VATTR_INIT(&va);
2363 VATTR_SET(&va, va_uid, user);
2364
2365 error = vnode_setattr(names_vp, &va, &context);
2366 if(error) {
2367 printf("prepare_profile_database: "
2368 "Can't set user %s\n", profile_names_string);
2369 }
2370 vnode_rele(names_vp);
2371 vnode_put(names_vp);
2372
2373 VATTR_INIT(&va);
2374 VATTR_SET(&va, va_uid, user);
2375 error = vnode_setattr(data_vp, &va, &context);
2376 if(error) {
2377 printf("prepare_profile_database: "
2378 "Can't set user %s\n", profile_data_string);
2379 }
2380 vnode_rele(data_vp);
2381 vnode_put(data_vp);
2382
2383 kmem_free(kernel_map,
2384 (vm_offset_t)profile_data_string, PATH_MAX);
2385 kmem_free(kernel_map,
2386 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2387 return 0;
2388
2389 }