/* apple/xnu: bsd/vm/vm_unix.c @ commit 5fedbadf964e0742762b41d33f1aaaf2a4ec1221 */
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * Mach Operating System
24 * Copyright (c) 1987 Carnegie-Mellon University
25 * All rights reserved. The CMU software License Agreement specifies
26 * the terms and conditions for use and redistribution.
27 */
28
29 /*
30 */
31
32
33 #include <meta_features.h>
34
35 #include <kern/task.h>
36 #include <kern/thread.h>
37 #include <kern/debug.h>
38 #include <kern/lock.h>
39 #include <mach/mach_traps.h>
40 #include <mach/time_value.h>
41 #include <mach/vm_map.h>
42 #include <mach/vm_param.h>
43 #include <mach/vm_prot.h>
44 #include <mach/port.h>
45
46 #include <sys/file_internal.h>
47 #include <sys/param.h>
48 #include <sys/systm.h>
49 #include <sys/dir.h>
50 #include <sys/namei.h>
51 #include <sys/proc_internal.h>
52 #include <sys/kauth.h>
53 #include <sys/vm.h>
54 #include <sys/file.h>
55 #include <sys/vnode_internal.h>
56 #include <sys/mount.h>
57 #include <sys/trace.h>
58 #include <sys/kernel.h>
59 #include <sys/ubc_internal.h>
60 #include <sys/user.h>
61 #include <sys/syslog.h>
62 #include <sys/stat.h>
63 #include <sys/sysproto.h>
64 #include <sys/mman.h>
65 #include <sys/sysctl.h>
66
67 #include <bsm/audit_kernel.h>
68 #include <bsm/audit_kevents.h>
69
70 #include <kern/kalloc.h>
71 #include <vm/vm_map.h>
72 #include <vm/vm_kern.h>
73
74 #include <machine/spl.h>
75
76 #include <mach/shared_memory_server.h>
77 #include <vm/vm_shared_memory_server.h>
78
79 #include <vm/vm_protos.h>
80
81 void
82 log_nx_failure(addr64_t vaddr, vm_prot_t prot)
83 {
84 printf("NX failure: %s - vaddr=%qx, prot=%x\n", current_proc()->p_comm, vaddr, prot);
85 }
86
87
88 int
89 useracc(
90 user_addr_t addr,
91 user_size_t len,
92 int prot)
93 {
94 return (vm_map_check_protection(
95 current_map(),
96 vm_map_trunc_page(addr), vm_map_round_page(addr+len),
97 prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
98 }
99
100 int
101 vslock(
102 user_addr_t addr,
103 user_size_t len)
104 {
105 kern_return_t kret;
106 kret = vm_map_wire(current_map(), vm_map_trunc_page(addr),
107 vm_map_round_page(addr+len),
108 VM_PROT_READ | VM_PROT_WRITE ,FALSE);
109
110 switch (kret) {
111 case KERN_SUCCESS:
112 return (0);
113 case KERN_INVALID_ADDRESS:
114 case KERN_NO_SPACE:
115 return (ENOMEM);
116 case KERN_PROTECTION_FAILURE:
117 return (EACCES);
118 default:
119 return (EINVAL);
120 }
121 }
122
123 int
124 vsunlock(
125 user_addr_t addr,
126 user_size_t len,
127 __unused int dirtied)
128 {
129 #if FIXME /* [ */
130 pmap_t pmap;
131 vm_page_t pg;
132 vm_map_offset_t vaddr;
133 ppnum_t paddr;
134 #endif /* FIXME ] */
135 kern_return_t kret;
136
137 #if FIXME /* [ */
138 if (dirtied) {
139 pmap = get_task_pmap(current_task());
140 for (vaddr = vm_map_trunc_page(addr);
141 vaddr < vm_map_round_page(addr+len);
142 vaddr += PAGE_SIZE) {
143 paddr = pmap_extract(pmap, vaddr);
144 pg = PHYS_TO_VM_PAGE(paddr);
145 vm_page_set_modified(pg);
146 }
147 }
148 #endif /* FIXME ] */
149 #ifdef lint
150 dirtied++;
151 #endif /* lint */
152 kret = vm_map_unwire(current_map(), vm_map_trunc_page(addr),
153 vm_map_round_page(addr+len), FALSE);
154 switch (kret) {
155 case KERN_SUCCESS:
156 return (0);
157 case KERN_INVALID_ADDRESS:
158 case KERN_NO_SPACE:
159 return (ENOMEM);
160 case KERN_PROTECTION_FAILURE:
161 return (EACCES);
162 default:
163 return (EINVAL);
164 }
165 }
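/*
 * Illustrative sketch (not part of this file): the expected pattern for a
 * kernel service that touches a user buffer directly is to wire it with
 * vslock() before the access and unwire it with vsunlock() afterwards.
 * The helper name and the kbuf parameter below are hypothetical.
 *
 *	int
 *	example_touch_user_buffer(user_addr_t uaddr, user_size_t len, caddr_t kbuf)
 *	{
 *		int error;
 *
 *		error = vslock(uaddr, len);          (wire the user pages)
 *		if (error)
 *			return (error);              (ENOMEM, EACCES or EINVAL)
 *		error = copyin(uaddr, kbuf, len);    (access while wired)
 *		(void) vsunlock(uaddr, len, 1);      (1 => pages were dirtied)
 *		return (error);
 *	}
 */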
166
167 int
168 subyte(
169 user_addr_t addr,
170 int byte)
171 {
172 char character;
173
174 character = (char)byte;
175 return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
176 }
177
178 int
179 suibyte(
180 user_addr_t addr,
181 int byte)
182 {
183 char character;
184
185 character = (char)byte;
186 return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
187 }
188
189 int fubyte(user_addr_t addr)
190 {
191 unsigned char byte;
192
193 if (copyin(addr, (void *) &byte, sizeof(char)))
194 return(-1);
195 return(byte);
196 }
197
198 int fuibyte(user_addr_t addr)
199 {
200 unsigned char byte;
201
202 if (copyin(addr, (void *) &(byte), sizeof(char)))
203 return(-1);
204 return(byte);
205 }
206
207 int
208 suword(
209 user_addr_t addr,
210 long word)
211 {
212 return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
213 }
214
215 long fuword(user_addr_t addr)
216 {
217 long word;
218
219 if (copyin(addr, (void *) &word, sizeof(int)))
220 return(-1);
221 return(word);
222 }
223
224 /* suiword and fuiword are the same as suword and fuword, respectively */
225
226 int
227 suiword(
228 user_addr_t addr,
229 long word)
230 {
231 return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
232 }
233
234 long fuiword(user_addr_t addr)
235 {
236 long word;
237
238 if (copyin(addr, (void *) &word, sizeof(int)))
239 return(-1);
240 return(word);
241 }
242
243 /*
244 * With a 32-bit kernel and mixed 32/64-bit user tasks, this interface allows the
245 * fetching and setting of process-sized size_t and pointer values.
246 */
247 int
248 sulong(user_addr_t addr, int64_t word)
249 {
250
251 if (IS_64BIT_PROCESS(current_proc())) {
252 return(copyout((void *)&word, addr, sizeof(word)) == 0 ? 0 : -1);
253 } else {
254 return(suiword(addr, (long)word));
255 }
256 }
257
258 int64_t
259 fulong(user_addr_t addr)
260 {
261 int64_t longword;
262
263 if (IS_64BIT_PROCESS(current_proc())) {
264 if (copyin(addr, (void *)&longword, sizeof(longword)) != 0)
265 return(-1);
266 return(longword);
267 } else {
268 return((int64_t)fuiword(addr));
269 }
270 }
271
272 int
273 suulong(user_addr_t addr, uint64_t uword)
274 {
275
276 if (IS_64BIT_PROCESS(current_proc())) {
277 return(copyout((void *)&uword, addr, sizeof(uword)) == 0 ? 0 : -1);
278 } else {
279 return(suiword(addr, (u_long)uword));
280 }
281 }
282
283 uint64_t
284 fuulong(user_addr_t addr)
285 {
286 uint64_t ulongword;
287
288 if (IS_64BIT_PROCESS(current_proc())) {
289 if (copyin(addr, (void *)&ulongword, sizeof(ulongword)) != 0)
290 return(-1ULL);
291 return(ulongword);
292 } else {
293 return((uint64_t)fuiword(addr));
294 }
295 }
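/*
 * Illustrative sketch (not part of this file): a caller that needs to read
 * and update a user pointer-sized value without caring whether the current
 * process is 32-bit or 64-bit would use fulong()/sulong(), which select the
 * access width via IS_64BIT_PROCESS(). The variable names are hypothetical.
 *
 *	int64_t old_value;
 *
 *	old_value = fulong(uaddr);            (reads 4 or 8 bytes, as appropriate)
 *	...note that -1 may mean "copyin failed" or a legitimately stored -1...
 *	if (sulong(uaddr, old_value + 1) != 0)
 *		return (EFAULT);              (store failed)
 */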
296
297 int
298 swapon(__unused struct proc *procp, __unused struct swapon_args *uap, __unused int *retval)
299 {
300 return(ENOTSUP);
301 }
302
303
304 kern_return_t
305 pid_for_task(
306 struct pid_for_task_args *args)
307 {
308 mach_port_name_t t = args->t;
309 user_addr_t pid_addr = args->pid;
310 struct proc * p;
311 task_t t1;
312 int pid = -1;
313 kern_return_t err = KERN_SUCCESS;
314 boolean_t funnel_state;
315
316 AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
317 AUDIT_ARG(mach_port1, t);
318
319 funnel_state = thread_funnel_set(kernel_flock, TRUE);
320 t1 = port_name_to_task(t);
321
322 if (t1 == TASK_NULL) {
323 err = KERN_FAILURE;
324 goto pftout;
325 } else {
326 p = get_bsdtask_info(t1);
327 if (p) {
328 pid = proc_pid(p);
329 err = KERN_SUCCESS;
330 } else {
331 err = KERN_FAILURE;
332 }
333 }
334 task_deallocate(t1);
335 pftout:
336 AUDIT_ARG(pid, pid);
337 (void) copyout((char *) &pid, pid_addr, sizeof(int));
338 thread_funnel_set(kernel_flock, funnel_state);
339 AUDIT_MACH_SYSCALL_EXIT(err);
340 return(err);
341 }
342
343 /*
344 * Routine: task_for_pid
345 * Purpose:
346 * Get the task port for another "process", named by its
347 * process ID on the same host as "target_task".
348 *
349 * Only permitted to privileged processes, or processes
350 * with the same user ID.
351 *
352 * XXX This should be a BSD system call, not a Mach trap!!!
353 */
354 /*
355 *
356  * tfp_policy = KERN_TFP_POLICY_DENY; Deny Mode: only self (or the superuser) allowed
357  * tfp_policy = KERN_TFP_POLICY_PERMISSIVE; Permissive Mode: self, superuser, or related (same uid, non-sugid) processes allowed
358  * tfp_policy = KERN_TFP_POLICY_RESTRICTED; Restricted Mode: self or superuser allowed; members of the tfp groups are allowed access to related tasks
359 *
360 */
361 static int tfp_policy = KERN_TFP_POLICY_RESTRICTED;
362 /* the group is inited to the kmem group and is modifiable by sysctl */
363 static int tfp_group_inited = 0; /* policy groups are loaded ... */
364 static gid_t tfp_group_ronly = 0; /* procview group */
365 static gid_t tfp_group_rw = 0; /* procmod group */
366
367 kern_return_t
368 task_for_pid(
369 struct task_for_pid_args *args)
370 {
371 mach_port_name_t target_tport = args->target_tport;
372 int pid = args->pid;
373 user_addr_t task_addr = args->t;
374 struct uthread *uthread;
375 struct proc *p;
376 struct proc *p1;
377 task_t t1;
378 mach_port_name_t tret;
379 void * sright;
380 int error = 0;
381 int is_member = 0;
382 boolean_t funnel_state;
383 boolean_t ispermitted = FALSE;
384 char procname[MAXCOMLEN+1];
385
386 AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
387 AUDIT_ARG(pid, pid);
388 AUDIT_ARG(mach_port1, target_tport);
389
390 t1 = port_name_to_task(target_tport);
391 if (t1 == TASK_NULL) {
392 (void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
393 AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
394 return(KERN_FAILURE);
395 }
396
397 funnel_state = thread_funnel_set(kernel_flock, TRUE);
398
399 p1 = current_proc();
400
401 /*
402 * Delayed binding of thread credential to process credential, if we
403 * are not running with an explicitly set thread credential.
404 */
405 uthread = get_bsdthread_info(current_thread());
406 if (uthread->uu_ucred != p1->p_ucred &&
407 (uthread->uu_flag & UT_SETUID) == 0) {
408 kauth_cred_t old = uthread->uu_ucred;
409 proc_lock(p1);
410 uthread->uu_ucred = p1->p_ucred;
411 kauth_cred_ref(uthread->uu_ucred);
412 proc_unlock(p1);
413 if (old != NOCRED)
414 kauth_cred_rele(old);
415 }
416
417 p = pfind(pid);
418 AUDIT_ARG(process, p);
419
420 switch (tfp_policy) {
421
422 case KERN_TFP_POLICY_PERMISSIVE:
423 /* self or suser or related ones */
424 if ((p != (struct proc *) 0)
425 && (p1 != (struct proc *) 0)
426 && (
427 (p1 == p)
428 || !(suser(kauth_cred_get(), 0))
429 || ((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get())) &&
430 ((p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid))
431 && ((p->p_flag & P_SUGID) == 0))
432 )
433 && (p->p_stat != SZOMB)
434 )
435 ispermitted = TRUE;
436 break;
437
438 case KERN_TFP_POLICY_RESTRICTED:
439 /* self or suser or setgid and related ones only */
440 if ((p != (struct proc *) 0)
441 && (p1 != (struct proc *) 0)
442 && (
443 (p1 == p)
444 || !(suser(kauth_cred_get(), 0))
445 || (((tfp_group_inited != 0) &&
446 (
447 ((kauth_cred_ismember_gid(kauth_cred_get(),
448 tfp_group_ronly, &is_member) == 0) && is_member)
449 ||((kauth_cred_ismember_gid(kauth_cred_get(),
450 tfp_group_rw, &is_member) == 0) && is_member)
451 )
452 )
453 && ((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get())) &&
454 ((p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid))
455 && ((p->p_flag & P_SUGID) == 0))
456 )
457 )
458 && (p->p_stat != SZOMB)
459 )
460 ispermitted = TRUE;
461
462 break;
463
464 case KERN_TFP_POLICY_DENY:
465 /* self or suser only */
466 default:
467 /* do not return task port of other task at all */
468 if ((p1 != (struct proc *) 0) && (p != (struct proc *) 0) && (p->p_stat != SZOMB)
469 && ((p1 == p) || !(suser(kauth_cred_get(), 0))))
470 ispermitted = TRUE;
471 else
472 ispermitted = FALSE;
473 break;
474 };
475
476
477 if (ispermitted == TRUE) {
478 if (p->task != TASK_NULL) {
479 task_reference(p->task);
480 sright = (void *)convert_task_to_port(p->task);
481 tret = ipc_port_copyout_send(
482 sright,
483 get_task_ipcspace(current_task()));
484 } else
485 tret = MACH_PORT_NULL;
486 AUDIT_ARG(mach_port2, tret);
487 (void ) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
488 task_deallocate(t1);
489 error = KERN_SUCCESS;
490 goto tfpout;
491 } else {
492 /*
493  * There is no guarantee that p_comm is null-terminated, nor that the
494  * kernel's string functions handle that case completely. So, to ensure
495  * stale info is not leaked out, bzero the buffer first.
496 */
497 bzero(&procname[0], MAXCOMLEN+1);
498 strncpy(&procname[0], &p1->p_comm[0], MAXCOMLEN);
499 if (tfp_policy != KERN_TFP_POLICY_PERMISSIVE)
500 log(LOG_NOTICE, "(%d: %s)tfp: failed on %d:\n",
501 ((p1 != PROC_NULL)?(p1->p_pid):0), &procname[0],
502 ((p != PROC_NULL)?(p->p_pid):0));
503 }
504
505 task_deallocate(t1);
506 tret = MACH_PORT_NULL;
507 (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
508 error = KERN_FAILURE;
509 tfpout:
510 thread_funnel_set(kernel_flock, funnel_state);
511 AUDIT_MACH_SYSCALL_EXIT(error);
512 return(error);
513 }
514
515 /*
516 * Routine: task_name_for_pid
517 * Purpose:
518 * Get the task name port for another "process", named by its
519 * process ID on the same host as "target_task".
520 *
521 * Only permitted to privileged processes, or processes
522 * with the same user ID.
523 *
524 * XXX This should be a BSD system call, not a Mach trap!!!
525 */
526
527 kern_return_t
528 task_name_for_pid(
529 struct task_name_for_pid_args *args)
530 {
531 mach_port_name_t target_tport = args->target_tport;
532 int pid = args->pid;
533 user_addr_t task_addr = args->t;
534 struct uthread *uthread;
535 struct proc *p;
536 struct proc *p1;
537 task_t t1;
538 mach_port_name_t tret;
539 void * sright;
540 int error = 0;
541 boolean_t funnel_state;
542
543 AUDIT_MACH_SYSCALL_ENTER(AUE_TASKNAMEFORPID);
544 AUDIT_ARG(pid, pid);
545 AUDIT_ARG(mach_port1, target_tport);
546
547 t1 = port_name_to_task(target_tport);
548 if (t1 == TASK_NULL) {
549 (void ) copyout((char *)&t1, task_addr, sizeof(mach_port_name_t));
550 AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
551 return(KERN_FAILURE);
552 }
553
554 funnel_state = thread_funnel_set(kernel_flock, TRUE);
555
556 p1 = current_proc();
557
558 /*
559 * Delayed binding of thread credential to process credential, if we
560 * are not running with an explicitly set thread credential.
561 */
562 uthread = get_bsdthread_info(current_thread());
563 if (uthread->uu_ucred != p1->p_ucred &&
564 (uthread->uu_flag & UT_SETUID) == 0) {
565 kauth_cred_t old = uthread->uu_ucred;
566 proc_lock(p1);
567 uthread->uu_ucred = p1->p_ucred;
568 kauth_cred_ref(uthread->uu_ucred);
569 proc_unlock(p1);
570 if (old != NOCRED)
571 kauth_cred_rele(old);
572 }
573
574 p = pfind(pid);
575 AUDIT_ARG(process, p);
576
577 if ((p != (struct proc *) 0)
578 && (p->p_stat != SZOMB)
579 && (p1 != (struct proc *) 0)
580 && ((p1 == p)
581 || !(suser(kauth_cred_get(), 0))
582 || ((kauth_cred_getuid(p->p_ucred) == kauth_cred_getuid(kauth_cred_get())) &&
583 ((p->p_ucred->cr_ruid == kauth_cred_get()->cr_ruid)))))
584 {
585 if (p->task != TASK_NULL)
586 {
587 task_reference(p->task);
588 sright = (void *)convert_task_name_to_port(p->task);
589 tret = ipc_port_copyout_send(
590 sright,
591 get_task_ipcspace(current_task()));
592 } else
593 tret = MACH_PORT_NULL;
594 AUDIT_ARG(mach_port2, tret);
595 (void ) copyout((char *)&tret, task_addr, sizeof(mach_port_name_t));
596 task_deallocate(t1);
597 error = KERN_SUCCESS;
598 goto tnfpout;
599 }
600
601 task_deallocate(t1);
602 tret = MACH_PORT_NULL;
603 (void) copyout((char *) &tret, task_addr, sizeof(mach_port_name_t));
604 error = KERN_FAILURE;
605 tnfpout:
606 thread_funnel_set(kernel_flock, funnel_state);
607 AUDIT_MACH_SYSCALL_EXIT(error);
608 return(error);
609 }
610
611 static int
612 sysctl_settfp_policy(__unused struct sysctl_oid *oidp, void *arg1,
613 __unused int arg2, struct sysctl_req *req)
614 {
615 int error = 0;
616 int new_value;
617
618 error = SYSCTL_OUT(req, arg1, sizeof(int));
619 if (error || req->newptr == USER_ADDR_NULL)
620 return(error);
621
622 if (!is_suser())
623 return(EPERM);
624
625 if ((error = SYSCTL_IN(req, &new_value, sizeof(int)))) {
626 goto out;
627 }
628 if ((new_value == KERN_TFP_POLICY_DENY)
629 || (new_value == KERN_TFP_POLICY_PERMISSIVE)
630 || (new_value == KERN_TFP_POLICY_RESTRICTED))
631 tfp_policy = new_value;
632 else
633 error = EINVAL;
634 out:
635 return(error);
636
637 }
638
639 static int
640 sysctl_settfp_groups(__unused struct sysctl_oid *oidp, void *arg1,
641 __unused int arg2, struct sysctl_req *req)
642 {
643 int error = 0;
644 int new_value;
645
646 error = SYSCTL_OUT(req, arg1, sizeof(int));
647 if (error || req->newptr == USER_ADDR_NULL)
648 return(error);
649
650 if (!is_suser())
651 return(EPERM);
652
653 /*
654  * Once set, it cannot be reset until the next boot. launchd sets this
655  * during its pid 1 init and nobody can set it after that.
656 */
657 if (tfp_group_inited != 0)
658 return(EPERM);
659
660 if ((error = SYSCTL_IN(req, &new_value, sizeof(int)))) {
661 goto out;
662 }
663
664 if (new_value >= 100)
665 error = EINVAL;
666 else {
667 if (arg1 == &tfp_group_ronly)
668 tfp_group_ronly = new_value;
669 else if (arg1 == &tfp_group_rw)
670 tfp_group_rw = new_value;
671 else
672 error = EINVAL;
673 if ((tfp_group_ronly != 0 ) && (tfp_group_rw != 0 ))
674 tfp_group_inited = 1;
675 }
676
677 out:
678 return(error);
679 }
680
681 SYSCTL_NODE(_kern, KERN_TFP, tfp, CTLFLAG_RW, 0, "tfp");
682 SYSCTL_PROC(_kern_tfp, KERN_TFP_POLICY, policy, CTLTYPE_INT | CTLFLAG_RW,
683 &tfp_policy, sizeof(uint32_t), &sysctl_settfp_policy ,"I","policy");
684 SYSCTL_PROC(_kern_tfp, KERN_TFP_READ_GROUP, read_group, CTLTYPE_INT | CTLFLAG_RW,
685 &tfp_group_ronly, sizeof(uint32_t), &sysctl_settfp_groups ,"I","read_group");
686 SYSCTL_PROC(_kern_tfp, KERN_TFP_RW_GROUP, rw_group, CTLTYPE_INT | CTLFLAG_RW,
687 &tfp_group_rw, sizeof(uint32_t), &sysctl_settfp_groups ,"I","rw_group");
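/*
 * Illustrative user-space sketch (not part of this file, and assuming the
 * standard sysctlbyname(3) interface): launchd or an admin tool selects the
 * task_for_pid policy and, once per boot, the procview/procmod group ids
 * through the "kern.tfp.*" sysctls declared above.
 *
 *	int policy = KERN_TFP_POLICY_RESTRICTED;
 *	gid_t procmod_gid = ...;              (site-specific group id)
 *
 *	sysctlbyname("kern.tfp.policy", NULL, NULL, &policy, sizeof(policy));
 *	sysctlbyname("kern.tfp.rw_group", NULL, NULL,
 *	             &procmod_gid, sizeof(procmod_gid));
 *
 * Per sysctl_settfp_groups() above, the group sysctls accept only values
 * below 100, can be set only once per boot, and only by the superuser.
 */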
688
689
690 SYSCTL_INT(_vm, OID_AUTO, shared_region_trace_level, CTLFLAG_RW, &shared_region_trace_level, 0, "");
691
692 /*
693 * shared_region_make_private_np:
694 *
695 * This system call is for "dyld" only.
696 *
697 * It creates a private copy of the current process's "shared region" for
698 * split libraries. "dyld" uses this when the shared region is full or
699 * it needs to load a split library that conflicts with an already loaded one
700 * that this process doesn't need. "dyld" specifies a set of address ranges
701 * that it wants to keep in the now-private "shared region". These cover
702 * the set of split libraries that the process needs so far. The kernel needs
703 * to deallocate the rest of the shared region, so that it's available for
704 * more libraries for this process.
705 */
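/*
 * Illustrative caller-side sketch (not part of this file): dyld passes the
 * set of address ranges it wants to keep; everything else in the now-private
 * region is deallocated. The struct field names (srr_address, srr_size) and
 * the libSystem stub name below are assumptions for illustration only.
 *
 *	struct shared_region_range_np keep[1];
 *
 *	keep[0].srr_address = <base of the split library mappings to keep>;
 *	keep[0].srr_size    = <size of those mappings>;
 *	if (__shared_region_make_private_np(1, keep) != 0)
 *		...fall back to mapping the library outside the region...
 */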
706 int
707 shared_region_make_private_np(
708 struct proc *p,
709 struct shared_region_make_private_np_args *uap,
710 __unused int *retvalp)
711 {
712 int error;
713 kern_return_t kr;
714 boolean_t using_shared_regions;
715 user_addr_t user_ranges;
716 unsigned int range_count;
717 struct shared_region_range_np *ranges;
718 shared_region_mapping_t shared_region;
719 struct shared_region_task_mappings task_mapping_info;
720 shared_region_mapping_t next;
721
722 ranges = NULL;
723
724 range_count = uap->rangeCount;
725 user_ranges = uap->ranges;
726
727 SHARED_REGION_TRACE(
728 SHARED_REGION_TRACE_INFO,
729 ("shared_region: %p [%d(%s)] "
730 "make_private(rangecount=%d)\n",
731 current_thread(), p->p_pid, p->p_comm, range_count));
732
733 /* allocate kernel space for the "ranges" */
734 if (range_count != 0) {
735 kr = kmem_alloc(kernel_map,
736 (vm_offset_t *) &ranges,
737 (vm_size_t) (range_count * sizeof (ranges[0])));
738 if (kr != KERN_SUCCESS) {
739 error = ENOMEM;
740 goto done;
741 }
742
743 /* copy "ranges" from user-space */
744 error = copyin(user_ranges,
745 ranges,
746 (range_count * sizeof (ranges[0])));
747 if (error) {
748 goto done;
749 }
750 }
751
752 if (p->p_flag & P_NOSHLIB) {
753 /* no split library has been mapped for this process so far */
754 using_shared_regions = FALSE;
755 } else {
756 /* this process has already mapped some split libraries */
757 using_shared_regions = TRUE;
758 }
759
760 /*
761 * Get a private copy of the current shared region.
762 * Do not chain it to the system-wide shared region, as we'll want
763 * to map other split libraries in place of the old ones. We want
764 * to completely detach from the system-wide shared region and go our
765 * own way after this point, not sharing anything with other processes.
766 */
767 error = clone_system_shared_regions(using_shared_regions,
768 FALSE, /* chain_regions */
769 ENV_DEFAULT_ROOT);
770 if (error) {
771 goto done;
772 }
773
774 /* get info on the newly allocated shared region */
775 vm_get_shared_region(current_task(), &shared_region);
776 task_mapping_info.self = (vm_offset_t) shared_region;
777 shared_region_mapping_info(shared_region,
778 &(task_mapping_info.text_region),
779 &(task_mapping_info.text_size),
780 &(task_mapping_info.data_region),
781 &(task_mapping_info.data_size),
782 &(task_mapping_info.region_mappings),
783 &(task_mapping_info.client_base),
784 &(task_mapping_info.alternate_base),
785 &(task_mapping_info.alternate_next),
786 &(task_mapping_info.fs_base),
787 &(task_mapping_info.system),
788 &(task_mapping_info.flags),
789 &next);
790
791 /*
792 * We now have our private copy of the shared region, as it was before
793 * the call to clone_system_shared_regions(). We now need to clean it
794 * up and keep only the memory areas described by the "ranges" array.
795 */
796 kr = shared_region_cleanup(range_count, ranges, &task_mapping_info);
797 switch (kr) {
798 case KERN_SUCCESS:
799 error = 0;
800 break;
801 default:
802 error = EINVAL;
803 goto done;
804 }
805
806 done:
807 if (ranges != NULL) {
808 kmem_free(kernel_map,
809 (vm_offset_t) ranges,
810 range_count * sizeof (ranges[0]));
811 ranges = NULL;
812 }
813
814 SHARED_REGION_TRACE(
815 SHARED_REGION_TRACE_INFO,
816 ("shared_region: %p [%d(%s)] "
817 "make_private(rangecount=%d) -> %d "
818 "shared_region=%p[%x,%x,%x]\n",
819 current_thread(), p->p_pid, p->p_comm,
820 range_count, error, shared_region,
821 task_mapping_info.fs_base,
822 task_mapping_info.system,
823 task_mapping_info.flags));
824
825 return error;
826 }
827
828
829 /*
830 * shared_region_map_file_np:
831 *
832 * This system call is for "dyld" only.
833 *
834 * "dyld" wants to map parts of a split library in the shared region.
835 * We get a file descriptor on the split library to be mapped and a set
836  * of mapping instructions, describing which parts of the file to map in
837 * which areas of the shared segment and with what protection.
838 * The "shared region" is split in 2 areas:
839 * 0x90000000 - 0xa0000000 : read-only area (for TEXT and LINKEDIT sections),
840 * 0xa0000000 - 0xb0000000 : writable area (for DATA sections).
841 *
842 */
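/*
 * Illustrative caller-side sketch (not part of this file): dyld describes
 * each segment of the split library with a shared_file_mapping_np entry and
 * lets the kernel slide the whole set if a "slide" pointer is supplied.
 * Only sfm_address, sfm_size and sfm_max_prot appear in this file; the other
 * field names and the libSystem stub name are assumptions for illustration.
 *
 *	struct shared_file_mapping_np text_map;
 *	uint64_t slide = 0;
 *
 *	text_map.sfm_address     = 0x90000000;          (read-only TEXT area)
 *	text_map.sfm_size        = <TEXT segment size>;
 *	text_map.sfm_file_offset = <TEXT file offset>;   (assumed field)
 *	text_map.sfm_max_prot    = VM_PROT_READ | VM_PROT_EXECUTE;
 *	text_map.sfm_init_prot   = VM_PROT_READ | VM_PROT_EXECUTE;  (assumed field)
 *
 *	__shared_region_map_file_np(fd, 1, &text_map, &slide);
 */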
843 int
844 shared_region_map_file_np(
845 struct proc *p,
846 struct shared_region_map_file_np_args *uap,
847 __unused int *retvalp)
848 {
849 int error;
850 kern_return_t kr;
851 int fd;
852 unsigned int mapping_count;
853 user_addr_t user_mappings; /* 64-bit */
854 user_addr_t user_slide_p; /* 64-bit */
855 struct shared_file_mapping_np *mappings;
856 struct fileproc *fp;
857 mach_vm_offset_t slide;
858 struct vnode *vp;
859 struct vfs_context context;
860 memory_object_control_t file_control;
861 memory_object_size_t file_size;
862 shared_region_mapping_t shared_region;
863 struct shared_region_task_mappings task_mapping_info;
864 shared_region_mapping_t next;
865 shared_region_mapping_t default_shared_region;
866 boolean_t using_default_region;
867 unsigned int j;
868 vm_prot_t max_prot;
869 mach_vm_offset_t base_offset, end_offset;
870 mach_vm_offset_t original_base_offset;
871 boolean_t mappings_in_segment;
872 #define SFM_MAX_STACK 6
873 struct shared_file_mapping_np stack_mappings[SFM_MAX_STACK];
874
875 mappings = NULL;
876 mapping_count = 0;
877 fp = NULL;
878 vp = NULL;
879
880 /* get file descriptor for split library from arguments */
881 fd = uap->fd;
882
883 /* get file structure from file descriptor */
884 error = fp_lookup(p, fd, &fp, 0);
885 if (error) {
886 SHARED_REGION_TRACE(
887 SHARED_REGION_TRACE_ERROR,
888 ("shared_region: %p [%d(%s)] map_file: "
889 "fd=%d lookup failed (error=%d)\n",
890 current_thread(), p->p_pid, p->p_comm, fd, error));
891 goto done;
892 }
893
894 /* make sure we're attempting to map a vnode */
895 if (fp->f_fglob->fg_type != DTYPE_VNODE) {
896 SHARED_REGION_TRACE(
897 SHARED_REGION_TRACE_ERROR,
898 ("shared_region: %p [%d(%s)] map_file: "
899 "fd=%d not a vnode (type=%d)\n",
900 current_thread(), p->p_pid, p->p_comm,
901 fd, fp->f_fglob->fg_type));
902 error = EINVAL;
903 goto done;
904 }
905
906 /* we need at least read permission on the file */
907 if (! (fp->f_fglob->fg_flag & FREAD)) {
908 SHARED_REGION_TRACE(
909 SHARED_REGION_TRACE_ERROR,
910 ("shared_region: %p [%d(%s)] map_file: "
911 "fd=%d not readable\n",
912 current_thread(), p->p_pid, p->p_comm, fd));
913 error = EPERM;
914 goto done;
915 }
916
917 /* get vnode from file structure */
918 error = vnode_getwithref((vnode_t)fp->f_fglob->fg_data);
919 if (error) {
920 SHARED_REGION_TRACE(
921 SHARED_REGION_TRACE_ERROR,
922 ("shared_region: %p [%d(%s)] map_file: "
923 "fd=%d getwithref failed (error=%d)\n",
924 current_thread(), p->p_pid, p->p_comm, fd, error));
925 goto done;
926 }
927 vp = (struct vnode *) fp->f_fglob->fg_data;
928
929 /* make sure the vnode is a regular file */
930 if (vp->v_type != VREG) {
931 SHARED_REGION_TRACE(
932 SHARED_REGION_TRACE_ERROR,
933 ("shared_region: %p [%d(%s)] map_file(%p:'%s'): "
934 "not a file (type=%d)\n",
935 current_thread(), p->p_pid, p->p_comm,
936 vp, vp->v_name, vp->v_type));
937 error = EINVAL;
938 goto done;
939 }
940
941 /* get vnode size */
942 {
943 off_t fs;
944
945 context.vc_proc = p;
946 context.vc_ucred = kauth_cred_get();
947 if ((error = vnode_size(vp, &fs, &context)) != 0) {
948 SHARED_REGION_TRACE(
949 SHARED_REGION_TRACE_ERROR,
950 ("shared_region: %p [%d(%s)] "
951 "map_file(%p:'%s'): "
952 "vnode_size(%p) failed (error=%d)\n",
953 current_thread(), p->p_pid, p->p_comm,
954 vp, vp->v_name, vp, error));
955 goto done;
956 }
957 file_size = fs;
958 }
959
960 /*
961 * Get the list of mappings the caller wants us to establish.
962 */
963 mapping_count = uap->mappingCount; /* the number of mappings */
964 if (mapping_count == 0) {
965 SHARED_REGION_TRACE(
966 SHARED_REGION_TRACE_INFO,
967 ("shared_region: %p [%d(%s)] map_file(%p:'%s'): "
968 "no mappings\n",
969 current_thread(), p->p_pid, p->p_comm,
970 vp, vp->v_name));
971 error = 0; /* no mappings: we're done ! */
972 goto done;
973 } else if (mapping_count <= SFM_MAX_STACK) {
974 mappings = &stack_mappings[0];
975 } else {
976 kr = kmem_alloc(kernel_map,
977 (vm_offset_t *) &mappings,
978 (vm_size_t) (mapping_count *
979 sizeof (mappings[0])));
980 if (kr != KERN_SUCCESS) {
981 SHARED_REGION_TRACE(
982 SHARED_REGION_TRACE_ERROR,
983 ("shared_region: %p [%d(%s)] "
984 "map_file(%p:'%s'): "
985 "failed to allocate %d mappings (kr=0x%x)\n",
986 current_thread(), p->p_pid, p->p_comm,
987 vp, vp->v_name, mapping_count, kr));
988 error = ENOMEM;
989 goto done;
990 }
991 }
992
993 user_mappings = uap->mappings; /* the mappings, in user space */
994 error = copyin(user_mappings,
995 mappings,
996 (mapping_count * sizeof (mappings[0])));
997 if (error != 0) {
998 SHARED_REGION_TRACE(
999 SHARED_REGION_TRACE_ERROR,
1000 ("shared_region: %p [%d(%s)] map_file(%p:'%s'): "
1001 "failed to copyin %d mappings (error=%d)\n",
1002 current_thread(), p->p_pid, p->p_comm,
1003 vp, vp->v_name, mapping_count, error));
1004 goto done;
1005 }
1006
1007 /*
1008 * If the caller provides a "slide" pointer, it means they're OK
1009 * with us moving the mappings around to make them fit.
1010 */
1011 user_slide_p = uap->slide_p;
1012
1013 /*
1014 * Make each mapping address relative to the beginning of the
1015 * shared region. Check that all mappings are in the shared region.
1016 * Compute the maximum set of protections required to tell the
1017 * buffer cache how we mapped the file (see call to ubc_map() below).
1018 */
1019 max_prot = VM_PROT_NONE;
1020 base_offset = -1LL;
1021 end_offset = 0;
1022 mappings_in_segment = TRUE;
1023 for (j = 0; j < mapping_count; j++) {
1024 mach_vm_offset_t segment;
1025 segment = (mappings[j].sfm_address &
1026 GLOBAL_SHARED_SEGMENT_MASK);
1027 if (segment != GLOBAL_SHARED_TEXT_SEGMENT &&
1028 segment != GLOBAL_SHARED_DATA_SEGMENT) {
1029 /* this mapping is not in the shared region... */
1030 if (user_slide_p == NULL) {
1031 /* ... and we can't slide it in: fail */
1032 SHARED_REGION_TRACE(
1033 SHARED_REGION_TRACE_CONFLICT,
1034 ("shared_region: %p [%d(%s)] "
1035 "map_file(%p:'%s'): "
1036 "mapping %p not in shared segment & "
1037 "no sliding\n",
1038 current_thread(), p->p_pid, p->p_comm,
1039 vp, vp->v_name,
1040 mappings[j].sfm_address));
1041 error = EINVAL;
1042 goto done;
1043 }
1044 if (j == 0) {
1045 /* expect all mappings to be outside */
1046 mappings_in_segment = FALSE;
1047 } else if (mappings_in_segment != FALSE) {
1048 /* other mappings were not outside: fail */
1049 SHARED_REGION_TRACE(
1050 SHARED_REGION_TRACE_CONFLICT,
1051 ("shared_region: %p [%d(%s)] "
1052 "map_file(%p:'%s'): "
1053 "mapping %p not in shared segment & "
1054 "other mappings in shared segment\n",
1055 current_thread(), p->p_pid, p->p_comm,
1056 vp, vp->v_name,
1057 mappings[j].sfm_address));
1058 error = EINVAL;
1059 goto done;
1060 }
1061 /* we'll try and slide that mapping in the segments */
1062 } else {
1063 if (j == 0) {
1064 /* expect all mappings to be inside */
1065 mappings_in_segment = TRUE;
1066 } else if (mappings_in_segment != TRUE) {
1067 /* other mappings were not inside: fail */
1068 SHARED_REGION_TRACE(
1069 SHARED_REGION_TRACE_CONFLICT,
1070 ("shared_region: %p [%d(%s)] "
1071 "map_file(%p:'%s'): "
1072 "mapping %p in shared segment & "
1073 "others in shared segment\n",
1074 current_thread(), p->p_pid, p->p_comm,
1075 vp, vp->v_name,
1076 mappings[j].sfm_address));
1077 error = EINVAL;
1078 goto done;
1079 }
1080 /* get a relative offset inside the shared segments */
1081 mappings[j].sfm_address -= GLOBAL_SHARED_TEXT_SEGMENT;
1082 }
1083 if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK)
1084 < base_offset) {
1085 base_offset = (mappings[j].sfm_address &
1086 SHARED_TEXT_REGION_MASK);
1087 }
1088 if ((mappings[j].sfm_address & SHARED_TEXT_REGION_MASK) +
1089 mappings[j].sfm_size > end_offset) {
1090 end_offset =
1091 (mappings[j].sfm_address &
1092 SHARED_TEXT_REGION_MASK) +
1093 mappings[j].sfm_size;
1094 }
1095 max_prot |= mappings[j].sfm_max_prot;
1096 }
1097 /* Make all mappings relative to the base_offset */
1098 base_offset = vm_map_trunc_page(base_offset);
1099 end_offset = vm_map_round_page(end_offset);
1100 for (j = 0; j < mapping_count; j++) {
1101 mappings[j].sfm_address -= base_offset;
1102 }
1103 original_base_offset = base_offset;
1104 if (mappings_in_segment == FALSE) {
1105 /*
1106 * We're trying to map a library that was not pre-bound to
1107 * be in the shared segments. We want to try and slide it
1108 * back into the shared segments but as far back as possible,
1109 * so that it doesn't clash with pre-bound libraries. Set
1110 * the base_offset to the end of the region, so that it can't
1111 * possibly fit there and will have to be slid.
1112 */
1113 base_offset = SHARED_TEXT_REGION_SIZE - end_offset;
1114 }
1115
1116 /* get the file's memory object handle */
1117 UBCINFOCHECK("shared_region_map_file_np", vp);
1118 file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
1119 if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
1120 SHARED_REGION_TRACE(
1121 SHARED_REGION_TRACE_ERROR,
1122 ("shared_region: %p [%d(%s)] map_file(%p:'%s'): "
1123 "ubc_getobject() failed\n",
1124 current_thread(), p->p_pid, p->p_comm,
1125 vp, vp->v_name));
1126 error = EINVAL;
1127 goto done;
1128 }
1129
1130 /*
1131 * Get info about the current process's shared region.
1132 * This might change if we decide we need to clone the shared region.
1133 */
1134 vm_get_shared_region(current_task(), &shared_region);
1135 task_mapping_info.self = (vm_offset_t) shared_region;
1136 shared_region_mapping_info(shared_region,
1137 &(task_mapping_info.text_region),
1138 &(task_mapping_info.text_size),
1139 &(task_mapping_info.data_region),
1140 &(task_mapping_info.data_size),
1141 &(task_mapping_info.region_mappings),
1142 &(task_mapping_info.client_base),
1143 &(task_mapping_info.alternate_base),
1144 &(task_mapping_info.alternate_next),
1145 &(task_mapping_info.fs_base),
1146 &(task_mapping_info.system),
1147 &(task_mapping_info.flags),
1148 &next);
1149
1150 /*
1151 * Are we using the system's current shared region
1152 * for this environment ?
1153 */
1154 default_shared_region =
1155 lookup_default_shared_region(ENV_DEFAULT_ROOT,
1156 task_mapping_info.system);
1157 if (shared_region == default_shared_region) {
1158 using_default_region = TRUE;
1159 } else {
1160 using_default_region = FALSE;
1161 }
1162 shared_region_mapping_dealloc(default_shared_region);
1163
1164 if (vp->v_mount != rootvnode->v_mount &&
1165 using_default_region) {
1166 /*
1167 * The split library is not on the root filesystem. We don't
1168 		 * want to pollute the system-wide ("default") shared region
1169 * with it.
1170 * Reject the mapping. The caller (dyld) should "privatize"
1171 * (via shared_region_make_private()) the shared region and
1172 * try to establish the mapping privately for this process.
1173 */
1174 SHARED_REGION_TRACE(
1175 SHARED_REGION_TRACE_CONFLICT,
1176 ("shared_region: %p [%d(%s)] "
1177 "map_file(%p:'%s'): "
1178 "not on root volume\n",
1179 current_thread(), p->p_pid, p->p_comm,
1180 vp, vp->v_name));
1181 error = EXDEV;
1182 goto done;
1183 }
1184
1185
1186 /*
1187 * Map the split library.
1188 */
1189 kr = map_shared_file(mapping_count,
1190 mappings,
1191 file_control,
1192 file_size,
1193 &task_mapping_info,
1194 base_offset,
1195 (user_slide_p) ? &slide : NULL);
1196
1197 if (kr == KERN_SUCCESS) {
1198 /*
1199 * The mapping was successful. Let the buffer cache know
1200 * that we've mapped that file with these protections. This
1201 * prevents the vnode from getting recycled while it's mapped.
1202 */
1203 (void) ubc_map(vp, max_prot);
1204 error = 0;
1205 } else {
1206 SHARED_REGION_TRACE(
1207 SHARED_REGION_TRACE_CONFLICT,
1208 ("shared_region: %p [%d(%s)] "
1209 "map_file(%p:'%s'): "
1210 "map_shared_file failed, kr=0x%x\n",
1211 current_thread(), p->p_pid, p->p_comm,
1212 vp, vp->v_name, kr));
1213 switch (kr) {
1214 case KERN_INVALID_ADDRESS:
1215 error = EFAULT;
1216 goto done;
1217 case KERN_PROTECTION_FAILURE:
1218 error = EPERM;
1219 goto done;
1220 case KERN_NO_SPACE:
1221 error = ENOMEM;
1222 goto done;
1223 case KERN_FAILURE:
1224 case KERN_INVALID_ARGUMENT:
1225 default:
1226 error = EINVAL;
1227 goto done;
1228 }
1229 }
1230
1231 if (p->p_flag & P_NOSHLIB) {
1232 /* signal that this process is now using split libraries */
1233 p->p_flag &= ~P_NOSHLIB;
1234 }
1235
1236 if (user_slide_p) {
1237 /*
1238 * The caller provided a pointer to a "slide" offset. Let
1239 * them know by how much we slid the mappings.
1240 */
1241 if (mappings_in_segment == FALSE) {
1242 /*
1243 * We faked the base_offset earlier, so undo that
1244 * and take into account the real base_offset.
1245 */
1246 slide += SHARED_TEXT_REGION_SIZE - end_offset;
1247 slide -= original_base_offset;
1248 /*
1249 * The mappings were slid into the shared segments
1250 * and "slide" is relative to the beginning of the
1251 * shared segments. Adjust it to be absolute.
1252 */
1253 slide += GLOBAL_SHARED_TEXT_SEGMENT;
1254 }
1255 error = copyout(&slide,
1256 user_slide_p,
1257 sizeof (slide));
1258 if (slide != 0) {
1259 SHARED_REGION_TRACE(
1260 SHARED_REGION_TRACE_CONFLICT,
1261 ("shared_region: %p [%d(%s)] "
1262 "map_file(%p:'%s'): "
1263 "slid by 0x%llx\n",
1264 current_thread(), p->p_pid, p->p_comm,
1265 vp, vp->v_name, slide));
1266 }
1267 }
1268
1269 done:
1270 if (vp != NULL) {
1271 /*
1272 * release the vnode...
1273 * ubc_map() still holds it for us in the non-error case
1274 */
1275 (void) vnode_put(vp);
1276 vp = NULL;
1277 }
1278 if (fp != NULL) {
1279 /* release the file descriptor */
1280 fp_drop(p, fd, fp, 0);
1281 fp = NULL;
1282 }
1283 if (mappings != NULL &&
1284 mappings != &stack_mappings[0]) {
1285 kmem_free(kernel_map,
1286 (vm_offset_t) mappings,
1287 mapping_count * sizeof (mappings[0]));
1288 }
1289 mappings = NULL;
1290
1291 return error;
1292 }
1293
1294 int
1295 load_shared_file(struct proc *p, struct load_shared_file_args *uap,
1296 __unused int *retval)
1297 {
1298 caddr_t mapped_file_addr=uap->mfa;
1299 u_long mapped_file_size=uap->mfs;
1300 caddr_t *base_address=uap->ba;
1301 int map_cnt=uap->map_cnt;
1302 sf_mapping_t *mappings=uap->mappings;
1303 char *filename=uap->filename;
1304 int *flags=uap->flags;
1305 struct vnode *vp = 0;
1306 struct nameidata nd, *ndp;
1307 char *filename_str;
1308 register int error;
1309 kern_return_t kr;
1310
1311 struct vfs_context context;
1312 off_t file_size;
1313 memory_object_control_t file_control;
1314 sf_mapping_t *map_list;
1315 caddr_t local_base;
1316 int local_flags;
1317 int caller_flags;
1318 int i;
1319 int default_regions = 0;
1320 vm_size_t dummy;
1321 kern_return_t kret;
1322
1323 shared_region_mapping_t shared_region;
1324 struct shared_region_task_mappings task_mapping_info;
1325 shared_region_mapping_t next;
1326
1327 context.vc_proc = p;
1328 context.vc_ucred = kauth_cred_get();
1329
1330 ndp = &nd;
1331
1332 AUDIT_ARG(addr, CAST_USER_ADDR_T(base_address));
1333 /* Retrieve the base address */
1334 if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {
1335 goto lsf_bailout;
1336 }
1337 if ( (error = copyin(CAST_USER_ADDR_T(flags), &local_flags, sizeof (int))) ) {
1338 goto lsf_bailout;
1339 }
1340
1341 if(local_flags & QUERY_IS_SYSTEM_REGION) {
1342 shared_region_mapping_t default_shared_region;
1343 vm_get_shared_region(current_task(), &shared_region);
1344 task_mapping_info.self = (vm_offset_t)shared_region;
1345
1346 shared_region_mapping_info(shared_region,
1347 &(task_mapping_info.text_region),
1348 &(task_mapping_info.text_size),
1349 &(task_mapping_info.data_region),
1350 &(task_mapping_info.data_size),
1351 &(task_mapping_info.region_mappings),
1352 &(task_mapping_info.client_base),
1353 &(task_mapping_info.alternate_base),
1354 &(task_mapping_info.alternate_next),
1355 &(task_mapping_info.fs_base),
1356 &(task_mapping_info.system),
1357 &(task_mapping_info.flags), &next);
1358
1359 default_shared_region =
1360 lookup_default_shared_region(
1361 ENV_DEFAULT_ROOT,
1362 task_mapping_info.system);
1363 if (shared_region == default_shared_region) {
1364 local_flags = SYSTEM_REGION_BACKED;
1365 } else {
1366 local_flags = 0;
1367 }
1368 shared_region_mapping_dealloc(default_shared_region);
1369 error = 0;
1370 error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int));
1371 goto lsf_bailout;
1372 }
1373 caller_flags = local_flags;
1374 kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
1375 (vm_size_t)(MAXPATHLEN));
1376 if (kret != KERN_SUCCESS) {
1377 error = ENOMEM;
1378 goto lsf_bailout;
1379 }
1380 kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
1381 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
1382 if (kret != KERN_SUCCESS) {
1383 kmem_free(kernel_map, (vm_offset_t)filename_str,
1384 (vm_size_t)(MAXPATHLEN));
1385 error = ENOMEM;
1386 goto lsf_bailout;
1387 }
1388
1389 if ( (error = copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
1390 goto lsf_bailout_free;
1391 }
1392
1393 if ( (error = copyinstr(CAST_USER_ADDR_T(filename), filename_str,
1394 MAXPATHLEN, (size_t *)&dummy)) ) {
1395 goto lsf_bailout_free;
1396 }
1397
1398 /*
1399 * Get a vnode for the target file
1400 */
1401 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE32,
1402 CAST_USER_ADDR_T(filename_str), &context);
1403
1404 if ((error = namei(ndp))) {
1405 goto lsf_bailout_free;
1406 }
1407 vp = ndp->ni_vp;
1408
1409 nameidone(ndp);
1410
1411 if (vp->v_type != VREG) {
1412 error = EINVAL;
1413 goto lsf_bailout_free_vput;
1414 }
1415
1416 UBCINFOCHECK("load_shared_file", vp);
1417
1418 if ((error = vnode_size(vp, &file_size, &context)) != 0)
1419 goto lsf_bailout_free_vput;
1420
1421 file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
1422 if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
1423 error = EINVAL;
1424 goto lsf_bailout_free_vput;
1425 }
1426
1427 #ifdef notdef
1428 if(file_size != mapped_file_size) {
1429 error = EINVAL;
1430 goto lsf_bailout_free_vput;
1431 }
1432 #endif
1433 if(p->p_flag & P_NOSHLIB) {
1434 p->p_flag = p->p_flag & ~P_NOSHLIB;
1435 }
1436
1437 	/* load alternate regions if the caller has requested them. */
1438 /* Note: the new regions are "clean slates" */
1439 if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
1440 error = clone_system_shared_regions(FALSE,
1441 TRUE, /* chain_regions */
1442 ENV_DEFAULT_ROOT);
1443 if (error) {
1444 goto lsf_bailout_free_vput;
1445 }
1446 }
1447
1448 vm_get_shared_region(current_task(), &shared_region);
1449 task_mapping_info.self = (vm_offset_t)shared_region;
1450
1451 shared_region_mapping_info(shared_region,
1452 &(task_mapping_info.text_region),
1453 &(task_mapping_info.text_size),
1454 &(task_mapping_info.data_region),
1455 &(task_mapping_info.data_size),
1456 &(task_mapping_info.region_mappings),
1457 &(task_mapping_info.client_base),
1458 &(task_mapping_info.alternate_base),
1459 &(task_mapping_info.alternate_next),
1460 &(task_mapping_info.fs_base),
1461 &(task_mapping_info.system),
1462 &(task_mapping_info.flags), &next);
1463
1464 {
1465 shared_region_mapping_t default_shared_region;
1466 default_shared_region =
1467 lookup_default_shared_region(
1468 ENV_DEFAULT_ROOT,
1469 task_mapping_info.system);
1470 if(shared_region == default_shared_region) {
1471 default_regions = 1;
1472 }
1473 shared_region_mapping_dealloc(default_shared_region);
1474 }
1475 /* If we are running on a removable file system we must not */
1476 /* be in a set of shared regions or the file system will not */
1477 /* be removable. */
1478 if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
1479 && (lsf_mapping_pool_gauge() < 75)) {
1480 /* We don't want to run out of shared memory */
1481 /* map entries by starting too many private versions */
1482 /* of the shared library structures */
1483 int error2;
1484
1485 error2 = clone_system_shared_regions(!(p->p_flag & P_NOSHLIB),
1486 TRUE, /* chain_regions */
1487 ENV_DEFAULT_ROOT);
1488 if (error2) {
1489 goto lsf_bailout_free_vput;
1490 }
1491 local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
1492 vm_get_shared_region(current_task(), &shared_region);
1493 shared_region_mapping_info(shared_region,
1494 &(task_mapping_info.text_region),
1495 &(task_mapping_info.text_size),
1496 &(task_mapping_info.data_region),
1497 &(task_mapping_info.data_size),
1498 &(task_mapping_info.region_mappings),
1499 &(task_mapping_info.client_base),
1500 &(task_mapping_info.alternate_base),
1501 &(task_mapping_info.alternate_next),
1502 &(task_mapping_info.fs_base),
1503 &(task_mapping_info.system),
1504 &(task_mapping_info.flags), &next);
1505 }
1506
1507 	/* This is a work-around to allow executables which have been */
1508 	/* built without knowledge of the proper shared segment to load. */
1509 	/* This code has been architected as a shared region handler; */
1510 	/* the knowledge of where the regions are loaded is problematic */
1511 	/* for the extension of shared regions, as it will not be easy to */
1512 	/* know what region an item should go into. The code below, */
1513 	/* however, works around a short-term problem with executables */
1514 	/* which believe they are loading at zero. */
1515
1516 {
1517 if (((unsigned int)local_base &
1518 (~(task_mapping_info.text_size - 1))) !=
1519 task_mapping_info.client_base) {
1520 if(local_flags & ALTERNATE_LOAD_SITE) {
1521 local_base = (caddr_t)(
1522 (unsigned int)local_base &
1523 (task_mapping_info.text_size - 1));
1524 local_base = (caddr_t)((unsigned int)local_base
1525 | task_mapping_info.client_base);
1526 } else {
1527 error = EINVAL;
1528 goto lsf_bailout_free_vput;
1529 }
1530 }
1531 }
1532
1533
1534 if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
1535 mapped_file_size,
1536 (vm_offset_t *)&local_base,
1537 map_cnt, map_list, file_control,
1538 &task_mapping_info, &local_flags))) {
1539 switch (kr) {
1540 case KERN_FAILURE:
1541 error = EINVAL;
1542 break;
1543 case KERN_INVALID_ARGUMENT:
1544 error = EINVAL;
1545 break;
1546 case KERN_INVALID_ADDRESS:
1547 error = EFAULT;
1548 break;
1549 case KERN_PROTECTION_FAILURE:
1550 /* save EAUTH for authentication in this */
1551 /* routine */
1552 error = EPERM;
1553 break;
1554 case KERN_NO_SPACE:
1555 error = ENOMEM;
1556 break;
1557 default:
1558 error = EINVAL;
1559 };
1560 if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
1561 printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
1562 for(i=0; i<map_cnt; i++) {
1563 printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
1564 , i, map_list[i].mapping_offset,
1565 map_list[i].size,
1566 map_list[i].file_offset,
1567 map_list[i].protection);
1568 }
1569 }
1570 } else {
1571 if(default_regions)
1572 local_flags |= SYSTEM_REGION_BACKED;
1573 if(!(error = copyout(&local_flags, CAST_USER_ADDR_T(flags), sizeof (int)))) {
1574 error = copyout(&local_base,
1575 CAST_USER_ADDR_T(base_address), sizeof (caddr_t));
1576 }
1577 }
1578
1579 lsf_bailout_free_vput:
1580 vnode_put(vp);
1581
1582 lsf_bailout_free:
1583 kmem_free(kernel_map, (vm_offset_t)filename_str,
1584 (vm_size_t)(MAXPATHLEN));
1585 kmem_free(kernel_map, (vm_offset_t)map_list,
1586 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
1587
1588 lsf_bailout:
1589 return error;
1590 }
1591
1592 int
1593 reset_shared_file(__unused struct proc *p, struct reset_shared_file_args *uap,
1594 __unused register int *retval)
1595 {
1596 caddr_t *base_address=uap->ba;
1597 int map_cnt=uap->map_cnt;
1598 sf_mapping_t *mappings=uap->mappings;
1599 register int error;
1600
1601 sf_mapping_t *map_list;
1602 caddr_t local_base;
1603 vm_offset_t map_address;
1604 int i;
1605 kern_return_t kret;
1606 shared_region_mapping_t shared_region;
1607 struct shared_region_task_mappings task_mapping_info;
1608 shared_region_mapping_t next;
1609
1610 AUDIT_ARG(addr, CAST_DOWN(user_addr_t, base_address));
1611 /* Retrieve the base address */
1612 if ( (error = copyin(CAST_USER_ADDR_T(base_address), &local_base, sizeof (caddr_t))) ) {
1613 goto rsf_bailout;
1614 }
1615
1616 if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
1617 != GLOBAL_SHARED_TEXT_SEGMENT) {
1618 error = EINVAL;
1619 goto rsf_bailout;
1620 }
1621
1622 kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
1623 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
1624 if (kret != KERN_SUCCESS) {
1625 error = ENOMEM;
1626 goto rsf_bailout;
1627 }
1628
1629 if ( (error =
1630 copyin(CAST_USER_ADDR_T(mappings), map_list, (map_cnt*sizeof(sf_mapping_t)))) ) {
1631
1632 kmem_free(kernel_map, (vm_offset_t)map_list,
1633 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
1634 goto rsf_bailout;
1635 }
1636
1637 vm_get_shared_region(current_task(), &shared_region);
1638 task_mapping_info.self = (vm_offset_t) shared_region;
1639 shared_region_mapping_info(shared_region,
1640 &(task_mapping_info.text_region),
1641 &(task_mapping_info.text_size),
1642 &(task_mapping_info.data_region),
1643 &(task_mapping_info.data_size),
1644 &(task_mapping_info.region_mappings),
1645 &(task_mapping_info.client_base),
1646 &(task_mapping_info.alternate_base),
1647 &(task_mapping_info.alternate_next),
1648 &(task_mapping_info.fs_base),
1649 &(task_mapping_info.system),
1650 &(task_mapping_info.flags),
1651 &next);
1652
1653 for (i = 0; i<map_cnt; i++) {
1654 if((map_list[i].mapping_offset
1655 & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
1656 map_address = (vm_offset_t)
1657 (local_base + map_list[i].mapping_offset);
1658 vm_deallocate(current_map(),
1659 map_address,
1660 map_list[i].size);
1661 vm_map(current_map(), &map_address,
1662 map_list[i].size, 0,
1663 SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
1664 task_mapping_info.data_region,
1665 ((unsigned int)local_base
1666 & SHARED_DATA_REGION_MASK) +
1667 (map_list[i].mapping_offset
1668 & SHARED_DATA_REGION_MASK),
1669 TRUE, VM_PROT_READ,
1670 VM_PROT_READ, VM_INHERIT_SHARE);
1671 }
1672 }
1673
1674 kmem_free(kernel_map, (vm_offset_t)map_list,
1675 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
1676
1677 rsf_bailout:
1678 return error;
1679 }
1680
1681 int
1682 new_system_shared_regions(__unused struct proc *p,
1683 __unused struct new_system_shared_regions_args *uap,
1684 register int *retval)
1685 {
1686 if(!(is_suser())) {
1687 *retval = EINVAL;
1688 return EINVAL;
1689 }
1690
1691 /* clear all of our existing defaults */
1692 remove_all_shared_regions();
1693
1694 *retval = 0;
1695 return 0;
1696 }
1697
1698
1699
1700 int
1701 clone_system_shared_regions(
1702 int shared_regions_active,
1703 int chain_regions,
1704 int base_vnode)
1705 {
1706 shared_region_mapping_t new_shared_region;
1707 shared_region_mapping_t next;
1708 shared_region_mapping_t old_shared_region;
1709 struct shared_region_task_mappings old_info;
1710 struct shared_region_task_mappings new_info;
1711
1712 vm_get_shared_region(current_task(), &old_shared_region);
1713 old_info.self = (vm_offset_t)old_shared_region;
1714 shared_region_mapping_info(old_shared_region,
1715 &(old_info.text_region),
1716 &(old_info.text_size),
1717 &(old_info.data_region),
1718 &(old_info.data_size),
1719 &(old_info.region_mappings),
1720 &(old_info.client_base),
1721 &(old_info.alternate_base),
1722 &(old_info.alternate_next),
1723 &(old_info.fs_base),
1724 &(old_info.system),
1725 &(old_info.flags), &next);
1726
1727 if (shared_regions_active ||
1728 base_vnode == ENV_DEFAULT_ROOT) {
1729 if (shared_file_create_system_region(&new_shared_region,
1730 old_info.fs_base,
1731 old_info.system))
1732 return ENOMEM;
1733 } else {
1734 if (old_shared_region &&
1735 base_vnode == ENV_DEFAULT_ROOT) {
1736 base_vnode = old_info.fs_base;
1737 }
1738 new_shared_region =
1739 lookup_default_shared_region(base_vnode,
1740 old_info.system);
1741 if (new_shared_region == NULL) {
1742 shared_file_boot_time_init(base_vnode,
1743 old_info.system);
1744 vm_get_shared_region(current_task(),
1745 &new_shared_region);
1746 } else {
1747 vm_set_shared_region(current_task(), new_shared_region);
1748 }
1749 if (old_shared_region)
1750 shared_region_mapping_dealloc(old_shared_region);
1751 }
1752 new_info.self = (vm_offset_t)new_shared_region;
1753 shared_region_mapping_info(new_shared_region,
1754 &(new_info.text_region),
1755 &(new_info.text_size),
1756 &(new_info.data_region),
1757 &(new_info.data_size),
1758 &(new_info.region_mappings),
1759 &(new_info.client_base),
1760 &(new_info.alternate_base),
1761 &(new_info.alternate_next),
1762 &(new_info.fs_base),
1763 &(new_info.system),
1764 &(new_info.flags), &next);
1765 if(shared_regions_active) {
1766 if(vm_region_clone(old_info.text_region, new_info.text_region)) {
1767 panic("clone_system_shared_regions: shared region mis-alignment 1");
1768 shared_region_mapping_dealloc(new_shared_region);
1769 return(EINVAL);
1770 }
1771 if (vm_region_clone(old_info.data_region, new_info.data_region)) {
1772 panic("clone_system_shared_regions: shared region mis-alignment 2");
1773 shared_region_mapping_dealloc(new_shared_region);
1774 return(EINVAL);
1775 }
1776 if (chain_regions) {
1777 /*
1778 * We want a "shadowed" clone, a private superset of the old
1779 * shared region. The info about the old mappings is still
1780 * valid for us.
1781 */
1782 shared_region_object_chain_attach(
1783 new_shared_region, old_shared_region);
1784 } else {
1785 /*
1786 * We want a completely detached clone with no link to
1787 * the old shared region. We'll be removing some mappings
1788 * in our private, cloned, shared region, so the old mappings
1789 * will become irrelevant to us. Since we have a private
1790 * "shared region" now, it isn't going to be shared with
1791 * anyone else and we won't need to maintain mappings info.
1792 */
1793 shared_region_object_chain_detached(new_shared_region);
1794 }
1795 }
1796 if (vm_map_region_replace(current_map(), old_info.text_region,
1797 new_info.text_region, old_info.client_base,
1798 old_info.client_base+old_info.text_size)) {
1799 panic("clone_system_shared_regions: shared region mis-alignment 3");
1800 shared_region_mapping_dealloc(new_shared_region);
1801 return(EINVAL);
1802 }
1803 if(vm_map_region_replace(current_map(), old_info.data_region,
1804 new_info.data_region,
1805 old_info.client_base + old_info.text_size,
1806 old_info.client_base
1807 + old_info.text_size + old_info.data_size)) {
1808 panic("clone_system_shared_regions: shared region mis-alignment 4");
1809 shared_region_mapping_dealloc(new_shared_region);
1810 return(EINVAL);
1811 }
1812 vm_set_shared_region(current_task(), new_shared_region);
1813
1814 /* consume the reference which wasn't accounted for in object */
1815 /* chain attach */
1816 if (!shared_regions_active || !chain_regions)
1817 shared_region_mapping_dealloc(old_shared_region);
1818
1819 SHARED_REGION_TRACE(
1820 SHARED_REGION_TRACE_INFO,
1821 ("shared_region: %p task=%p "
1822 "clone(active=%d, base=0x%x,chain=%d) "
1823 "old=%p[%x,%x,%x] new=%p[%x,%x,%x]\n",
1824 current_thread(), current_task(),
1825 shared_regions_active, base_vnode, chain_regions,
1826 old_shared_region,
1827 old_info.fs_base,
1828 old_info.system,
1829 old_info.flags,
1830 new_shared_region,
1831 new_info.fs_base,
1832 new_info.system,
1833 new_info.flags));
1834
1835 return(0);
1836
1837 }
1838
1839 /* header for the profile name file. The profiled app info is held */
1840 /* in the data file and pointed to by elements in the name file */
1841
1842 struct profile_names_header {
1843 unsigned int number_of_profiles;
1844 unsigned int user_id;
1845 unsigned int version;
1846 off_t element_array;
1847 unsigned int spare1;
1848 unsigned int spare2;
1849 unsigned int spare3;
1850 };
1851
1852 struct profile_element {
1853 off_t addr;
1854 vm_size_t size;
1855 unsigned int mod_date;
1856 unsigned int inode;
1857 char name[12];
1858 };
1859
1860 struct global_profile {
1861 struct vnode *names_vp;
1862 struct vnode *data_vp;
1863 vm_offset_t buf_ptr;
1864 unsigned int user;
1865 unsigned int age;
1866 unsigned int busy;
1867 };
1868
1869 struct global_profile_cache {
1870 int max_ele;
1871 unsigned int age;
1872 struct global_profile profiles[3];
1873 };
1874
1875 /* forward declarations */
1876 int bsd_open_page_cache_files(unsigned int user,
1877 struct global_profile **profile);
1878 void bsd_close_page_cache_files(struct global_profile *profile);
1879 int bsd_search_page_cache_data_base(
1880 struct vnode *vp,
1881 struct profile_names_header *database,
1882 char *app_name,
1883 unsigned int mod_date,
1884 unsigned int inode,
1885 off_t *profile,
1886 unsigned int *profile_size);
1887
1888 struct global_profile_cache global_user_profile_cache =
1889 {3, 0, {{NULL, NULL, 0, 0, 0, 0},
1890 {NULL, NULL, 0, 0, 0, 0},
1891 {NULL, NULL, 0, 0, 0, 0}} };
1892
1893 /* BSD_OPEN_PAGE_CACHE_FILES: */
1894 /* Caller provides a user id. This id was used in */
1895 /* prepare_profile_database to create two unique absolute */
1896 /* file paths to the associated profile files. These files */
1897 /* are either opened or bsd_open_page_cache_files returns an */
1898 /* error. The header of the names file is then consulted. */
1899 /* The header and the vnodes for the names and data files are */
1900 /* returned. */
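/*
 * Illustrative in-kernel sketch (not part of this file): the profile files
 * for a user are looked up once per request and must be released with
 * bsd_close_page_cache_files() when the I/O is done. The surrounding
 * variable names are hypothetical.
 *
 *	struct global_profile *profile;
 *
 *	if (bsd_open_page_cache_files(user_id, &profile) != 0)
 *		return;                   (no profile database for this uid)
 *	...read or write via profile->names_vp and profile->data_vp...
 *	bsd_close_page_cache_files(profile);  (drops iocounts, clears busy)
 */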
1901
1902 int
1903 bsd_open_page_cache_files(
1904 unsigned int user,
1905 struct global_profile **profile)
1906 {
1907 const char *cache_path = "/var/vm/app_profile/";
1908 struct proc *p;
1909 int error;
1910 vm_size_t resid;
1911 off_t resid_off;
1912 unsigned int lru;
1913 vm_size_t size;
1914
1915 struct vnode *names_vp;
1916 struct vnode *data_vp;
1917 vm_offset_t names_buf;
1918 vm_offset_t buf_ptr;
1919
1920 int profile_names_length;
1921 int profile_data_length;
1922 char *profile_data_string;
1923 char *profile_names_string;
1924 char *substring;
1925
1926 off_t file_size;
1927 struct vfs_context context;
1928
1929 kern_return_t ret;
1930
1931 struct nameidata nd_names;
1932 struct nameidata nd_data;
1933 int i;
1934
1935
1936 p = current_proc();
1937
1938 context.vc_proc = p;
1939 context.vc_ucred = kauth_cred_get();
1940
1941 restart:
1942 for(i = 0; i<global_user_profile_cache.max_ele; i++) {
1943 if((global_user_profile_cache.profiles[i].user == user)
1944 && (global_user_profile_cache.profiles[i].data_vp
1945 != NULL)) {
1946 *profile = &global_user_profile_cache.profiles[i];
1947 /* already in cache, we're done */
1948 if ((*profile)->busy) {
1949 /*
1950 * drop funnel and wait
1951 */
1952 (void)tsleep((void *)
1953 *profile,
1954 PRIBIO, "app_profile", 0);
1955 goto restart;
1956 }
1957 (*profile)->busy = 1;
1958 (*profile)->age = global_user_profile_cache.age;
1959
1960 /*
1961 * entries in cache are held with a valid
1962 * usecount... take an iocount which will
1963 * be dropped in "bsd_close_page_cache_files"
1964 * which is called after the read or writes to
1965 * these files are done
1966 */
1967 if ( (vnode_getwithref((*profile)->data_vp)) ) {
1968
1969 vnode_rele((*profile)->data_vp);
1970 vnode_rele((*profile)->names_vp);
1971
1972 (*profile)->data_vp = NULL;
1973 (*profile)->busy = 0;
1974 wakeup(*profile);
1975
1976 goto restart;
1977 }
1978 if ( (vnode_getwithref((*profile)->names_vp)) ) {
1979
1980 vnode_put((*profile)->data_vp);
1981 vnode_rele((*profile)->data_vp);
1982 vnode_rele((*profile)->names_vp);
1983
1984 (*profile)->data_vp = NULL;
1985 (*profile)->busy = 0;
1986 wakeup(*profile);
1987
1988 goto restart;
1989 }
1990 global_user_profile_cache.age+=1;
1991 return 0;
1992 }
1993 }
1994
1995 lru = global_user_profile_cache.age;
1996 *profile = NULL;
1997 for(i = 0; i<global_user_profile_cache.max_ele; i++) {
1998 /* Skip entry if it is in the process of being reused */
1999 if(global_user_profile_cache.profiles[i].data_vp ==
2000 (struct vnode *)0xFFFFFFFF)
2001 continue;
2002 /* Otherwise grab the first empty entry */
2003 if(global_user_profile_cache.profiles[i].data_vp == NULL) {
2004 *profile = &global_user_profile_cache.profiles[i];
2005 (*profile)->age = global_user_profile_cache.age;
2006 break;
2007 }
2008 /* Otherwise grab the oldest entry */
2009 if(global_user_profile_cache.profiles[i].age < lru) {
2010 lru = global_user_profile_cache.profiles[i].age;
2011 *profile = &global_user_profile_cache.profiles[i];
2012 }
2013 }
2014
2015 /* Did we set it? */
2016 if (*profile == NULL) {
2017 /*
2018 * No entries are available; this can only happen if all
2019 * of them are currently in the process of being reused;
2020 * if this happens, we sleep on the address of the first
2021 * element, and restart. This is less than ideal, but we
2022 * know it will work because we know that there will be a
2023 * wakeup on any entry currently in the process of being
2024 * reused.
2025 *
2026 * XXX Recommend a two-handed clock and more than 3 total
2027 * XXX cache entries at some point in the future.
2028 */
2029 /*
2030 * drop funnel and wait
2031 */
2032 (void)tsleep((void *)
2033 &global_user_profile_cache.profiles[0],
2034 PRIBIO, "app_profile", 0);
2035 goto restart;
2036 }
2037
2038 /*
2039 * If it's currently busy, we've picked the one at the end of the
2040 * LRU list, but it's currently being actively used. We sleep on
2041 * its address and restart.
2042 */
2043 if ((*profile)->busy) {
2044 /*
2045 * drop funnel and wait
2046 */
2047 (void)tsleep((void *)
2048 *profile,
2049 PRIBIO, "app_profile", 0);
2050 goto restart;
2051 }
2052 (*profile)->busy = 1;
2053 (*profile)->user = user;
2054
2055 /*
2056 * put a dummy value in for now to get competing requests to wait
2057 * above until we are finished
2058 *
2059 * Save the old data_vp before overwriting it, so the dummy value is
2060 * in place before we kmem_free() or vnode_rele(). If we don't do
2061 * this, we have a potential funnel race condition to deal with.
2062 */
2063 data_vp = (*profile)->data_vp;
2064 (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;
2065
2066 /*
2067 * Age the cache here in all cases; this guarantees that we won't
2068 * be reusing only one entry over and over, once the system reaches
2069 * steady-state.
2070 */
2071 global_user_profile_cache.age+=1;
2072
2073 if(data_vp != NULL) {
2074 kmem_free(kernel_map,
2075 (*profile)->buf_ptr, 4 * PAGE_SIZE);
2076 if ((*profile)->names_vp) {
2077 vnode_rele((*profile)->names_vp);
2078 (*profile)->names_vp = NULL;
2079 }
2080 vnode_rele(data_vp);
2081 }
2082
2083 /* Try to open the appropriate user's profile files. */
2084 /* If either file is missing, fail; the files themselves */
2085 /* are created separately by prepare_profile_database. */
2086 /* On success, the header (first pages) of the names file */
2087 /* is read into names_buf for the caller. */
2088
2089 ret = kmem_alloc(kernel_map,
2090 (vm_offset_t *)&profile_data_string, PATH_MAX);
2091
2092 if(ret) {
2093 (*profile)->data_vp = NULL;
2094 (*profile)->busy = 0;
2095 wakeup(*profile);
2096 return ENOMEM;
2097 }
2098
2099 /* Split the buffer in half since we know the size of */
2100 /* our file path and our allocation is adequate for */
2101 /* both file path names */
2102 profile_names_string = profile_data_string + (PATH_MAX/2);
2103
2104
2105 strcpy(profile_data_string, cache_path);
2106 strcpy(profile_names_string, cache_path);
2107 profile_names_length = profile_data_length
2108 = strlen(profile_data_string);
2109 substring = profile_data_string + profile_data_length;
2110 sprintf(substring, "%x_data", user);
2111 substring = profile_names_string + profile_names_length;
2112 sprintf(substring, "%x_names", user);
2113
2114 /* We now have the absolute file names */
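/* e.g. for user id 0x1f5 (501): /var/vm/app_profile/1f5_data */
/* and /var/vm/app_profile/1f5_names */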
2115
2116 ret = kmem_alloc(kernel_map,
2117 (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
2118 if(ret) {
2119 kmem_free(kernel_map,
2120 (vm_offset_t)profile_data_string, PATH_MAX);
2121 (*profile)->data_vp = NULL;
2122 (*profile)->busy = 0;
2123 wakeup(*profile);
2124 return ENOMEM;
2125 }
2126
2127 NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
2128 UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
2129 NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
2130 UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);
2131
2132 if ( (error = vn_open(&nd_data, FREAD | FWRITE, 0)) ) {
2133 #ifdef notdef
2134 printf("bsd_open_page_cache_files: CacheData file not found %s\n",
2135 profile_data_string);
2136 #endif
2137 kmem_free(kernel_map,
2138 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2139 kmem_free(kernel_map,
2140 (vm_offset_t)profile_data_string, PATH_MAX);
2141 (*profile)->data_vp = NULL;
2142 (*profile)->busy = 0;
2143 wakeup(*profile);
2144 return error;
2145 }
2146 data_vp = nd_data.ni_vp;
2147
2148 if ( (error = vn_open(&nd_names, FREAD | FWRITE, 0)) ) {
2149 printf("bsd_open_page_cache_files: CacheNames file not found %s\n",
2150 profile_names_string);
2151 kmem_free(kernel_map,
2152 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2153 kmem_free(kernel_map,
2154 (vm_offset_t)profile_data_string, PATH_MAX);
2155
2156 vnode_rele(data_vp);
2157 vnode_put(data_vp);
2158
2159 (*profile)->data_vp = NULL;
2160 (*profile)->busy = 0;
2161 wakeup(*profile);
2162 return error;
2163 }
2164 names_vp = nd_names.ni_vp;
2165
2166 if ((error = vnode_size(names_vp, &file_size, &context)) != 0) {
2167 printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
2168 kmem_free(kernel_map,
2169 (vm_offset_t)profile_data_string, PATH_MAX);
2170 kmem_free(kernel_map,
2171 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2172
2173 vnode_rele(names_vp);
2174 vnode_put(names_vp);
2175 vnode_rele(data_vp);
2176 vnode_put(data_vp);
2177
2178 (*profile)->data_vp = NULL;
2179 (*profile)->busy = 0;
2180 wakeup(*profile);
2181 return error;
2182 }
2183
2184 size = file_size;
2185 if(size > 4 * PAGE_SIZE)
2186 size = 4 * PAGE_SIZE;
2187 buf_ptr = names_buf;
2188 resid_off = 0;
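/*
 * Pull at most the first 4 pages of the names file into names_buf;
 * this window holds the header and the leading part of the element
 * array.  bsd_search_page_cache_data_base() reads any overflow
 * elements directly from the vnode on demand.
 */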
2189
2190 while(size) {
2191 int resid_int;
2192 error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
2193 size, resid_off,
2194 UIO_SYSSPACE32, IO_NODELOCKED, kauth_cred_get(),
2195 &resid_int, p);
2196 resid = (vm_size_t) resid_int;
2197 if((error) || (size == resid)) {
2198 if(!error) {
2199 error = EINVAL;
2200 }
2201 kmem_free(kernel_map,
2202 (vm_offset_t)profile_data_string, PATH_MAX);
2203 kmem_free(kernel_map,
2204 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2205
2206 vnode_rele(names_vp);
2207 vnode_put(names_vp);
2208 vnode_rele(data_vp);
2209 vnode_put(data_vp);
2210
2211 (*profile)->data_vp = NULL;
2212 (*profile)->busy = 0;
2213 wakeup(*profile);
2214 return error;
2215 }
2216 buf_ptr += size-resid;
2217 resid_off += size-resid;
2218 size = resid;
2219 }
2220 kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
2221
2222 (*profile)->names_vp = names_vp;
2223 (*profile)->data_vp = data_vp;
2224 (*profile)->buf_ptr = names_buf;
2225
2226 /*
2227 * at this point, both the names_vp and the data_vp have
2228 * a valid usecount and an iocount held
2229 */
2230 return 0;
2231
2232 }
2233
2234 void
2235 bsd_close_page_cache_files(
2236 struct global_profile *profile)
2237 {
2238 vnode_put(profile->data_vp);
2239 vnode_put(profile->names_vp);
2240
2241 profile->busy = 0;
2242 wakeup(profile);
2243 }
2244
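/*
 * BSD_READ_PAGE_CACHE_FILE:
 * Look up app_name (keyed by the app vnode's inode number and
 * modification time) in the user's names file.  On a hit, the profile
 * blob is read from the data file into a freshly kmem_alloc()'d
 * buffer returned through *buffer / *bufsize (freed here only on
 * error).  The app's fileid and modification time are returned
 * through *fid and *mod.
 */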
2245 int
2246 bsd_read_page_cache_file(
2247 unsigned int user,
2248 int *fid,
2249 int *mod,
2250 char *app_name,
2251 struct vnode *app_vp,
2252 vm_offset_t *buffer,
2253 vm_offset_t *bufsize)
2254 {
2255
2256 boolean_t funnel_state;
2257
2258 struct proc *p;
2259 int error;
2260 unsigned int resid;
2261
2262 off_t profile;
2263 unsigned int profile_size;
2264
2265 vm_offset_t names_buf;
2266 struct vnode_attr va;
2267 struct vfs_context context;
2268
2269 kern_return_t ret;
2270
2271 struct vnode *names_vp;
2272 struct vnode *data_vp;
2273
2274 struct global_profile *uid_files;
2275
2276 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2277
2278 /* Open the appropriate user's profile files. They must */
2279 /* already exist; bsd_open_page_cache_files does not */
2280 /* create them, so a missing file makes this fail. */
2281 /* If they do open, check them for the app_file */
2282 /* requested and read its profile in if present. */
2283
2284
2285 error = bsd_open_page_cache_files(user, &uid_files);
2286 if(error) {
2287 thread_funnel_set(kernel_flock, funnel_state);
2288 return EINVAL;
2289 }
2290
2291 p = current_proc();
2292
2293 names_vp = uid_files->names_vp;
2294 data_vp = uid_files->data_vp;
2295 names_buf = uid_files->buf_ptr;
2296
2297 context.vc_proc = p;
2298 context.vc_ucred = kauth_cred_get();
2299
2300 VATTR_INIT(&va);
2301 VATTR_WANTED(&va, va_fileid);
2302 VATTR_WANTED(&va, va_modify_time);
2303
2304 if ((error = vnode_getattr(app_vp, &va, &context))) {
2305 printf("bsd_read_page_cache_file: Can't stat app file %s\n", app_name);
2306 bsd_close_page_cache_files(uid_files);
2307 thread_funnel_set(kernel_flock, funnel_state);
2308 return error;
2309 }
2310
2311 *fid = (u_long)va.va_fileid;
2312 *mod = va.va_modify_time.tv_sec;
2313
2314 if (bsd_search_page_cache_data_base(
2315 names_vp,
2316 (struct profile_names_header *)names_buf,
2317 app_name,
2318 (unsigned int) va.va_modify_time.tv_sec,
2319 (u_long)va.va_fileid, &profile, &profile_size) == 0) {
2320 /* profile is an offset in the profile data base */
2321 /* It is zero if no profile data was found */
2322
2323 if(profile_size == 0) {
2324 *buffer = 0;
2325 *bufsize = 0;
2326 bsd_close_page_cache_files(uid_files);
2327 thread_funnel_set(kernel_flock, funnel_state);
2328 return 0;
2329 }
2330 ret = kmem_alloc(kernel_map, buffer, profile_size);
2331 if(ret) {
2332 bsd_close_page_cache_files(uid_files);
2333 thread_funnel_set(kernel_flock, funnel_state);
2334 return ENOMEM;
2335 }
2336 *bufsize = profile_size;
2337 while(profile_size) {
2338 int resid_int;
2339 error = vn_rdwr(UIO_READ, data_vp,
2340 (caddr_t) *buffer, profile_size,
2341 profile, UIO_SYSSPACE32, IO_NODELOCKED,
2342 kauth_cred_get(), &resid_int, p);
2343 resid = (vm_size_t) resid_int;
2344 if((error) || (profile_size == resid)) {
2345 bsd_close_page_cache_files(uid_files);
2346 kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
2347 thread_funnel_set(kernel_flock, funnel_state);
2348 return EINVAL;
2349 }
2350 profile += profile_size - resid;
2351 profile_size = resid;
2352 }
2353 bsd_close_page_cache_files(uid_files);
2354 thread_funnel_set(kernel_flock, funnel_state);
2355 return 0;
2356 } else {
2357 bsd_close_page_cache_files(uid_files);
2358 thread_funnel_set(kernel_flock, funnel_state);
2359 return EINVAL;
2360 }
2361
2362 }
2363
2364 int
2365 bsd_search_page_cache_data_base(
2366 struct vnode *vp,
2367 struct profile_names_header *database,
2368 char *app_name,
2369 unsigned int mod_date,
2370 unsigned int inode,
2371 off_t *profile,
2372 unsigned int *profile_size)
2373 {
2374
2375 struct proc *p;
2376
2377 unsigned int i;
2378 struct profile_element *element;
2379 unsigned int ele_total;
2380 unsigned int extended_list = 0;
2381 off_t file_off = 0;
2382 unsigned int size;
2383 off_t resid_off;
2384 unsigned int resid;
2385 vm_offset_t local_buf = 0;
2386
2387 int error;
2388 kern_return_t ret;
2389
2390 p = current_proc();
2391
2392 if(((vm_offset_t)database->element_array) !=
2393 sizeof(struct profile_names_header)) {
2394 return EINVAL;
2395 }
2396 element = (struct profile_element *)(
2397 (vm_offset_t)database->element_array +
2398 (vm_offset_t)database);
2399
2400 ele_total = database->number_of_profiles;
2401
2402 *profile = 0;
2403 *profile_size = 0;
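/*
 * Scan the element array in 4-page chunks.  The first pass walks the
 * elements already resident in the caller's names buffer (database);
 * if more elements exist than fit in 4 pages, the remainder
 * ("extended_list") is read from the names vnode into local_buf,
 * 4 pages at a time, and scanned the same way.
 */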
2404 while(ele_total) {
2405 /* note: code assumes header + n*ele comes out on a page boundary */
2406 if(((local_buf == 0) && (sizeof(struct profile_names_header) +
2407 (ele_total * sizeof(struct profile_element)))
2408 > (PAGE_SIZE * 4)) ||
2409 ((local_buf != 0) &&
2410 (ele_total * sizeof(struct profile_element))
2411 > (PAGE_SIZE * 4))) {
2412 extended_list = ele_total;
2413 if(element == (struct profile_element *)
2414 ((vm_offset_t)database->element_array +
2415 (vm_offset_t)database)) {
2416 ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
2417 } else {
2418 ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
2419 }
2420 extended_list -= ele_total;
2421 }
2422 for (i=0; i<ele_total; i++) {
2423 if((mod_date == element[i].mod_date)
2424 && (inode == element[i].inode)) {
2425 if(strncmp(element[i].name, app_name, 12) == 0) {
2426 *profile = element[i].addr;
2427 *profile_size = element[i].size;
2428 if(local_buf != 0) {
2429 kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
2430 }
2431 return 0;
2432 }
2433 }
2434 }
2435 if(extended_list == 0)
2436 break;
2437 if(local_buf == 0) {
2438 ret = kmem_alloc(kernel_map, &local_buf, 4 * PAGE_SIZE);
2439 if(ret != KERN_SUCCESS) {
2440 return ENOMEM;
2441 }
2442 }
2443 element = (struct profile_element *)local_buf;
2444 ele_total = extended_list;
2445 extended_list = 0;
2446 file_off += 4 * PAGE_SIZE;
2447 if((ele_total * sizeof(struct profile_element)) >
2448 (PAGE_SIZE * 4)) {
2449 size = PAGE_SIZE * 4;
2450 } else {
2451 size = ele_total * sizeof(struct profile_element);
2452 }
2453 resid_off = 0;
2454 while(size) {
2455 int resid_int;
2456 error = vn_rdwr(UIO_READ, vp,
2457 CAST_DOWN(caddr_t, (local_buf + resid_off)),
2458 size, file_off + resid_off, UIO_SYSSPACE32,
2459 IO_NODELOCKED, kauth_cred_get(), &resid_int, p);
2460 resid = (vm_size_t) resid_int;
2461 if((error) || (size == resid)) {
2462 if(local_buf != 0) {
2463 kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
2464 }
2465 return EINVAL;
2466 }
2467 resid_off += size-resid;
2468 size = resid;
2469 }
2470 }
2471 if(local_buf != 0) {
2472 kmem_free(kernel_map, local_buf, 4 * PAGE_SIZE);
2473 }
2474 return 0;
2475 }
2476
2477 int
2478 bsd_write_page_cache_file(
2479 unsigned int user,
2480 char *file_name,
2481 caddr_t buffer,
2482 vm_size_t size,
2483 int mod,
2484 int fid)
2485 {
2486 struct proc *p;
2487 int resid;
2488 off_t resid_off;
2489 int error;
2490 boolean_t funnel_state;
2491 off_t file_size;
2492 struct vfs_context context;
2493 off_t profile;
2494 unsigned int profile_size;
2495
2496 vm_offset_t names_buf;
2497 struct vnode *names_vp;
2498 struct vnode *data_vp;
2499 struct profile_names_header *profile_header;
2500 off_t name_offset;
2501 struct global_profile *uid_files;
2502
2503
2504 funnel_state = thread_funnel_set(kernel_flock, TRUE);
2505
2506
2507 error = bsd_open_page_cache_files(user, &uid_files);
2508 if(error) {
2509 thread_funnel_set(kernel_flock, funnel_state);
2510 return EINVAL;
2511 }
2512
2513 p = current_proc();
2514
2515 names_vp = uid_files->names_vp;
2516 data_vp = uid_files->data_vp;
2517 names_buf = uid_files->buf_ptr;
2518
2519 /* Stat data file for size */
2520
2521 context.vc_proc = p;
2522 context.vc_ucred = kauth_cred_get();
2523
2524 if ((error = vnode_size(data_vp, &file_size, &context)) != 0) {
2525 printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
2526 bsd_close_page_cache_files(uid_files);
2527 thread_funnel_set(kernel_flock, funnel_state);
2528 return error;
2529 }
2530
2531 if (bsd_search_page_cache_data_base(names_vp,
2532 (struct profile_names_header *)names_buf,
2533 file_name, (unsigned int) mod,
2534 fid, &profile, &profile_size) == 0) {
2535 /* profile is an offset in the profile data base */
2536 /* It is zero if no profile data was found */
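/*
 * Write path: a new profile is appended at the current end of the
 * data file (file_size) and a matching profile_element recording its
 * offset and size is appended to the names file.  If an entry for
 * this file already exists (profile_size != 0), someone beat us to
 * it and the write is skipped.
 */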
2537
2538 if(profile_size == 0) {
2539 unsigned int header_size;
2540 vm_offset_t buf_ptr;
2541
2542 /* Our Write case */
2543
2544 /* read header for last entry */
2545 profile_header =
2546 (struct profile_names_header *)names_buf;
2547 name_offset = sizeof(struct profile_names_header) +
2548 (sizeof(struct profile_element)
2549 * profile_header->number_of_profiles);
2550 profile_header->number_of_profiles += 1;
2551
2552 if(name_offset < PAGE_SIZE * 4) {
2553 struct profile_element *name;
2554 /* write new entry */
2555 name = (struct profile_element *)
2556 (names_buf + (vm_offset_t)name_offset);
2557 name->addr = file_size;
2558 name->size = size;
2559 name->mod_date = mod;
2560 name->inode = fid;
2561 strncpy (name->name, file_name, 12);
2562 } else {
2563 unsigned int ele_size;
2564 struct profile_element name;
2565 /* write new entry */
2566 name.addr = file_size;
2567 name.size = size;
2568 name.mod_date = mod;
2569 name.inode = fid;
2570 strncpy (name.name, file_name, 12);
2571 /* write element out separately */
2572 ele_size = sizeof(struct profile_element);
2573 buf_ptr = (vm_offset_t)&name;
2574 resid_off = name_offset;
2575
2576 while(ele_size) {
2577 error = vn_rdwr(UIO_WRITE, names_vp,
2578 (caddr_t)buf_ptr,
2579 ele_size, resid_off,
2580 UIO_SYSSPACE32, IO_NODELOCKED,
2581 kauth_cred_get(), &resid, p);
2582 if(error) {
2583 printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
2584 bsd_close_page_cache_files(
2585 uid_files);
2586 thread_funnel_set(
2587 kernel_flock,
2588 funnel_state);
2589 return error;
2590 }
2591 buf_ptr += (vm_offset_t)
2592 ele_size-resid;
2593 resid_off += ele_size-resid;
2594 ele_size = resid;
2595 }
2596 }
2597
2598 if(name_offset < PAGE_SIZE * 4) {
2599 header_size = name_offset +
2600 sizeof(struct profile_element);
2601
2602 } else {
2603 header_size =
2604 sizeof(struct profile_names_header);
2605 }
2606 buf_ptr = (vm_offset_t)profile_header;
2607 resid_off = 0;
2608
2609 /* write names file header */
2610 while(header_size) {
2611 error = vn_rdwr(UIO_WRITE, names_vp,
2612 (caddr_t)buf_ptr,
2613 header_size, resid_off,
2614 UIO_SYSSPACE32, IO_NODELOCKED,
2615 kauth_cred_get(), &resid, p);
2616 if(error) {
2617 printf("bsd_write_page_cache_file: Can't write header %x\n", user);
2618 bsd_close_page_cache_files(
2619 uid_files);
2620 thread_funnel_set(
2621 kernel_flock, funnel_state);
2622 return error;
2623 }
2624 buf_ptr += (vm_offset_t)header_size-resid;
2625 resid_off += header_size-resid;
2626 header_size = resid;
2627 }
2628 /* write profile to data file */
2629 resid_off = file_size;
2630 while(size) {
2631 error = vn_rdwr(UIO_WRITE, data_vp,
2632 (caddr_t)buffer, size, resid_off,
2633 UIO_SYSSPACE32, IO_NODELOCKED,
2634 kauth_cred_get(), &resid, p);
2635 if(error) {
2636 printf("bsd_write_page_cache_file: Can't write profile data %x\n", user);
2637 bsd_close_page_cache_files(
2638 uid_files);
2639 thread_funnel_set(
2640 kernel_flock, funnel_state);
2641 return error;
2642 }
2643 buffer += size-resid;
2644 resid_off += size-resid;
2645 size = resid;
2646 }
2647 bsd_close_page_cache_files(uid_files);
2648 thread_funnel_set(kernel_flock, funnel_state);
2649 return 0;
2650 }
2651 /* Someone else wrote a twin profile before us */
2652 bsd_close_page_cache_files(uid_files);
2653 thread_funnel_set(kernel_flock, funnel_state);
2654 return 0;
2655 } else {
2656 bsd_close_page_cache_files(uid_files);
2657 thread_funnel_set(kernel_flock, funnel_state);
2658 return EINVAL;
2659 }
2660
2661 }
2662
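/*
 * PREPARE_PROFILE_DATABASE:
 * Create the per-user profile files /var/vm/app_profile/<uid>_data
 * and <uid>_names (uid in hex), write an empty names header, and
 * chown both files to the user.  If the data file cannot be created
 * with O_EXCL (typically because it already exists), 0 is returned
 * without touching anything.
 */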
2663 int
2664 prepare_profile_database(int user)
2665 {
2666 const char *cache_path = "/var/vm/app_profile/";
2667 struct proc *p;
2668 int error;
2669 int resid;
2670 off_t resid_off;
2671 vm_size_t size;
2672
2673 struct vnode *names_vp;
2674 struct vnode *data_vp;
2675 vm_offset_t names_buf;
2676 vm_offset_t buf_ptr;
2677
2678 int profile_names_length;
2679 int profile_data_length;
2680 char *profile_data_string;
2681 char *profile_names_string;
2682 char *substring;
2683
2684 struct vnode_attr va;
2685 struct vfs_context context;
2686
2687 struct profile_names_header *profile_header;
2688 kern_return_t ret;
2689
2690 struct nameidata nd_names;
2691 struct nameidata nd_data;
2692
2693 p = current_proc();
2694
2695 context.vc_proc = p;
2696 context.vc_ucred = kauth_cred_get();
2697
2698 ret = kmem_alloc(kernel_map,
2699 (vm_offset_t *)&profile_data_string, PATH_MAX);
2700
2701 if(ret) {
2702 return ENOMEM;
2703 }
2704
2705 /* Split the buffer in half since we know the size of */
2706 /* our file path and our allocation is adequate for */
2707 /* both file path names */
2708 profile_names_string = profile_data_string + (PATH_MAX/2);
2709
2710
2711 strcpy(profile_data_string, cache_path);
2712 strcpy(profile_names_string, cache_path);
2713 profile_names_length = profile_data_length
2714 = strlen(profile_data_string);
2715 substring = profile_data_string + profile_data_length;
2716 sprintf(substring, "%x_data", user);
2717 substring = profile_names_string + profile_names_length;
2718 sprintf(substring, "%x_names", user);
2719
2720 /* We now have the absolute file names */
2721
2722 ret = kmem_alloc(kernel_map,
2723 (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
2724 if(ret) {
2725 kmem_free(kernel_map,
2726 (vm_offset_t)profile_data_string, PATH_MAX);
2727 return ENOMEM;
2728 }
2729
2730 NDINIT(&nd_names, LOOKUP, FOLLOW,
2731 UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_names_string), &context);
2732 NDINIT(&nd_data, LOOKUP, FOLLOW,
2733 UIO_SYSSPACE32, CAST_USER_ADDR_T(profile_data_string), &context);
2734
2735 if ( (error = vn_open(&nd_data,
2736 O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
2737 kmem_free(kernel_map,
2738 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2739 kmem_free(kernel_map,
2740 (vm_offset_t)profile_data_string, PATH_MAX);
2741
2742 return 0;
2743 }
2744 data_vp = nd_data.ni_vp;
2745
2746 if ( (error = vn_open(&nd_names,
2747 O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) ) {
2748 printf("prepare_profile_database: Can't create CacheNames %s\n",
2749 profile_names_string);
2750 kmem_free(kernel_map,
2751 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2752 kmem_free(kernel_map,
2753 (vm_offset_t)profile_data_string, PATH_MAX);
2754
2755 vnode_rele(data_vp);
2756 vnode_put(data_vp);
2757
2758 return error;
2759 }
2760 names_vp = nd_names.ni_vp;
2761
2762 /* Write Header for new names file */
2763
2764 profile_header = (struct profile_names_header *)names_buf;
2765
2766 profile_header->number_of_profiles = 0;
2767 profile_header->user_id = user;
2768 profile_header->version = 1;
2769 profile_header->element_array =
2770 sizeof(struct profile_names_header);
2771 profile_header->spare1 = 0;
2772 profile_header->spare2 = 0;
2773 profile_header->spare3 = 0;
2774
2775 size = sizeof(struct profile_names_header);
2776 buf_ptr = (vm_offset_t)profile_header;
2777 resid_off = 0;
2778
2779 while(size) {
2780 error = vn_rdwr(UIO_WRITE, names_vp,
2781 (caddr_t)buf_ptr, size, resid_off,
2782 UIO_SYSSPACE32, IO_NODELOCKED,
2783 kauth_cred_get(), &resid, p);
2784 if(error) {
2785 printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
2786 kmem_free(kernel_map,
2787 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2788 kmem_free(kernel_map,
2789 (vm_offset_t)profile_data_string,
2790 PATH_MAX);
2791
2792 vnode_rele(names_vp);
2793 vnode_put(names_vp);
2794 vnode_rele(data_vp);
2795 vnode_put(data_vp);
2796
2797 return error;
2798 }
2799 buf_ptr += size-resid;
2800 resid_off += size-resid;
2801 size = resid;
2802 }
2803 VATTR_INIT(&va);
2804 VATTR_SET(&va, va_uid, user);
2805
2806 error = vnode_setattr(names_vp, &va, &context);
2807 if(error) {
2808 printf("prepare_profile_database: "
2809 "Can't set user %s\n", profile_names_string);
2810 }
2811 vnode_rele(names_vp);
2812 vnode_put(names_vp);
2813
2814 VATTR_INIT(&va);
2815 VATTR_SET(&va, va_uid, user);
2816 error = vnode_setattr(data_vp, &va, &context);
2817 if(error) {
2818 printf("prepare_profile_database: "
2819 "Can't set user %s\n", profile_data_string);
2820 }
2821 vnode_rele(data_vp);
2822 vnode_put(data_vp);
2823
2824 kmem_free(kernel_map,
2825 (vm_offset_t)profile_data_string, PATH_MAX);
2826 kmem_free(kernel_map,
2827 (vm_offset_t)names_buf, 4 * PAGE_SIZE);
2828 return 0;
2829
2830 }