/* bsd/vm/vm_unix.c (apple/xnu, xnu-517.7.21) */
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 */

#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dir.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>
#include <sys/stat.h>

#include <bsm/audit_kernel.h>
#include <bsm/audit_kevents.h>

#include <kern/kalloc.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>

#include <mach/shared_memory_server.h>
#include <vm/vm_shared_memory_server.h>


extern zone_t lsf_zone;

/*
 * useracc: check whether the current map permits the requested kind
 * of access (B_READ or B_WRITE) across [addr, addr+len).
 */
useracc(addr, len, prot)
    caddr_t addr;
    u_int   len;
    int     prot;
{
    return (vm_map_check_protection(
            current_map(),
            trunc_page_32((unsigned int)addr),
            round_page_32((unsigned int)(addr+len)),
            prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}

/*
 * vslock: wire the pages spanning [addr, addr+len) in the current
 * map, translating the Mach result into a BSD errno.
 */
vslock(addr, len)
    caddr_t addr;
    int     len;
{
    kern_return_t kret;

    kret = vm_map_wire(current_map(), trunc_page_32((unsigned int)addr),
            round_page_32((unsigned int)(addr+len)),
            VM_PROT_READ | VM_PROT_WRITE, FALSE);

    switch (kret) {
    case KERN_SUCCESS:
        return (0);
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return (ENOMEM);
    case KERN_PROTECTION_FAILURE:
        return (EACCES);
    default:
        return (EINVAL);
    }
}

/*
 * vsunlock: unwire the pages spanning [addr, addr+len).  The
 * dirty-page marking below is compiled out (FIXME).
 */
vsunlock(addr, len, dirtied)
    caddr_t addr;
    int     len;
    int     dirtied;
{
    pmap_t          pmap;
#if FIXME  /* [ */
    vm_page_t       pg;
#endif  /* FIXME ] */
    vm_offset_t     vaddr, paddr;
    kern_return_t   kret;

#if FIXME  /* [ */
    if (dirtied) {
        pmap = get_task_pmap(current_task());
        for (vaddr = trunc_page((unsigned int)(addr));
                vaddr < round_page((unsigned int)(addr+len));
                vaddr += PAGE_SIZE) {
            paddr = pmap_extract(pmap, vaddr);
            pg = PHYS_TO_VM_PAGE(paddr);
            vm_page_set_modified(pg);
        }
    }
#endif  /* FIXME ] */
#ifdef  lint
    dirtied++;
#endif  /* lint */
    kret = vm_map_unwire(current_map(), trunc_page_32((unsigned int)(addr)),
            round_page_32((unsigned int)(addr+len)), FALSE);
    switch (kret) {
    case KERN_SUCCESS:
        return (0);
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return (ENOMEM);
    case KERN_PROTECTION_FAILURE:
        return (EACCES);
    default:
        return (EINVAL);
    }
}
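
/*
 * Hedged sketch (not part of the original file): vslock() and
 * vsunlock() duplicate the same kern_return_t to errno translation;
 * a helper like the one below could centralize it.  The name and
 * guard macro are illustrative assumptions only.
 */
#ifdef VM_UNIX_SKETCH   /* illustration; never compiled here */
static int
wire_kret_to_errno(kern_return_t kret)
{
    switch (kret) {
    case KERN_SUCCESS:
        return (0);
    case KERN_INVALID_ADDRESS:
    case KERN_NO_SPACE:
        return (ENOMEM);            /* bad range or no room */
    case KERN_PROTECTION_FAILURE:
        return (EACCES);            /* wrong protections */
    default:
        return (EINVAL);
    }
}
#endif /* VM_UNIX_SKETCH */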

#if defined(sun) || BALANCE || defined(m88k)
#else  /* defined(sun) || BALANCE || defined(m88k) */
subyte(addr, byte)
    void *addr;
    int byte;
{
    char character;

    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

suibyte(addr, byte)
    void *addr;
    int byte;
{
    char character;

    character = (char)byte;
    return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int fubyte(addr)
    void *addr;
{
    unsigned char byte;

    if (copyin(addr, (void *)&byte, sizeof(char)))
        return (-1);
    return (byte);
}

int fuibyte(addr)
    void *addr;
{
    unsigned char byte;

    if (copyin(addr, (void *)&(byte), sizeof(char)))
        return (-1);
    return (byte);
}

suword(addr, word)
    void *addr;
    long word;
{
    return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuword(addr)
    void *addr;
{
    long word;

    if (copyin(addr, (void *)&word, sizeof(int)))
        return (-1);
    return (word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

suiword(addr, word)
    void *addr;
    long word;
{
    return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuiword(addr)
    void *addr;
{
    long word;

    if (copyin(addr, (void *)&word, sizeof(int)))
        return (-1);
    return (word);
}
#endif /* defined(sun) || BALANCE || defined(m88k) */

int
swapon()
{
    return (EOPNOTSUPP);
}


kern_return_t
pid_for_task(t, x)
    mach_port_t t;
    int         *x;
{
    struct proc *p;
    task_t      t1;
    extern task_t port_name_to_task(mach_port_t t);
    int         pid = -1;
    kern_return_t err = KERN_SUCCESS;
    boolean_t   funnel_state;

    AUDIT_MACH_SYSCALL_ENTER(AUE_PIDFORTASK);
    AUDIT_ARG(mach_port1, t);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    t1 = port_name_to_task(t);

    if (t1 == TASK_NULL) {
        err = KERN_FAILURE;
        goto pftout;
    } else {
        p = get_bsdtask_info(t1);
        if (p) {
            pid = p->p_pid;
            err = KERN_SUCCESS;
        } else {
            err = KERN_FAILURE;
        }
    }
    task_deallocate(t1);
pftout:
    AUDIT_ARG(pid, pid);
    (void) copyout((char *) &pid, (char *) x, sizeof(*x));
    thread_funnel_set(kernel_flock, funnel_state);
    AUDIT_MACH_SYSCALL_EXIT(err);
    return (err);
}

/*
 * Routine:	task_for_pid
 * Purpose:
 *	Get the task port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.  (A hedged user-space usage sketch
 *	follows the function body.)
 */
kern_return_t
task_for_pid(target_tport, pid, t)
    mach_port_t target_tport;
    int         pid;
    mach_port_t *t;
{
    struct proc *p;
    struct proc *p1;
    task_t      t1;
    mach_port_t tret;
    extern task_t port_name_to_task(mach_port_t tp);
    void        *sright;
    int         error = 0;
    boolean_t   funnel_state;

    AUDIT_MACH_SYSCALL_ENTER(AUE_TASKFORPID);
    AUDIT_ARG(pid, pid);
    AUDIT_ARG(mach_port1, target_tport);

    t1 = port_name_to_task(target_tport);
    if (t1 == TASK_NULL) {
        (void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
        AUDIT_MACH_SYSCALL_EXIT(KERN_FAILURE);
        return (KERN_FAILURE);
    }

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

restart:
    p1 = get_bsdtask_info(t1);
    p = pfind(pid);
    AUDIT_ARG(process, p);
    if (
        (p != (struct proc *) 0)
        && (p1 != (struct proc *) 0)
        && (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
            ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
            || !(suser(p1->p_ucred, &p1->p_acflag)))
        && (p->p_stat != SZOMB)
        ) {
            if (p->task != TASK_NULL) {
                if (!task_reference_try(p->task)) {
                    mutex_pause(); /* temp loss of funnel */
                    goto restart;
                }
                sright = (void *)convert_task_to_port(p->task);
                tret = (void *)
                    ipc_port_copyout_send(sright,
                        get_task_ipcspace(current_task()));
            } else
                tret = MACH_PORT_NULL;
            AUDIT_ARG(mach_port2, tret);
            (void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
            task_deallocate(t1);
            error = KERN_SUCCESS;
            goto tfpout;
    }
    task_deallocate(t1);
    tret = MACH_PORT_NULL;
    (void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
    error = KERN_FAILURE;
tfpout:
    thread_funnel_set(kernel_flock, funnel_state);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return (error);
}
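
/*
 * Hedged usage sketch (not part of this file): from user space the
 * two traps above are typically reached through the interfaces
 * declared in <mach/mach.h>.  The function name and guard macro are
 * illustrative assumptions only.
 */
#ifdef VM_UNIX_SKETCH   /* illustration; never compiled in the kernel */
#include <mach/mach.h>
#include <stdio.h>

static void
task_for_pid_example(int pid)
{
    mach_port_t task;
    int         out_pid;

    /* Requires privilege or matching uids, per the checks above. */
    if (task_for_pid(mach_task_self(), pid, &task) != KERN_SUCCESS) {
        fprintf(stderr, "task_for_pid(%d) failed\n", pid);
        return;
    }
    /* Map the task port back to a pid; -1 is copied out on failure. */
    pid_for_task(task, &out_pid);
    printf("pid %d -> task 0x%x -> pid %d\n", pid, (unsigned)task, out_pid);
    mach_port_deallocate(mach_task_self(), task);
}
#endif /* VM_UNIX_SKETCH */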


struct load_shared_file_args {
    char            *filename;
    caddr_t         mfa;
    u_long          mfs;
    caddr_t         *ba;
    int             map_cnt;
    sf_mapping_t    *mappings;
    int             *flags;
};

int ws_disabled = 1;
1c79356b
A
361
362int
363load_shared_file(
364 struct proc *p,
365 struct load_shared_file_args *uap,
366 register *retval)
367{
368 caddr_t mapped_file_addr=uap->mfa;
369 u_long mapped_file_size=uap->mfs;
370 caddr_t *base_address=uap->ba;
371 int map_cnt=uap->map_cnt;
372 sf_mapping_t *mappings=uap->mappings;
373 char *filename=uap->filename;
374 int *flags=uap->flags;
375 struct vnode *vp = 0;
376 struct nameidata nd, *ndp;
377 char *filename_str;
378 register int error;
379 kern_return_t kr;
380
381 struct vattr vattr;
0b4e3aa0 382 memory_object_control_t file_control;
1c79356b
A
383 sf_mapping_t *map_list;
384 caddr_t local_base;
385 int local_flags;
386 int caller_flags;
387 int i;
9bccf70c 388 int default_regions = 0;
1c79356b
A
389 vm_size_t dummy;
390 kern_return_t kret;
391
392 shared_region_mapping_t shared_region;
393 struct shared_region_task_mappings task_mapping_info;
394 shared_region_mapping_t next;
395
396 ndp = &nd;
397
e5568f75 398 AUDIT_ARG(addr, base_address);
1c79356b
A
399 /* Retrieve the base address */
400 if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
401 goto lsf_bailout;
402 }
403 if (error = copyin(flags, &local_flags, sizeof (int))) {
404 goto lsf_bailout;
405 }
9bccf70c
A
406
407 if(local_flags & QUERY_IS_SYSTEM_REGION) {
55e303ae 408 shared_region_mapping_t default_shared_region;
9bccf70c 409 vm_get_shared_region(current_task(), &shared_region);
55e303ae
A
410 task_mapping_info.self = (vm_offset_t)shared_region;
411
412 shared_region_mapping_info(shared_region,
413 &(task_mapping_info.text_region),
414 &(task_mapping_info.text_size),
415 &(task_mapping_info.data_region),
416 &(task_mapping_info.data_size),
417 &(task_mapping_info.region_mappings),
418 &(task_mapping_info.client_base),
419 &(task_mapping_info.alternate_base),
420 &(task_mapping_info.alternate_next),
421 &(task_mapping_info.fs_base),
422 &(task_mapping_info.system),
423 &(task_mapping_info.flags), &next);
424
425 default_shared_region =
426 lookup_default_shared_region(
427 ENV_DEFAULT_ROOT,
428 task_mapping_info.system);
429 if (shared_region == default_shared_region) {
9bccf70c
A
430 local_flags = SYSTEM_REGION_BACKED;
431 } else {
432 local_flags = 0;
433 }
55e303ae 434 shared_region_mapping_dealloc(default_shared_region);
9bccf70c
A
435 error = 0;
436 error = copyout(&local_flags, flags, sizeof (int));
437 goto lsf_bailout;
438 }
1c79356b
A
439 caller_flags = local_flags;
440 kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
441 (vm_size_t)(MAXPATHLEN));
442 if (kret != KERN_SUCCESS) {
443 error = ENOMEM;
444 goto lsf_bailout;
445 }
446 kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
447 (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
448 if (kret != KERN_SUCCESS) {
449 kmem_free(kernel_map, (vm_offset_t)filename_str,
450 (vm_size_t)(MAXPATHLEN));
451 error = ENOMEM;
452 goto lsf_bailout;
453 }
454
455 if (error =
456 copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
457 goto lsf_bailout_free;
458 }
459
460 if (error = copyinstr(filename,
461 filename_str, MAXPATHLEN, (size_t *)&dummy)) {
462 goto lsf_bailout_free;
463 }
464
465 /*
466 * Get a vnode for the target file
467 */
e5568f75 468 NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_SYSSPACE,
1c79356b
A
469 filename_str, p);
470
471 if ((error = namei(ndp))) {
472 goto lsf_bailout_free;
473 }
474
475 vp = ndp->ni_vp;
476
477 if (vp->v_type != VREG) {
478 error = EINVAL;
479 goto lsf_bailout_free_vput;
480 }
481
482 UBCINFOCHECK("load_shared_file", vp);
483
484 if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
485 goto lsf_bailout_free_vput;
486 }
487
488
0b4e3aa0
A
489 file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
490 if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
1c79356b
A
491 error = EINVAL;
492 goto lsf_bailout_free_vput;
493 }
494
495#ifdef notdef
496 if(vattr.va_size != mapped_file_size) {
497 error = EINVAL;
498 goto lsf_bailout_free_vput;
499 }
500#endif
9bccf70c
A
501 if(p->p_flag & P_NOSHLIB) {
502 p->p_flag = p->p_flag & ~P_NOSHLIB;
503 }
504
505 /* load alternate regions if the caller has requested. */
506 /* Note: the new regions are "clean slates" */
507 if (local_flags & NEW_LOCAL_SHARED_REGIONS) {
55e303ae 508 error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
9bccf70c
A
509 if (error) {
510 goto lsf_bailout_free_vput;
511 }
9bccf70c 512 }
1c79356b 513
55e303ae 514 vm_get_shared_region(current_task(), &shared_region);
1c79356b
A
515 task_mapping_info.self = (vm_offset_t)shared_region;
516
517 shared_region_mapping_info(shared_region,
518 &(task_mapping_info.text_region),
519 &(task_mapping_info.text_size),
520 &(task_mapping_info.data_region),
521 &(task_mapping_info.data_size),
522 &(task_mapping_info.region_mappings),
523 &(task_mapping_info.client_base),
524 &(task_mapping_info.alternate_base),
525 &(task_mapping_info.alternate_next),
55e303ae
A
526 &(task_mapping_info.fs_base),
527 &(task_mapping_info.system),
528 &(task_mapping_info.flags), &next);
529
530 {
531 shared_region_mapping_t default_shared_region;
532 default_shared_region =
533 lookup_default_shared_region(
534 ENV_DEFAULT_ROOT,
535 task_mapping_info.system);
536 if(shared_region == default_shared_region) {
537 default_regions = 1;
538 }
539 shared_region_mapping_dealloc(default_shared_region);
540 }
541 /* If we are running on a removable file system we must not */
542 /* be in a set of shared regions or the file system will not */
543 /* be removable. */
544 if(((vp->v_mount != rootvnode->v_mount) && (default_regions))
545 && (lsf_mapping_pool_gauge() < 75)) {
546 /* We don't want to run out of shared memory */
547 /* map entries by starting too many private versions */
548 /* of the shared library structures */
549 int error;
550 if(p->p_flag & P_NOSHLIB) {
551 error = clone_system_shared_regions(FALSE, ENV_DEFAULT_ROOT);
552 } else {
553 error = clone_system_shared_regions(TRUE, ENV_DEFAULT_ROOT);
554 }
555 if (error) {
556 goto lsf_bailout_free_vput;
557 }
558 local_flags = local_flags & ~NEW_LOCAL_SHARED_REGIONS;
559 vm_get_shared_region(current_task(), &shared_region);
560 shared_region_mapping_info(shared_region,
561 &(task_mapping_info.text_region),
562 &(task_mapping_info.text_size),
563 &(task_mapping_info.data_region),
564 &(task_mapping_info.data_size),
565 &(task_mapping_info.region_mappings),
566 &(task_mapping_info.client_base),
567 &(task_mapping_info.alternate_base),
568 &(task_mapping_info.alternate_next),
569 &(task_mapping_info.fs_base),
570 &(task_mapping_info.system),
d7e50217 571 &(task_mapping_info.flags), &next);
55e303ae 572 }
d7e50217 573
1c79356b
A
574 /* This is a work-around to allow executables which have been */
575 /* built without knowledge of the proper shared segment to */
576 /* load. This code has been architected as a shared region */
577 /* handler, the knowledge of where the regions are loaded is */
578 /* problematic for the extension of shared regions as it will */
579 /* not be easy to know what region an item should go into. */
580 /* The code below however will get around a short term problem */
581 /* with executables which believe they are loading at zero. */
582
583 {
584 if (((unsigned int)local_base &
585 (~(task_mapping_info.text_size - 1))) !=
586 task_mapping_info.client_base) {
587 if(local_flags & ALTERNATE_LOAD_SITE) {
588 local_base = (caddr_t)(
589 (unsigned int)local_base &
590 (task_mapping_info.text_size - 1));
591 local_base = (caddr_t)((unsigned int)local_base
592 | task_mapping_info.client_base);
593 } else {
594 error = EINVAL;
595 goto lsf_bailout_free_vput;
596 }
597 }
598 }
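
    /*
     * Hedged worked example (illustrative values only, not from the
     * original source): with a typical split-library layout of
     * text_size = 0x10000000 and client_base = 0x90000000, an
     * executable that believes it loads at local_base = 0x00001000
     * fails the check above (0x00001000 & ~0x0fffffff == 0x00000000,
     * which is != 0x90000000) and, under ALTERNATE_LOAD_SITE, is
     * rebased to (0x00001000 & 0x0fffffff) | 0x90000000 == 0x90001000.
     */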


    if ((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
            mapped_file_size,
            (vm_offset_t *)&local_base,
            map_cnt, map_list, file_control,
            &task_mapping_info, &local_flags))) {
        switch (kr) {
            case KERN_FAILURE:
                error = EINVAL;
                break;
            case KERN_INVALID_ARGUMENT:
                error = EINVAL;
                break;
            case KERN_INVALID_ADDRESS:
                error = EACCES;
                break;
            case KERN_PROTECTION_FAILURE:
                /* save EAUTH for authentication in this */
                /* routine */
                error = EPERM;
                break;
            case KERN_NO_SPACE:
                error = ENOMEM;
                break;
            default:
                error = EINVAL;
        };
        if ((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
            printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
            for (i = 0; i < map_cnt; i++) {
                printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
                    , i, map_list[i].mapping_offset,
                    map_list[i].size,
                    map_list[i].file_offset,
                    map_list[i].protection);
            }
        }
    } else {
        if (default_regions)
            local_flags |= SYSTEM_REGION_BACKED;
        if (!(error = copyout(&local_flags, flags, sizeof(int)))) {
            error = copyout(&local_base,
                base_address, sizeof(caddr_t));
        }
    }

lsf_bailout_free_vput:
    vput(vp);

lsf_bailout_free:
    kmem_free(kernel_map, (vm_offset_t)filename_str,
        (vm_size_t)(MAXPATHLEN));
    kmem_free(kernel_map, (vm_offset_t)map_list,
        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
    return error;
}

struct reset_shared_file_args {
    caddr_t         *ba;
    int             map_cnt;
    sf_mapping_t    *mappings;
};

int
reset_shared_file(
    struct proc     *p,
    struct reset_shared_file_args *uap,
    register        *retval)
{
    caddr_t         *base_address = uap->ba;
    int             map_cnt = uap->map_cnt;
    sf_mapping_t    *mappings = uap->mappings;
    register int    error;
    kern_return_t   kr;

    sf_mapping_t    *map_list;
    caddr_t         local_base;
    vm_offset_t     map_address;
    int             i;
    kern_return_t   kret;

    AUDIT_ARG(addr, base_address);
    /* Retrieve the base address */
    if (error = copyin(base_address, &local_base, sizeof(caddr_t))) {
        goto rsf_bailout;
    }

    if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
            != GLOBAL_SHARED_TEXT_SEGMENT) {
        error = EINVAL;
        goto rsf_bailout;
    }

    kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
    if (kret != KERN_SUCCESS) {
        error = ENOMEM;
        goto rsf_bailout;
    }

    if (error =
        copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

        kmem_free(kernel_map, (vm_offset_t)map_list,
            (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
        goto rsf_bailout;
    }
    for (i = 0; i < map_cnt; i++) {
        if ((map_list[i].mapping_offset
                & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
            map_address = (vm_offset_t)
                (local_base + map_list[i].mapping_offset);
            vm_deallocate(current_map(),
                map_address,
                map_list[i].size);
            vm_map(current_map(), &map_address,
                map_list[i].size, 0, SHARED_LIB_ALIAS,
                shared_data_region_handle,
                ((unsigned int)local_base
                    & SHARED_DATA_REGION_MASK) +
                    (map_list[i].mapping_offset
                    & SHARED_DATA_REGION_MASK),
                TRUE, VM_PROT_READ,
                VM_PROT_READ, VM_INHERIT_SHARE);
        }
    }

    kmem_free(kernel_map, (vm_offset_t)map_list,
        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
    return error;
}

struct new_system_shared_regions_args {
    int dummy;
};

int
new_system_shared_regions(
    struct proc     *p,
    struct new_system_shared_regions_args *uap,
    register        *retval)
{
    shared_region_mapping_t regions;
    shared_region_mapping_t new_regions;

    if (!(is_suser())) {
        *retval = EINVAL;
        return EINVAL;
    }

    /* clear all of our existing defaults */
    remove_all_shared_regions();

    *retval = 0;
    return 0;
}



int
clone_system_shared_regions(shared_regions_active, base_vnode)
{
    shared_region_mapping_t new_shared_region;
    shared_region_mapping_t next;
    shared_region_mapping_t old_shared_region;
    struct shared_region_task_mappings old_info;
    struct shared_region_task_mappings new_info;

    struct proc *p;

    vm_get_shared_region(current_task(), &old_shared_region);
    old_info.self = (vm_offset_t)old_shared_region;
    shared_region_mapping_info(old_shared_region,
        &(old_info.text_region),
        &(old_info.text_size),
        &(old_info.data_region),
        &(old_info.data_size),
        &(old_info.region_mappings),
        &(old_info.client_base),
        &(old_info.alternate_base),
        &(old_info.alternate_next),
        &(old_info.fs_base),
        &(old_info.system),
        &(old_info.flags), &next);
    if ((shared_regions_active) ||
        (base_vnode == ENV_DEFAULT_ROOT)) {
        if (shared_file_create_system_region(&new_shared_region))
            return (ENOMEM);
    } else {
        new_shared_region =
            lookup_default_shared_region(
                base_vnode, old_info.system);
        if (new_shared_region == NULL) {
            shared_file_boot_time_init(
                base_vnode, old_info.system);
            vm_get_shared_region(current_task(), &new_shared_region);
        } else {
            vm_set_shared_region(current_task(), new_shared_region);
        }
        if (old_shared_region)
            shared_region_mapping_dealloc(old_shared_region);
    }
    new_info.self = (vm_offset_t)new_shared_region;
    shared_region_mapping_info(new_shared_region,
        &(new_info.text_region),
        &(new_info.text_size),
        &(new_info.data_region),
        &(new_info.data_size),
        &(new_info.region_mappings),
        &(new_info.client_base),
        &(new_info.alternate_base),
        &(new_info.alternate_next),
        &(new_info.fs_base),
        &(new_info.system),
        &(new_info.flags), &next);
    if (shared_regions_active) {
        if (vm_region_clone(old_info.text_region, new_info.text_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 1");
            shared_region_mapping_dealloc(new_shared_region);
            return (EINVAL);
        }
        if (vm_region_clone(old_info.data_region, new_info.data_region)) {
            panic("clone_system_shared_regions: shared region mis-alignment 2");
            shared_region_mapping_dealloc(new_shared_region);
            return (EINVAL);
        }
        shared_region_object_chain_attach(
            new_shared_region, old_shared_region);
    }
    if (vm_map_region_replace(current_map(), old_info.text_region,
            new_info.text_region, old_info.client_base,
            old_info.client_base+old_info.text_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 3");
        shared_region_mapping_dealloc(new_shared_region);
        return (EINVAL);
    }
    if (vm_map_region_replace(current_map(), old_info.data_region,
            new_info.data_region,
            old_info.client_base + old_info.text_size,
            old_info.client_base
                + old_info.text_size + old_info.data_size)) {
        panic("clone_system_shared_regions: shared region mis-alignment 4");
        shared_region_mapping_dealloc(new_shared_region);
        return (EINVAL);
    }
    vm_set_shared_region(current_task(), new_shared_region);

    /* consume the reference which wasn't accounted for in object */
    /* chain attach */
    if (!shared_regions_active)
        shared_region_mapping_dealloc(old_shared_region);

    return (0);

}

extern vm_map_t bsd_pageable_map;

/* header for the profile name file.  The profiled app info is held */
/* in the data file and pointed to by elements in the name file     */

struct profile_names_header {
    unsigned int    number_of_profiles;
    unsigned int    user_id;
    unsigned int    version;
    off_t           element_array;
    unsigned int    spare1;
    unsigned int    spare2;
    unsigned int    spare3;
};

struct profile_element {
    off_t           addr;
    vm_size_t       size;
    unsigned int    mod_date;
    unsigned int    inode;
    char            name[12];
};
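
/*
 * Hedged sketch (not part of the original source): given the layout
 * above, element i of a names file sits at the byte offset computed
 * below.  The search code later in this file additionally requires
 * element_array == sizeof(struct profile_names_header).  The helper
 * name and guard macro are illustrative assumptions.
 */
#ifdef VM_UNIX_SKETCH   /* illustration; never compiled here */
static off_t
profile_element_offset(struct profile_names_header *hdr, unsigned int i)
{
    /* the header is followed immediately by the element array */
    return hdr->element_array +
        (off_t)(i * sizeof(struct profile_element));
}
#endif /* VM_UNIX_SKETCH */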

struct global_profile {
    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     buf_ptr;
    unsigned int    user;
    unsigned int    age;
    unsigned int    busy;
};

struct global_profile_cache {
    int                     max_ele;
    unsigned int            age;
    struct global_profile   profiles[3];
};

struct global_profile_cache global_user_profile_cache =
    {3, 0, NULL, NULL, NULL, 0, 0, 0,
        NULL, NULL, NULL, 0, 0, 0,
        NULL, NULL, NULL, 0, 0, 0 };

/* BSD_OPEN_PAGE_CACHE_FILES:                                  */
/* Caller provides a user id.  This id was used in             */
/* prepare_profile_database to create two unique absolute      */
/* file paths to the associated profile files.  These files    */
/* are either opened or bsd_open_page_cache_files returns an   */
/* error.  The header of the names file is then consulted.     */
/* The header and the vnodes for the names and data files are  */
/* returned.                                                   */

int
bsd_open_page_cache_files(
    unsigned int    user,
    struct global_profile **profile)
{
    char            *cache_path = "/var/vm/app_profile/";
    struct proc     *p;
    int             error;
    int             resid;
    off_t           resid_off;
    unsigned int    lru;
    vm_size_t       size;

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     names_buf;
    vm_offset_t     buf_ptr;

    int             profile_names_length;
    int             profile_data_length;
    char            *profile_data_string;
    char            *profile_names_string;
    char            *substring;

    struct vattr    vattr;

    struct profile_names_header *profile_header;
    kern_return_t   ret;

    struct nameidata nd_names;
    struct nameidata nd_data;

    int             i;


    p = current_proc();

restart:
    for (i = 0; i < global_user_profile_cache.max_ele; i++) {
        if ((global_user_profile_cache.profiles[i].user == user)
            && (global_user_profile_cache.profiles[i].data_vp
            != NULL)) {
            *profile = &global_user_profile_cache.profiles[i];
            /* already in cache, we're done */
            if ((*profile)->busy) {
                /*
                 * drop funnel and wait
                 */
                (void)tsleep((void *)
                    *profile,
                    PRIBIO, "app_profile", 0);
                goto restart;
            }
            (*profile)->busy = 1;
            (*profile)->age = global_user_profile_cache.age;
            global_user_profile_cache.age += 1;
            return 0;
        }
    }

    lru = global_user_profile_cache.age;
    *profile = NULL;
    for (i = 0; i < global_user_profile_cache.max_ele; i++) {
        /* Skip entry if it is in the process of being reused */
        if (global_user_profile_cache.profiles[i].data_vp ==
                        (struct vnode *)0xFFFFFFFF)
            continue;
        /* Otherwise grab the first empty entry */
        if (global_user_profile_cache.profiles[i].data_vp == NULL) {
            *profile = &global_user_profile_cache.profiles[i];
            (*profile)->age = global_user_profile_cache.age;
            break;
        }
        /* Otherwise grab the oldest entry */
        if (global_user_profile_cache.profiles[i].age < lru) {
            lru = global_user_profile_cache.profiles[i].age;
            *profile = &global_user_profile_cache.profiles[i];
        }
    }

    /* Did we set it? */
    if (*profile == NULL) {
        /*
         * No entries are available; this can only happen if all
         * of them are currently in the process of being reused;
         * if this happens, we sleep on the address of the first
         * element, and restart.  This is less than ideal, but we
         * know it will work because we know that there will be a
         * wakeup on any entry currently in the process of being
         * reused.
         *
         * XXX Recommend a two handed clock and more than 3 total
         * XXX cache entries at some point in the future.
         */
        /*
         * drop funnel and wait
         */
        (void)tsleep((void *)
            &global_user_profile_cache.profiles[0],
            PRIBIO, "app_profile", 0);
        goto restart;
    }

    /*
     * If it's currently busy, we've picked the one at the end of the
     * LRU list, but it's currently being actively used.  We sleep on
     * its address and restart.
     */
    if ((*profile)->busy) {
        /*
         * drop funnel and wait
         */
        (void)tsleep((void *)
            *profile,
            PRIBIO, "app_profile", 0);
        goto restart;
    }
    (*profile)->busy = 1;
    (*profile)->user = user;

    /*
     * put dummy value in for now to get competing request to wait
     * above until we are finished
     *
     * Save the data_vp before setting it, so we can set it before
     * we kmem_free() or vrele().  If we don't do this, then we
     * have a potential funnel race condition we have to deal with.
     */
    data_vp = (*profile)->data_vp;
    (*profile)->data_vp = (struct vnode *)0xFFFFFFFF;

    /*
     * Age the cache here in all cases; this guarantees that we won't
     * be reusing only one entry over and over, once the system reaches
     * steady-state.
     */
    global_user_profile_cache.age += 1;

    if (data_vp != NULL) {
        kmem_free(kernel_map,
                (*profile)->buf_ptr, 4 * PAGE_SIZE);
        if ((*profile)->names_vp) {
            vrele((*profile)->names_vp);
            (*profile)->names_vp = NULL;
        }
        vrele(data_vp);
    }

    /* Try to open the appropriate users profile files */
    /* If neither file is present, try to create them  */
    /* If one file is present and the other not, fail. */
    /* If the files do exist, check them for the app_file */
    /* requested and read it in if present */

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&profile_data_string, PATH_MAX);

    if (ret) {
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        wakeup(*profile);
        return ENOMEM;
    }

    /* Split the buffer in half since we know the size of */
    /* our file path and our allocation is adequate for   */
    /* both file path names                               */
    profile_names_string = profile_data_string + (PATH_MAX/2);


    strcpy(profile_data_string, cache_path);
    strcpy(profile_names_string, cache_path);
    profile_names_length = profile_data_length
            = strlen(profile_data_string);
    substring = profile_data_string + profile_data_length;
    sprintf(substring, "%x_data", user);
    substring = profile_names_string + profile_names_length;
    sprintf(substring, "%x_names", user);

    /* We now have the absolute file names */

    ret = kmem_alloc(kernel_map,
            (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
    if (ret) {
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        wakeup(*profile);
        return ENOMEM;
    }

    NDINIT(&nd_names, LOOKUP, FOLLOW | LOCKLEAF,
            UIO_SYSSPACE, profile_names_string, p);
    NDINIT(&nd_data, LOOKUP, FOLLOW | LOCKLEAF,
            UIO_SYSSPACE, profile_data_string, p);
    if (error = vn_open(&nd_data, FREAD | FWRITE, 0)) {
#ifdef notdef
        printf("bsd_open_page_cache_files: CacheData file not found %s\n",
            profile_data_string);
#endif
        kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        wakeup(*profile);
        return error;
    }

    data_vp = nd_data.ni_vp;
    VOP_UNLOCK(data_vp, 0, p);

    if (error = vn_open(&nd_names, FREAD | FWRITE, 0)) {
        printf("bsd_open_page_cache_files: NamesData file not found %s\n",
            profile_data_string);
        kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        vrele(data_vp);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        wakeup(*profile);
        return error;
    }
    names_vp = nd_names.ni_vp;

    if (error = VOP_GETATTR(names_vp, &vattr, p->p_ucred, p)) {
        printf("bsd_open_page_cache_files: Can't stat name file %s\n", profile_names_string);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        vput(names_vp);
        vrele(data_vp);
        (*profile)->data_vp = NULL;
        (*profile)->busy = 0;
        wakeup(*profile);
        return error;
    }

    size = vattr.va_size;
    if (size > 4 * PAGE_SIZE)
        size = 4 * PAGE_SIZE;
    buf_ptr = names_buf;
    resid_off = 0;

    while (size) {
        error = vn_rdwr(UIO_READ, names_vp, (caddr_t)buf_ptr,
            size, resid_off,
            UIO_SYSSPACE, IO_NODELOCKED, p->p_ucred, &resid, p);
        if ((error) || (size == resid)) {
            if (!error) {
                error = EINVAL;
            }
            kmem_free(kernel_map,
                (vm_offset_t)profile_data_string, PATH_MAX);
            kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
            vput(names_vp);
            vrele(data_vp);
            (*profile)->data_vp = NULL;
            (*profile)->busy = 0;
            wakeup(*profile);
            return error;
        }
        buf_ptr += size-resid;
        resid_off += size-resid;
        size = resid;
    }

    VOP_UNLOCK(names_vp, 0, p);
    kmem_free(kernel_map, (vm_offset_t)profile_data_string, PATH_MAX);
    (*profile)->names_vp = names_vp;
    (*profile)->data_vp = data_vp;
    (*profile)->buf_ptr = names_buf;
    return 0;

}

void
bsd_close_page_cache_files(
    struct global_profile *profile)
{
    profile->busy = 0;
    wakeup(profile);
}
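
/*
 * Hedged usage sketch (not part of this file): callers pair the two
 * routines above around any use of the cached vnodes, since the busy
 * flag is what keeps a cache slot from being reclaimed.  The function
 * name, uid variable, and guard macro are illustrative assumptions.
 */
#ifdef VM_UNIX_SKETCH   /* illustration; never compiled here */
static void
page_cache_example(unsigned int uid)
{
    struct global_profile *gp;

    if (bsd_open_page_cache_files(uid, &gp) != 0)
        return;                     /* slot is marked busy on success */
    /* ... use gp->names_vp, gp->data_vp, gp->buf_ptr ... */
    bsd_close_page_cache_files(gp); /* clears busy and wakes waiters */
}
#endif /* VM_UNIX_SKETCH */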

int
bsd_read_page_cache_file(
    unsigned int    user,
    int             *fid,
    int             *mod,
    char            *app_name,
    struct vnode    *app_vp,
    vm_offset_t     *buffer,
    vm_offset_t     *buf_size)
{

    boolean_t       funnel_state;

    struct proc     *p;
    int             error;
    int             resid;
    vm_size_t       size;

    off_t           profile;
    unsigned int    profile_size;

    vm_offset_t     names_buf;
    struct vattr    vattr;

    kern_return_t   ret;

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    struct vnode    *vp1;
    struct vnode    *vp2;

    struct global_profile *uid_files;

    funnel_state = thread_funnel_set(kernel_flock, TRUE);

    /* Try to open the appropriate users profile files */
    /* If neither file is present, try to create them  */
    /* If one file is present and the other not, fail. */
    /* If the files do exist, check them for the app_file */
    /* requested and read it in if present */


    error = bsd_open_page_cache_files(user, &uid_files);
    if (error) {
        thread_funnel_set(kernel_flock, funnel_state);
        return EINVAL;
    }

    p = current_proc();

    names_vp = uid_files->names_vp;
    data_vp = uid_files->data_vp;
    names_buf = uid_files->buf_ptr;


    /*
     * Get locks on both files, get the vnode with the lowest address first
     */

    if ((unsigned int)names_vp < (unsigned int)data_vp) {
        vp1 = names_vp;
        vp2 = data_vp;
    } else {
        vp1 = data_vp;
        vp2 = names_vp;
    }
    error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        printf("bsd_read_page_cache_file: Can't lock profile names %x\n", user);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }
    error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        printf("bsd_read_page_cache_file: Can't lock profile data %x\n", user);
        VOP_UNLOCK(vp1, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }

    if (error = VOP_GETATTR(app_vp, &vattr, p->p_ucred, p)) {
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        printf("bsd_read_cache_file: Can't stat app file %s\n", app_name);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }

    *fid = vattr.va_fileid;
    *mod = vattr.va_mtime.tv_sec;


    if (bsd_search_page_cache_data_base(names_vp, names_buf, app_name,
            (unsigned int) vattr.va_mtime.tv_sec,
            vattr.va_fileid, &profile, &profile_size) == 0) {
        /* profile is an offset in the profile data base */
        /* It is zero if no profile data was found */

        if (profile_size == 0) {
            *buffer = NULL;
            *buf_size = 0;
            VOP_UNLOCK(names_vp, 0, p);
            VOP_UNLOCK(data_vp, 0, p);
            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);
            return 0;
        }
        ret = (vm_offset_t)(kmem_alloc(kernel_map, buffer, profile_size));
        if (ret) {
            VOP_UNLOCK(names_vp, 0, p);
            VOP_UNLOCK(data_vp, 0, p);
            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);
            return ENOMEM;
        }
        *buf_size = profile_size;
        while (profile_size) {
            error = vn_rdwr(UIO_READ, data_vp,
                (caddr_t) *buffer, profile_size,
                profile, UIO_SYSSPACE, IO_NODELOCKED,
                p->p_ucred, &resid, p);
            if ((error) || (profile_size == resid)) {
                VOP_UNLOCK(names_vp, 0, p);
                VOP_UNLOCK(data_vp, 0, p);
                bsd_close_page_cache_files(uid_files);
                kmem_free(kernel_map, (vm_offset_t)*buffer, profile_size);
                thread_funnel_set(kernel_flock, funnel_state);
                return EINVAL;
            }
            profile += profile_size - resid;
            profile_size = resid;
        }
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return 0;
    } else {
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return EINVAL;
    }

}

int
bsd_search_page_cache_data_base(
    struct vnode                    *vp,
    struct profile_names_header     *database,
    char                            *app_name,
    unsigned int                    mod_date,
    unsigned int                    inode,
    off_t                           *profile,
    unsigned int                    *profile_size)
{

    struct proc             *p;

    unsigned int            i;
    struct profile_element  *element;
    unsigned int            ele_total;
    unsigned int            extended_list = 0;
    off_t                   file_off = 0;
    unsigned int            size;
    off_t                   resid_off;
    int                     resid;
    vm_offset_t             local_buf = NULL;

    int                     error;
    kern_return_t           ret;

    p = current_proc();

    if (((vm_offset_t)database->element_array) !=
                sizeof(struct profile_names_header)) {
        return EINVAL;
    }
    element = (struct profile_element *)(
            (vm_offset_t)database->element_array +
                        (vm_offset_t)database);

    ele_total = database->number_of_profiles;

    *profile = 0;
    *profile_size = 0;
    while (ele_total) {
        /* note: code assumes header + n*ele comes out on a page boundary */
        if (((local_buf == 0) && (sizeof(struct profile_names_header) +
            (ele_total * sizeof(struct profile_element)))
                    > (PAGE_SIZE * 4)) ||
            ((local_buf != 0) &&
                (ele_total * sizeof(struct profile_element))
                    > (PAGE_SIZE * 4))) {
            extended_list = ele_total;
            if (element == (struct profile_element *)
                ((vm_offset_t)database->element_array +
                        (vm_offset_t)database)) {
                ele_total = ((PAGE_SIZE * 4)/sizeof(struct profile_element)) - 1;
            } else {
                ele_total = (PAGE_SIZE * 4)/sizeof(struct profile_element);
            }
            extended_list -= ele_total;
        }
        for (i = 0; i < ele_total; i++) {
            if ((mod_date == element[i].mod_date)
                    && (inode == element[i].inode)) {
                if (strncmp(element[i].name, app_name, 12) == 0) {
                    *profile = element[i].addr;
                    *profile_size = element[i].size;
                    if (local_buf != NULL) {
                        kmem_free(kernel_map,
                            (vm_offset_t)local_buf, 4 * PAGE_SIZE);
                    }
                    return 0;
                }
            }
        }
        if (extended_list == 0)
            break;
        if (local_buf == NULL) {
            ret = kmem_alloc(kernel_map,
                (vm_offset_t *)&local_buf, 4 * PAGE_SIZE);
            if (ret != KERN_SUCCESS) {
                return ENOMEM;
            }
        }
        element = (struct profile_element *)local_buf;
        ele_total = extended_list;
        extended_list = 0;
        file_off += 4 * PAGE_SIZE;
        if ((ele_total * sizeof(struct profile_element)) >
                            (PAGE_SIZE * 4)) {
            size = PAGE_SIZE * 4;
        } else {
            size = ele_total * sizeof(struct profile_element);
        }
        resid_off = 0;
        while (size) {
            error = vn_rdwr(UIO_READ, vp,
                CAST_DOWN(caddr_t, (local_buf + resid_off)),
                size, file_off + resid_off, UIO_SYSSPACE,
                IO_NODELOCKED, p->p_ucred, &resid, p);
            if ((error) || (size == resid)) {
                if (local_buf != NULL) {
                    kmem_free(kernel_map,
                        (vm_offset_t)local_buf,
                        4 * PAGE_SIZE);
                }
                return EINVAL;
            }
            resid_off += size-resid;
            size = resid;
        }
    }
    if (local_buf != NULL) {
        kmem_free(kernel_map,
            (vm_offset_t)local_buf, 4 * PAGE_SIZE);
    }
    return 0;
}

int
bsd_write_page_cache_file(
    unsigned int    user,
    char            *file_name,
    caddr_t         buffer,
    vm_size_t       size,
    int             mod,
    int             fid)
{
    struct proc         *p;
    struct nameidata    nd;
    struct vnode        *vp = 0;
    int                 resid;
    off_t               resid_off;
    int                 error;
    boolean_t           funnel_state;
    struct vattr        vattr;
    struct vattr        data_vattr;

    off_t           profile;
    unsigned int    profile_size;

    vm_offset_t     names_buf;
    struct vnode    *names_vp;
    struct vnode    *data_vp;
    struct vnode    *vp1;
    struct vnode    *vp2;

    struct profile_names_header *profile_header;
    off_t           name_offset;

    struct global_profile *uid_files;


    funnel_state = thread_funnel_set(kernel_flock, TRUE);



    error = bsd_open_page_cache_files(user, &uid_files);
    if (error) {
        thread_funnel_set(kernel_flock, funnel_state);
        return EINVAL;
    }

    p = current_proc();

    names_vp = uid_files->names_vp;
    data_vp = uid_files->data_vp;
    names_buf = uid_files->buf_ptr;

    /*
     * Get locks on both files, get the vnode with the lowest address first
     */

    if ((unsigned int)names_vp < (unsigned int)data_vp) {
        vp1 = names_vp;
        vp2 = data_vp;
    } else {
        vp1 = data_vp;
        vp2 = names_vp;
    }

    error = vn_lock(vp1, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        printf("bsd_write_page_cache_file: Can't lock profile names %x\n", user);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }
    error = vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        printf("bsd_write_page_cache_file: Can't lock profile data %x\n", user);
        VOP_UNLOCK(vp1, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }

    /* Stat data file for size */

    if (error = VOP_GETATTR(data_vp, &data_vattr, p->p_ucred, p)) {
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        printf("bsd_write_page_cache_file: Can't stat profile data %s\n", file_name);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return error;
    }

    if (bsd_search_page_cache_data_base(names_vp,
            (struct profile_names_header *)names_buf,
            file_name, (unsigned int) mod,
            fid, &profile, &profile_size) == 0) {
        /* profile is an offset in the profile data base */
        /* It is zero if no profile data was found */

        if (profile_size == 0) {
            unsigned int    header_size;
            vm_offset_t     buf_ptr;

            /* Our Write case */

            /* read header for last entry */
            profile_header =
                (struct profile_names_header *)names_buf;
            name_offset = sizeof(struct profile_names_header) +
                (sizeof(struct profile_element)
                    * profile_header->number_of_profiles);
            profile_header->number_of_profiles += 1;

            if (name_offset < PAGE_SIZE * 4) {
                struct profile_element *name;
                /* write new entry */
                name = (struct profile_element *)
                    (names_buf + (vm_offset_t)name_offset);
                name->addr = data_vattr.va_size;
                name->size = size;
                name->mod_date = mod;
                name->inode = fid;
                strncpy(name->name, file_name, 12);
            } else {
                unsigned int    ele_size;
                struct profile_element name;
                /* write new entry */
                name.addr = data_vattr.va_size;
                name.size = size;
                name.mod_date = mod;
                name.inode = fid;
                strncpy(name.name, file_name, 12);
                /* write element out separately */
                ele_size = sizeof(struct profile_element);
                buf_ptr = (vm_offset_t)&name;
                resid_off = name_offset;

                while (ele_size) {
                    error = vn_rdwr(UIO_WRITE, names_vp,
                            (caddr_t)buf_ptr,
                            ele_size, resid_off,
                            UIO_SYSSPACE, IO_NODELOCKED,
                            p->p_ucred, &resid, p);
                    if (error) {
                        printf("bsd_write_page_cache_file: Can't write name_element %x\n", user);
                        VOP_UNLOCK(names_vp, 0, p);
                        VOP_UNLOCK(data_vp, 0, p);
                        bsd_close_page_cache_files(
                            uid_files);
                        thread_funnel_set(
                            kernel_flock,
                            funnel_state);
                        return error;
                    }
                    buf_ptr += (vm_offset_t)
                            ele_size-resid;
                    resid_off += ele_size-resid;
                    ele_size = resid;
                }
            }

            if (name_offset < PAGE_SIZE * 4) {
                header_size = name_offset +
                    sizeof(struct profile_element);

            } else {
                header_size =
                    sizeof(struct profile_names_header);
            }
            buf_ptr = (vm_offset_t)profile_header;
            resid_off = 0;

            /* write names file header */
            while (header_size) {
                error = vn_rdwr(UIO_WRITE, names_vp,
                        (caddr_t)buf_ptr,
                        header_size, resid_off,
                        UIO_SYSSPACE, IO_NODELOCKED,
                        p->p_ucred, &resid, p);
                if (error) {
                    VOP_UNLOCK(names_vp, 0, p);
                    VOP_UNLOCK(data_vp, 0, p);
                    printf("bsd_write_page_cache_file: Can't write header %x\n", user);
                    bsd_close_page_cache_files(
                        uid_files);
                    thread_funnel_set(
                        kernel_flock, funnel_state);
                    return error;
                }
                buf_ptr += (vm_offset_t)header_size-resid;
                resid_off += header_size-resid;
                header_size = resid;
            }
            /* write profile to data file */
            resid_off = data_vattr.va_size;
            while (size) {
                error = vn_rdwr(UIO_WRITE, data_vp,
                        (caddr_t)buffer, size, resid_off,
                        UIO_SYSSPACE, IO_NODELOCKED,
                        p->p_ucred, &resid, p);
                if (error) {
                    VOP_UNLOCK(names_vp, 0, p);
                    VOP_UNLOCK(data_vp, 0, p);
                    printf("bsd_write_page_cache_file: Can't write data %x\n", user);
                    bsd_close_page_cache_files(
                        uid_files);
                    thread_funnel_set(
                        kernel_flock, funnel_state);
                    return error;
                }
                buffer += size-resid;
                resid_off += size-resid;
                size = resid;
            }
            VOP_UNLOCK(names_vp, 0, p);
            VOP_UNLOCK(data_vp, 0, p);
            bsd_close_page_cache_files(uid_files);
            thread_funnel_set(kernel_flock, funnel_state);
            return 0;
        }
        /* Someone else wrote a twin profile before us */
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return 0;
    } else {
        VOP_UNLOCK(names_vp, 0, p);
        VOP_UNLOCK(data_vp, 0, p);
        bsd_close_page_cache_files(uid_files);
        thread_funnel_set(kernel_flock, funnel_state);
        return EINVAL;
    }

}

int
prepare_profile_database(int user)
{
    char            *cache_path = "/var/vm/app_profile/";
    struct proc     *p;
    int             error;
    int             resid;
    off_t           resid_off;
    unsigned int    lru;
    vm_size_t       size;

    struct vnode    *names_vp;
    struct vnode    *data_vp;
    vm_offset_t     names_buf;
    vm_offset_t     buf_ptr;

    int             profile_names_length;
    int             profile_data_length;
    char            *profile_data_string;
    char            *profile_names_string;
    char            *substring;

    struct vattr    vattr;

    struct profile_names_header *profile_header;
    kern_return_t   ret;

    struct nameidata nd_names;
    struct nameidata nd_data;

    int             i;

    p = current_proc();

    ret = kmem_alloc(kernel_map,
        (vm_offset_t *)&profile_data_string, PATH_MAX);

    if (ret) {
        return ENOMEM;
    }

    /* Split the buffer in half since we know the size of */
    /* our file path and our allocation is adequate for   */
    /* both file path names                               */
    profile_names_string = profile_data_string + (PATH_MAX/2);


    strcpy(profile_data_string, cache_path);
    strcpy(profile_names_string, cache_path);
    profile_names_length = profile_data_length
            = strlen(profile_data_string);
    substring = profile_data_string + profile_data_length;
    sprintf(substring, "%x_data", user);
    substring = profile_names_string + profile_names_length;
    sprintf(substring, "%x_names", user);

    /* We now have the absolute file names */

    ret = kmem_alloc(kernel_map,
            (vm_offset_t *)&names_buf, 4 * PAGE_SIZE);
    if (ret) {
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        return ENOMEM;
    }

    NDINIT(&nd_names, LOOKUP, FOLLOW,
            UIO_SYSSPACE, profile_names_string, p);
    NDINIT(&nd_data, LOOKUP, FOLLOW,
            UIO_SYSSPACE, profile_data_string, p);

    if (error = vn_open(&nd_data,
            O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
        /* O_EXCL failed: the files already exist, so there is */
        /* nothing to prepare; this is the common path. */
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        return 0;
    }

    data_vp = nd_data.ni_vp;
    VOP_UNLOCK(data_vp, 0, p);

    if (error = vn_open(&nd_names,
            O_CREAT | O_EXCL | FWRITE, S_IRUSR|S_IWUSR)) {
        printf("prepare_profile_database: Can't create CacheNames %s\n",
            profile_data_string);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        vrele(data_vp);
        return error;
    }

    names_vp = nd_names.ni_vp;


    /* Write Header for new names file */

    profile_header = (struct profile_names_header *)names_buf;

    profile_header->number_of_profiles = 0;
    profile_header->user_id = user;
    profile_header->version = 1;
    profile_header->element_array =
                sizeof(struct profile_names_header);
    profile_header->spare1 = 0;
    profile_header->spare2 = 0;
    profile_header->spare3 = 0;

    size = sizeof(struct profile_names_header);
    buf_ptr = (vm_offset_t)profile_header;
    resid_off = 0;

    while (size) {
        error = vn_rdwr(UIO_WRITE, names_vp,
                (caddr_t)buf_ptr, size, resid_off,
                UIO_SYSSPACE, IO_NODELOCKED,
                p->p_ucred, &resid, p);
        if (error) {
            printf("prepare_profile_database: Can't write header %s\n", profile_names_string);
            kmem_free(kernel_map,
                (vm_offset_t)names_buf, 4 * PAGE_SIZE);
            kmem_free(kernel_map,
                (vm_offset_t)profile_data_string,
                PATH_MAX);
            vput(names_vp);
            vrele(data_vp);
            return error;
        }
        buf_ptr += size-resid;
        resid_off += size-resid;
        size = resid;
    }

    VATTR_NULL(&vattr);
    vattr.va_uid = user;
    error = VOP_SETATTR(names_vp, &vattr, p->p_cred->pc_ucred, p);
    if (error) {
        printf("prepare_profile_database: "
            "Can't set user %s\n", profile_names_string);
    }
    vput(names_vp);

    error = vn_lock(data_vp, LK_EXCLUSIVE | LK_RETRY, p);
    if (error) {
        vrele(data_vp);
        printf("prepare_profile_database: cannot lock data file %s\n",
            profile_data_string);
        kmem_free(kernel_map,
            (vm_offset_t)profile_data_string, PATH_MAX);
        kmem_free(kernel_map,
            (vm_offset_t)names_buf, 4 * PAGE_SIZE);
        /* bail out here: data_vp is no longer held, so falling */
        /* through to VOP_SETATTR/vput below would be unsafe */
        return error;
    }
    VATTR_NULL(&vattr);
    vattr.va_uid = user;
    error = VOP_SETATTR(data_vp, &vattr, p->p_cred->pc_ucred, p);
    if (error) {
        printf("prepare_profile_database: "
            "Can't set user %s\n", profile_data_string);
    }

    vput(data_vp);
    kmem_free(kernel_map,
        (vm_offset_t)profile_data_string, PATH_MAX);
    kmem_free(kernel_map,
        (vm_offset_t)names_buf, 4 * PAGE_SIZE);
    return 0;

}