/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>

#include <kern/kalloc.h>
#include <kern/parallel.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>
#include <mach/shared_memory_server.h>
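
/*
 * BSD/Mach glue: user-memory helpers (useracc, vslock/vsunlock, the
 * subyte/fubyte/suword/fuword family), task/pid translation
 * (pid_for_task, task_for_pid), and the shared-library region calls
 * (load_shared_file, reset_shared_file, clone_system_shared_regions).
 */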

/*
 * useracc: return whether the current task may access [addr, addr+len)
 * for read (B_READ) or write.
 */
int
useracc(addr, len, prot)
	caddr_t	addr;
	u_int	len;
	int	prot;
{
	return (vm_map_check_protection(
			current_map(),
			trunc_page(addr), round_page(addr+len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}

/* vslock: wire [addr, addr+len) into physical memory. */
void
vslock(addr, len)
	caddr_t	addr;
	int	len;
{
	vm_map_wire(current_map(), trunc_page(addr),
			round_page(addr+len),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);
}

/*
 * vsunlock: undo vslock(), unwiring [addr, addr+len) and first marking
 * the pages modified if the caller dirtied them.
 */
void
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	int	len;
	int	dirtied;
{
	pmap_t		pmap;
	vm_page_t	pg;
	vm_offset_t	vaddr, paddr;

	if (dirtied) {
		/* Mark each page in the range modified before unwiring. */
		pmap = get_task_pmap(current_task());
		for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
				vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
	vm_map_unwire(current_map(), trunc_page(addr),
			round_page(addr+len), FALSE);
}
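
/*
 * Usage sketch (hedged; the traditional BSD physio-style pattern, not a
 * call site in this file): a raw transfer wires the user buffer for the
 * device's benefit, then unwires it, dirtying the pages if the device
 * wrote to them:
 *
 *	vslock(addr, len);
 *	... device transfer to/from [addr, addr+len) ...
 *	vsunlock(addr, len, was_read);
 */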

/*
 * Primitive user-space accessors, built on copyin()/copyout().
 * Each returns -1 when the user address cannot be accessed.
 */
#if	defined(sun) || BALANCE || defined(m88k)
#else	/* defined(sun) || BALANCE || defined(m88k) */
int
subyte(addr, byte)
	void *addr; int byte;
{
	char character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int
suibyte(addr, byte)
	void *addr; int byte;
{
	char character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int
fubyte(addr)
	void *addr;
{
	unsigned char byte;

	if (copyin(addr, (void *)&byte, sizeof(char)))
		return (-1);
	return (byte);
}

int
fuibyte(addr)
	void *addr;
{
	unsigned char byte;

	if (copyin(addr, (void *)&(byte), sizeof(char)))
		return (-1);
	return (byte);
}

int
suword(addr, word)
	void *addr; long word;
{
	return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long
fuword(addr)
	void *addr;
{
	long word;

	if (copyin(addr, (void *)&word, sizeof(int)))
		return (-1);
	return (word);
}

/* suiword and fuiword are the same as suword and fuword, respectively. */

int
suiword(addr, word)
	void *addr; long word;
{
	return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long
fuiword(addr)
	void *addr;
{
	long word;

	if (copyin(addr, (void *)&word, sizeof(int)))
		return (-1);
	return (word);
}
#endif	/* defined(sun) || BALANCE || defined(m88k) */
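
/*
 * Example (hedged sketch, not a call site in this file): planting an
 * x86-style breakpoint byte the way ptrace-like code would:
 *
 *	int old = fubyte(uaddr);
 *	if (old == -1 || subyte(uaddr, 0xCC) == -1)
 *		return (EFAULT);
 */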

/*
 * procdup: create the Mach task and initial thread backing a newly
 * forked BSD process.
 */
thread_act_t
procdup(
	struct proc	*child,
	struct proc	*parent)
{
	thread_act_t	thread;
	task_t		task;
	kern_return_t	result;

	if (parent->task == kernel_task)
		result = task_create_local(TASK_NULL, FALSE, FALSE, &task);
	else
		result = task_create_local(parent->task, TRUE, FALSE, &task);
	if (result != KERN_SUCCESS)
		printf("fork/procdup: task_create failed. Code: 0x%x\n", result);
	child->task = task;
	/* task->proc = child; */
	set_bsdtask_info(task, child);
	result = thread_create(task, &thread);
	if (result != KERN_SUCCESS)
		printf("fork/procdup: thread_create failed. Code: 0x%x\n", result);

	thread_deallocate(thread);	/* drop the extra reference from thread_create() */

	/*
	 *	Don't need to lock thread here because it can't
	 *	possibly execute and no one else knows about it.
	 */
	/* compute_priority(thread, FALSE); */

	return (thread);
}

/*
 * pid_for_task: return the BSD pid backing a Mach task port, or -1 if
 * the port does not name a task with a BSD proc.
 */
kern_return_t
pid_for_task(t, x)
	mach_port_t	t;
	int		*x;
{
	struct proc	*p;
	task_t		t1;
	extern task_t	port_name_to_task(mach_port_t t);
	int		pid = -1;
	kern_return_t	err;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid = p->p_pid;
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
		task_deallocate(t1);
	}
	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
	thread_funnel_set(kernel_flock, funnel_state);
	return (err);
}
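
/*
 * Example (hedged, user-side sketch): a holder of a task port can
 * recover the BSD pid behind it through the corresponding trap:
 *
 *	int pid;
 *	if (pid_for_task(task_port, &pid) == KERN_SUCCESS && pid != -1)
 *		... pid names the BSD process backing task_port ...
 */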

/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;
	int		pid;
	mach_port_t	*t;
{
	struct proc	*p;
	struct proc	*p1;
	task_t		t1;
	mach_port_t	tret;
	extern task_t	port_name_to_task(mach_port_t tp);
	void		*sright;
	int		error = 0;
	boolean_t	funnel_state;

	/*
	 * Take the funnel before the early-failure path so the restore
	 * at tfpout never sees funnel_state uninitialized.
	 */
	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *) &t1, (char *) t, sizeof(mach_port_t));
		error = KERN_FAILURE;
		goto tfpout;
	}

restart:
	p1 = get_bsdtask_info(t1);
	if (
		((p = pfind(pid)) != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& ((p->p_ucred->cr_uid == p1->p_ucred->cr_uid)
		|| !(suser(p1->p_ucred, &p1->p_acflag)))
		&& (p->p_stat != SZOMB)
		) {
		if (p->task != TASK_NULL) {
			if (!task_reference_try(p->task)) {
				mutex_pause(); /* temp loss of funnel */
				goto restart;
			}
			sright = convert_task_to_port(p->task);
			tret = ipc_port_copyout_send(sright,
					get_task_ipcspace(current_task()));
		} else
			tret = MACH_PORT_NULL;
		(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
		task_deallocate(t1);
		error = KERN_SUCCESS;
		goto tfpout;
	}
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
	error = KERN_FAILURE;
tfpout:
	thread_funnel_set(kernel_flock, funnel_state);
	return (error);
}
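
/*
 * Example (hedged, user-side sketch): with a matching uid or superuser
 * credentials, a caller obtains another process's task port and may
 * then apply the Mach task APIs to it:
 *
 *	mach_port_t task;
 *	if (task_for_pid(mach_task_self(), pid, &task) == KERN_SUCCESS)
 *		... task_info(task, ...), vm_read(task, ...) ...
 */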

struct load_shared_file_args {
	char		*filename;
	caddr_t		mfa;
	u_long		mfs;
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
	int		*flags;
};

/*
 * load_shared_file: map a file into the task's shared regions at the
 * caller-supplied base address, according to the sf_mapping_t list.
 */
int
load_shared_file(
	struct proc	*p,
	struct load_shared_file_args *uap,
	int		*retval)
{
	caddr_t		mapped_file_addr = uap->mfa;
	u_long		mapped_file_size = uap->mfs;
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	char		*filename = uap->filename;
	int		*flags = uap->flags;
	struct vnode	*vp = 0;
	struct nameidata nd, *ndp;
	char		*filename_str;
	int		error;
	kern_return_t	kr;

	struct vattr	vattr;
	void		*file_object;
	sf_mapping_t	*map_list;
	caddr_t		local_base;
	int		local_flags;
	int		caller_flags;
	int		i;
	size_t		dummy;
	kern_return_t	kret;

	shared_region_mapping_t shared_region;
	struct shared_region_task_mappings task_mapping_info;
	shared_region_mapping_t next;

	ndp = &nd;

	unix_master();

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto lsf_bailout;
	}
	if (error = copyin(flags, &local_flags, sizeof (int))) {
		goto lsf_bailout;
	}
	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto lsf_bailout;
	}
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
				(vm_size_t)(MAXPATHLEN));
		error = ENOMEM;
		goto lsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;
	}

	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, &dummy)) {
		goto lsf_bailout_free;
	}

	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
			filename_str, p);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;
	}

	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;
	}

	file_object = ubc_getobject(vp, (UBC_NOREACTIVATE | UBC_HOLDOBJECT));
	if (file_object == (void *) NULL) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	if (vattr.va_size != mapped_file_size) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.flags), &next);

	/*
	 * This is a work-around to allow executables which have been
	 * built without knowledge of the proper shared segment to
	 * load.  This code has been architected as a shared region
	 * handler; the knowledge of where the regions are loaded is
	 * problematic for the extension of shared regions, as it will
	 * not be easy to know what region an item should go into.
	 * The code below, however, will get around a short-term problem
	 * with executables which believe they are loading at zero.
	 */

	if (((unsigned int)local_base &
		(~(task_mapping_info.text_size - 1))) !=
		task_mapping_info.client_base) {
		if (local_flags & ALTERNATE_LOAD_SITE) {
			local_base = (caddr_t)(
				(unsigned int)local_base &
				(task_mapping_info.text_size - 1));
			local_base = (caddr_t)((unsigned int)local_base
				| task_mapping_info.client_base);
		} else {
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
	}
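
	/*
	 * Illustration (the constants are assumptions for the example,
	 * not values taken from this file): with text_size 0x10000000
	 * and client_base 0x90000000, a requested base of 0x00001000
	 * under ALTERNATE_LOAD_SITE becomes
	 * (0x00001000 & 0x0fffffff) | 0x90000000 == 0x90001000;
	 * the low-order offset is kept and rebased into the region.
	 */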

	/* Load alternate regions if the caller has requested them. */
	/* Note: the new regions are "clean slates". */

	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {

		shared_region_mapping_t	new_shared_region;
		shared_region_mapping_t	old_shared_region;
		struct shared_region_task_mappings old_info;
		struct shared_region_task_mappings new_info;

		if (shared_file_create_system_region(&new_shared_region)) {
			error = ENOMEM;
			goto lsf_bailout_free_vput;
		}
		vm_get_shared_region(current_task(), &old_shared_region);

		old_info.self = (vm_offset_t)old_shared_region;
		shared_region_mapping_info(old_shared_region,
			&(old_info.text_region),
			&(old_info.text_size),
			&(old_info.data_region),
			&(old_info.data_size),
			&(old_info.region_mappings),
			&(old_info.client_base),
			&(old_info.alternate_base),
			&(old_info.alternate_next),
			&(old_info.flags), &next);
		new_info.self = (vm_offset_t)new_shared_region;
		shared_region_mapping_info(new_shared_region,
			&(new_info.text_region),
			&(new_info.text_size),
			&(new_info.data_region),
			&(new_info.data_size),
			&(new_info.region_mappings),
			&(new_info.client_base),
			&(new_info.alternate_base),
			&(new_info.alternate_next),
			&(new_info.flags), &next);
		if (vm_map_region_replace(current_map(), old_info.text_region,
				new_info.text_region, old_info.client_base,
				old_info.client_base + old_info.text_size)) {
			panic("load_shared_file: shared region mis-alignment");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		if (vm_map_region_replace(current_map(), old_info.data_region,
				new_info.data_region,
				old_info.client_base + old_info.text_size,
				old_info.client_base
					+ old_info.text_size + old_info.data_size)) {
			panic("load_shared_file: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		vm_set_shared_region(current_task(), new_shared_region);
		task_mapping_info = new_info;
		shared_region_mapping_dealloc(old_shared_region);
	}
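
	/*
	 * Design note: text is swapped in over [client_base,
	 * client_base + text_size) and data over the window immediately
	 * following it, so a failed vm_map_region_replace() means the
	 * current map no longer lines up with the addresses recorded in
	 * old_info.
	 */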

	if ((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			mapped_file_size,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_object,
			&task_mapping_info, &local_flags))) {
		switch (kr) {
		case KERN_FAILURE:
			error = EINVAL;
			break;
		case KERN_INVALID_ARGUMENT:
			error = EINVAL;
			break;
		case KERN_INVALID_ADDRESS:
			error = EACCES;
			break;
		case KERN_PROTECTION_FAILURE:
			/* save EAUTH for authentication in this */
			/* routine when devices are backed by vm */
			error = EPERM;
			break;
		case KERN_NO_SPACE:
			error = ENOMEM;
			break;
		default:
			error = EINVAL;
		}
		if ((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! "
				"error: 0x%x, Base_address: 0x%x, "
				"number of mappings: %d, file_object 0x%x\n",
				error, local_base, map_cnt, file_object);
			for (i = 0; i < map_cnt; i++) {
				printf("load_shared_file: Mapping%d, "
					"mapping_offset: 0x%x, size: 0x%x, "
					"file_offset: 0x%x, protection: 0x%x\n",
					i, map_list[i].mapping_offset,
					map_list[i].size,
					map_list[i].file_offset,
					map_list[i].protection);
			}
		}
	} else {
		if (!(error = copyout(&local_flags, flags, sizeof (int)))) {
			error = copyout(&local_base,
					base_address, sizeof (caddr_t));
		}
	}

lsf_bailout_free_vput:
	vput(vp);

lsf_bailout_free:
	kmem_free(kernel_map, (vm_offset_t)filename_str,
			(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
	unix_release();
	return error;
}

struct reset_shared_file_args {
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
};

/*
 * reset_shared_file: restore fresh private copies of the shared data
 * segment mappings described by the sf_mapping_t list.
 */
int
reset_shared_file(
	struct proc	*p,
	struct reset_shared_file_args *uap,
	int		*retval)
{
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	int		error;
	kern_return_t	kret;

	sf_mapping_t	*map_list;
	caddr_t		local_base;
	vm_offset_t	map_address;
	int		i;

	unix_master();

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto rsf_bailout;
	}

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
			!= GLOBAL_SHARED_TEXT_SEGMENT) {
		error = EINVAL;
		goto rsf_bailout;
	}

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto rsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

		kmem_free(kernel_map, (vm_offset_t)map_list,
				(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		goto rsf_bailout;
	}

	for (i = 0; i < map_cnt; i++) {
		/* Only mappings that fall in the shared data segment are reset. */
		if ((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),
				map_address,
				map_list[i].size);
			vm_map(current_map(), &map_address,
				map_list[i].size, 0, SHARED_LIB_ALIAS,
				shared_data_region_handle,
				((unsigned int)local_base
					& SHARED_DATA_REGION_MASK) +
				(map_list[i].mapping_offset
					& SHARED_DATA_REGION_MASK),
				TRUE, VM_PROT_READ,
				VM_PROT_READ, VM_INHERIT_SHARE);
		}
	}

	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
	unix_release();
	return error;
}

/*
 * clone_system_shared_regions: give the current task a private copy of
 * the system shared regions, populated from the existing ones and
 * chained to them so untouched pages keep coming from the originals.
 */
int
clone_system_shared_regions()
{
	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	if (shared_file_create_system_region(&new_shared_region))
		return (ENOMEM);
	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
		&(old_info.text_region),
		&(old_info.text_size),
		&(old_info.data_region),
		&(old_info.data_size),
		&(old_info.region_mappings),
		&(old_info.client_base),
		&(old_info.alternate_base),
		&(old_info.alternate_next),
		&(old_info.flags), &next);
	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
		&(new_info.text_region),
		&(new_info.text_size),
		&(new_info.data_region),
		&(new_info.data_size),
		&(new_info.region_mappings),
		&(new_info.client_base),
		&(new_info.alternate_base),
		&(new_info.alternate_next),
		&(new_info.flags), &next);
	if (vm_region_clone(old_info.text_region, new_info.text_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 1");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	if (vm_region_clone(old_info.data_region, new_info.data_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 2");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base + old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,
			old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	vm_set_shared_region(current_task(), new_shared_region);
	shared_region_object_chain_attach(new_shared_region, old_shared_region);
	return (0);
}
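
/*
 * Usage sketch (hedged; callers live outside this file): a process
 * about to change its view of the shared libraries, for example around
 * chroot(2), detaches from the system-wide regions first:
 *
 *	if ((error = clone_system_shared_regions()) != 0)
 *		return (error);
 */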