/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>

#include <kern/kalloc.h>
#include <kern/parallel.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>
#include <mach/shared_memory_server.h>
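
/*
 * useracc: check whether the user address range [addr, addr+len)
 * is accessible in the current task's map with the requested
 * protection (B_READ for read access, anything else for write).
 */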
useracc(addr, len, prot)
	caddr_t	addr;
	u_int	len;
	int	prot;
{
	return (vm_map_check_protection(
			current_map(),
			trunc_page(addr), round_page(addr+len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
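
/*
 * vslock: wire the user address range [addr, addr+len) into
 * physical memory so it cannot be paged out, e.g. for the
 * duration of physical I/O.
 */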
vslock(addr, len)
	caddr_t	addr;
	int	len;
{
	kern_return_t	kret;

	kret = vm_map_wire(current_map(), trunc_page(addr),
			round_page(addr+len),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
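
/*
 * vsunlock: unwire a range previously wired by vslock.  If the
 * caller dirtied the pages, each page in the range is marked
 * modified so that the pageout daemon will write it back.
 */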
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	int	len;
	int	dirtied;
{
	pmap_t		pmap;
	vm_page_t	pg;
	vm_offset_t	vaddr, paddr;
	kern_return_t	kret;

	if (dirtied) {
		pmap = get_task_pmap(current_task());
		for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
				vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
	kret = vm_map_unwire(current_map(), trunc_page(addr),
				round_page(addr+len), FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
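
/*
 * The following store/fetch routines move single bytes and words
 * to and from user space via copyout()/copyin(), returning -1 on
 * fault.  They are only compiled on platforms that do not provide
 * them in machine-dependent code.
 */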
#if	defined(sun) || BALANCE || defined(m88k)
#else	/*defined(sun) || BALANCE || defined(m88k)*/
subyte(addr, byte)
	void	*addr;
	int	byte;
{
	char	character;

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

suibyte(addr, byte)
	void	*addr;
	int	byte;
{
	char	character;

	character = (char)byte;
	return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int fubyte(addr)
	void	*addr;
{
	unsigned char	byte;

	if (copyin(addr, (void *) &byte, sizeof(char)))
		return(-1);
	return(byte);
}

int fuibyte(addr)
	void	*addr;
{
	unsigned char	byte;

	if (copyin(addr, (void *) &(byte), sizeof(char)))
		return(-1);
	return(byte);
}

suword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuword(addr)
	void	*addr;
{
	long	word;

	if (copyin(addr, (void *) &word, sizeof(int)))
		return(-1);
	return(word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

suiword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuiword(addr)
	void	*addr;
{
	long	word;

	if (copyin(addr, (void *) &word, sizeof(int)))
		return(-1);
	return(word);
}
#endif	/* defined(sun) || BALANCE || defined(m88k) || defined(i386) */
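
/*
 * pid_for_task: return the BSD process ID associated with a Mach
 * task port, or -1 if the port does not name a task backed by a
 * BSD process.  The result is copied out to user space at *x.
 */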
kern_return_t
pid_for_task(t, x)
	mach_port_t	t;
	int		*x;
{
	struct proc	*p;
	task_t		t1;
	extern task_t	port_name_to_task(mach_port_t t);
	int		pid = -1;
	kern_return_t	err = KERN_SUCCESS;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
		goto pftout;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid = p->p_pid;
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
	}
	task_deallocate(t1);
pftout:
	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
	thread_funnel_set(kernel_flock, funnel_state);
	return(err);
}
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;
	int		pid;
	mach_port_t	*t;
{
	struct proc	*p;
	struct proc	*p1;
	task_t		t1;
	mach_port_t	tret;
	extern task_t	port_name_to_task(mach_port_t tp);
	void		*sright;
	int		error = 0;
	boolean_t	funnel_state;

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
		return(KERN_FAILURE);
	}

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

restart:
	p1 = get_bsdtask_info(t1);
	if (
		((p = pfind(pid)) != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& ((p->p_ucred->cr_uid == p1->p_ucred->cr_uid)
		|| !(suser(p1->p_ucred, &p1->p_acflag)))
		&& (p->p_stat != SZOMB)
		) {
			if (p->task != TASK_NULL) {
				if (!task_reference_try(p->task)) {
					mutex_pause(); /* temp loss of funnel */
					goto restart;
				}
				sright = convert_task_to_port(p->task);
				tret = ipc_port_copyout_send(sright,
					get_task_ipcspace(current_task()));
			} else
				tret = MACH_PORT_NULL;
			(void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
			task_deallocate(t1);
			error = KERN_SUCCESS;
			goto tfpout;
	}
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
	error = KERN_FAILURE;
tfpout:
	thread_funnel_set(kernel_flock, funnel_state);
	return(error);
}
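
/*
 * load_shared_file: map a file into the system shared region at
 * the requested base address, according to the caller-supplied
 * list of sf_mapping_t entries.  Used to populate the global
 * shared text and data segments with shared library contents.
 */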
struct load_shared_file_args {
	char		*filename;
	caddr_t		mfa;
	u_long		mfs;
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
	int		*flags;
};
int
load_shared_file(
	struct proc		*p,
	struct load_shared_file_args *uap,
	register		*retval)
{
	caddr_t		mapped_file_addr=uap->mfa;
	u_long		mapped_file_size=uap->mfs;
	caddr_t		*base_address=uap->ba;
	int		map_cnt=uap->map_cnt;
	sf_mapping_t	*mappings=uap->mappings;
	char		*filename=uap->filename;
	int		*flags=uap->flags;
	struct vnode	*vp = 0;
	struct nameidata nd, *ndp;
	char		*filename_str;
	register int	error;
	kern_return_t	kr;

	struct vattr	vattr;
	memory_object_control_t file_control;
	sf_mapping_t	*map_list;
	caddr_t		local_base;
	int		local_flags;
	int		caller_flags;
	int		i;
	vm_size_t	dummy;
	kern_return_t	kret;

	shared_region_mapping_t shared_region;
	struct shared_region_task_mappings task_mapping_info;
	shared_region_mapping_t next;

	ndp = &nd;
	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto lsf_bailout;
	}
	if (error = copyin(flags, &local_flags, sizeof (int))) {
		goto lsf_bailout;
	}
	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto lsf_bailout;
	}
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
			(vm_size_t)(MAXPATHLEN));
		error = ENOMEM;
		goto lsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;
	}

	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, (size_t *)&dummy)) {
		goto lsf_bailout_free;
	}
	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    filename_str, p);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;
	}

	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;
	}

	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	if (vattr.va_size != mapped_file_size) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}
	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.flags), &next);
	/* This is a work-around to allow executables which have been */
	/* built without knowledge of the proper shared segment to    */
	/* load.  This code has been architected as a shared region   */
	/* handler, the knowledge of where the regions are loaded is  */
	/* problematic for the extension of shared regions as it will */
	/* not be easy to know what region an item should go into.    */
	/* The code below however will get around a short term problem */
	/* with executables which believe they are loading at zero.   */
	if (((unsigned int)local_base &
		(~(task_mapping_info.text_size - 1))) !=
		task_mapping_info.client_base) {
		if (local_flags & ALTERNATE_LOAD_SITE) {
			local_base = (caddr_t)(
				(unsigned int)local_base &
				(task_mapping_info.text_size - 1));
			local_base = (caddr_t)((unsigned int)local_base
				| task_mapping_info.client_base);
		} else {
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
	}
	/* load alternate regions if the caller has requested.  */
	/* Note: the new regions are "clean slates" */

	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {

		shared_region_mapping_t	new_shared_region;
		shared_region_mapping_t	old_shared_region;
		struct shared_region_task_mappings old_info;
		struct shared_region_task_mappings new_info;

		if (shared_file_create_system_region(&new_shared_region)) {
			error = ENOMEM;
			goto lsf_bailout_free_vput;
		}
		vm_get_shared_region(current_task(), &old_shared_region);
		old_info.self = (vm_offset_t)old_shared_region;
		shared_region_mapping_info(old_shared_region,
			&(old_info.text_region),
			&(old_info.text_size),
			&(old_info.data_region),
			&(old_info.data_size),
			&(old_info.region_mappings),
			&(old_info.client_base),
			&(old_info.alternate_base),
			&(old_info.alternate_next),
			&(old_info.flags), &next);
		new_info.self = (vm_offset_t)new_shared_region;
		shared_region_mapping_info(new_shared_region,
			&(new_info.text_region),
			&(new_info.text_size),
			&(new_info.data_region),
			&(new_info.data_size),
			&(new_info.region_mappings),
			&(new_info.client_base),
			&(new_info.alternate_base),
			&(new_info.alternate_next),
			&(new_info.flags), &next);
		if (vm_map_region_replace(current_map(), old_info.text_region,
				new_info.text_region, old_info.client_base,
				old_info.client_base+old_info.text_size)) {
			panic("load_shared_file: shared region mis-alignment");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		if (vm_map_region_replace(current_map(), old_info.data_region,
				new_info.data_region,
				old_info.client_base + old_info.text_size,
				old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
			panic("load_shared_file: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		vm_set_shared_region(current_task(), new_shared_region);
		task_mapping_info = new_info;
		shared_region_mapping_dealloc(old_shared_region);
	}
	if ((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			mapped_file_size,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_control,
			&task_mapping_info, &local_flags))) {
		switch (kr) {
		case KERN_FAILURE:
			error = EINVAL;
			break;
		case KERN_INVALID_ARGUMENT:
			error = EINVAL;
			break;
		case KERN_INVALID_ADDRESS:
			error = EACCES;
			break;
		case KERN_PROTECTION_FAILURE:
			/* save EAUTH for authentication in this */
			/* routine */
			error = EPERM;
			break;
		case KERN_NO_SPACE:
			error = ENOMEM;
			break;
		default:
			error = EINVAL;
		};
		if ((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
			for (i=0; i<map_cnt; i++) {
				printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
					, i, map_list[i].mapping_offset,
					map_list[i].size,
					map_list[i].file_offset,
					map_list[i].protection);
			}
		}
	} else {
		if (!(error = copyout(&local_flags, flags, sizeof (int)))) {
			error = copyout(&local_base,
				base_address, sizeof (caddr_t));
		}
	}

lsf_bailout_free_vput:
	vput(vp);

lsf_bailout_free:
	kmem_free(kernel_map, (vm_offset_t)filename_str,
		(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
	return error;
}
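
/*
 * reset_shared_file: re-establish the original mappings of a shared
 * file's data segment, deallocating the task's (possibly copy-on-
 * written) pages and re-mapping the pristine pages read-only from
 * the global shared data region.
 */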
struct reset_shared_file_args {
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
};
int
reset_shared_file(
	struct proc		*p,
	struct reset_shared_file_args *uap,
	register		*retval)
{
	caddr_t		*base_address=uap->ba;
	int		map_cnt=uap->map_cnt;
	sf_mapping_t	*mappings=uap->mappings;
	register int	error;
	kern_return_t	kret;

	sf_mapping_t	*map_list;
	caddr_t		local_base;
	vm_offset_t	map_address;
	int		i;
	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto rsf_bailout;
	}

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
					!= GLOBAL_SHARED_TEXT_SEGMENT) {
		error = EINVAL;
		goto rsf_bailout;
	}

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto rsf_bailout;
	}
	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

		kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		goto rsf_bailout;
	}
	for (i = 0; i<map_cnt; i++) {
		if ((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),
				map_address,
				map_list[i].size);
			vm_map(current_map(), &map_address,
				map_list[i].size, 0, SHARED_LIB_ALIAS,
				shared_data_region_handle,
				((unsigned int)local_base
				   & SHARED_DATA_REGION_MASK) +
				(map_list[i].mapping_offset
				   & SHARED_DATA_REGION_MASK),
				TRUE, VM_PROT_READ,
				VM_PROT_READ, VM_INHERIT_SHARE);
		}
	}

	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
	return error;
}
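
/*
 * clone_system_shared_regions: give the current task its own
 * private copy of the system shared regions.  The new regions are
 * cloned from the existing ones, substituted into the task's
 * address map, and chained to the old regions so that unmodified
 * pages can still be found through the original objects.
 */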
int
clone_system_shared_regions()
{
	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	if (shared_file_create_system_region(&new_shared_region))
		return (ENOMEM);
	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
		&(old_info.text_region),
		&(old_info.text_size),
		&(old_info.data_region),
		&(old_info.data_size),
		&(old_info.region_mappings),
		&(old_info.client_base),
		&(old_info.alternate_base),
		&(old_info.alternate_next),
		&(old_info.flags), &next);
	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
		&(new_info.text_region),
		&(new_info.text_size),
		&(new_info.data_region),
		&(new_info.data_size),
		&(new_info.region_mappings),
		&(new_info.client_base),
		&(new_info.alternate_base),
		&(new_info.alternate_next),
		&(new_info.flags), &next);
	if (vm_region_clone(old_info.text_region, new_info.text_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 1");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if (vm_region_clone(old_info.data_region, new_info.data_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 2");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base+old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,
			old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	vm_set_shared_region(current_task(), new_shared_region);
	shared_region_object_chain_attach(new_shared_region, old_shared_region);
	return(0);
}