/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>

#include <kern/kalloc.h>
#include <kern/parallel.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>
#include <mach/shared_memory_server.h>
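
/*
 * BSD wrappers over the Mach VM layer.  useracc() checks whether a user
 * range is accessible for the requested access mode, and vslock()/
 * vsunlock() wire and unwire user pages (e.g. around physical I/O) by
 * bracketing vm_map_wire()/vm_map_unwire() on the current task's map.
 */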
int
useracc(addr, len, prot)
	caddr_t	addr;
	u_int	len;
	int	prot;
{
	return (vm_map_check_protection(
			current_map(),
			trunc_page(addr), round_page(addr + len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}
int
vslock(addr, len)
	caddr_t	addr;
	int	len;
{
	kern_return_t	kret;

	kret = vm_map_wire(current_map(), trunc_page(addr),
			round_page(addr + len),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
int
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	int	len;
	int	dirtied;
{
	pmap_t		pmap;
	vm_page_t	pg;
	vm_offset_t	vaddr, paddr;
	kern_return_t	kret;

	if (dirtied) {
		/* Mark each page in the range modified before unwiring. */
		pmap = get_task_pmap(current_task());
		for (vaddr = trunc_page(addr); vaddr < round_page(addr + len);
				vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
	kret = vm_map_unwire(current_map(), trunc_page(addr),
				round_page(addr + len), FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}
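
/*
 * The s*byte()/f*word() family below provides the classic BSD one-byte
 * and one-word user-space store/fetch primitives.  On this platform they
 * are simply built on copyin()/copyout(); each returns -1 on fault.
 */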
#if	defined(sun) || BALANCE || defined(m88k)
#else	/* defined(sun) || BALANCE || defined(m88k) */

int
subyte(addr, byte)
	void *addr; int byte;
{
	char character = (char)byte;

	return (copyout((void *)&character, addr, sizeof(char)) == 0 ? 0 : -1);
}

int
suibyte(addr, byte)
	void *addr; int byte;
{
	char character = (char)byte;

	return (copyout((void *)&character, addr, sizeof(char)) == 0 ? 0 : -1);
}

int
fubyte(addr)
	void *addr;
{
	unsigned char byte;

	if (copyin(addr, (void *)&byte, sizeof(char)))
		return (-1);
	return (byte);
}

int
fuibyte(addr)
	void *addr;
{
	unsigned char byte;

	if (copyin(addr, (void *)&byte, sizeof(char)))
		return (-1);
	return (byte);
}

int
suword(addr, word)
	void *addr; long word;
{
	return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long
fuword(addr)
	void *addr;
{
	long word;

	if (copyin(addr, (void *)&word, sizeof(int)))
		return (-1);
	return (word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

int
suiword(addr, word)
	void *addr; long word;
{
	return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long
fuiword(addr)
	void *addr;
{
	long word;

	if (copyin(addr, (void *)&word, sizeof(int)))
		return (-1);
	return (word);
}
#endif	/* defined(sun) || BALANCE || defined(m88k) */
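
/*
 * pid_for_task: translate a Mach task port into the BSD process ID of
 * the task it names.  Takes the kernel funnel because it walks BSD proc
 * state; always copies a pid (or -1 on failure) back out to user space.
 */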
kern_return_t
pid_for_task(t, x)
	mach_port_t	t;
	int		*x;
{
	struct proc	*p;
	task_t		t1;
	extern task_t	port_name_to_task(mach_port_t t);
	int		pid = -1;
	kern_return_t	err = KERN_SUCCESS;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
		goto pftout;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid = p->p_pid;
			err = KERN_SUCCESS;
		} else
			err = KERN_FAILURE;
	}
	task_deallocate(t1);
pftout:
	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
	thread_funnel_set(kernel_flock, funnel_state);
	return (err);
}
/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;
	int		pid;
	mach_port_t	*t;
{
	struct proc	*p;
	struct proc	*p1;
	task_t		t1;
	mach_port_t	tret;
	extern task_t	port_name_to_task(mach_port_t tp);
	void		*sright;
	int		error = 0;
	boolean_t	funnel_state;

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
		return (KERN_FAILURE);
	}

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

restart:
	p1 = get_bsdtask_info(t1);
	if (
		((p = pfind(pid)) != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
		((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
		|| !(suser(p1->p_ucred, &p1->p_acflag)))
		&& (p->p_stat != SZOMB)
		) {
		if (p->task != TASK_NULL) {
			if (!task_reference_try(p->task)) {
				mutex_pause(); /* temp loss of funnel */
				goto restart;
			}
			sright = convert_task_to_port(p->task);
			tret = ipc_port_copyout_send(sright,
				get_task_ipcspace(current_task()));
		} else
			tret = MACH_PORT_NULL;
		(void) copyout((char *)&tret, (char *)t, sizeof(mach_port_t));
		task_deallocate(t1);
		error = KERN_SUCCESS;
		goto tfpout;
	}
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *)&tret, (char *)t, sizeof(mach_port_t));
	error = KERN_FAILURE;
tfpout:
	thread_funnel_set(kernel_flock, funnel_state);
	return (error);
}
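
/*
 * System shared region support.  The calls below manage the global
 * shared text and data segments that the dynamic link editor maps
 * libraries into, described by arrays of sf_mapping_t entries.
 */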
struct load_shared_file_args {
	char		*filename;
	caddr_t		mfa;
	u_long		mfs;
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
	int		*flags;
};
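
/*
 * load_shared_file: map the named file into the task's shared region
 * according to the caller-supplied sf_mapping_t list, optionally at the
 * alternate load site or into freshly created ("clean slate") local
 * regions, and copy the resulting base address and flags back out.
 */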
int
load_shared_file(
	struct proc			*p,
	struct load_shared_file_args	*uap,
	int				*retval)
{
	caddr_t		mapped_file_addr = uap->mfa;
	u_long		mapped_file_size = uap->mfs;
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	char		*filename = uap->filename;
	int		*flags = uap->flags;
	struct vnode	*vp = 0;
	struct nameidata nd, *ndp;
	char		*filename_str;
	register int	error;
	kern_return_t	kr;

	struct vattr	vattr;
	memory_object_control_t	file_control;
	sf_mapping_t	*map_list;
	caddr_t		local_base;
	int		local_flags;
	int		caller_flags;
	int		i;
	vm_size_t	dummy;
	kern_return_t	kret;

	shared_region_mapping_t	shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t	next;

	ndp = &nd;
	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto lsf_bailout;
	}
	if (error = copyin(flags, &local_flags, sizeof (int))) {
		goto lsf_bailout;
	}
	caller_flags = local_flags;

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto lsf_bailout;
	}
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
				(vm_size_t)(MAXPATHLEN));
		error = ENOMEM;
		goto lsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;
	}

	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, (size_t *)&dummy)) {
		goto lsf_bailout_free;
	}

	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    filename_str, p);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;
	}

	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;
	}

	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	if (vattr.va_size != mapped_file_size) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}
	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.flags), &next);
	/* This is a work-around to allow executables which have been   */
	/* built without knowledge of the proper shared segment to      */
	/* load.  This code has been architected as a shared region     */
	/* handler; the knowledge of where the regions are loaded is    */
	/* problematic for the extension of shared regions, as it will  */
	/* not be easy to know what region an item should go into.      */
	/* The code below, however, will get around a short-term problem */
	/* with executables which believe they are loading at zero.     */
	if (((unsigned int)local_base &
		(~(task_mapping_info.text_size - 1))) !=
		task_mapping_info.client_base) {
		if (local_flags & ALTERNATE_LOAD_SITE) {
			local_base = (caddr_t)(
				(unsigned int)local_base &
				(task_mapping_info.text_size - 1));
			local_base = (caddr_t)((unsigned int)local_base
				| task_mapping_info.client_base);
		} else {
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
	}

	/* Load alternate regions if the caller has requested. */
	/* Note: the new regions are "clean slates". */
	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {

		shared_region_mapping_t	new_shared_region;
		shared_region_mapping_t	old_shared_region;
		struct shared_region_task_mappings old_info;
		struct shared_region_task_mappings new_info;

		if (shared_file_create_system_region(&new_shared_region)) {
			error = ENOMEM;
			goto lsf_bailout_free_vput;
		}
		vm_get_shared_region(current_task(), &old_shared_region);

		old_info.self = (vm_offset_t)old_shared_region;
		shared_region_mapping_info(old_shared_region,
			&(old_info.text_region),
			&(old_info.text_size),
			&(old_info.data_region),
			&(old_info.data_size),
			&(old_info.region_mappings),
			&(old_info.client_base),
			&(old_info.alternate_base),
			&(old_info.alternate_next),
			&(old_info.flags), &next);
		new_info.self = (vm_offset_t)new_shared_region;
		shared_region_mapping_info(new_shared_region,
			&(new_info.text_region),
			&(new_info.text_size),
			&(new_info.data_region),
			&(new_info.data_size),
			&(new_info.region_mappings),
			&(new_info.client_base),
			&(new_info.alternate_base),
			&(new_info.alternate_next),
			&(new_info.flags), &next);
		if (vm_map_region_replace(current_map(), old_info.text_region,
			       new_info.text_region, old_info.client_base,
			       old_info.client_base + old_info.text_size)) {
			panic("load_shared_file: shared region mis-alignment");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		if (vm_map_region_replace(current_map(), old_info.data_region,
			       new_info.data_region,
			       old_info.client_base + old_info.text_size,
			       old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
			panic("load_shared_file: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		vm_set_shared_region(current_task(), new_shared_region);
		task_mapping_info = new_info;
		shared_region_mapping_dealloc(old_shared_region);
	}
	if ((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			mapped_file_size,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_control,
			&task_mapping_info, &local_flags))) {
		switch (kr) {
		case KERN_FAILURE:
			error = EINVAL;
			break;
		case KERN_INVALID_ARGUMENT:
			error = EINVAL;
			break;
		case KERN_INVALID_ADDRESS:
			error = EACCES;
			break;
		case KERN_PROTECTION_FAILURE:
			/* save EAUTH for authentication in this */
			/* routine when devices and files are */
			/* mapped into the same region */
			error = EPERM;
			break;
		case KERN_NO_SPACE:
			error = ENOMEM;
			break;
		default:
			error = EINVAL;
		}
		if ((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! "
			    "error: 0x%x, Base_address: 0x%x, "
			    "number of mappings: %d, file_control 0x%x\n",
			    error, local_base, map_cnt, file_control);
			for (i = 0; i < map_cnt; i++) {
				printf("load_shared_file: Mapping%d, "
				    "mapping_offset: 0x%x, size: 0x%x, "
				    "file_offset: 0x%x, protection: 0x%x\n",
				    i, map_list[i].mapping_offset,
				    map_list[i].size,
				    map_list[i].file_offset,
				    map_list[i].protection);
			}
		}
	} else {
		if (!(error = copyout(&local_flags, flags, sizeof (int)))) {
			error = copyout(&local_base,
				base_address, sizeof (caddr_t));
		}
	}

lsf_bailout_free_vput:
	vput(vp);

lsf_bailout_free:
	kmem_free(kernel_map, (vm_offset_t)filename_str,
			(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
	return (error);
}
struct reset_shared_file_args {
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
};
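
/*
 * reset_shared_file: throw away a task's private (copied-on-write) data
 * pages for the given mappings and remap them read-only from the global
 * shared data region, restoring the pristine library data.
 */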
int
reset_shared_file(
	struct proc			*p,
	struct reset_shared_file_args	*uap,
	int				*retval)
{
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	register int	error;
	kern_return_t	kret;

	sf_mapping_t	*map_list;
	caddr_t		local_base;
	vm_offset_t	map_address;
	int		i;

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto rsf_bailout;
	}

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
			!= GLOBAL_SHARED_TEXT_SEGMENT) {
		error = EINVAL;
		goto rsf_bailout;
	}

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto rsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

		kmem_free(kernel_map, (vm_offset_t)map_list,
				(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		goto rsf_bailout;
	}
	for (i = 0; i < map_cnt; i++) {
		if ((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),
				map_address,
				map_list[i].size);
			vm_map(current_map(), &map_address,
				map_list[i].size, 0, SHARED_LIB_ALIAS,
				shared_data_region_handle,
				((unsigned int)local_base
				   & SHARED_DATA_REGION_MASK) +
				(map_list[i].mapping_offset
				   & SHARED_DATA_REGION_MASK),
				TRUE, VM_PROT_READ,
				VM_PROT_READ, VM_INHERIT_SHARE);
		}
	}

	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
	return (error);
}
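
/*
 * clone_system_shared_regions: give the current task its own copy of the
 * system shared regions, cloned from the existing text and data regions
 * and chained to the originals so unmodified pages keep being shared.
 */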
int
clone_system_shared_regions()
{
	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	if (shared_file_create_system_region(&new_shared_region))
		return (ENOMEM);
	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
		&(old_info.text_region),
		&(old_info.text_size),
		&(old_info.data_region),
		&(old_info.data_size),
		&(old_info.region_mappings),
		&(old_info.client_base),
		&(old_info.alternate_base),
		&(old_info.alternate_next),
		&(old_info.flags), &next);
	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
		&(new_info.text_region),
		&(new_info.text_size),
		&(new_info.data_region),
		&(new_info.data_size),
		&(new_info.region_mappings),
		&(new_info.client_base),
		&(new_info.alternate_base),
		&(new_info.alternate_next),
		&(new_info.flags), &next);
	if (vm_region_clone(old_info.text_region, new_info.text_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 1");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	if (vm_region_clone(old_info.data_region, new_info.data_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 2");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base + old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,
			old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);
		return (EINVAL);
	}
	vm_set_shared_region(current_task(), new_shared_region);
	shared_region_object_chain_attach(new_shared_region, old_shared_region);
	return (0);
}