/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dir.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>

#include <kern/kalloc.h>
#include <kern/parallel.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>
#include <mach/shared_memory_server.h>
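
/*
 * The routines below (useracc, vslock, vsunlock) are the traditional
 * BSD user-address validation and wiring helpers, implemented here on
 * top of the Mach VM map of the current task.
 */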
useracc(addr, len, prot)
	caddr_t	addr;
	u_int	len;
	int	prot;
{
	return (vm_map_check_protection(
			current_map(),
			trunc_page(addr), round_page(addr+len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}

vslock(addr, len)
	caddr_t	addr;
	int	len;
{
	vm_map_wire(current_map(), trunc_page(addr),
			round_page(addr+len),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);
}

vsunlock(addr, len, dirtied)
	caddr_t	addr;
	int	len;
	int	dirtied;
{
	pmap_t		pmap;
#if FIXME  /* [ */
	vm_page_t	pg;
#endif  /* FIXME ] */
	vm_offset_t	vaddr, paddr;

#if FIXME  /* [ */
	if (dirtied) {
		pmap = get_task_pmap(current_task());
		for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
				vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
#endif  /* FIXME ] */
#ifdef	lint
	dirtied++;
#endif	/* lint */
	vm_map_unwire(current_map(), trunc_page(addr),
			round_page(addr+len), FALSE);
}

#if	defined(sun) || BALANCE || defined(m88k)
#else	/* defined(sun) || BALANCE || defined(m88k) */
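/*
 * Byte and word store/fetch primitives for user addresses
 * (subyte/suibyte/fubyte/fuibyte and the word variants), implemented
 * with copyin()/copyout().  Each returns -1 if the user address
 * cannot be accessed.  Note that the word variants copy sizeof(int)
 * bytes; on the 32-bit targets this file was built for, int and long
 * are the same size.
 */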
subyte(addr, byte)
	void	*addr;
	int	byte;
{
	char	character;

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

suibyte(addr, byte)
	void	*addr;
	int	byte;
{
	char	character;

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int fubyte(addr)
	void	*addr;
{
	unsigned char	byte;

	if (copyin(addr, (void *)&byte, sizeof(char)))
		return(-1);
	return(byte);
}

int fuibyte(addr)
	void	*addr;
{
	unsigned char	byte;

	if (copyin(addr, (void *)&(byte), sizeof(char)))
		return(-1);
	return(byte);
}

suword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuword(addr)
	void	*addr;
{
	long	word;

	if (copyin(addr, (void *)&word, sizeof(int)))
		return(-1);
	return(word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

suiword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuiword(addr)
	void	*addr;
{
	long	word;

	if (copyin(addr, (void *)&word, sizeof(int)))
		return(-1);
	return(word);
}
#endif	/* defined(sun) || BALANCE || defined(m88k) */

int
swapon()
{
	return(EOPNOTSUPP);
}

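/*
 * Routine:	procdup
 * Purpose:
 *	Create the Mach task and initial thread for the newly forked
 *	process "child".  The task is cloned from the parent's task,
 *	unless the parent is the kernel, in which case a fresh task is
 *	created.  Returns the new thread.
 */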
thread_t
procdup(
	struct proc	*child,
	struct proc	*parent)
{
	thread_t	thread;
	task_t		task;
	kern_return_t	result;

	if (parent->task == kernel_task)
		result = task_create_local(TASK_NULL, FALSE, FALSE, &task);
	else
		result = task_create_local(parent->task, TRUE, FALSE, &task);
	if (result != KERN_SUCCESS)
		printf("fork/procdup: task_create failed. Code: 0x%x\n", result);
	child->task = task;
	/* task->proc = child; */
	set_bsdtask_info(task, child);
	result = thread_create(task, &thread);
	if (result != KERN_SUCCESS)
		printf("fork/procdup: thread_create failed. Code: 0x%x\n", result);

#if FIXME  /* [ */
	thread_deallocate(thread);	// extra ref

	/*
	 * Don't need to lock thread here because it can't
	 * possibly execute and no one else knows about it.
	 */
	/* compute_priority(thread, FALSE); */
#endif  /* ] */
	return(thread);
}

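/*
 * Routine:	pid_for_task
 * Purpose:
 *	Find the BSD process ID for the Mach task named by the given
 *	port.  A pid of -1 is copied out through "x" if the task has
 *	no associated BSD process.
 */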
kern_return_t
pid_for_task(t, x)
	mach_port_t	t;
	int		*x;
{
	struct proc	*p;
	task_t		t1;
	extern task_t	port_name_to_task(mach_port_t t);
	int		pid = -1;
	kern_return_t	err;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
		goto pftout;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid = p->p_pid;
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
	}
	task_deallocate(t1);
	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
pftout:
	thread_funnel_set(kernel_flock, funnel_state);
	return(err);
}

/*
 * Routine:	task_for_pid
 * Purpose:
 *	Get the task port for another "process", named by its
 *	process ID on the same host as "target_task".
 *
 *	Only permitted to privileged processes, or processes
 *	with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;
	int		pid;
	mach_port_t	*t;
{
	struct proc	*p;
	struct proc	*p1;
	task_t		t1;
	mach_port_t	tret;
	extern task_t	port_name_to_task(mach_port_t tp);
	void		*sright;
	int		error = 0;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
		error = KERN_FAILURE;
		goto tfpout;
	}

restart:
	p1 = get_bsdtask_info(t1);
	if (
		((p = pfind(pid)) != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& ((p->p_ucred->cr_uid == p1->p_ucred->cr_uid)
		|| !(suser(p1->p_ucred, &p1->p_acflag)))
		&& (p->p_stat != SZOMB)
		) {
		if (p->task != TASK_NULL) {
			if (!task_reference_try(p->task)) {
				mutex_pause();	/* temp loss of funnel */
				goto restart;
			}
			sright = convert_task_to_port(p->task);
			tret = ipc_port_copyout_send(sright, get_task_ipcspace(current_task()));
		} else
			tret = MACH_PORT_NULL;
		(void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
		task_deallocate(t1);
		error = KERN_SUCCESS;
		goto tfpout;
	}
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
	error = KERN_FAILURE;
tfpout:
	thread_funnel_set(kernel_flock, funnel_state);
	return(error);
}
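
/*
 * Illustrative user-space usage of task_for_pid (a sketch only, not
 * part of this file); assumes the caller is privileged or has the
 * same user ID as the target pid:
 *
 *	mach_port_t task;
 *	kern_return_t kr;
 *
 *	kr = task_for_pid(mach_task_self(), pid, &task);
 *	if (kr == KERN_SUCCESS)
 *		... use "task" with any Mach task API ...
 */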

struct load_shared_file_args {
	char		*filename;	/* path of the shared file */
	caddr_t		mfa;		/* mapped file address */
	u_long		mfs;		/* mapped file size */
	caddr_t		*ba;		/* in/out: base address */
	int		map_cnt;	/* number of entries in "mappings" */
	sf_mapping_t	*mappings;	/* array of sf_mapping_t */
	int		*flags;		/* in/out: load flags */
};

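/*
 * Routine:	load_shared_file
 * Purpose:
 *	Map the named file into the current task's global shared
 *	regions at the requested base address, using the supplied
 *	sf_mapping_t list; the resulting base address and flags are
 *	copied back out to the caller.
 */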
int
load_shared_file(
	struct proc			*p,
	struct load_shared_file_args	*uap,
	register int			*retval)
{
	caddr_t		mapped_file_addr = uap->mfa;
	u_long		mapped_file_size = uap->mfs;
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	char		*filename = uap->filename;
	int		*flags = uap->flags;
	struct vnode	*vp = 0;
	struct nameidata	nd, *ndp;
	char		*filename_str;
	register int	error;
	kern_return_t	kr;

	struct vattr	vattr;
	void		*object;
	void		*file_object;
	sf_mapping_t	*map_list;
	caddr_t		local_base;
	int		local_flags;
	int		caller_flags;
	int		i;
	vm_size_t	dummy;
	kern_return_t	kret;

	shared_region_mapping_t	shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t	next;

	ndp = &nd;

	unix_master();

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto lsf_bailout;
	}
	if (error = copyin(flags, &local_flags, sizeof (int))) {
		goto lsf_bailout;
	}
	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto lsf_bailout;
	}
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
				(vm_size_t)(MAXPATHLEN));
		error = ENOMEM;
		goto lsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;
	}

	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, (size_t *)&dummy)) {
		goto lsf_bailout_free;
	}

	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    filename_str, p);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;
	}

	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;
	}

	file_object = ubc_getobject(vp, (UBC_NOREACTIVATE|UBC_HOLDOBJECT));
	if (file_object == (void *)NULL) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

#ifdef notdef
	if(vattr.va_size != mapped_file_size) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}
#endif

	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.flags), &next);

	/*
	 * This is a work-around to allow executables which have been
	 * built without knowledge of the proper shared segment to
	 * load.  This code has been architected as a shared region
	 * handler; the knowledge of where the regions are loaded is
	 * problematic for the extension of shared regions, as it will
	 * not be easy to know what region an item should go into.
	 * The code below, however, gets around a short-term problem
	 * with executables which believe they are loading at zero.
	 */
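
	/*
	 * Worked example with illustrative values only: with a
	 * client_base of 0x90000000 and a text_size of 0x10000000, a
	 * local_base of 0x1000 fails the check below, since
	 * 0x1000 & ~0x0fffffff == 0, not 0x90000000.  Under
	 * ALTERNATE_LOAD_SITE it is rebased to
	 * (0x1000 & 0x0fffffff) | 0x90000000 == 0x90001000.
	 */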

	{
		if (((unsigned int)local_base &
			(~(task_mapping_info.text_size - 1))) !=
			task_mapping_info.client_base) {
			if(local_flags & ALTERNATE_LOAD_SITE) {
				local_base = (caddr_t)(
					(unsigned int)local_base &
					(task_mapping_info.text_size - 1));
				local_base = (caddr_t)((unsigned int)local_base
					| task_mapping_info.client_base);
			} else {
				error = EINVAL;
				goto lsf_bailout_free_vput;
			}
		}
	}

	/* load alternate regions if the caller has requested. */
	/* Note: the new regions are "clean slates" */

	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {

		shared_region_mapping_t	new_shared_region;
		shared_region_mapping_t	old_shared_region;
		struct shared_region_task_mappings	old_info;
		struct shared_region_task_mappings	new_info;

		if(shared_file_create_system_region(&new_shared_region)) {
			error = ENOMEM;
			goto lsf_bailout_free_vput;
		}
		vm_get_shared_region(current_task(), &old_shared_region);

		old_info.self = (vm_offset_t)old_shared_region;
		shared_region_mapping_info(old_shared_region,
				&(old_info.text_region),
				&(old_info.text_size),
				&(old_info.data_region),
				&(old_info.data_size),
				&(old_info.region_mappings),
				&(old_info.client_base),
				&(old_info.alternate_base),
				&(old_info.alternate_next),
				&(old_info.flags), &next);
		new_info.self = (vm_offset_t)new_shared_region;
		shared_region_mapping_info(new_shared_region,
				&(new_info.text_region),
				&(new_info.text_size),
				&(new_info.data_region),
				&(new_info.data_size),
				&(new_info.region_mappings),
				&(new_info.client_base),
				&(new_info.alternate_base),
				&(new_info.alternate_next),
				&(new_info.flags), &next);
		if (vm_map_region_replace(current_map(), old_info.text_region,
				new_info.text_region, old_info.client_base,
				old_info.client_base+old_info.text_size)) {
			panic("load_shared_file: shared region mis-alignment");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		if(vm_map_region_replace(current_map(), old_info.data_region,
				new_info.data_region,
				old_info.client_base + old_info.text_size,
				old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
			panic("load_shared_file: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		vm_set_shared_region(current_task(), new_shared_region);
		task_mapping_info = new_info;
		shared_region_mapping_dealloc(old_shared_region);
	}

	if((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			mapped_file_size,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_object,
			&task_mapping_info, &local_flags))) {
		switch (kr) {
		case KERN_FAILURE:
			error = EINVAL;
			break;
		case KERN_INVALID_ARGUMENT:
			error = EINVAL;
			break;
		case KERN_INVALID_ADDRESS:
			error = EACCES;
			break;
		case KERN_PROTECTION_FAILURE:
			/* save EAUTH for authentication in this */
			/* routine */
			error = EPERM;
			break;
		case KERN_NO_SPACE:
			error = ENOMEM;
			break;
		default:
			error = EINVAL;
		}
		if((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_object 0x%x\n", error, local_base, map_cnt, file_object);
			for(i=0; i<map_cnt; i++) {
				printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
					, i, map_list[i].mapping_offset,
					map_list[i].size,
					map_list[i].file_offset,
					map_list[i].protection);
			}
		}
	} else {
		if(!(error = copyout(&local_flags, flags, sizeof (int)))) {
			error = copyout(&local_base,
					base_address, sizeof (caddr_t));
		}
	}

lsf_bailout_free_vput:
	vput(vp);

lsf_bailout_free:
	kmem_free(kernel_map, (vm_offset_t)filename_str,
			(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
	unix_release();
	return error;
}

struct reset_shared_file_args {
	caddr_t		*ba;		/* base address of the loaded file */
	int		map_cnt;	/* number of entries in "mappings" */
	sf_mapping_t	*mappings;	/* array of sf_mapping_t */
};

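/*
 * Routine:	reset_shared_file
 * Purpose:
 *	Undo a caller's private (copied) data-segment pages for a
 *	previously loaded shared file: each mapping that falls in the
 *	data segment is deallocated and replaced with a fresh
 *	read-only, copy-on-write mapping of the global shared data
 *	region.
 */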
int
reset_shared_file(
	struct proc			*p,
	struct reset_shared_file_args	*uap,
	register int			*retval)
{
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	register int	error;
	kern_return_t	kr;

	sf_mapping_t	*map_list;
	caddr_t		local_base;
	vm_offset_t	map_address;
	int		i;
	kern_return_t	kret;

	unix_master();

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto rsf_bailout;
	}

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
			!= GLOBAL_SHARED_TEXT_SEGMENT) {
		error = EINVAL;
		goto rsf_bailout;
	}

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto rsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

		kmem_free(kernel_map, (vm_offset_t)map_list,
				(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		goto rsf_bailout;
	}
	for (i = 0; i<map_cnt; i++) {
		if((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			/* this mapping lies in the data segment; replace */
			/* the caller's pages with a read-only, copied    */
			/* mapping of the global shared data region       */
			map_address = (vm_offset_t)
					(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),
					map_address,
					map_list[i].size);
			vm_map(current_map(), &map_address,
					map_list[i].size, 0, SHARED_LIB_ALIAS,
					shared_data_region_handle,
					((unsigned int)local_base
					& SHARED_DATA_REGION_MASK) +
					(map_list[i].mapping_offset
					& SHARED_DATA_REGION_MASK),
					TRUE, VM_PROT_READ,
					VM_PROT_READ, VM_INHERIT_SHARE);
		}
	}

	kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
	unix_release();
	return error;
}

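/*
 * Routine:	clone_system_shared_regions
 * Purpose:
 *	Give the current task a private copy of the system shared
 *	regions: create a new region set, clone the text and data
 *	regions into it, splice the clones into the task's address
 *	map, and chain the new regions to the old ones so existing
 *	mappings remain reachable.
 */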
int
clone_system_shared_regions()
{
	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings	old_info;
	struct shared_region_task_mappings	new_info;

	if (shared_file_create_system_region(&new_shared_region))
		return (ENOMEM);
	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
			&(old_info.text_region),
			&(old_info.text_size),
			&(old_info.data_region),
			&(old_info.data_size),
			&(old_info.region_mappings),
			&(old_info.client_base),
			&(old_info.alternate_base),
			&(old_info.alternate_next),
			&(old_info.flags), &next);
	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
			&(new_info.text_region),
			&(new_info.text_size),
			&(new_info.data_region),
			&(new_info.data_size),
			&(new_info.region_mappings),
			&(new_info.client_base),
			&(new_info.alternate_base),
			&(new_info.alternate_next),
			&(new_info.flags), &next);
	if(vm_region_clone(old_info.text_region, new_info.text_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 1");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if (vm_region_clone(old_info.data_region, new_info.data_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 2");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base+old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if(vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,
			old_info.client_base
			+ old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	vm_set_shared_region(current_task(), new_shared_region);
	shared_region_object_chain_attach(new_shared_region, old_shared_region);
	return(0);
}