/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dir.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>

#include <kern/kalloc.h>
#include <kern/parallel.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>
#include <mach/shared_memory_server.h>

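/*
 * useracc: check whether the current task's address map permits the
 * requested access (B_READ for read, anything else for write) over
 * [addr, addr+len).  Returns the boolean result of
 * vm_map_check_protection().
 */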
int
useracc(addr, len, prot)
        caddr_t addr;
        u_int   len;
        int     prot;
{
        return (vm_map_check_protection(
                        current_map(),
                        trunc_page(addr), round_page(addr+len),
                        prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}

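/*
 * vslock: wire down the physical pages backing [addr, addr+len) in the
 * current map, translating the Mach return code into a BSD errno.
 */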
int
vslock(addr, len)
        caddr_t addr;
        int     len;
{
        kern_return_t kret;
        kret = vm_map_wire(current_map(), trunc_page(addr),
                        round_page(addr+len),
                        VM_PROT_READ | VM_PROT_WRITE, FALSE);

        switch (kret) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}

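/*
 * vsunlock: unwire the pages previously wired by vslock().  The "dirtied"
 * argument is intended to mark the pages as modified, but that path is
 * currently disabled pending a fix (see FIXME below).
 */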
int
vsunlock(addr, len, dirtied)
        caddr_t addr;
        int     len;
        int     dirtied;
{
#if FIXME  /* [ */
        pmap_t          pmap;
        vm_page_t       pg;
        vm_offset_t     vaddr, paddr;
#endif  /* FIXME ] */
        kern_return_t   kret;

#if FIXME  /* [ */
        if (dirtied) {
                pmap = get_task_pmap(current_task());
                for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
                                vaddr += PAGE_SIZE) {
                        paddr = pmap_extract(pmap, vaddr);
                        pg = PHYS_TO_VM_PAGE(paddr);
                        vm_page_set_modified(pg);
                }
        }
#endif  /* FIXME ] */
#ifdef  lint
        dirtied++;
#endif  /* lint */
        kret = vm_map_unwire(current_map(), trunc_page(addr),
                        round_page(addr+len), FALSE);
        switch (kret) {
        case KERN_SUCCESS:
                return (0);
        case KERN_INVALID_ADDRESS:
        case KERN_NO_SPACE:
                return (ENOMEM);
        case KERN_PROTECTION_FAILURE:
                return (EACCES);
        default:
                return (EINVAL);
        }
}

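/*
 * subyte/suibyte/fubyte/fuibyte and suword/suiword/fuword/fuiword are the
 * traditional BSD store/fetch primitives for user-space memory, implemented
 * here on top of copyout()/copyin().  Store routines return 0 on success
 * and -1 on failure; fetch routines return the value read, or -1 on failure
 * (for the word routines, indistinguishable from a stored -1).
 */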
#if     defined(sun) || BALANCE || defined(m88k)
#else   /* defined(sun) || BALANCE || defined(m88k) */
int
subyte(addr, byte)
        void    *addr;
        int     byte;
{
        char    character;

        character = (char)byte;
        return (copyout((void *)&character, addr, sizeof(char)) == 0 ? 0 : -1);
}

int
suibyte(addr, byte)
        void    *addr;
        int     byte;
{
        char    character;

        character = (char)byte;
        return (copyout((void *)&character, addr, sizeof(char)) == 0 ? 0 : -1);
}

int
fubyte(addr)
        void    *addr;
{
        unsigned char   byte;

        if (copyin(addr, (void *)&byte, sizeof(char)))
                return (-1);
        return (byte);
}

int
fuibyte(addr)
        void    *addr;
{
        unsigned char   byte;

        if (copyin(addr, (void *)&byte, sizeof(char)))
                return (-1);
        return (byte);
}

/* Note: the word routines copy sizeof(int) bytes but traffic in longs;
 * this assumes sizeof(int) == sizeof(long), true for this 32-bit kernel. */
int
suword(addr, word)
        void    *addr;
        long    word;
{
        return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long
fuword(addr)
        void    *addr;
{
        long    word;

        if (copyin(addr, (void *)&word, sizeof(int)))
                return (-1);
        return (word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

int
suiword(addr, word)
        void    *addr;
        long    word;
{
        return (copyout((void *)&word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long
fuiword(addr)
        void    *addr;
{
        long    word;

        if (copyin(addr, (void *)&word, sizeof(int)))
                return (-1);
        return (word);
}
#endif  /* defined(sun) || BALANCE || defined(m88k) */

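/*
 * swapon is not implemented here; the stub simply fails with EOPNOTSUPP.
 */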
int
swapon()
{
        return (EOPNOTSUPP);
}

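/*
 * pid_for_task: given a task port name in the caller's IPC space, copy the
 * corresponding BSD process ID out to user address x, or -1 if the port
 * does not name a task with an attached proc.  Returns KERN_SUCCESS or
 * KERN_FAILURE accordingly.
 */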
kern_return_t
pid_for_task(t, x)
        mach_port_t     t;
        int             *x;
{
        struct proc     *p;
        task_t          t1;
        extern task_t   port_name_to_task(mach_port_t t);
        int             pid = -1;
        kern_return_t   err = KERN_SUCCESS;
        boolean_t       funnel_state;

        funnel_state = thread_funnel_set(kernel_flock, TRUE);
        t1 = port_name_to_task(t);

        if (t1 == TASK_NULL) {
                err = KERN_FAILURE;
                goto pftout;
        } else {
                p = get_bsdtask_info(t1);
                if (p) {
                        pid = p->p_pid;
                        err = KERN_SUCCESS;
                } else {
                        err = KERN_FAILURE;
                }
        }
        task_deallocate(t1);
pftout:
        (void) copyout((char *)&pid, (char *)x, sizeof(*x));
        thread_funnel_set(kernel_flock, funnel_state);
        return (err);
}

/*
 * Routine:     task_for_pid
 * Purpose:
 *      Get the task port for another "process", named by its
 *      process ID on the same host as "target_task".
 *
 *      Only permitted to privileged processes, or processes
 *      with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
        mach_port_t     target_tport;
        int             pid;
        mach_port_t     *t;
{
        struct proc     *p;
        struct proc     *p1;
        task_t          t1;
        mach_port_t     tret;
        extern task_t   port_name_to_task(mach_port_t tp);
        void            *sright;
        int             error = 0;
        boolean_t       funnel_state;

        t1 = port_name_to_task(target_tport);
        if (t1 == TASK_NULL) {
                (void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
                return (KERN_FAILURE);
        }

        funnel_state = thread_funnel_set(kernel_flock, TRUE);

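        /*
         * Re-do the proc lookup after every failed attempt to take a
         * task reference: mutex_pause() briefly drops the funnel, so
         * the process may have changed state in the meantime.
         */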
restart:
        p1 = get_bsdtask_info(t1);
        if (
                ((p = pfind(pid)) != (struct proc *) 0)
                && (p1 != (struct proc *) 0)
                && ((p->p_ucred->cr_uid == p1->p_ucred->cr_uid)
                || !(suser(p1->p_ucred, &p1->p_acflag)))
                && (p->p_stat != SZOMB)
                ) {
                if (p->task != TASK_NULL) {
                        if (!task_reference_try(p->task)) {
                                mutex_pause(); /* temp loss of funnel */
                                goto restart;
                        }
                        sright = convert_task_to_port(p->task);
                        tret = ipc_port_copyout_send(sright,
                                        get_task_ipcspace(current_task()));
                } else
                        tret = MACH_PORT_NULL;
                (void) copyout((char *)&tret, (char *)t, sizeof(mach_port_t));
                task_deallocate(t1);
                error = KERN_SUCCESS;
                goto tfpout;
        }
        task_deallocate(t1);
        tret = MACH_PORT_NULL;
        (void) copyout((char *)&tret, (char *)t, sizeof(mach_port_t));
        error = KERN_FAILURE;
tfpout:
        thread_funnel_set(kernel_flock, funnel_state);
        return (error);
}


struct load_shared_file_args {
        char            *filename;      /* path of the file to load */
        caddr_t         mfa;            /* mapped file address */
        u_long          mfs;            /* mapped file size */
        caddr_t         *ba;            /* in/out: base address */
        int             map_cnt;        /* number of entries in mappings */
        sf_mapping_t    *mappings;
        int             *flags;         /* in/out: load flags */
};

int ws_disabled = 1;

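/*
 * load_shared_file: map a regular file into the task's shared region
 * according to the caller-supplied sf_mapping_t list.  On success the
 * chosen base address and resulting flags are copied back out; via
 * *flags the caller may request an alternate load site or fresh
 * ("clean slate") local shared regions.
 */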
int
load_shared_file(
        struct proc     *p,
        struct load_shared_file_args *uap,
        register int    *retval)
{
        caddr_t         mapped_file_addr = uap->mfa;
        u_long          mapped_file_size = uap->mfs;
        caddr_t         *base_address = uap->ba;
        int             map_cnt = uap->map_cnt;
        sf_mapping_t    *mappings = uap->mappings;
        char            *filename = uap->filename;
        int             *flags = uap->flags;
        struct vnode    *vp = 0;
        struct nameidata nd, *ndp;
        char            *filename_str;
        register int    error;
        kern_return_t   kr;

        struct vattr    vattr;
        memory_object_control_t file_control;
        sf_mapping_t    *map_list;
        caddr_t         local_base;
        int             local_flags;
        int             caller_flags;
        int             i;
        vm_size_t       dummy;
        kern_return_t   kret;

        shared_region_mapping_t shared_region;
        struct shared_region_task_mappings task_mapping_info;
        shared_region_mapping_t next;

        ndp = &nd;

        unix_master();

        /* Retrieve the base address */
        if ((error = copyin(base_address, &local_base, sizeof(caddr_t)))) {
                goto lsf_bailout;
        }
        if ((error = copyin(flags, &local_flags, sizeof(int)))) {
                goto lsf_bailout;
        }
        caller_flags = local_flags;
        kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
                        (vm_size_t)(MAXPATHLEN));
        if (kret != KERN_SUCCESS) {
                error = ENOMEM;
                goto lsf_bailout;
        }
        kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
                        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
        if (kret != KERN_SUCCESS) {
                kmem_free(kernel_map, (vm_offset_t)filename_str,
                                (vm_size_t)(MAXPATHLEN));
                error = ENOMEM;
                goto lsf_bailout;
        }

        if ((error =
                copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t))))) {
                goto lsf_bailout_free;
        }

        if ((error = copyinstr(filename,
                        filename_str, MAXPATHLEN, (size_t *)&dummy))) {
                goto lsf_bailout_free;
        }

        /*
         * Get a vnode for the target file
         */
        NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
                        filename_str, p);

        if ((error = namei(ndp))) {
                goto lsf_bailout_free;
        }

        vp = ndp->ni_vp;

        if (vp->v_type != VREG) {
                error = EINVAL;
                goto lsf_bailout_free_vput;
        }

        UBCINFOCHECK("load_shared_file", vp);

        if ((error = VOP_GETATTR(vp, &vattr, p->p_ucred, p))) {
                goto lsf_bailout_free_vput;
        }

        file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
        if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
                error = EINVAL;
                goto lsf_bailout_free_vput;
        }

#ifdef notdef
        if (vattr.va_size != mapped_file_size) {
                error = EINVAL;
                goto lsf_bailout_free_vput;
        }
#endif

        vm_get_shared_region(current_task(), &shared_region);
        task_mapping_info.self = (vm_offset_t)shared_region;

        shared_region_mapping_info(shared_region,
                        &(task_mapping_info.text_region),
                        &(task_mapping_info.text_size),
                        &(task_mapping_info.data_region),
                        &(task_mapping_info.data_size),
                        &(task_mapping_info.region_mappings),
                        &(task_mapping_info.client_base),
                        &(task_mapping_info.alternate_base),
                        &(task_mapping_info.alternate_next),
                        &(task_mapping_info.flags), &next);

        /*
         * This is a work-around to allow executables which have been
         * built without knowledge of the proper shared segment to load.
         * This code has been architected as a shared region handler;
         * hard-wiring knowledge of where the regions are loaded is
         * problematic for the extension of shared regions, as it will
         * not be easy to know which region an item should go into.
         * The code below, however, gets around a short-term problem
         * with executables which believe they are loading at zero.
         */

        {
                if (((unsigned int)local_base &
                        (~(task_mapping_info.text_size - 1))) !=
                        task_mapping_info.client_base) {
                        if (local_flags & ALTERNATE_LOAD_SITE) {
                                local_base = (caddr_t)(
                                        (unsigned int)local_base &
                                        (task_mapping_info.text_size - 1));
                                local_base = (caddr_t)((unsigned int)local_base
                                        | task_mapping_info.client_base);
                        } else {
                                error = EINVAL;
                                goto lsf_bailout_free_vput;
                        }
                }
        }

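        /*
         * The rebasing above, with hypothetical values: for text_size
         * 0x10000000 and client_base 0x90000000, a requested base of
         * 0x00001000 keeps only its offset bits (0x00001000) and is
         * OR'ed with the client base, yielding 0x90001000 inside the
         * shared text segment.
         */
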
        /* load alternate regions if the caller has requested. */
        /* Note: the new regions are "clean slates" */

        if (local_flags & NEW_LOCAL_SHARED_REGIONS) {

                shared_region_mapping_t new_shared_region;
                shared_region_mapping_t old_shared_region;
                struct shared_region_task_mappings old_info;
                struct shared_region_task_mappings new_info;

                if (shared_file_create_system_region(&new_shared_region)) {
                        error = ENOMEM;
                        goto lsf_bailout_free_vput;
                }
                vm_get_shared_region(current_task(), &old_shared_region);

                old_info.self = (vm_offset_t)old_shared_region;
                shared_region_mapping_info(old_shared_region,
                                &(old_info.text_region),
                                &(old_info.text_size),
                                &(old_info.data_region),
                                &(old_info.data_size),
                                &(old_info.region_mappings),
                                &(old_info.client_base),
                                &(old_info.alternate_base),
                                &(old_info.alternate_next),
                                &(old_info.flags), &next);
                new_info.self = (vm_offset_t)new_shared_region;
                shared_region_mapping_info(new_shared_region,
                                &(new_info.text_region),
                                &(new_info.text_size),
                                &(new_info.data_region),
                                &(new_info.data_size),
                                &(new_info.region_mappings),
                                &(new_info.client_base),
                                &(new_info.alternate_base),
                                &(new_info.alternate_next),
                                &(new_info.flags), &next);
                if (vm_map_region_replace(current_map(), old_info.text_region,
                                new_info.text_region, old_info.client_base,
                                old_info.client_base + old_info.text_size)) {
                        panic("load_shared_file: shared region mis-alignment");
                        /* NOTREACHED: panic() does not return, so the
                         * cleanup below is vestigial. */
                        shared_region_mapping_dealloc(new_shared_region);
                        error = EINVAL;
                        goto lsf_bailout_free_vput;
                }
                if (vm_map_region_replace(current_map(), old_info.data_region,
                                new_info.data_region,
                                old_info.client_base + old_info.text_size,
                                old_info.client_base
                                + old_info.text_size + old_info.data_size)) {
                        panic("load_shared_file: shared region mis-alignment 1");
                        /* NOTREACHED */
                        shared_region_mapping_dealloc(new_shared_region);
                        error = EINVAL;
                        goto lsf_bailout_free_vput;
                }
                vm_set_shared_region(current_task(), new_shared_region);
                task_mapping_info = new_info;
                shared_region_mapping_dealloc(old_shared_region);
        }

        if ((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
                        mapped_file_size,
                        (vm_offset_t *)&local_base,
                        map_cnt, map_list, file_control,
                        &task_mapping_info, &local_flags))) {
                switch (kr) {
                case KERN_FAILURE:
                        error = EINVAL;
                        break;
                case KERN_INVALID_ARGUMENT:
                        error = EINVAL;
                        break;
                case KERN_INVALID_ADDRESS:
                        error = EACCES;
                        break;
                case KERN_PROTECTION_FAILURE:
                        /* save EAUTH for authentication in this routine */
                        error = EPERM;
                        break;
                case KERN_NO_SPACE:
                        error = ENOMEM;
                        break;
                default:
                        error = EINVAL;
                }
                if ((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
                        printf("load_shared_file: Failed to load shared file! "
                                "error: 0x%x, Base_address: 0x%x, "
                                "number of mappings: %d, file_control 0x%x\n",
                                error, local_base, map_cnt, file_control);
                        for (i = 0; i < map_cnt; i++) {
                                printf("load_shared_file: Mapping%d, "
                                        "mapping_offset: 0x%x, size: 0x%x, "
                                        "file_offset: 0x%x, protection: 0x%x\n",
                                        i, map_list[i].mapping_offset,
                                        map_list[i].size,
                                        map_list[i].file_offset,
                                        map_list[i].protection);
                        }
                }
        } else {
                if (!(error = copyout(&local_flags, flags, sizeof(int)))) {
                        error = copyout(&local_base,
                                        base_address, sizeof(caddr_t));
                }
        }

lsf_bailout_free_vput:
        vput(vp);

lsf_bailout_free:
        kmem_free(kernel_map, (vm_offset_t)filename_str,
                        (vm_size_t)(MAXPATHLEN));
        kmem_free(kernel_map, (vm_offset_t)map_list,
                        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
        unix_release();
        return error;
}

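/*
 * reset_shared_file: for each supplied mapping that falls in the shared
 * data segment, tear down the current mapping and re-map it read-only
 * from the global shared data region, effectively resetting the data
 * pages of a previously loaded shared file.
 */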
struct reset_shared_file_args {
        caddr_t         *ba;            /* base address of the loaded file */
        int             map_cnt;        /* number of entries in mappings */
        sf_mapping_t    *mappings;
};

int
reset_shared_file(
        struct proc     *p,
        struct reset_shared_file_args *uap,
        register int    *retval)
{
        caddr_t         *base_address = uap->ba;
        int             map_cnt = uap->map_cnt;
        sf_mapping_t    *mappings = uap->mappings;
        register int    error;
        kern_return_t   kr;

        sf_mapping_t    *map_list;
        caddr_t         local_base;
        vm_offset_t     map_address;
        int             i;
        kern_return_t   kret;

        unix_master();

        /* Retrieve the base address */
        if ((error = copyin(base_address, &local_base, sizeof(caddr_t)))) {
                goto rsf_bailout;
        }

        if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
                        != GLOBAL_SHARED_TEXT_SEGMENT) {
                error = EINVAL;
                goto rsf_bailout;
        }

        kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
                        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
        if (kret != KERN_SUCCESS) {
                error = ENOMEM;
                goto rsf_bailout;
        }

        if ((error =
                copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t))))) {

                kmem_free(kernel_map, (vm_offset_t)map_list,
                                (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
                goto rsf_bailout;
        }
        /* Only mappings that fall in the shared data segment are reset;
         * they are re-mapped read-only from the global shared data region. */
        for (i = 0; i < map_cnt; i++) {
                if ((map_list[i].mapping_offset
                                & GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
                        map_address = (vm_offset_t)
                                (local_base + map_list[i].mapping_offset);
                        vm_deallocate(current_map(),
                                map_address,
                                map_list[i].size);
                        vm_map(current_map(), &map_address,
                                map_list[i].size, 0, SHARED_LIB_ALIAS,
                                shared_data_region_handle,
                                ((unsigned int)local_base
                                & SHARED_DATA_REGION_MASK) +
                                (map_list[i].mapping_offset
                                & SHARED_DATA_REGION_MASK),
                                TRUE, VM_PROT_READ,
                                VM_PROT_READ, VM_INHERIT_SHARE);
                }
        }

        kmem_free(kernel_map, (vm_offset_t)map_list,
                        (vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
        unix_release();
        return error;
}
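/*
 * clone_system_shared_regions: give the current task private shared
 * regions, initialized as clones of the system-wide text and data
 * regions; the new regions are then chained to the old ones via
 * shared_region_object_chain_attach() so already-mapped objects
 * remain reachable.
 */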
int
clone_system_shared_regions()
{
        shared_region_mapping_t new_shared_region;
        shared_region_mapping_t next;
        shared_region_mapping_t old_shared_region;
        struct shared_region_task_mappings old_info;
        struct shared_region_task_mappings new_info;

        if (shared_file_create_system_region(&new_shared_region))
                return (ENOMEM);
        vm_get_shared_region(current_task(), &old_shared_region);
        old_info.self = (vm_offset_t)old_shared_region;
        shared_region_mapping_info(old_shared_region,
                        &(old_info.text_region),
                        &(old_info.text_size),
                        &(old_info.data_region),
                        &(old_info.data_size),
                        &(old_info.region_mappings),
                        &(old_info.client_base),
                        &(old_info.alternate_base),
                        &(old_info.alternate_next),
                        &(old_info.flags), &next);
        new_info.self = (vm_offset_t)new_shared_region;
        shared_region_mapping_info(new_shared_region,
                        &(new_info.text_region),
                        &(new_info.text_size),
                        &(new_info.data_region),
                        &(new_info.data_size),
                        &(new_info.region_mappings),
                        &(new_info.client_base),
                        &(new_info.alternate_base),
                        &(new_info.alternate_next),
                        &(new_info.flags), &next);
        if (vm_region_clone(old_info.text_region, new_info.text_region)) {
                panic("clone_system_shared_regions: shared region mis-alignment 1");
                /* NOTREACHED: panic() does not return, so the cleanup
                 * below is vestigial. */
                shared_region_mapping_dealloc(new_shared_region);
                return (EINVAL);
        }
        if (vm_region_clone(old_info.data_region, new_info.data_region)) {
                panic("clone_system_shared_regions: shared region mis-alignment 2");
                /* NOTREACHED */
                shared_region_mapping_dealloc(new_shared_region);
                return (EINVAL);
        }
        if (vm_map_region_replace(current_map(), old_info.text_region,
                        new_info.text_region, old_info.client_base,
                        old_info.client_base + old_info.text_size)) {
                panic("clone_system_shared_regions: shared region mis-alignment 3");
                /* NOTREACHED */
                shared_region_mapping_dealloc(new_shared_region);
                return (EINVAL);
        }
        if (vm_map_region_replace(current_map(), old_info.data_region,
                        new_info.data_region,
                        old_info.client_base + old_info.text_size,
                        old_info.client_base
                        + old_info.text_size + old_info.data_size)) {
                panic("clone_system_shared_regions: shared region mis-alignment 4");
                /* NOTREACHED */
                shared_region_mapping_dealloc(new_shared_region);
                return (EINVAL);
        }
        vm_set_shared_region(current_task(), new_shared_region);
        shared_region_object_chain_attach(new_shared_region, old_shared_region);
        return (0);
}