/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1987 Carnegie-Mellon University
 * All rights reserved.  The CMU software License Agreement specifies
 * the terms and conditions for use and redistribution.
 */

/*
 */
#include <meta_features.h>

#include <kern/task.h>
#include <kern/thread.h>
#include <kern/debug.h>
#include <kern/lock.h>
#include <mach/time_value.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/port.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/dir.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/vm.h>
#include <sys/file.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/kernel.h>
#include <sys/ubc.h>

#include <kern/kalloc.h>
#include <kern/parallel.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

#include <machine/spl.h>
#include <mach/shared_memory_server.h>

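/*
 * useracc:  check whether the current task's address map permits the
 * requested access (read for B_READ, otherwise write) over the range
 * [addr, addr + len).  Returns the boolean result of
 * vm_map_check_protection().
 */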
useracc(addr, len, prot)
	caddr_t	addr;
	u_int	len;
	int	prot;
{
	return (vm_map_check_protection(
			current_map(),
			trunc_page(addr), round_page(addr+len),
			prot == B_READ ? VM_PROT_READ : VM_PROT_WRITE));
}

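/*
 * vslock:  wire the given range of the current task's address map into
 * physical memory for read/write access.  Mach errors from vm_map_wire()
 * are translated into BSD errno values.
 */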
vslock(addr, len)
	caddr_t	addr;
	int	len;
{
	kern_return_t kret;
	kret = vm_map_wire(current_map(), trunc_page(addr),
			round_page(addr+len),
			VM_PROT_READ | VM_PROT_WRITE, FALSE);

	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

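/*
 * vsunlock:  unwire a range previously wired by vslock().  The "dirtied"
 * hint is currently unused (see the FIXME block below); Mach errors from
 * vm_map_unwire() are translated into BSD errno values.
 */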
vsunlock(addr, len, dirtied)
	caddr_t	addr;
	int	len;
	int	dirtied;
{
	pmap_t		pmap;
#if FIXME  /* [ */
	vm_page_t	pg;
#endif  /* FIXME ] */
	vm_offset_t	vaddr, paddr;
	kern_return_t	kret;

#if FIXME  /* [ */
	if (dirtied) {
		pmap = get_task_pmap(current_task());
		for (vaddr = trunc_page(addr); vaddr < round_page(addr+len);
				vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
#endif  /* FIXME ] */
#ifdef	lint
	dirtied++;
#endif	/* lint */
	kret = vm_map_unwire(current_map(), trunc_page(addr),
			round_page(addr+len), FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}

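/*
 * The subyte/suibyte/suword/suiword and fubyte/fuibyte/fuword/fuiword
 * primitives below store to and fetch from user addresses via
 * copyout()/copyin().  A store returns 0 on success and -1 on failure;
 * a fetch returns the value read, or -1 on failure.
 */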
#if	defined(sun) || BALANCE || defined(m88k)
#else	/* defined(sun) || BALANCE || defined(m88k) */
subyte(addr, byte)
	void	*addr;
	int	byte;
{
	char character;

	character = (char)byte;
	return (copyout((void *)&(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

suibyte(addr, byte)
	void	*addr;
	int	byte;
{
	char character;

	character = (char)byte;
	return (copyout((void *) &(character), addr, sizeof(char)) == 0 ? 0 : -1);
}

int fubyte(addr)
	void	*addr;
{
	unsigned char byte;

	if (copyin(addr, (void *) &byte, sizeof(char)))
		return(-1);
	return(byte);
}

int fuibyte(addr)
	void	*addr;
{
	unsigned char byte;

	if (copyin(addr, (void *) &(byte), sizeof(char)))
		return(-1);
	return(byte);
}

suword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuword(addr)
	void	*addr;
{
	long word;

	if (copyin(addr, (void *) &word, sizeof(int)))
		return(-1);
	return(word);
}

/* suiword and fuiword are the same as suword and fuword, respectively */

suiword(addr, word)
	void	*addr;
	long	word;
{
	return (copyout((void *) &word, addr, sizeof(int)) == 0 ? 0 : -1);
}

long fuiword(addr)
	void	*addr;
{
	long word;

	if (copyin(addr, (void *) &word, sizeof(int)))
		return(-1);
	return(word);
}
#endif	/* defined(sun) || BALANCE || defined(m88k) */

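/*
 * swapon:  not supported; always fails with EOPNOTSUPP.
 */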
int
swapon()
{
	return(EOPNOTSUPP);
}

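/*
 *	Routine:	pid_for_task
 *	Purpose:
 *		Given a task port name, copy out the BSD process ID of the
 *		corresponding task, or -1 if the port does not name a task
 *		with an attached BSD process.
 */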
kern_return_t
pid_for_task(t, x)
	mach_port_t	t;
	int		*x;
{
	struct proc	*p;
	task_t		t1;
	extern task_t	port_name_to_task(mach_port_t t);
	int		pid = -1;
	kern_return_t	err = KERN_SUCCESS;
	boolean_t	funnel_state;

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	t1 = port_name_to_task(t);

	if (t1 == TASK_NULL) {
		err = KERN_FAILURE;
		goto pftout;
	} else {
		p = get_bsdtask_info(t1);
		if (p) {
			pid = p->p_pid;
			err = KERN_SUCCESS;
		} else {
			err = KERN_FAILURE;
		}
	}
	task_deallocate(t1);
pftout:
	(void) copyout((char *) &pid, (char *) x, sizeof(*x));
	thread_funnel_set(kernel_flock, funnel_state);
	return(err);
}

/*
 *	Routine:	task_for_pid
 *	Purpose:
 *		Get the task port for another "process", named by its
 *		process ID on the same host as "target_task".
 *
 *		Only permitted to privileged processes, or processes
 *		with the same user ID.
 */
kern_return_t
task_for_pid(target_tport, pid, t)
	mach_port_t	target_tport;
	int		pid;
	mach_port_t	*t;
{
	struct proc	*p;
	struct proc	*p1;
	task_t		t1;
	mach_port_t	tret;
	extern task_t	port_name_to_task(mach_port_t tp);
	void		*sright;
	int		error = 0;
	boolean_t	funnel_state;

	t1 = port_name_to_task(target_tport);
	if (t1 == TASK_NULL) {
		(void) copyout((char *)&t1, (char *)t, sizeof(mach_port_t));
		return(KERN_FAILURE);
	}

	funnel_state = thread_funnel_set(kernel_flock, TRUE);

restart:
	p1 = get_bsdtask_info(t1);
	if (
		((p = pfind(pid)) != (struct proc *) 0)
		&& (p1 != (struct proc *) 0)
		&& (((p->p_ucred->cr_uid == p1->p_ucred->cr_uid) &&
		     ((p->p_cred->p_ruid == p1->p_cred->p_ruid)))
		    || !(suser(p1->p_ucred, &p1->p_acflag)))
		&& (p->p_stat != SZOMB)
		) {
		if (p->task != TASK_NULL) {
			if (!task_reference_try(p->task)) {
				mutex_pause(); /* temp loss of funnel */
				goto restart;
			}
			sright = convert_task_to_port(p->task);
			tret = ipc_port_copyout_send(sright,
				get_task_ipcspace(current_task()));
		} else
			tret = MACH_PORT_NULL;
		(void) copyout((char *)&tret, (char *) t, sizeof(mach_port_t));
		task_deallocate(t1);
		error = KERN_SUCCESS;
		goto tfpout;
	}
	task_deallocate(t1);
	tret = MACH_PORT_NULL;
	(void) copyout((char *) &tret, (char *) t, sizeof(mach_port_t));
	error = KERN_FAILURE;
tfpout:
	thread_funnel_set(kernel_flock, funnel_state);
	return(error);
}

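/*
 * User-supplied argument block for load_shared_file() below.
 */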
struct load_shared_file_args {
	char		*filename;
	caddr_t		mfa;
	u_long		mfs;
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
	int		*flags;
};

int	ws_disabled = 1;

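/*
 *	Routine:	load_shared_file
 *	Purpose:
 *		Map the named file into the task's global shared regions at
 *		the requested base address, using the caller-supplied list of
 *		sf_mapping_t entries.  The resulting base address and flags
 *		are copied back out to the caller.
 */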
int
load_shared_file(
	struct proc			*p,
	struct load_shared_file_args	*uap,
	register			*retval)
{
	caddr_t		mapped_file_addr = uap->mfa;
	u_long		mapped_file_size = uap->mfs;
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	char		*filename = uap->filename;
	int		*flags = uap->flags;
	struct vnode		*vp = 0;
	struct nameidata	nd, *ndp;
	char			*filename_str;
	register int		error;
	kern_return_t		kr;

	struct vattr		vattr;
	memory_object_control_t	file_control;
	sf_mapping_t		*map_list;
	caddr_t			local_base;
	int			local_flags;
	int			caller_flags;
	int			i;
	vm_size_t		dummy;
	kern_return_t		kret;

	shared_region_mapping_t			shared_region;
	struct shared_region_task_mappings	task_mapping_info;
	shared_region_mapping_t			next;

	ndp = &nd;

	unix_master();

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto lsf_bailout;
	}
	if (error = copyin(flags, &local_flags, sizeof (int))) {
		goto lsf_bailout;
	}
	caller_flags = local_flags;
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&filename_str,
			(vm_size_t)(MAXPATHLEN));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto lsf_bailout;
	}
	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		kmem_free(kernel_map, (vm_offset_t)filename_str,
			(vm_size_t)(MAXPATHLEN));
		error = ENOMEM;
		goto lsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {
		goto lsf_bailout_free;
	}

	if (error = copyinstr(filename,
			filename_str, MAXPATHLEN, (size_t *)&dummy)) {
		goto lsf_bailout_free;
	}

	/*
	 * Get a vnode for the target file
	 */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE,
	    filename_str, p);

	if ((error = namei(ndp))) {
		goto lsf_bailout_free;
	}

	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

	UBCINFOCHECK("load_shared_file", vp);

	if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
		goto lsf_bailout_free_vput;
	}

	file_control = ubc_getobject(vp, UBC_HOLDOBJECT);
	if (file_control == MEMORY_OBJECT_CONTROL_NULL) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}

#ifdef notdef
	if (vattr.va_size != mapped_file_size) {
		error = EINVAL;
		goto lsf_bailout_free_vput;
	}
#endif

	vm_get_shared_region(current_task(), &shared_region);
	task_mapping_info.self = (vm_offset_t)shared_region;

	shared_region_mapping_info(shared_region,
			&(task_mapping_info.text_region),
			&(task_mapping_info.text_size),
			&(task_mapping_info.data_region),
			&(task_mapping_info.data_size),
			&(task_mapping_info.region_mappings),
			&(task_mapping_info.client_base),
			&(task_mapping_info.alternate_base),
			&(task_mapping_info.alternate_next),
			&(task_mapping_info.flags), &next);

	/* This is a work-around to allow executables which have been     */
	/* built without knowledge of the proper shared segment to load.  */
	/* This code has been architected as a shared region handler; the */
	/* knowledge of where the regions are loaded is problematic for   */
	/* the extension of shared regions, as it will not be easy to     */
	/* know which region an item should go into.  The code below,     */
	/* however, gets around a short-term problem with executables     */
	/* which believe they are loading at zero.                        */

	{
		if (((unsigned int)local_base &
			(~(task_mapping_info.text_size - 1))) !=
			task_mapping_info.client_base) {
			if (local_flags & ALTERNATE_LOAD_SITE) {
				local_base = (caddr_t)(
					(unsigned int)local_base &
					(task_mapping_info.text_size - 1));
				local_base = (caddr_t)((unsigned int)local_base
					| task_mapping_info.client_base);
			} else {
				error = EINVAL;
				goto lsf_bailout_free_vput;
			}
		}
	}

	/* Load alternate regions if the caller has requested them. */
	/* Note: the new regions are "clean slates".                 */

	if (local_flags & NEW_LOCAL_SHARED_REGIONS) {

		shared_region_mapping_t	new_shared_region;
		shared_region_mapping_t	old_shared_region;
		struct shared_region_task_mappings old_info;
		struct shared_region_task_mappings new_info;

		if (shared_file_create_system_region(&new_shared_region)) {
			error = ENOMEM;
			goto lsf_bailout_free_vput;
		}
		vm_get_shared_region(current_task(), &old_shared_region);

		old_info.self = (vm_offset_t)old_shared_region;
		shared_region_mapping_info(old_shared_region,
			&(old_info.text_region),
			&(old_info.text_size),
			&(old_info.data_region),
			&(old_info.data_size),
			&(old_info.region_mappings),
			&(old_info.client_base),
			&(old_info.alternate_base),
			&(old_info.alternate_next),
			&(old_info.flags), &next);
		new_info.self = (vm_offset_t)new_shared_region;
		shared_region_mapping_info(new_shared_region,
			&(new_info.text_region),
			&(new_info.text_size),
			&(new_info.data_region),
			&(new_info.data_size),
			&(new_info.region_mappings),
			&(new_info.client_base),
			&(new_info.alternate_base),
			&(new_info.alternate_next),
			&(new_info.flags), &next);
		if (vm_map_region_replace(current_map(), old_info.text_region,
			     new_info.text_region, old_info.client_base,
			     old_info.client_base + old_info.text_size)) {
			panic("load_shared_file: shared region mis-alignment");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		if (vm_map_region_replace(current_map(), old_info.data_region,
			     new_info.data_region,
			     old_info.client_base + old_info.text_size,
			     old_info.client_base
				+ old_info.text_size + old_info.data_size)) {
			panic("load_shared_file: shared region mis-alignment 1");
			shared_region_mapping_dealloc(new_shared_region);
			error = EINVAL;
			goto lsf_bailout_free_vput;
		}
		vm_set_shared_region(current_task(), new_shared_region);
		task_mapping_info = new_info;
		shared_region_mapping_dealloc(old_shared_region);
	}

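	/*
	 * Map the file into the shared region using the validated mapping
	 * list.  On failure, translate the Mach return code into an errno
	 * value and optionally log diagnostics; otherwise copy the final
	 * flags and base address back out to the caller.
	 */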
	if ((kr = copyin_shared_file((vm_offset_t)mapped_file_addr,
			mapped_file_size,
			(vm_offset_t *)&local_base,
			map_cnt, map_list, file_control,
			&task_mapping_info, &local_flags))) {
		switch (kr) {
		case KERN_FAILURE:
			error = EINVAL;
			break;
		case KERN_INVALID_ARGUMENT:
			error = EINVAL;
			break;
		case KERN_INVALID_ADDRESS:
			error = EACCES;
			break;
		case KERN_PROTECTION_FAILURE:
			/* save EAUTH for authentication in this */
			/* routine */
			error = EPERM;
			break;
		case KERN_NO_SPACE:
			error = ENOMEM;
			break;
		default:
			error = EINVAL;
		};
		if ((caller_flags & ALTERNATE_LOAD_SITE) && systemLogDiags) {
			printf("load_shared_file: Failed to load shared file! error: 0x%x, Base_address: 0x%x, number of mappings: %d, file_control 0x%x\n", error, local_base, map_cnt, file_control);
			for (i = 0; i < map_cnt; i++) {
				printf("load_shared_file: Mapping%d, mapping_offset: 0x%x, size: 0x%x, file_offset: 0x%x, protection: 0x%x\n"
					, i, map_list[i].mapping_offset,
					map_list[i].size,
					map_list[i].file_offset,
					map_list[i].protection);
			}
		}
	} else {
		if (!(error = copyout(&local_flags, flags, sizeof (int)))) {
			error = copyout(&local_base,
				base_address, sizeof (caddr_t));
		}
	}

lsf_bailout_free_vput:
	vput(vp);

lsf_bailout_free:
	kmem_free(kernel_map, (vm_offset_t)filename_str,
		(vm_size_t)(MAXPATHLEN));
	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

lsf_bailout:
	unix_release();
	return error;
}

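/*
 * User-supplied argument block for reset_shared_file() below.
 */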
struct reset_shared_file_args {
	caddr_t		*ba;
	int		map_cnt;
	sf_mapping_t	*mappings;
};

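/*
 *	Routine:	reset_shared_file
 *	Purpose:
 *		For each supplied mapping whose offset falls in the shared
 *		data segment, deallocate the task's current pages and re-map
 *		the backing pages from shared_data_region_handle read-only.
 */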
int
reset_shared_file(
	struct proc			*p,
	struct reset_shared_file_args	*uap,
	register			*retval)
{
	caddr_t		*base_address = uap->ba;
	int		map_cnt = uap->map_cnt;
	sf_mapping_t	*mappings = uap->mappings;
	register int	error;
	kern_return_t	kr;

	sf_mapping_t	*map_list;
	caddr_t		local_base;
	vm_offset_t	map_address;
	int		i;
	kern_return_t	kret;

	unix_master();

	/* Retrieve the base address */
	if (error = copyin(base_address, &local_base, sizeof (caddr_t))) {
		goto rsf_bailout;
	}

	if (((unsigned int)local_base & GLOBAL_SHARED_SEGMENT_MASK)
			!= GLOBAL_SHARED_TEXT_SEGMENT) {
		error = EINVAL;
		goto rsf_bailout;
	}

	kret = kmem_alloc(kernel_map, (vm_offset_t *)&map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
	if (kret != KERN_SUCCESS) {
		error = ENOMEM;
		goto rsf_bailout;
	}

	if (error =
		copyin(mappings, map_list, (map_cnt*sizeof(sf_mapping_t)))) {

		kmem_free(kernel_map, (vm_offset_t)map_list,
			(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));
		goto rsf_bailout;
	}
	for (i = 0; i < map_cnt; i++) {
		if ((map_list[i].mapping_offset
				& GLOBAL_SHARED_SEGMENT_MASK) == 0x10000000) {
			map_address = (vm_offset_t)
				(local_base + map_list[i].mapping_offset);
			vm_deallocate(current_map(),
				map_address,
				map_list[i].size);
			vm_map(current_map(), &map_address,
				map_list[i].size, 0, SHARED_LIB_ALIAS,
				shared_data_region_handle,
				((unsigned int)local_base
					& SHARED_DATA_REGION_MASK) +
				(map_list[i].mapping_offset
					& SHARED_DATA_REGION_MASK),
				TRUE, VM_PROT_READ,
				VM_PROT_READ, VM_INHERIT_SHARE);
		}
	}

	kmem_free(kernel_map, (vm_offset_t)map_list,
		(vm_size_t)(map_cnt*sizeof(sf_mapping_t)));

rsf_bailout:
	unix_release();
	return error;
}

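/*
 *	Routine:	clone_system_shared_regions
 *	Purpose:
 *		Give the current task its own private copy of the system
 *		shared regions: create a new region, clone the existing text
 *		and data regions into it, substitute it into the task's
 *		address map, and chain it to the old region.
 */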
int
clone_system_shared_regions()
{
	shared_region_mapping_t	new_shared_region;
	shared_region_mapping_t	next;
	shared_region_mapping_t	old_shared_region;
	struct shared_region_task_mappings old_info;
	struct shared_region_task_mappings new_info;

	if (shared_file_create_system_region(&new_shared_region))
		return (ENOMEM);
	vm_get_shared_region(current_task(), &old_shared_region);
	old_info.self = (vm_offset_t)old_shared_region;
	shared_region_mapping_info(old_shared_region,
		&(old_info.text_region),
		&(old_info.text_size),
		&(old_info.data_region),
		&(old_info.data_size),
		&(old_info.region_mappings),
		&(old_info.client_base),
		&(old_info.alternate_base),
		&(old_info.alternate_next),
		&(old_info.flags), &next);
	new_info.self = (vm_offset_t)new_shared_region;
	shared_region_mapping_info(new_shared_region,
		&(new_info.text_region),
		&(new_info.text_size),
		&(new_info.data_region),
		&(new_info.data_size),
		&(new_info.region_mappings),
		&(new_info.client_base),
		&(new_info.alternate_base),
		&(new_info.alternate_next),
		&(new_info.flags), &next);
	if (vm_region_clone(old_info.text_region, new_info.text_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 1");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if (vm_region_clone(old_info.data_region, new_info.data_region)) {
		panic("clone_system_shared_regions: shared region mis-alignment 2");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.text_region,
			new_info.text_region, old_info.client_base,
			old_info.client_base + old_info.text_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 3");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	if (vm_map_region_replace(current_map(), old_info.data_region,
			new_info.data_region,
			old_info.client_base + old_info.text_size,
			old_info.client_base
			+ old_info.text_size + old_info.data_size)) {
		panic("clone_system_shared_regions: shared region mis-alignment 4");
		shared_region_mapping_dealloc(new_shared_region);
		return(EINVAL);
	}
	vm_set_shared_region(current_task(), new_shared_region);
	shared_region_object_chain_attach(new_shared_region, old_shared_region);
	return(0);

}