/*
 * Copyright (c) 2000-2001 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 * File: kern/mach_loader.c
 * Author: Avadis Tevanian, Jr.
 *
 * Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88  Avadis Tevanian, Jr. (avie) at NeXT
 *      Started.
 */
#include <sys/param.h>
#include <sys/vnode.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/fcntl.h>
#include <sys/ubc.h>

#include <mach/mach_types.h>

#include <kern/mach_loader.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <kern/cpu_number.h>

#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <mach/shared_memory_server.h>
#include <mach/vm_statistics.h>

/*
 * Prototypes of static functions.
 */
static
load_return_t
parse_machfile(
    struct vnode *vp,
    vm_map_t map,
    thread_act_t thr_act,
    struct mach_header *header,
    unsigned long file_offset,
    unsigned long macho_size,
    int depth,
    load_result_t *result
),
load_segment(
    struct segment_command *scp,
    void *pager,
    unsigned long pager_offset,
    unsigned long macho_size,
    unsigned long end_of_file,
    vm_map_t map,
    load_result_t *result
),
load_unixthread(
    struct thread_command *tcp,
    thread_act_t thr_act,
    load_result_t *result
),
load_thread(
    struct thread_command *tcp,
    thread_act_t thr_act,
    load_result_t *result
),
load_threadstate(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size
),
load_threadstack(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size,
    vm_offset_t *user_stack,
    int *customstack
),
load_threadentry(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size,
    vm_offset_t *entry_point
),
load_dylinker(
    struct dylinker_command *lcp,
    vm_map_t map,
    thread_act_t thr_act,
    int depth,
    load_result_t *result
),
get_macho_vnode(
    char *path,
    struct mach_header *mach_header,
    unsigned long *file_offset,
    unsigned long *macho_size,
    struct vnode **vpp
);

load_return_t
load_machfile(
    struct vnode *vp,
    struct mach_header *header,
    unsigned long file_offset,
    unsigned long macho_size,
    load_result_t *result,
    thread_act_t thr_act,
    vm_map_t new_map
)
{
    pmap_t pmap;
    vm_map_t map;
    vm_map_t old_map;
    load_result_t myresult;
    kern_return_t kret;
    load_return_t lret;
    boolean_t create_map = TRUE;

    if (new_map != VM_MAP_NULL) {
        create_map = FALSE;
    }

    if (create_map) {
        old_map = current_map();
#ifdef i386
        pmap = get_task_pmap(current_task());
        pmap_reference(pmap);
#else
        pmap = pmap_create((vm_size_t) 0);
#endif
        map = vm_map_create(pmap,
                get_map_min(old_map),
                get_map_max(old_map),
                TRUE); /**** FIXME ****/
    } else
        map = new_map;

    if (!result)
        result = &myresult;

    *result = (load_result_t) { 0 };

    lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
                          0, result);

    if (lret != LOAD_SUCCESS) {
        if (create_map)
            vm_map_deallocate(map);     /* will lose pmap reference too */
        return(lret);
    }
    /*
     * Commit to new map.  First make sure that the current
     * users of the task get done with it, and that we clean
     * up the old contents of IPC and memory.  The task is
     * guaranteed to be single threaded upon return (us).
     *
     * Swap the new map for the old at the task level and at
     * our activation.  The latter consumes our new map reference
     * but each leaves us responsible for the old_map reference.
     * That lets us get off the pmap associated with it, and
     * then we can release it.
     */
    if (create_map) {
        task_halt(current_task());

        old_map = swap_task_map(current_task(), map);
        vm_map_deallocate(old_map);

        old_map = swap_act_map(current_act(), map);

#ifndef i386
        pmap_switch(pmap);      /* Make sure we are using the new pmap */
#endif
        vm_map_deallocate(old_map);
    }
    return(LOAD_SUCCESS);
}

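/*
 * When dylink_test is non-zero, parse_machfile() pre-maps the system
 * shared region's text and data (read-only, shared inheritance) into
 * the new task before the dynamic linker itself is loaded.
 */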
int dylink_test = 1;
extern vm_offset_t system_shared_region;

static
load_return_t
parse_machfile(
    struct vnode *vp,
    vm_map_t map,
    thread_act_t thr_act,
    struct mach_header *header,
    unsigned long file_offset,
    unsigned long macho_size,
    int depth,
    load_result_t *result
)
{
    struct machine_slot *ms;
    int ncmds;
    struct load_command *lcp, *next;
    struct dylinker_command *dlp = 0;
    void *pager;
    load_return_t ret;
    vm_offset_t addr, kl_addr;
    vm_size_t size, kl_size;
    int offset;
    int pass;
    struct proc *p = current_proc();    /* XXXX */
    int error;
    int resid = 0;
    task_t task;

    /*
     * Break infinite recursion
     */
    if (depth > 6)
        return(LOAD_FAILURE);

    task = (task_t)get_threadtask(thr_act);

    depth++;

    /*
     * Check to see if right machine type.
     */
    ms = &machine_slot[cpu_number()];
    if ((header->cputype != ms->cpu_type) ||
        !check_cpu_subtype(header->cpusubtype))
        return(LOAD_BADARCH);

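    /*
     * The file type must be consistent with how deep we are in the
     * loading hierarchy: executables (and objects/preloads) only at
     * the top level, libraries only below it, and the dynamic linker
     * only when it is loaded on behalf of an executable (depth 2).
     */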
    switch (header->filetype) {

    case MH_OBJECT:
    case MH_EXECUTE:
    case MH_PRELOAD:
        if (depth != 1)
            return (LOAD_FAILURE);
        break;

    case MH_FVMLIB:
    case MH_DYLIB:
        if (depth == 1)
            return (LOAD_FAILURE);
        break;

    case MH_DYLINKER:
        if (depth != 2)
            return (LOAD_FAILURE);
        break;

    default:
        return (LOAD_FAILURE);
    }

    /*
     * Get the pager for the file.
     */
    UBCINFOCHECK("parse_machfile", vp);
    pager = (void *) ubc_getpager(vp);

    /*
     * Map portion that must be accessible directly into
     * kernel's map.
     */
    if ((sizeof (struct mach_header) + header->sizeofcmds) > macho_size)
        return(LOAD_BADMACHO);

    /*
     * Round size of Mach-O commands up to page boundary.
     */
    size = round_page(sizeof (struct mach_header) + header->sizeofcmds);
    if (size <= 0)
        return(LOAD_BADMACHO);

    /*
     * Map the load commands into kernel memory.
     */
    addr = 0;
    kl_size = size;
    kl_addr = kalloc(size);
    addr = kl_addr;
    if (addr == NULL)
        return(LOAD_NOSPACE);

    if (error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
                        UIO_SYSSPACE, 0, p->p_ucred, &resid, p)) {
        if (kl_addr)
            kfree(kl_addr, kl_size);
        return(EIO);
    }
    /* ubc_map(vp); */ /* NOT HERE */

    /*
     * Scan through the commands, processing each one as necessary.
     */
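    /*
     * Pass 1 maps the segments (LC_SEGMENT) so the image's memory is in
     * place; pass 2 then handles the thread and dynamic-linker commands,
     * which may refer to that memory.
     */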
    for (pass = 1; pass <= 2; pass++) {
        offset = sizeof(struct mach_header);
        ncmds = header->ncmds;
        while (ncmds--) {
            /*
             * Get a pointer to the command.
             */
            lcp = (struct load_command *)(addr + offset);
            offset += lcp->cmdsize;

            /*
             * Check for valid lcp pointer by checking
             * next offset.
             */
            if (offset > header->sizeofcmds
                    + sizeof(struct mach_header)) {
                if (kl_addr)
                    kfree(kl_addr, kl_size);
                return(LOAD_BADMACHO);
            }

            /*
             * Check for valid command.
             */
            switch(lcp->cmd) {
            case LC_SEGMENT:
                if (pass != 1)
                    break;
                ret = load_segment(
                        (struct segment_command *) lcp,
                        pager, file_offset,
                        macho_size,
                        (unsigned long)ubc_getsize(vp),
                        map,
                        result);
                break;
            case LC_THREAD:
                if (pass != 2)
                    break;
                ret = load_thread((struct thread_command *)lcp, thr_act,
                        result);
                break;
            case LC_UNIXTHREAD:
                if (pass != 2)
                    break;
                ret = load_unixthread(
                        (struct thread_command *) lcp, thr_act,
                        result);
                break;
            case LC_LOAD_DYLINKER:
                if (pass != 2)
                    break;
                if (depth == 1 || dlp == 0)
                    dlp = (struct dylinker_command *)lcp;
                else
                    ret = LOAD_FAILURE;
                break;
            default:
                ret = KERN_SUCCESS;     /* ignore other stuff */
            }
            if (ret != LOAD_SUCCESS)
                break;
        }
        if (ret != LOAD_SUCCESS)
            break;
    }
    if (ret == LOAD_SUCCESS && dlp != 0) {
        vm_offset_t addr;
        shared_region_mapping_t shared_region;
        struct shared_region_task_mappings map_info;
        shared_region_mapping_t next;

RedoLookup:
        vm_get_shared_region(task, &shared_region);
        map_info.self = (vm_offset_t)shared_region;
        shared_region_mapping_info(shared_region,
            &(map_info.text_region),
            &(map_info.text_size),
            &(map_info.data_region),
            &(map_info.data_size),
            &(map_info.region_mappings),
            &(map_info.client_base),
            &(map_info.alternate_base),
            &(map_info.alternate_next),
            &(map_info.flags), &next);

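        /*
         * If the task's shared region is a system region that has
         * filled up and is not the current system region, switch the
         * task to the current system shared region and redo the lookup.
         */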
        if ((map_info.flags & SHARED_REGION_FULL) &&
            (map_info.flags & SHARED_REGION_SYSTEM)) {
            if (map_info.self != (vm_offset_t)system_shared_region) {
                shared_region_mapping_ref(system_shared_region);
                vm_set_shared_region(task,
                        system_shared_region);
                shared_region_mapping_dealloc(
                        (shared_region_mapping_t)map_info.self);
                goto RedoLookup;
            }
        }

        if (dylink_test) {
            addr = map_info.client_base;
            vm_map(map, &addr, map_info.text_size, 0,
                (VM_MEMORY_SHARED_PMAP << 24)
                    | SHARED_LIB_ALIAS,
                map_info.text_region, 0, FALSE,
                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
            addr = map_info.client_base + map_info.text_size;
            vm_map(map, &addr, map_info.data_size,
                0, SHARED_LIB_ALIAS,
                map_info.data_region, 0, TRUE,
                VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
        }
        ret = load_dylinker(dlp, map, thr_act, depth, result);
    }

    if (kl_addr)
        kfree(kl_addr, kl_size);

    if ((ret == LOAD_SUCCESS) && (depth == 1) &&
            (result->thread_count == 0))
        ret = LOAD_FAILURE;
    if (ret == LOAD_SUCCESS)
        ubc_map(vp);

    return(ret);
}

static
load_return_t
load_segment(
    struct segment_command *scp,
    void *pager,
    unsigned long pager_offset,
    unsigned long macho_size,
    unsigned long end_of_file,
    vm_map_t map,
    load_result_t *result
)
{
    kern_return_t ret;
    vm_offset_t map_addr, map_offset;
    vm_size_t map_size, seg_size, delta_size;
    caddr_t tmp;
    vm_prot_t initprot;
    vm_prot_t maxprot;
#if 1
    extern int print_map_addr;
#endif /* 1 */

    /*
     * Make sure what we get from the file is really ours (as specified
     * by macho_size).
     */
    if (scp->fileoff + scp->filesize > macho_size)
        return (LOAD_BADMACHO);

    seg_size = round_page(scp->vmsize);
    if (seg_size == 0)
        return(KERN_SUCCESS);

    /*
     * Round sizes to page size.
     */
    map_size = round_page(scp->filesize);
    map_addr = trunc_page(scp->vmaddr);

    map_offset = pager_offset + scp->fileoff;

    if (map_size > 0) {
        initprot = (scp->initprot) & VM_PROT_ALL;
        maxprot = (scp->maxprot) & VM_PROT_ALL;
        /*
         * Map a copy of the file into the address space.
         */
        ret = vm_map(map,
                &map_addr, map_size, (vm_offset_t)0, FALSE,
                pager, map_offset, TRUE,
                initprot, maxprot,
                VM_INHERIT_DEFAULT);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);

#if 1
        if (print_map_addr)
            printf("LSegment: Mapped addr= %x; size = %x\n", map_addr, map_size);
#endif /* 1 */
        /*
         * If the file didn't end on a page boundary,
         * we need to zero the leftover.
         */
        delta_size = map_size - scp->filesize;
#if FIXME
        if (delta_size > 0) {
            vm_offset_t tmp;

            ret = vm_allocate(kernel_map, &tmp, delta_size, TRUE);
            if (ret != KERN_SUCCESS)
                return(LOAD_RESOURCE);

            if (copyout(tmp, map_addr + scp->filesize,
                        delta_size)) {
                (void) vm_deallocate(
                        kernel_map, tmp, delta_size);
                return(LOAD_FAILURE);
            }

            (void) vm_deallocate(kernel_map, tmp, delta_size);
        }
#endif /* FIXME */
    }

    /*
     * If the virtual size of the segment is greater
     * than the size from the file, we need to allocate
     * zero fill memory for the rest.
     */
    delta_size = seg_size - map_size;
    if (delta_size > 0) {
        vm_offset_t tmp = map_addr + map_size;

        ret = vm_allocate(map, &tmp, delta_size, FALSE);
        if (ret != KERN_SUCCESS)
            return(LOAD_NOSPACE);
    }

    /*
     * Set protection values. (Note: ignore errors!)
     */

    if (scp->maxprot != VM_PROT_DEFAULT) {
        (void) vm_protect(map,
                map_addr, seg_size,
                TRUE, scp->maxprot);
    }
    if (scp->initprot != VM_PROT_DEFAULT) {
        (void) vm_protect(map,
                map_addr, seg_size,
                FALSE, scp->initprot);
    }
    if ((scp->fileoff == 0) && (scp->filesize != 0))
        result->mach_header = map_addr;
    return(LOAD_SUCCESS);
}

static
load_return_t
load_unixthread(
    struct thread_command *tcp,
    thread_act_t thr_act,
    load_result_t *result
)
{
    thread_t thread = current_thread();
    load_return_t ret;
    int customstack = 0;

    if (result->thread_count != 0)
        return (LOAD_FAILURE);

    thread = getshuttle_thread(thr_act);
    ret = load_threadstack(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command),
            &result->user_stack,
            &customstack);
    if (ret != LOAD_SUCCESS)
        return(ret);

    if (customstack)
        result->customstack = 1;
    else
        result->customstack = 0;
    ret = load_threadentry(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command),
            &result->entry_point);
    if (ret != LOAD_SUCCESS)
        return(ret);

    ret = load_threadstate(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command));
    if (ret != LOAD_SUCCESS)
        return (ret);

    result->unixproc = TRUE;
    result->thread_count++;

    return(LOAD_SUCCESS);
}

static
load_return_t
load_thread(
    struct thread_command *tcp,
    thread_act_t thr_act,
    load_result_t *result
)
{
    thread_t thread;
    kern_return_t kret;
    load_return_t lret;
    task_t task;
    int customstack = 0;

    task = get_threadtask(thr_act);
    thread = getshuttle_thread(thr_act);

    /* if the thread count is 0, reuse thr_act's thread; otherwise create a new one */
    if (result->thread_count != 0) {
        kret = thread_create(task, &thread);
        if (kret != KERN_SUCCESS)
            return(LOAD_RESOURCE);
        thread_deallocate(thread);
    }

    lret = load_threadstate(thread,
            (unsigned long *)(((vm_offset_t)tcp) +
                sizeof(struct thread_command)),
            tcp->cmdsize - sizeof(struct thread_command));
    if (lret != LOAD_SUCCESS)
        return (lret);

    if (result->thread_count == 0) {
        lret = load_threadstack(thread,
                (unsigned long *)(((vm_offset_t)tcp) +
                    sizeof(struct thread_command)),
                tcp->cmdsize - sizeof(struct thread_command),
                &result->user_stack,
                &customstack);
        if (customstack)
            result->customstack = 1;
        else
            result->customstack = 0;

        if (lret != LOAD_SUCCESS)
            return(lret);

        lret = load_threadentry(thread,
                (unsigned long *)(((vm_offset_t)tcp) +
                    sizeof(struct thread_command)),
                tcp->cmdsize - sizeof(struct thread_command),
                &result->entry_point);
        if (lret != LOAD_SUCCESS)
            return(lret);
    }
    /*
     * Resume thread now, note that this means that the thread
     * commands should appear after all the load commands to
     * be sure they don't reference anything not yet mapped.
     */
    else
        thread_resume(thread);

    result->thread_count++;

    return(LOAD_SUCCESS);
}

static
load_return_t
load_threadstate(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size
)
{
    kern_return_t ret;
    unsigned long size;
    int flavor;

    /*
     * Set the thread state.
     */
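    /*
     * The state area of the thread command is a sequence of
     * (flavor, count, state[count]) groups, each entry an unsigned
     * long; walk them until total_size is used up.
     */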
    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        total_size -= (size + 2) * sizeof(unsigned long);
        if (total_size < 0)
            return(LOAD_BADMACHO);
        ret = thread_setstatus(getact_thread(thread), flavor, ts, size);
        if (ret != KERN_SUCCESS)
            return(LOAD_FAILURE);
        ts += size;     /* ts is a (unsigned long *) */
    }
    return(LOAD_SUCCESS);
}

static
load_return_t
load_threadstack(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size,
    vm_offset_t *user_stack,
    int *customstack
)
{
    kern_return_t ret;
    unsigned long size;
    int flavor;

    /*
     * Extract the user stack pointer from the thread state.
     */
    *user_stack = 0;
    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        total_size -= (size + 2) * sizeof(unsigned long);
        if (total_size < 0)
            return(LOAD_BADMACHO);
        ret = thread_userstack(thread, flavor, ts, size, user_stack, customstack);
        if (ret != KERN_SUCCESS)
            return(LOAD_FAILURE);
        ts += size;     /* ts is a (unsigned long *) */
    }
    return(LOAD_SUCCESS);
}

static
load_return_t
load_threadentry(
    thread_t thread,
    unsigned long *ts,
    unsigned long total_size,
    vm_offset_t *entry_point
)
{
    kern_return_t ret;
    unsigned long size;
    int flavor;

    /*
     * Extract the entry point from the thread state.
     */
    *entry_point = 0;
    while (total_size > 0) {
        flavor = *ts++;
        size = *ts++;
        total_size -= (size + 2) * sizeof(unsigned long);
        if (total_size < 0)
            return(LOAD_BADMACHO);
        ret = thread_entrypoint(thread, flavor, ts, size, entry_point);
        if (ret != KERN_SUCCESS)
            return(LOAD_FAILURE);
        ts += size;     /* ts is a (unsigned long *) */
    }
    return(LOAD_SUCCESS);
}


static
load_return_t
load_dylinker(
    struct dylinker_command *lcp,
    vm_map_t map,
    thread_act_t thr_act,
    int depth,
    load_result_t *result
)
{
    char *name;
    char *p;
    struct vnode *vp;
    struct mach_header header;
    unsigned long file_offset;
    unsigned long macho_size;
    vm_map_t copy_map;
    load_result_t myresult;
    kern_return_t ret;
    vm_map_copy_t tmp;
    vm_offset_t dyl_start, map_addr;
    vm_size_t dyl_length;

    name = (char *)lcp + lcp->name.offset;
    /*
     * Check for a proper null terminated string.
     */
    p = name;
    do {
        if (p >= (char *)lcp + lcp->cmdsize)
            return(LOAD_BADMACHO);
    } while (*p++);

    ret = get_macho_vnode(name, &header, &file_offset, &macho_size, &vp);
    if (ret)
        return (ret);

    myresult = (load_result_t) { 0 };

    /*
     * Load the Mach-O.
     */
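    /*
     * The dynamic linker is parsed into a throwaway map first; its
     * contents are then copied into the target map below, sliding the
     * whole image (and the recorded entry point) if its preferred
     * address range is unavailable.
     */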
    copy_map = vm_map_create(pmap_create(macho_size),
            get_map_min(map), get_map_max(map), TRUE);

    ret = parse_machfile(vp, copy_map, thr_act, &header,
            file_offset, macho_size,
            depth, &myresult);

    if (ret)
        goto out;

    if (get_map_nentries(copy_map) > 0) {

        dyl_start = get_map_start(copy_map);
        dyl_length = get_map_end(copy_map) - dyl_start;

        map_addr = dyl_start;
        ret = vm_allocate(map, &map_addr, dyl_length, FALSE);
        if (ret != KERN_SUCCESS) {
            ret = vm_allocate(map, &map_addr, dyl_length, TRUE);
        }

        if (ret != KERN_SUCCESS) {
            ret = LOAD_NOSPACE;
            goto out;
        }
        ret = vm_map_copyin(copy_map, dyl_start, dyl_length, TRUE,
                &tmp);
        if (ret != KERN_SUCCESS) {
            (void) vm_map_remove(map,
                    map_addr,
                    map_addr + dyl_length,
                    VM_MAP_NO_FLAGS);
            goto out;
        }

        ret = vm_map_copy_overwrite(map, map_addr, tmp, FALSE);
        if (ret != KERN_SUCCESS) {
            vm_map_copy_discard(tmp);
            (void) vm_map_remove(map,
                    map_addr,
                    map_addr + dyl_length,
                    VM_MAP_NO_FLAGS);
            goto out;
        }

        if (map_addr != dyl_start)
            myresult.entry_point += (map_addr - dyl_start);
    } else
        ret = LOAD_FAILURE;

    if (ret == LOAD_SUCCESS) {
        result->dynlinker = TRUE;
        result->entry_point = myresult.entry_point;
        ubc_map(vp);
    }
out:
    vm_map_deallocate(copy_map);

    vrele(vp);
    return (ret);

}

static
load_return_t
get_macho_vnode(
    char *path,
    struct mach_header *mach_header,
    unsigned long *file_offset,
    unsigned long *macho_size,
    struct vnode **vpp
)
{
    struct vnode *vp;
    struct vattr attr, *atp;
    struct nameidata nid, *ndp;
    struct proc *p = current_proc();    /* XXXX */
    boolean_t is_fat;
    struct fat_arch fat_arch;
    int error = KERN_SUCCESS;
    int resid;
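    /*
     * Buffer large enough to hold either a thin Mach-O header or a
     * fat header read from the start of the file.
     */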
    union {
        struct mach_header mach_header;
        struct fat_header fat_header;
        char pad[512];
    } header;
    off_t fsize = (off_t)0;
    struct ucred *cred = p->p_ucred;

    ndp = &nid;
    atp = &attr;

    /* init the namei data to point at the user's program file name */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, path, p);

    if (error = namei(ndp))
        return(error);

    vp = ndp->ni_vp;

    /* check for regular file */
    if (vp->v_type != VREG) {
        error = EACCES;
        goto bad1;
    }

    /* get attributes */
    if (error = VOP_GETATTR(vp, &attr, cred, p))
        goto bad1;

    /* Check mount point */
    if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
        error = EACCES;
        goto bad1;
    }

    if ((vp->v_mount->mnt_flag & MNT_NOSUID) || (p->p_flag & P_TRACED))
        atp->va_mode &= ~(VSUID | VSGID);

    /* check access.  for root we have to see if any exec bit on */
    if (error = VOP_ACCESS(vp, VEXEC, cred, p))
        goto bad1;
    if ((atp->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
        error = EACCES;
        goto bad1;
    }

    /* hold the vnode for the IO */
    if (UBCINFOEXISTS(vp) && !ubc_hold(vp)) {
        error = ENOENT;
        goto bad1;
    }

    /* try to open it */
    if (error = VOP_OPEN(vp, FREAD, cred, p)) {
        ubc_rele(vp);
        goto bad1;
    }

    if (error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
                        UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p))
        goto bad2;

    if (header.mach_header.magic == MH_MAGIC)
        is_fat = FALSE;
    else if (header.fat_header.magic == FAT_MAGIC ||
             header.fat_header.magic == FAT_CIGAM)
        is_fat = TRUE;
    else {
        error = LOAD_BADMACHO;
        goto bad2;
    }

    if (is_fat) {
        /* Look up our architecture in the fat file. */
        error = fatfile_getarch(vp, (vm_offset_t)(&header.fat_header), &fat_arch);
        if (error != LOAD_SUCCESS)
            goto bad2;

        /* Read the Mach-O header out of it */
        error = vn_rdwr(UIO_READ, vp, &header.mach_header,
                        sizeof(header.mach_header), fat_arch.offset,
                        UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p);
        if (error) {
            error = LOAD_FAILURE;
            goto bad2;
        }

        /* Is this really a Mach-O? */
        if (header.mach_header.magic != MH_MAGIC) {
            error = LOAD_BADMACHO;
            goto bad2;
        }

        *file_offset = fat_arch.offset;
        *macho_size = fsize = fat_arch.size;
    } else {

        *file_offset = 0;
        *macho_size = fsize = attr.va_size;
    }

    *mach_header = header.mach_header;
    *vpp = vp;
    if (UBCISVALID(vp))
        ubc_setsize(vp, fsize); /* XXX why? */

    VOP_UNLOCK(vp, 0, p);
    ubc_rele(vp);
    return (error);

bad2:
    VOP_UNLOCK(vp, 0, p);
    error = VOP_CLOSE(vp, FREAD, cred, p);
    ubc_rele(vp);
    vrele(vp);
    return (error);

bad1:
    vput(vp);
    return(error);
}