/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 * File:	kern/mach_loader.c
 * Author:	Avadis Tevanian, Jr.
 *
 * Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */

#include <sys/param.h>
#include <sys/vnode_internal.h>
#include <sys/uio.h>
#include <sys/namei.h>
#include <sys/proc_internal.h>
#include <sys/kauth.h>
#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/mount_internal.h>
#include <sys/fcntl.h>
#include <sys/ubc_internal.h>
#include <sys/imgact.h>

#include <mach/mach_types.h>
#include <mach/vm_map.h>	/* vm_allocate() */
#include <mach/mach_vm.h>	/* mach_vm_allocate() */
#include <mach/vm_statistics.h>
#include <mach/shared_memory_server.h>
#include <mach/task.h>
#include <mach/thread_act.h>

#include <machine/vmparam.h>

#include <kern/kern_types.h>
#include <kern/cpu_number.h>
#include <kern/mach_loader.h>
#include <kern/kalloc.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <mach-o/fat.h>
#include <mach-o/loader.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#include <vm/vm_shared_memory_server.h>
#include <vm/vm_protos.h>

/*
 * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern pmap_t	pmap_create(vm_map_size_t size, boolean_t is_64bit);
extern void	pmap_switch(pmap_t);
extern void	pmap_map_sharedpage(task_t task, pmap_t pmap);

/*
 * XXX kern/thread.h should not treat these prototypes as MACH_KERNEL_PRIVATE
 * when KERNEL is defined.
 */
extern kern_return_t	thread_setstatus(thread_t thread, int flavor,
				thread_state_t tstate,
				mach_msg_type_number_t count);

extern kern_return_t	thread_state_initialize(thread_t thread);


/* XXX should have prototypes in a shared header file */
extern int	get_map_nentries(vm_map_t);
extern kern_return_t	thread_userstack(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *, int *);
extern kern_return_t	thread_entrypoint(thread_t, int, thread_state_t,
				unsigned int, mach_vm_offset_t *);


/* An empty load_result_t */
static load_result_t load_result_null = {
	MACH_VM_MIN_ADDRESS,
	MACH_VM_MIN_ADDRESS,
	MACH_VM_MIN_ADDRESS,
	0,
	0,
	0,
	0
};

/*
 * Prototypes of static functions.
 */
static load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thr_act,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	boolean_t		shared_regions,
	boolean_t		clean_regions,
	int			depth,
	load_result_t		*result
);

static load_return_t
load_segment(
	struct segment_command	*scp,
	void			*pager,
	off_t			pager_offset,
	off_t			macho_size,
	off_t			end_of_file,
	vm_map_t		map,
	load_result_t		*result
);

static load_return_t
load_segment_64(
	struct segment_command_64	*scp64,
	void				*pager,
	off_t				pager_offset,
	off_t				macho_size,
	off_t				end_of_file,
	vm_map_t			map,
	load_result_t			*result
);

static load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thr_act,
	load_result_t		*result
);

static load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t		thr_act,
	load_result_t		*result
);

static load_return_t
load_threadstate(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size
);

static load_return_t
load_threadstack(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*user_stack,
	int			*customstack
);

static load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*entry_point
);

static load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions,
	boolean_t		is_64bit
);

static load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct vnode		**vpp
);

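/*
 * load_machfile() is the top-level loader entry point.  Unless the caller
 * supplies a map, it creates a fresh VM map (with its own pmap), hands the
 * Mach-O image to parse_machfile() to be mapped into that map, and, on
 * success, swaps the new map into the current task.
 */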
load_return_t
load_machfile(
	struct image_params	*imgp,
	struct mach_header	*header,
	thread_t		thr_act,
	vm_map_t		new_map,
	boolean_t		clean_regions,
	load_result_t		*result
)
{
	struct vnode		*vp = imgp->ip_vp;
	off_t			file_offset = imgp->ip_arch_offset;
	off_t			macho_size = imgp->ip_arch_size;

	pmap_t			pmap = 0;	/* protected by create_map */
	vm_map_t		map;
	vm_map_t		old_map;
	load_result_t		myresult;
	load_return_t		lret;
	boolean_t		create_map = TRUE;

	if (new_map != VM_MAP_NULL) {
		create_map = FALSE;
	}

	if (create_map) {
		old_map = current_map();
#ifdef NO_NESTED_PMAP
		pmap = get_task_pmap(current_task());
		pmap_reference(pmap);
#else	/* NO_NESTED_PMAP */
		pmap = pmap_create((vm_map_size_t) 0, (imgp->ip_flags & IMGPF_IS_64BIT));
#endif	/* NO_NESTED_PMAP */
		map = vm_map_create(pmap,
				0,
				vm_compute_max_offset((imgp->ip_flags & IMGPF_IS_64BIT)),
				TRUE);
	} else
		map = new_map;

	if ( (header->flags & MH_ALLOW_STACK_EXECUTION) )
		vm_map_disable_NX(map);

	if (!result)
		result = &myresult;

	*result = load_result_null;

	lret = parse_machfile(vp, map, thr_act, header, file_offset, macho_size,
			((imgp->ip_flags & IMGPF_IS_64BIT) == 0), /* shared regions? */
			clean_regions, 0, result);

	if (lret != LOAD_SUCCESS) {
		if (create_map) {
			vm_map_deallocate(map);	/* will lose pmap reference too */
		}
		return(lret);
	}

	/*
	 * For 64-bit users, check for presence of a 4GB page zero
	 * which will enable the kernel to share the user's address space
	 * and hence avoid TLB flushes on kernel entry/exit
	 */
	if ((imgp->ip_flags & IMGPF_IS_64BIT) &&
	    vm_map_has_4GB_pagezero(map))
		vm_map_set_4GB_pagezero(map);
	/*
	 * Commit to new map.  First make sure that the current
	 * users of the task get done with it, and that we clean
	 * up the old contents of IPC and memory.  The task is
	 * guaranteed to be single threaded upon return (us).
	 *
	 * Swap the new map for the old, which consumes our new map
	 * reference but leaves us responsible for the old_map reference.
	 * That lets us get off the pmap associated with it, and
	 * then we can release it.
	 */

	if (create_map) {
		task_halt(current_task());

		old_map = swap_task_map(current_task(), map);
		vm_map_clear_4GB_pagezero(old_map);
#ifndef NO_NESTED_PMAP
		pmap_switch(pmap);	/* Make sure we are using the new pmap */
#endif	/* !NO_NESTED_PMAP */
		vm_map_deallocate(old_map);
	}
	return(LOAD_SUCCESS);
}

int dylink_test = 1;

/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * that is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself.  We read the commands section into a
 * kernel buffer and then parse it to process the mach-o load_command
 * segment(s).  We are only interested in a subset of the total set of
 * possible commands.
 */
static
load_return_t
parse_machfile(
	struct vnode		*vp,
	vm_map_t		map,
	thread_t		thr_act,
	struct mach_header	*header,
	off_t			file_offset,
	off_t			macho_size,
	boolean_t		shared_regions,
	boolean_t		clean_regions,
	int			depth,
	load_result_t		*result
)
{
	uint32_t		ncmds;
	struct load_command	*lcp;
	struct dylinker_command	*dlp = 0;
	integer_t		dlarchbits = 0;
	void *			pager;
	load_return_t		ret = LOAD_SUCCESS;
	caddr_t			addr;
	void *			kl_addr;
	vm_size_t		size,kl_size;
	size_t			offset;
	size_t			oldoffset;	/* for overflow check */
	int			pass;
	struct proc		*p = current_proc();	/* XXXX */
	int			error;
	int			resid=0;
	task_t			task;
	size_t			mach_header_sz = sizeof(struct mach_header);
	boolean_t		abi64;

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 * Break infinite recursion
	 */
	if (depth > 6)
		return(LOAD_FAILURE);

	task = (task_t)get_threadtask(thr_act);

	depth++;

	/*
	 * Check to see if right machine type.
	 */
	if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != cpu_type()) ||
	    !grade_binary(header->cputype, header->cpusubtype))
		return(LOAD_BADARCH);

	abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64);

	switch (header->filetype) {

	case MH_OBJECT:
	case MH_EXECUTE:
	case MH_PRELOAD:
		if (depth != 1)
			return (LOAD_FAILURE);
		break;

	case MH_FVMLIB:
	case MH_DYLIB:
		if (depth == 1)
			return (LOAD_FAILURE);
		break;

	case MH_DYLINKER:
		if (depth != 2)
			return (LOAD_FAILURE);
		break;

	default:
		return (LOAD_FAILURE);
	}

	/*
	 * Get the pager for the file.
	 */
	UBCINFOCHECK("parse_machfile", vp);
	pager = (void *) ubc_getpager(vp);

	/*
	 * Map portion that must be accessible directly into
	 * kernel's map.
	 */
	if ((mach_header_sz + header->sizeofcmds) > macho_size)
		return(LOAD_BADMACHO);

	/*
	 * Round size of Mach-O commands up to page boundary.
	 */
	size = round_page(mach_header_sz + header->sizeofcmds);
	if (size <= 0)
		return(LOAD_BADMACHO);

	/*
	 * Map the load commands into kernel memory.
	 */
	addr = 0;
	kl_size = size;
	kl_addr = kalloc(size);
	addr = (caddr_t)kl_addr;
	if (addr == NULL)
		return(LOAD_NOSPACE);

	error = vn_rdwr(UIO_READ, vp, addr, size, file_offset,
	    UIO_SYSSPACE32, 0, kauth_cred_get(), &resid, p);
	if (error) {
		if (kl_addr )
			kfree(kl_addr, kl_size);
		return(LOAD_IOERROR);
	}
	/* (void)ubc_map(vp, PROT_EXEC); */ /* NOT HERE */

	/*
	 * Scan through the commands, processing each one as necessary.
	 */
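	/*
	 * The commands are scanned twice: LC_SEGMENT and LC_SEGMENT_64 are
	 * mapped on the first pass, while LC_THREAD, LC_UNIXTHREAD and
	 * LC_LOAD_DYLINKER are handled on the second pass, once all of the
	 * segments are in place.
	 */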
	for (pass = 1; pass <= 2; pass++) {
		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;
		while (ncmds--) {
			/*
			 * Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;
			offset += lcp->cmdsize;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents. Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (oldoffset > offset ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > header->sizeofcmds + mach_header_sz) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 */
			switch(lcp->cmd) {
			case LC_SEGMENT_64:
				if (pass != 1)
					break;
				ret = load_segment_64(
					(struct segment_command_64 *)lcp,
					pager,
					file_offset,
					macho_size,
					ubc_getsize(vp),
					map,
					result);
				break;
			case LC_SEGMENT:
				if (pass != 1)
					break;
				ret = load_segment(
					(struct segment_command *) lcp,
					pager,
					file_offset,
					macho_size,
					ubc_getsize(vp),
					map,
					result);
				break;
			case LC_THREAD:
				if (pass != 2)
					break;
				ret = load_thread((struct thread_command *)lcp,
						thr_act,
						result);
				break;
			case LC_UNIXTHREAD:
				if (pass != 2)
					break;
				ret = load_unixthread(
					(struct thread_command *) lcp,
					thr_act,
					result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 2)
					break;
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
					dlarchbits = (header->cputype & CPU_ARCH_MASK);
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS)
				break;
		}
		if (ret != LOAD_SUCCESS)
			break;
	}
	if (ret == LOAD_SUCCESS) {

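		/*
		 * When shared regions were requested (load_machfile() asks
		 * for them only for 32-bit images), map the split-library
		 * shared region's text and data into the new address space
		 * before the dynamic linker is loaded.
		 */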
		if (shared_regions) {
			vm_offset_t vmaddr;
			shared_region_mapping_t shared_region;
			struct shared_region_task_mappings map_info;
			shared_region_mapping_t next;

RedoLookup:
			vm_get_shared_region(task, &shared_region);
			map_info.self = (vm_offset_t)shared_region;
			shared_region_mapping_info(shared_region,
				&(map_info.text_region),
				&(map_info.text_size),
				&(map_info.data_region),
				&(map_info.data_size),
				&(map_info.region_mappings),
				&(map_info.client_base),
				&(map_info.alternate_base),
				&(map_info.alternate_next),
				&(map_info.fs_base),
				&(map_info.system),
				&(map_info.flags), &next);

			if((map_info.flags & SHARED_REGION_FULL) ||
			   (map_info.flags & SHARED_REGION_STALE)) {
				shared_region_mapping_t system_region;
				system_region = lookup_default_shared_region(
					map_info.fs_base, map_info.system);
				if((map_info.self != (vm_offset_t)system_region) &&
				   (map_info.flags & SHARED_REGION_SYSTEM)) {
					if(system_region == NULL) {
						shared_file_boot_time_init(
							map_info.fs_base, map_info.system);
					} else {
						vm_set_shared_region(task, system_region);
					}
					shared_region_mapping_dealloc(
						(shared_region_mapping_t)map_info.self);
					goto RedoLookup;
				} else if (map_info.flags & SHARED_REGION_SYSTEM) {
					shared_region_mapping_dealloc(system_region);
					shared_file_boot_time_init(
						map_info.fs_base, map_info.system);
					shared_region_mapping_dealloc(
						(shared_region_mapping_t)map_info.self);
				} else {
					shared_region_mapping_dealloc(system_region);
				}
			}

			if (dylink_test) {
				p->p_flag |= P_NOSHLIB; /* no shlibs in use */
				vmaddr = map_info.client_base;
				if(clean_regions) {
					vm_map(map, &vmaddr, map_info.text_size,
						0, SHARED_LIB_ALIAS|VM_FLAGS_FIXED,
						map_info.text_region, 0, FALSE,
						VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
				} else {
					vm_map(map, &vmaddr, map_info.text_size, 0,
						(VM_MEMORY_SHARED_PMAP << 24)
						| SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
						map_info.text_region, 0, FALSE,
						VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
				}
				vmaddr = map_info.client_base + map_info.text_size;
				vm_map(map, &vmaddr, map_info.data_size,
					0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
					map_info.data_region, 0, TRUE,
					VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);

				while (next) {
					/*
					 * This should be fleshed out for the
					 * general case, but it is not necessary
					 * for now; the comm page is handled
					 * inside the shared_region mapping
					 * create calls for simplicity's sake.
					 * If more general support is needed,
					 * the code that manipulates the shared
					 * range chain can be pulled out and
					 * moved to the callers.
					 */
					shared_region_mapping_info(next,
						&(map_info.text_region),
						&(map_info.text_size),
						&(map_info.data_region),
						&(map_info.data_size),
						&(map_info.region_mappings),
						&(map_info.client_base),
						&(map_info.alternate_base),
						&(map_info.alternate_next),
						&(map_info.fs_base),
						&(map_info.system),
						&(map_info.flags), &next);

					vmaddr = map_info.client_base;
					vm_map(map, &vmaddr, map_info.text_size,
						0, SHARED_LIB_ALIAS | VM_FLAGS_FIXED,
						map_info.text_region, 0, FALSE,
						VM_PROT_READ, VM_PROT_READ, VM_INHERIT_SHARE);
				}
			}
		}
		if (dlp != 0)
			ret = load_dylinker(dlp, dlarchbits, map, thr_act, depth, result, clean_regions, abi64);

		if(depth == 1) {
			if (result->thread_count == 0)
				ret = LOAD_FAILURE;
			else if ( abi64 ) {
				/* Map in 64-bit commpage */
				/* LP64todo - make this clean */
				pmap_map_sharedpage(current_task(), get_map_pmap(map));
				vm_map_commpage64(map);
			} else {
#ifdef __i386__
				/*
				 * On Intel, the comm page doesn't get mapped
				 * automatically because it goes beyond the current end
				 * of the VM map in the current 3GB/1GB address space
				 * model.
				 * XXX This will probably become unnecessary when we
				 * switch to the 4GB/4GB address space model.
				 */
				vm_map_commpage32(map);
#endif /* __i386__ */
			}
		}
	}

	if (kl_addr )
		kfree(kl_addr, kl_size);

	if (ret == LOAD_SUCCESS)
		(void)ubc_map(vp, PROT_EXEC);

	return(ret);
}

#ifndef SG_PROTECTED_VERSION_1
#define SG_PROTECTED_VERSION_1	0x8
#endif /* SG_PROTECTED_VERSION_1 */

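/*
 * unprotect_segment_64() undoes the protection applied to segments that are
 * flagged SG_PROTECTED_VERSION_1.  On i386 it hands everything past the
 * unprotected header area of the mapping to vm_map_apple_protected(); on
 * other architectures it is a no-op that simply reports LOAD_SUCCESS.
 */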
#ifdef __i386__

#define APPLE_UNPROTECTED_HEADER_SIZE	(3 * PAGE_SIZE_64)

static load_return_t
unprotect_segment_64(
	uint64_t	file_off,
	uint64_t	file_size,
	vm_map_t	map,
	vm_map_offset_t	map_addr,
	vm_map_size_t	map_size)
{
	kern_return_t	kr;

	/*
	 * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of
	 * this part of a Universal binary) are not protected...
	 * The rest needs to be "transformed".
	 */
	if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE &&
	    file_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) {
		/* it's all unprotected, nothing to do... */
		kr = KERN_SUCCESS;
	} else {
		if (file_off <= APPLE_UNPROTECTED_HEADER_SIZE) {
			/*
			 * We start mapping in the unprotected area.
			 * Skip the unprotected part...
			 */
			vm_map_offset_t	delta;

			delta = APPLE_UNPROTECTED_HEADER_SIZE;
			delta -= file_off;
			map_addr += delta;
			map_size -= delta;
		}
		/* ... transform the rest of the mapping. */
		kr = vm_map_apple_protected(map,
					    map_addr,
					    map_addr + map_size);
	}

	if (kr != KERN_SUCCESS) {
		return LOAD_FAILURE;
	}
	return LOAD_SUCCESS;
}
#else	/* __i386__ */
#define unprotect_segment_64(file_off, file_size, map, map_addr, map_size) \
	LOAD_SUCCESS
#endif	/* __i386__ */

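/*
 * load_segment() handles a 32-bit LC_SEGMENT command: it maps the file-backed
 * portion of the segment into "map", zero-fills whatever remains of the
 * segment's virtual size, records the mach_header address in the load result,
 * and finally unprotects the segment if it is marked SG_PROTECTED_VERSION_1.
 */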
static
load_return_t
load_segment(
	struct segment_command	*scp,
	void *			pager,
	off_t			pager_offset,
	off_t			macho_size,
	__unused off_t		end_of_file,
	vm_map_t		map,
	load_result_t		*result
)
{
	kern_return_t		ret;
	vm_offset_t		map_addr, map_offset;
	vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t 		initprot;
	vm_prot_t		maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp->fileoff + scp->filesize > macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page(scp->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 * Round sizes to page size.
	 */
	map_size = round_page(scp->filesize);
	map_addr = trunc_page(scp->vmaddr);

#if 0	/* XXX (4596982) this interferes with Rosetta */
	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment: it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, (vm_map_offset_t) seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}
#endif

	map_offset = pager_offset + scp->fileoff;

	if (map_size > 0) {
		initprot = (scp->initprot) & VM_PROT_ALL;
		maxprot = (scp->maxprot) & VM_PROT_ALL;
		/*
		 * Map a copy of the file into the address space.
		 */
		ret = vm_map(map,
			&map_addr, map_size, (vm_offset_t)0,
			VM_FLAGS_FIXED, pager, map_offset, TRUE,
			initprot, maxprot,
			VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

		/*
		 * If the file didn't end on a page boundary,
		 * we need to zero the leftover.
		 */
		delta_size = map_size - scp->filesize;
#if FIXME
		if (delta_size > 0) {
			vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp->filesize,
								delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return(LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 * If the virtual size of the segment is greater
	 * than the size from the file, we need to allocate
	 * zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		vm_offset_t	tmp = map_addr + map_size;

		ret = vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
			NULL, 0, FALSE,
			scp->initprot, scp->maxprot,
			VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp->fileoff == 0) && (scp->filesize != 0) )
		result->mach_header = map_addr;

	if (scp->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64((uint64_t) scp->fileoff,
					   (uint64_t) scp->filesize,
					   map,
					   (vm_map_offset_t) map_addr,
					   (vm_map_size_t) map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}

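/*
 * load_segment_64() is the LC_SEGMENT_64 counterpart of load_segment(); it
 * performs the same mapping, zero-fill and unprotect steps using the 64-bit
 * segment_command_64 fields and the mach_vm_* interfaces.
 */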
static
load_return_t
load_segment_64(
	struct segment_command_64	*scp64,
	void *				pager,
	off_t				pager_offset,
	off_t				macho_size,
	__unused off_t			end_of_file,
	vm_map_t			map,
	load_result_t			*result
)
{
	kern_return_t		ret;
	mach_vm_offset_t	map_addr, map_offset;
	mach_vm_size_t		map_size, seg_size, delta_size;
	vm_prot_t 		initprot;
	vm_prot_t		maxprot;

	/*
	 * Make sure what we get from the file is really ours (as specified
	 * by macho_size).
	 */
	if (scp64->fileoff + scp64->filesize > (uint64_t)macho_size)
		return (LOAD_BADMACHO);

	seg_size = round_page_64(scp64->vmsize);
	if (seg_size == 0)
		return(KERN_SUCCESS);

	/*
	 * Round sizes to page size.
	 */
	map_size = round_page_64(scp64->filesize);	/* limited to 32 bits */
	map_addr = round_page_64(scp64->vmaddr);

	if (map_addr == 0 &&
	    map_size == 0 &&
	    seg_size != 0 &&
	    (scp64->initprot & VM_PROT_ALL) == VM_PROT_NONE &&
	    (scp64->maxprot & VM_PROT_ALL) == VM_PROT_NONE) {
		/*
		 * This is a "page zero" segment: it starts at address 0,
		 * is not mapped from the binary file and is not accessible.
		 * User-space should never be able to access that memory, so
		 * make it completely off limits by raising the VM map's
		 * minimum offset.
		 */
		ret = vm_map_raise_min_offset(map, seg_size);
		if (ret != KERN_SUCCESS) {
			return LOAD_FAILURE;
		}
		return LOAD_SUCCESS;
	}

	map_offset = pager_offset + scp64->fileoff;	/* limited to 32 bits */

	if (map_size > 0) {
		initprot = (scp64->initprot) & VM_PROT_ALL;
		maxprot = (scp64->maxprot) & VM_PROT_ALL;
		/*
		 * Map a copy of the file into the address space.
		 */
		ret = mach_vm_map(map,
			&map_addr, map_size, (mach_vm_offset_t)0,
			VM_FLAGS_FIXED, pager, map_offset, TRUE,
			initprot, maxprot,
			VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);

		/*
		 * If the file didn't end on a page boundary,
		 * we need to zero the leftover.
		 */
		delta_size = map_size - scp64->filesize;
#if FIXME
		if (delta_size > 0) {
			mach_vm_offset_t	tmp;

			ret = vm_allocate(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE);
			if (ret != KERN_SUCCESS)
				return(LOAD_RESOURCE);

			if (copyout(tmp, map_addr + scp64->filesize,
								delta_size)) {
				(void) vm_deallocate(
						kernel_map, tmp, delta_size);
				return (LOAD_FAILURE);
			}

			(void) vm_deallocate(kernel_map, tmp, delta_size);
		}
#endif /* FIXME */
	}

	/*
	 * If the virtual size of the segment is greater
	 * than the size from the file, we need to allocate
	 * zero fill memory for the rest.
	 */
	delta_size = seg_size - map_size;
	if (delta_size > 0) {
		mach_vm_offset_t tmp = map_addr + map_size;

		ret = mach_vm_map(map, &tmp, delta_size, 0, VM_FLAGS_FIXED,
				  NULL, 0, FALSE,
				  scp64->initprot, scp64->maxprot,
				  VM_INHERIT_DEFAULT);
		if (ret != KERN_SUCCESS)
			return(LOAD_NOSPACE);
	}

	if ( (scp64->fileoff == 0) && (scp64->filesize != 0) )
		result->mach_header = map_addr;

	if (scp64->flags & SG_PROTECTED_VERSION_1) {
		ret = unprotect_segment_64(scp64->fileoff,
					   scp64->filesize,
					   map,
					   map_addr,
					   map_size);
	} else {
		ret = LOAD_SUCCESS;
	}

	return ret;
}

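/*
 * load_thread() handles an LC_THREAD command.  For every thread after the
 * first it creates a new thread in the task; the first thread additionally
 * has its user stack and entry point extracted into the load result, while
 * subsequent threads are resumed immediately.
 */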
static
load_return_t
load_thread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	kern_return_t	kret;
	load_return_t	lret;
	task_t		task;
	int		customstack=0;

	task = get_threadtask(thread);

	/* if count is 0; same as thr_act */
	if (result->thread_count != 0) {
		kret = thread_create(task, &thread);
		if (kret != KERN_SUCCESS)
			return(LOAD_RESOURCE);
		thread_deallocate(thread);
	}

	lret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
		       		sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (lret != LOAD_SUCCESS)
		return (lret);

	if (result->thread_count == 0) {
		lret = load_threadstack(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->user_stack,
				&customstack);
		if (customstack)
				result->customstack = 1;
		else
				result->customstack = 0;

		if (lret != LOAD_SUCCESS)
			return(lret);

		lret = load_threadentry(thread,
				(unsigned long *)(((vm_offset_t)tcp) +
					sizeof(struct thread_command)),
				tcp->cmdsize - sizeof(struct thread_command),
				&result->entry_point);
		if (lret != LOAD_SUCCESS)
			return(lret);
	}
	/*
	 * Resume thread now, note that this means that the thread
	 * commands should appear after all the load commands to
	 * be sure they don't reference anything not yet mapped.
	 */
	else
		thread_resume(thread);

	result->thread_count++;

	return(LOAD_SUCCESS);
}

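/*
 * load_unixthread() handles the LC_UNIXTHREAD command used by executables.
 * Only one such command is allowed; it supplies the user stack, the entry
 * point and the register state for the initial thread, and marks the result
 * as a unix process.
 */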
static
load_return_t
load_unixthread(
	struct thread_command	*tcp,
	thread_t		thread,
	load_result_t		*result
)
{
	load_return_t	ret;
	int customstack =0;

	if (result->thread_count != 0)
		return (LOAD_FAILURE);

	ret = load_threadstack(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
		       		sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->user_stack,
			   &customstack);
	if (ret != LOAD_SUCCESS)
		return(ret);

	if (customstack)
			result->customstack = 1;
	else
			result->customstack = 0;
	ret = load_threadentry(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
		       		sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command),
		       &result->entry_point);
	if (ret != LOAD_SUCCESS)
		return(ret);

	ret = load_threadstate(thread,
		       (unsigned long *)(((vm_offset_t)tcp) +
		       		sizeof(struct thread_command)),
		       tcp->cmdsize - sizeof(struct thread_command));
	if (ret != LOAD_SUCCESS)
		return (ret);

	result->unixproc = TRUE;
	result->thread_count++;

	return(LOAD_SUCCESS);
}

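/*
 * The three helpers below walk the (flavor, count, state...) tuples that
 * follow a thread_command and hand each flavor to the appropriate
 * machine-dependent routine: thread_setstatus() to set register state,
 * thread_userstack() to locate the user stack, and thread_entrypoint()
 * to extract the initial program counter.
 */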
static
load_return_t
load_threadstate(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	thread_size;

	ret = thread_state_initialize( thread );
	if (ret != KERN_SUCCESS)
		return(LOAD_FAILURE);

	/*
	 * Set the new thread state; iterate through the state flavors in
	 * the mach-o file.
	 */
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		thread_size = (size+2)*sizeof(unsigned long);
		if (thread_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= thread_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in machine_thread_set_state()
		 * based on the value of flavor.
		 */
		ret = thread_setstatus(thread, flavor, (thread_state_t)ts, size);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}

static
load_return_t
load_threadstack(
	thread_t	thread,
	unsigned long	*ts,
	unsigned long	total_size,
	user_addr_t	*user_stack,
	int		*customstack
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	stack_size;

	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		stack_size = (size+2)*sizeof(unsigned long);
		if (stack_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= stack_size;

		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_userstack() based on
		 * the value of flavor.
		 */
		ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}

static
load_return_t
load_threadentry(
	thread_t		thread,
	unsigned long		*ts,
	unsigned long		total_size,
	mach_vm_offset_t	*entry_point
)
{
	kern_return_t	ret;
	unsigned long	size;
	int		flavor;
	unsigned long	entry_size;

	/*
	 * Set the thread state.
	 */
	*entry_point = MACH_VM_MIN_ADDRESS;
	while (total_size > 0) {
		flavor = *ts++;
		size = *ts++;
		entry_size = (size+2)*sizeof(unsigned long);
		if (entry_size > total_size)
			return(LOAD_BADMACHO);
		total_size -= entry_size;
		/*
		 * Third argument is a kernel space pointer; it gets cast
		 * to the appropriate type in thread_entrypoint() based on
		 * the value of flavor.
		 */
		ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point);
		if (ret != KERN_SUCCESS)
			return(LOAD_FAILURE);
		ts += size;	/* ts is a (unsigned long *) */
	}
	return(LOAD_SUCCESS);
}


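/*
 * load_dylinker() loads the dynamic linker named by an LC_LOAD_DYLINKER
 * command.  The dylinker's Mach-O is parsed into a temporary VM map, the
 * resulting mappings are copied into the target map (sliding them to a free
 * range if the preferred addresses are taken), and the process entry point
 * is redirected to the dylinker's entry point.
 */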
static
load_return_t
load_dylinker(
	struct dylinker_command	*lcp,
	integer_t		archbits,
	vm_map_t		map,
	thread_t		thr_act,
	int			depth,
	load_result_t		*result,
	boolean_t		clean_regions,
	boolean_t		is_64bit
)
{
	char			*name;
	char			*p;
	struct vnode		*vp;
	struct mach_header	header;
	off_t			file_offset;
	off_t			macho_size;
	vm_map_t		copy_map;
	load_result_t		myresult;
	kern_return_t		ret;
	vm_map_copy_t		tmp;
	mach_vm_offset_t	dyl_start, map_addr;
	mach_vm_size_t		dyl_length;

	name = (char *)lcp + lcp->name.offset;
	/*
	 * Check for a proper null terminated string.
	 */
	p = name;
	do {
		if (p >= (char *)lcp + lcp->cmdsize)
			return(LOAD_BADMACHO);
	} while (*p++);

	ret = get_macho_vnode(name, archbits, &header, &file_offset, &macho_size, &vp);
	if (ret)
		return (ret);

	/*
	 * Load the Mach-O.
	 * Use a temporary map to do the work.
	 */
	copy_map = vm_map_create(pmap_create(vm_map_round_page(macho_size),
					     is_64bit),
				 get_map_min(map), get_map_max(map), TRUE);
	if (VM_MAP_NULL == copy_map) {
		ret = LOAD_RESOURCE;
		goto out;
	}

	myresult = load_result_null;

	ret = parse_machfile(vp, copy_map, thr_act, &header,
				file_offset, macho_size,
				FALSE, clean_regions, depth, &myresult);

	if (ret)
		goto out;

	if (get_map_nentries(copy_map) > 0) {

		dyl_start = mach_get_vm_start(copy_map);
		dyl_length = mach_get_vm_end(copy_map) - dyl_start;

		map_addr = dyl_start;
		ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_FIXED);
		if (ret != KERN_SUCCESS) {
			ret = mach_vm_allocate(map, &map_addr, dyl_length, VM_FLAGS_ANYWHERE);
		}

		if (ret != KERN_SUCCESS) {
			ret = LOAD_NOSPACE;
			goto out;

		}
		ret = vm_map_copyin(copy_map,
				    (vm_map_address_t)dyl_start,
				    (vm_map_size_t)dyl_length,
				    TRUE, &tmp);
		if (ret != KERN_SUCCESS) {
			(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		ret = vm_map_copy_overwrite(map,
					    (vm_map_address_t)map_addr,
					    tmp, FALSE);
		if (ret != KERN_SUCCESS) {
			vm_map_copy_discard(tmp);
			(void) vm_map_remove(map,
					     vm_map_trunc_page(map_addr),
					     vm_map_round_page(map_addr + dyl_length),
					     VM_MAP_NO_FLAGS);
			goto out;
		}

		if (map_addr != dyl_start)
			myresult.entry_point += (map_addr - dyl_start);
	} else
		ret = LOAD_FAILURE;

	if (ret == LOAD_SUCCESS) {
		result->dynlinker = TRUE;
		result->entry_point = myresult.entry_point;
		(void)ubc_map(vp, PROT_EXEC);
	}
out:
	vm_map_deallocate(copy_map);

	vnode_put(vp);
	return (ret);

}

/*
 * This routine exists to support the load_dylinker().
 *
 * This routine has its own, separate, understanding of the FAT file format,
 * which is terrifically unfortunate.
 */
static
load_return_t
get_macho_vnode(
	char			*path,
	integer_t		archbits,
	struct mach_header	*mach_header,
	off_t			*file_offset,
	off_t			*macho_size,
	struct vnode		**vpp
)
{
	struct vnode		*vp;
	struct vfs_context context;
	struct nameidata nid, *ndp;
	struct proc *p = current_proc();  /* XXXX */
	boolean_t		is_fat;
	struct fat_arch		fat_arch;
	int			error = LOAD_SUCCESS;
	int resid;
	union {
		struct mach_header	mach_header;
		struct fat_header	fat_header;
		char	pad[512];
	} header;
	off_t fsize = (off_t)0;
	struct	ucred *cred = kauth_cred_get();
	int err2;

	context.vc_proc = p;
	context.vc_ucred = cred;

	ndp = &nid;

	/* init the namei data to point at the file's pathname */
	NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE32, CAST_USER_ADDR_T(path), &context);

	if ((error = namei(ndp)) != 0) {
		if (error == ENOENT)
			error = LOAD_ENOENT;
		else
			error = LOAD_FAILURE;
		return(error);
	}
	nameidone(ndp);
	vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type != VREG) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* get size */
	if ((error = vnode_size(vp, &fsize, &context)) != 0) {
		error = LOAD_FAILURE;
		goto bad1;
	}

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* check access */
	if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE, &context)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	/* try to open it */
	if ((error = VNOP_OPEN(vp, FREAD, &context)) != 0) {
		error = LOAD_PROTECT;
		goto bad1;
	}

	if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)&header, sizeof(header), 0,
	    UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p)) != 0) {
		error = LOAD_IOERROR;
		goto bad2;
	}

	if (header.mach_header.magic == MH_MAGIC ||
	    header.mach_header.magic == MH_MAGIC_64)
	    is_fat = FALSE;
	else if (header.fat_header.magic == FAT_MAGIC ||
		 header.fat_header.magic == FAT_CIGAM)
	    is_fat = TRUE;
	else {
	    error = LOAD_BADMACHO;
	    goto bad2;
	}

	if (is_fat) {
		/* Look up our architecture in the fat file. */
		error = fatfile_getarch_with_bits(vp, archbits, (vm_offset_t)(&header.fat_header), &fat_arch);
		if (error != LOAD_SUCCESS)
			goto bad2;

		/* Read the Mach-O header out of it */
		error = vn_rdwr(UIO_READ, vp, (caddr_t)&header.mach_header,
				sizeof(header.mach_header), fat_arch.offset,
				UIO_SYSSPACE32, IO_NODELOCKED, cred, &resid, p);
		if (error) {
			error = LOAD_IOERROR;
			goto bad2;
		}

		/* Is this really a Mach-O? */
		if (header.mach_header.magic != MH_MAGIC &&
		    header.mach_header.magic != MH_MAGIC_64) {
			error = LOAD_BADMACHO;
			goto bad2;
		}

		*file_offset = fat_arch.offset;
		*macho_size = fsize = fat_arch.size;
	} else {
		/*
		 * Force get_macho_vnode() to fail if the architecture bits
		 * do not match the expected architecture bits.  This in
		 * turn causes load_dylinker() to fail for the same reason,
		 * so it ensures the dynamic linker and the binary are in
		 * lock-step.  This is potentially bad, if we ever add to
		 * the CPU_ARCH_* bits any bits that are desirable but not
		 * required, since the dynamic linker might work, but we will
		 * refuse to load it because of this check.
		 */
		if ((cpu_type_t)(header.mach_header.cputype & CPU_ARCH_MASK) != archbits)
			return(LOAD_BADARCH);

		*file_offset = 0;
		*macho_size = fsize;
	}

	*mach_header = header.mach_header;
	*vpp = vp;

	ubc_setsize(vp, fsize);

	return (error);

bad2:
	err2 = VNOP_CLOSE(vp, FREAD, &context);
	vnode_put(vp);
	return (error);

bad1:
	vnode_put(vp);
	return(error);
}