/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (C) 1988, 1989, NeXT, Inc.
 *
 * File: kern/mach_loader.c
 * Author: Avadis Tevanian, Jr.
 *
 * Mach object file loader (kernel version, for now).
 *
 * 21-Jul-88 Avadis Tevanian, Jr. (avie) at NeXT
 *	Started.
 */
39 | ||
40 | #include <sys/param.h> | |
41 | #include <sys/vnode_internal.h> | |
42 | #include <sys/uio.h> | |
43 | #include <sys/namei.h> | |
44 | #include <sys/proc_internal.h> | |
45 | #include <sys/kauth.h> | |
46 | #include <sys/stat.h> | |
47 | #include <sys/malloc.h> | |
48 | #include <sys/mount_internal.h> | |
49 | #include <sys/fcntl.h> | |
50 | #include <sys/file_internal.h> | |
51 | #include <sys/ubc_internal.h> | |
52 | #include <sys/imgact.h> | |
53 | #include <sys/codesign.h> | |
54 | #include <sys/proc_uuid_policy.h> | |
55 | #include <sys/reason.h> | |
56 | #include <sys/kdebug.h> | |
57 | #include <sys/spawn_internal.h> | |
58 | ||
59 | #include <mach/mach_types.h> | |
60 | #include <mach/vm_map.h> /* vm_allocate() */ | |
61 | #include <mach/mach_vm.h> /* mach_vm_allocate() */ | |
62 | #include <mach/vm_statistics.h> | |
63 | #include <mach/task.h> | |
64 | #include <mach/thread_act.h> | |
65 | ||
66 | #include <machine/vmparam.h> | |
67 | #include <machine/exec.h> | |
68 | #include <machine/pal_routines.h> | |
69 | ||
70 | #include <kern/ast.h> | |
71 | #include <kern/kern_types.h> | |
72 | #include <kern/cpu_number.h> | |
73 | #include <kern/mach_loader.h> | |
74 | #include <kern/mach_fat.h> | |
75 | #include <kern/kalloc.h> | |
76 | #include <kern/task.h> | |
77 | #include <kern/thread.h> | |
78 | #include <kern/page_decrypt.h> | |
79 | ||
80 | #include <mach-o/fat.h> | |
81 | #include <mach-o/loader.h> | |
82 | ||
83 | #include <vm/pmap.h> | |
84 | #include <vm/vm_map.h> | |
85 | #include <vm/vm_kern.h> | |
86 | #include <vm/vm_pager.h> | |
87 | #include <vm/vnode_pager.h> | |
88 | #include <vm/vm_protos.h> | |
89 | #include <vm/vm_shared_region.h> | |
90 | #include <IOKit/IOReturn.h> /* for kIOReturnNotPrivileged */ | |
91 | #include <IOKit/IOBSD.h> /* for IOVnodeHasEntitlement */ | |
92 | ||
93 | #include <os/overflow.h> | |
94 | ||
95 | /* | |
96 | * XXX vm/pmap.h should not treat these prototypes as MACH_KERNEL_PRIVATE | |
97 | * when KERNEL is defined. | |
98 | */ | |
99 | extern pmap_t pmap_create_options(ledger_t ledger, vm_map_size_t size, | |
100 | unsigned int flags); | |
101 | #if __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX | |
102 | extern void pmap_disable_user_jop(pmap_t pmap); | |
103 | #endif /* __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX */ | |
104 | ||
105 | /* XXX should have prototypes in a shared header file */ | |
106 | extern int get_map_nentries(vm_map_t); | |
107 | ||
108 | extern kern_return_t memory_object_signed(memory_object_control_t control, | |
109 | boolean_t is_signed); | |
110 | ||
111 | /* An empty load_result_t */ | |
112 | static const load_result_t load_result_null = { | |
113 | .mach_header = MACH_VM_MIN_ADDRESS, | |
114 | .entry_point = MACH_VM_MIN_ADDRESS, | |
115 | .user_stack = MACH_VM_MIN_ADDRESS, | |
116 | .user_stack_size = 0, | |
117 | .user_stack_alloc = MACH_VM_MIN_ADDRESS, | |
118 | .user_stack_alloc_size = 0, | |
119 | .all_image_info_addr = MACH_VM_MIN_ADDRESS, | |
120 | .all_image_info_size = 0, | |
121 | .thread_count = 0, | |
122 | .unixproc = 0, | |
123 | .dynlinker = 0, | |
124 | .needs_dynlinker = 0, | |
125 | .validentry = 0, | |
126 | .using_lcmain = 0, | |
127 | .is_64bit_addr = 0, | |
128 | .is_64bit_data = 0, | |
129 | .custom_stack = 0, | |
130 | .csflags = 0, | |
131 | .has_pagezero = 0, | |
132 | .uuid = { 0 }, | |
133 | .min_vm_addr = MACH_VM_MAX_ADDRESS, | |
134 | .max_vm_addr = MACH_VM_MIN_ADDRESS, | |
135 | .cs_end_offset = 0, | |
136 | .threadstate = NULL, | |
137 | .threadstate_sz = 0, | |
138 | .is_cambria = 0, | |
139 | .dynlinker_mach_header = MACH_VM_MIN_ADDRESS, | |
140 | .dynlinker_fd = -1, | |
141 | }; | |
142 | ||
143 | /* | |
144 | * Prototypes of static functions. | |
145 | */ | |
146 | static load_return_t | |
147 | parse_machfile( | |
148 | struct vnode *vp, | |
149 | vm_map_t map, | |
150 | thread_t thread, | |
151 | struct mach_header *header, | |
152 | off_t file_offset, | |
153 | off_t macho_size, | |
154 | int depth, | |
155 | int64_t slide, | |
156 | int64_t dyld_slide, | |
157 | load_result_t *result, | |
158 | load_result_t *binresult, | |
159 | struct image_params *imgp | |
160 | ); | |
161 | ||
162 | static load_return_t | |
163 | load_segment( | |
164 | struct load_command *lcp, | |
165 | uint32_t filetype, | |
166 | void *control, | |
167 | off_t pager_offset, | |
168 | off_t macho_size, | |
169 | struct vnode *vp, | |
170 | vm_map_t map, | |
171 | int64_t slide, | |
172 | load_result_t *result, | |
173 | struct image_params *imgp | |
174 | ); | |
175 | ||
176 | static load_return_t | |
177 | load_uuid( | |
178 | struct uuid_command *uulp, | |
179 | char *command_end, | |
180 | load_result_t *result | |
181 | ); | |
182 | ||
183 | static load_return_t | |
184 | load_version( | |
185 | struct version_min_command *vmc, | |
186 | boolean_t *found_version_cmd, | |
187 | int ip_flags, | |
188 | load_result_t *result | |
189 | ); | |
190 | ||
191 | static load_return_t | |
192 | load_code_signature( | |
193 | struct linkedit_data_command *lcp, | |
194 | struct vnode *vp, | |
195 | off_t macho_offset, | |
196 | off_t macho_size, | |
197 | cpu_type_t cputype, | |
198 | cpu_subtype_t cpusubtype, | |
199 | load_result_t *result, | |
200 | struct image_params *imgp); | |
201 | ||
202 | #if CONFIG_CODE_DECRYPTION | |
203 | static load_return_t | |
204 | set_code_unprotect( | |
205 | struct encryption_info_command *lcp, | |
206 | caddr_t addr, | |
207 | vm_map_t map, | |
208 | int64_t slide, | |
209 | struct vnode *vp, | |
210 | off_t macho_offset, | |
211 | cpu_type_t cputype, | |
212 | cpu_subtype_t cpusubtype); | |
213 | #endif | |
214 | ||
215 | static | |
216 | load_return_t | |
217 | load_main( | |
218 | struct entry_point_command *epc, | |
219 | thread_t thread, | |
220 | int64_t slide, | |
221 | load_result_t *result | |
222 | ); | |
223 | ||
224 | static | |
225 | load_return_t | |
226 | setup_driver_main( | |
227 | thread_t thread, | |
228 | int64_t slide, | |
229 | load_result_t *result | |
230 | ); | |
231 | ||
232 | static load_return_t | |
233 | load_unixthread( | |
234 | struct thread_command *tcp, | |
235 | thread_t thread, | |
236 | int64_t slide, | |
237 | boolean_t is_x86_64_compat_binary, | |
238 | load_result_t *result | |
239 | ); | |
240 | ||
241 | static load_return_t | |
242 | load_threadstate( | |
243 | thread_t thread, | |
244 | uint32_t *ts, | |
245 | uint32_t total_size, | |
246 | load_result_t * | |
247 | ); | |
248 | ||
249 | static load_return_t | |
250 | load_threadstack( | |
251 | thread_t thread, | |
252 | uint32_t *ts, | |
253 | uint32_t total_size, | |
254 | mach_vm_offset_t *user_stack, | |
255 | int *customstack, | |
256 | boolean_t is_x86_64_compat_binary, | |
257 | load_result_t *result | |
258 | ); | |
259 | ||
260 | static load_return_t | |
261 | load_threadentry( | |
262 | thread_t thread, | |
263 | uint32_t *ts, | |
264 | uint32_t total_size, | |
265 | mach_vm_offset_t *entry_point | |
266 | ); | |
267 | ||
268 | static load_return_t | |
269 | load_dylinker( | |
270 | struct dylinker_command *lcp, | |
271 | integer_t archbits, | |
272 | vm_map_t map, | |
273 | thread_t thread, | |
274 | int depth, | |
275 | int64_t slide, | |
276 | load_result_t *result, | |
277 | struct image_params *imgp | |
278 | ); | |
279 | ||
280 | ||
281 | #if __x86_64__ | |
282 | extern int bootarg_no32exec; | |
283 | static boolean_t | |
284 | check_if_simulator_binary( | |
285 | struct image_params *imgp, | |
286 | off_t file_offset, | |
287 | off_t macho_size); | |
288 | #endif | |
289 | ||
290 | struct macho_data; | |
291 | ||
292 | static load_return_t | |
293 | get_macho_vnode( | |
294 | const char *path, | |
295 | integer_t archbits, | |
296 | struct mach_header *mach_header, | |
297 | off_t *file_offset, | |
298 | off_t *macho_size, | |
299 | struct macho_data *macho_data, | |
300 | struct vnode **vpp, | |
301 | struct image_params *imgp | |
302 | ); | |
303 | ||
304 | static inline void | |
305 | widen_segment_command(const struct segment_command *scp32, | |
306 | struct segment_command_64 *scp) | |
307 | { | |
308 | scp->cmd = scp32->cmd; | |
309 | scp->cmdsize = scp32->cmdsize; | |
310 | bcopy(scp32->segname, scp->segname, sizeof(scp->segname)); | |
311 | scp->vmaddr = scp32->vmaddr; | |
312 | scp->vmsize = scp32->vmsize; | |
313 | scp->fileoff = scp32->fileoff; | |
314 | scp->filesize = scp32->filesize; | |
315 | scp->maxprot = scp32->maxprot; | |
316 | scp->initprot = scp32->initprot; | |
317 | scp->nsects = scp32->nsects; | |
318 | scp->flags = scp32->flags; | |
319 | } | |
320 | ||
321 | static void | |
322 | note_all_image_info_section(const struct segment_command_64 *scp, | |
323 | boolean_t is64, size_t section_size, const void *sections, | |
324 | int64_t slide, load_result_t *result) | |
325 | { | |
326 | const union { | |
327 | struct section s32; | |
328 | struct section_64 s64; | |
329 | } *sectionp; | |
330 | unsigned int i; | |
331 | ||
332 | ||
333 | if (strncmp(scp->segname, "__DATA_DIRTY", sizeof(scp->segname)) != 0 && | |
334 | strncmp(scp->segname, "__DATA", sizeof(scp->segname)) != 0) { | |
335 | return; | |
336 | } | |
337 | for (i = 0; i < scp->nsects; ++i) { | |
338 | sectionp = (const void *) | |
339 | ((const char *)sections + section_size * i); | |
340 | if (0 == strncmp(sectionp->s64.sectname, "__all_image_info", | |
341 | sizeof(sectionp->s64.sectname))) { | |
342 | result->all_image_info_addr = | |
343 | is64 ? sectionp->s64.addr : sectionp->s32.addr; | |
344 | result->all_image_info_addr += slide; | |
345 | result->all_image_info_size = | |
346 | is64 ? sectionp->s64.size : sectionp->s32.size; | |
347 | return; | |
348 | } | |
349 | } | |
350 | } | |
351 | ||
352 | #if __arm64__ | |
353 | /* | |
354 | * Allow bypassing some security rules (hard pagezero, no write+execute) | |
355 | * in exchange for better binary compatibility for legacy apps built | |
356 | * before 16KB-alignment was enforced. | |
357 | */ | |
358 | const int fourk_binary_compatibility_unsafe = TRUE; | |
359 | const int fourk_binary_compatibility_allow_wx = FALSE; | |
360 | #endif /* __arm64__ */ | |
361 | ||
362 | #if __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX | |
363 | /** | |
364 | * Determines whether this is an arm64e process which may host in-process | |
365 | * plugins. | |
366 | */ | |
367 | static inline bool | |
368 | arm64e_plugin_host(struct image_params *imgp, load_result_t *result) | |
369 | { | |
370 | if (imgp->ip_flags & IMGPF_NOJOP) { | |
371 | return false; | |
372 | } | |
373 | ||
374 | if (!result->platform_binary) { | |
375 | return false; | |
376 | } | |
377 | ||
378 | struct cs_blob *csblob = csvnode_get_blob(imgp->ip_vp, imgp->ip_arch_offset); | |
379 | const char *identity = csblob_get_identity(csblob); | |
380 | if (!identity) { | |
381 | return false; | |
382 | } | |
383 | ||
	/* Check whether the override host-plugin entitlement is present and the posix_spawn attribute disabling A keys was passed */
	if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, OVERRIDE_PLUGIN_HOST_ENTITLEMENT)) {
		return imgp->ip_flags & IMGPF_PLUGIN_HOST_DISABLE_A_KEYS;
	}

	/* Disabling library validation is a good signal that this process plans to host plugins */
	const char *const disable_lv_entitlements[] = {
		"com.apple.security.cs.disable-library-validation",
		"com.apple.private.cs.automator-plugins",
		CLEAR_LV_ENTITLEMENT,
	};
	for (size_t i = 0; i < ARRAY_COUNT(disable_lv_entitlements); i++) {
		if (IOVnodeHasEntitlement(imgp->ip_vp, (int64_t)imgp->ip_arch_offset, disable_lv_entitlements[i])) {
			return true;
		}
	}

	/* From /System/Library/Security/HardeningExceptions.plist */
	const char *const hardening_exceptions[] = {
		"com.apple.perl5",      /* Scripting engines may load third party code and jit */
		"com.apple.perl",       /* Scripting engines may load third party code and jit */
		"org.python.python",    /* Scripting engines may load third party code and jit */
		"com.apple.expect",     /* Scripting engines may load third party code and jit */
		"com.tcltk.wish",       /* Scripting engines may load third party code and jit */
		"com.tcltk.tclsh",      /* Scripting engines may load third party code and jit */
		"com.apple.ruby",       /* Scripting engines may load third party code and jit */
		"com.apple.bash",       /* Required for the 'enable' command */
		"com.apple.zsh",        /* Required for the 'zmodload' command */
		"com.apple.ksh",        /* Required for 'builtin' command */
	};
	for (size_t i = 0; i < ARRAY_COUNT(hardening_exceptions); i++) {
		if (strncmp(hardening_exceptions[i], identity, strlen(hardening_exceptions[i])) == 0) {
			return true;
		}
	}

	return false;
}
#endif /* __has_feature(ptrauth_calls) && XNU_TARGET_OS_OSX */

load_return_t
load_machfile(
	struct image_params *imgp,
	struct mach_header *header,
	thread_t thread,
	vm_map_t *mapp,
	load_result_t *result
	)
{
	struct vnode *vp = imgp->ip_vp;
	off_t file_offset = imgp->ip_arch_offset;
	off_t macho_size = imgp->ip_arch_size;
	off_t total_size = 0;
	off_t file_size = imgp->ip_vattr->va_data_size;
	pmap_t pmap = 0; /* protected by create_map */
	vm_map_t map;
	load_result_t myresult;
	load_return_t lret;
	boolean_t enforce_hard_pagezero = TRUE;
	int in_exec = (imgp->ip_flags & IMGPF_EXEC);
	task_t task = current_task();
	int64_t aslr_page_offset = 0;
	int64_t dyld_aslr_page_offset = 0;
	int64_t aslr_section_size = 0;
	int64_t aslr_section_offset = 0;
	kern_return_t kret;
	unsigned int pmap_flags = 0;

	if (os_add_overflow(file_offset, macho_size, &total_size) ||
	    total_size > file_size) {
		return LOAD_BADMACHO;
	}

	result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
	result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);
#if defined(HAS_APPLE_PAC)
	pmap_flags |= (imgp->ip_flags & IMGPF_NOJOP) ? PMAP_CREATE_DISABLE_JOP : 0;
#endif /* defined(HAS_APPLE_PAC) */
	pmap_flags |= result->is_64bit_addr ? PMAP_CREATE_64BIT : 0;

	task_t ledger_task;
	if (imgp->ip_new_thread) {
		ledger_task = get_threadtask(imgp->ip_new_thread);
	} else {
		ledger_task = task;
	}

#if defined(PMAP_CREATE_FORCE_4K_PAGES) && (DEBUG || DEVELOPMENT)
	if (imgp->ip_px_sa != NULL) {
		struct _posix_spawnattr *psa = (struct _posix_spawnattr *) imgp->ip_px_sa;
		if (psa->psa_flags & _POSIX_SPAWN_FORCE_4K_PAGES) {
			pmap_flags |= PMAP_CREATE_FORCE_4K_PAGES;
		}
	}
#endif /* defined(PMAP_CREATE_FORCE_4K_PAGES) && (DEBUG || DEVELOPMENT) */

	pmap = pmap_create_options(get_task_ledger(ledger_task),
	    (vm_map_size_t) 0,
	    pmap_flags);
	if (pmap == NULL) {
		return LOAD_RESOURCE;
	}
	map = vm_map_create(pmap,
	    0,
	    vm_compute_max_offset(result->is_64bit_addr),
	    TRUE);

#if defined(__arm64__)
	if (result->is_64bit_addr) {
		/* enforce 16KB alignment of VM map entries */
		vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
	} else {
		vm_map_set_page_shift(map, page_shift_user32);
	}
#elif (__ARM_ARCH_7K__ >= 2) && defined(PLATFORM_WatchOS)
	/* enforce 16KB alignment for watch targets with new ABI */
	vm_map_set_page_shift(map, SIXTEENK_PAGE_SHIFT);
#endif /* __arm64__ */

#if PMAP_CREATE_FORCE_4K_PAGES
	if (pmap_flags & PMAP_CREATE_FORCE_4K_PAGES) {
		DEBUG4K_LIFE("***** launching '%s' as 4k *****\n", vp->v_name);
		vm_map_set_page_shift(map, FOURK_PAGE_SHIFT);
	}
#endif /* PMAP_CREATE_FORCE_4K_PAGES */

#ifndef CONFIG_ENFORCE_SIGNED_CODE
	/* This turns off faulting for executable pages, which makes it
	 * possible to circumvent Code Signing Enforcement. The per-process
	 * flag (CS_ENFORCEMENT) is not set yet, but we can use the
	 * global flag.
	 */
	if (!cs_process_global_enforcement() && (header->flags & MH_ALLOW_STACK_EXECUTION)) {
		vm_map_disable_NX(map);
		// TODO: Message Trace or log that this is happening
	}
#endif

	/* Forcibly disallow execution from data pages even if the arch
	 * normally permits it. */
	if ((header->flags & MH_NO_HEAP_EXECUTION) && !(imgp->ip_flags & IMGPF_ALLOW_DATA_EXEC)) {
		vm_map_disallow_data_exec(map);
	}

	/*
	 * Compute a random offset for ASLR, and an independent random offset for dyld.
	 */
	if (!(imgp->ip_flags & IMGPF_DISABLE_ASLR)) {
		vm_map_get_max_aslr_slide_section(map, &aslr_section_offset, &aslr_section_size);
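		/*
		 * On return, aslr_section_offset holds the number of candidate
		 * section slides; pick one at random and convert it to a byte
		 * offset before folding it into the page-level slide below.
		 */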
		aslr_section_offset = (random() % aslr_section_offset) * aslr_section_size;

		aslr_page_offset = random();
		aslr_page_offset %= vm_map_get_max_aslr_slide_pages(map);
		aslr_page_offset <<= vm_map_page_shift(map);

		dyld_aslr_page_offset = random();
		dyld_aslr_page_offset %= vm_map_get_max_loader_aslr_slide_pages(map);
		dyld_aslr_page_offset <<= vm_map_page_shift(map);

		aslr_page_offset += aslr_section_offset;
	}
	if (vm_map_page_shift(map) < (int)PAGE_SHIFT) {
		DEBUG4K_LOAD("slide=0x%llx dyld_slide=0x%llx\n", aslr_page_offset, dyld_aslr_page_offset);
	}

	if (!result) {
		result = &myresult;
	}

	*result = load_result_null;

	/*
	 * re-set the bitness on the load result since we cleared the load result above.
	 */
	result->is_64bit_addr = ((imgp->ip_flags & IMGPF_IS_64BIT_ADDR) == IMGPF_IS_64BIT_ADDR);
	result->is_64bit_data = ((imgp->ip_flags & IMGPF_IS_64BIT_DATA) == IMGPF_IS_64BIT_DATA);

	lret = parse_machfile(vp, map, thread, header, file_offset, macho_size,
	    0, aslr_page_offset, dyld_aslr_page_offset, result,
	    NULL, imgp);

	if (lret != LOAD_SUCCESS) {
		vm_map_deallocate(map); /* will lose pmap reference too */
		return lret;
	}

#if __x86_64__
	/*
	 * On x86, for compatibility, don't enforce the hard page-zero restriction for 32-bit binaries.
	 */
	if (!result->is_64bit_addr) {
		enforce_hard_pagezero = FALSE;
	}

	/*
	 * For processes with IMGPF_HIGH_BITS_ASLR, add a few random high bits
	 * to the start address for "anywhere" memory allocations.
	 */
#define VM_MAP_HIGH_START_BITS_COUNT 8
#define VM_MAP_HIGH_START_BITS_SHIFT 27
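	/* 8 random bits shifted left by 27 randomize the start in 128MB steps over a 32GB range. */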
	if (result->is_64bit_addr &&
	    (imgp->ip_flags & IMGPF_HIGH_BITS_ASLR)) {
		int random_bits;
		vm_map_offset_t high_start;

		random_bits = random();
		random_bits &= (1 << VM_MAP_HIGH_START_BITS_COUNT) - 1;
		high_start = (((vm_map_offset_t)random_bits)
		    << VM_MAP_HIGH_START_BITS_SHIFT);
		vm_map_set_high_start(map, high_start);
	}
#endif /* __x86_64__ */

	/*
	 * Check to see if the page zero is enforced by the map->min_offset.
	 */
	if (enforce_hard_pagezero &&
	    (vm_map_has_hard_pagezero(map, 0x1000) == FALSE)) {
#if __arm64__
		if (
			!result->is_64bit_addr && /* not 64-bit address space */
			!(header->flags & MH_PIE) && /* not PIE */
			(vm_map_page_shift(map) != FOURK_PAGE_SHIFT ||
			PAGE_SHIFT != FOURK_PAGE_SHIFT) && /* page size != 4KB */
			result->has_pagezero && /* has a "soft" page zero */
			fourk_binary_compatibility_unsafe) {
			/*
			 * For backwards compatibility of "4K" apps on
			 * a 16K system, do not enforce a hard page zero...
			 */
		} else
#endif /* __arm64__ */
		{
			vm_map_deallocate(map); /* will lose pmap reference too */
			return LOAD_BADMACHO;
		}
	}

#if __arm64__
	if (enforce_hard_pagezero && result->is_64bit_addr && (header->cputype == CPU_TYPE_ARM64)) {
		/* 64 bit ARM binary must have "hard page zero" of 4GB to cover the lower 32 bit address space */
		if (vm_map_has_hard_pagezero(map, 0x100000000) == FALSE) {
			vm_map_deallocate(map); /* will lose pmap reference too */
			return LOAD_BADMACHO;
		}
	}
#endif

	vm_commit_pagezero_status(map);

	/*
	 * If this is an exec, then we are going to destroy the old
	 * task, and it's correct to halt it; if it's spawn, the
	 * task is not yet running, and it makes no sense.
	 */
	if (in_exec) {
		proc_t p = vfs_context_proc(imgp->ip_vfs_context);
		/*
		 * Mark the task as halting and start the other
		 * threads towards terminating themselves. Then
		 * make sure any threads waiting for a process
		 * transition get informed that we are committed to
		 * this transition, and then finally complete the
		 * task halting (wait for threads and then cleanup
		 * task resources).
		 *
		 * NOTE: task_start_halt() makes sure that no new
		 * threads are created in the task during the transition.
		 * We need to mark the workqueue as exiting before we
		 * wait for threads to terminate (at the end of which
		 * we no longer have a prohibition on thread creation).
		 *
		 * Finally, clean up any lingering workqueue data structures
		 * that may have been left behind by the workqueue threads
		 * as they exited (and then clean up the work queue itself).
		 */
		kret = task_start_halt(task);
		if (kret != KERN_SUCCESS) {
			vm_map_deallocate(map); /* will lose pmap reference too */
			return LOAD_FAILURE;
		}
		proc_transcommit(p, 0);
		workq_mark_exiting(p);
		task_complete_halt(task);
		workq_exit(p);

		/*
		 * Roll up accounting info to new task. The roll up is done after
		 * task_complete_halt to make sure the thread accounting info is
		 * rolled up to current_task.
		 */
		task_rollup_accounting_info(get_threadtask(thread), task);
	}
	*mapp = map;

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
	/*
	 * arm64e plugin hosts currently run with JOP keys disabled, since they
	 * may need to run arm64 plugins.
	 */
	if (arm64e_plugin_host(imgp, result)) {
		imgp->ip_flags |= IMGPF_NOJOP;
		pmap_disable_user_jop(pmap);
	}
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */

#ifdef CONFIG_32BIT_TELEMETRY
	if (!result->is_64bit_data) {
		/*
		 * This may not need to be an AST; we merely need to ensure that
		 * we gather telemetry at the point where all of the information
		 * that we want has been added to the process.
		 */
		task_set_32bit_log_flag(get_threadtask(thread));
		act_set_astbsd(thread);
	}
#endif /* CONFIG_32BIT_TELEMETRY */

	return LOAD_SUCCESS;
}

int macho_printf = 0;
#define MACHO_PRINTF(args)		\
	do {				\
		if (macho_printf) {	\
			printf args;	\
		}			\
	} while (0)
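/* Callers pass a parenthesized argument list, e.g. MACHO_PRINTF(("slide 0x%llx\n", slide)); */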
712 | ||
713 | ||
714 | static boolean_t | |
715 | pie_required( | |
716 | cpu_type_t exectype, | |
717 | cpu_subtype_t execsubtype) | |
718 | { | |
719 | switch (exectype) { | |
720 | case CPU_TYPE_X86_64: | |
721 | return FALSE; | |
722 | case CPU_TYPE_ARM64: | |
723 | return TRUE; | |
724 | case CPU_TYPE_ARM: | |
725 | switch (execsubtype) { | |
726 | case CPU_SUBTYPE_ARM_V7K: | |
727 | return TRUE; | |
728 | } | |
729 | break; | |
730 | } | |
731 | return FALSE; | |
732 | } | |
733 | ||
/*
 * The file size of a mach-o file is limited to 32 bits; this is because
 * this is the limit on the kalloc() of enough bytes for a mach_header and
 * the contents of its sizeofcmds, which is currently constrained to 32
 * bits in the file format itself. We read into the kernel buffer the
 * commands section, and then parse it in order to parse the mach-o file
 * format load_command segment(s). We are only interested in a subset of
 * the total set of possible commands. If "map"==VM_MAP_NULL or
 * "thread"==THREAD_NULL, do not make permanent VM modifications,
 * just preflight the parse.
 */
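/*
 * For example, a hypothetical preflight-only caller could validate an image
 * without touching the address space by passing null map/thread handles:
 *
 *	lret = parse_machfile(vp, VM_MAP_NULL, THREAD_NULL, header,
 *	    file_offset, macho_size, 0, 0, 0, &myresult, NULL, imgp);
 */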
static
load_return_t
parse_machfile(
	struct vnode *vp,
	vm_map_t map,
	thread_t thread,
	struct mach_header *header,
	off_t file_offset,
	off_t macho_size,
	int depth,
	int64_t aslr_offset,
	int64_t dyld_aslr_offset,
	load_result_t *result,
	load_result_t *binresult,
	struct image_params *imgp
	)
{
	uint32_t ncmds;
	struct load_command *lcp;
	struct dylinker_command *dlp = 0;
	void *control;
	load_return_t ret = LOAD_SUCCESS;
	void *addr;
	vm_size_t alloc_size, cmds_size;
	size_t offset;
	size_t oldoffset;       /* for overflow check */
	int pass;
	proc_t p = vfs_context_proc(imgp->ip_vfs_context);
	int error;
	int resid = 0;
	int spawn = (imgp->ip_flags & IMGPF_SPAWN);
	int vfexec = (imgp->ip_flags & IMGPF_VFORK_EXEC);
	size_t mach_header_sz = sizeof(struct mach_header);
	boolean_t abi64;
	boolean_t got_code_signatures = FALSE;
	boolean_t found_header_segment = FALSE;
	boolean_t found_xhdr = FALSE;
	boolean_t found_version_cmd = FALSE;
	int64_t slide = 0;
	boolean_t dyld_no_load_addr = FALSE;
	boolean_t is_dyld = FALSE;
	vm_map_offset_t effective_page_mask = PAGE_MASK;
#if __arm64__
	uint64_t pagezero_end = 0;
	uint64_t executable_end = 0;
	uint64_t writable_start = 0;
	vm_map_size_t effective_page_size;

	effective_page_mask = vm_map_page_mask(map);
	effective_page_size = vm_map_page_size(map);
#endif /* __arm64__ */

	if (header->magic == MH_MAGIC_64 ||
	    header->magic == MH_CIGAM_64) {
		mach_header_sz = sizeof(struct mach_header_64);
	}

	/*
	 * Break infinite recursion
	 */
	if (depth > 2) {
		return LOAD_FAILURE;
	}

	depth++;
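	/* depth 1 is the main executable; depth 2 is the dynamic linker (dyld)
	 * loaded on its behalf. MH_EXECUTE is also accepted at depth 3; see
	 * the filetype checks below. */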
810 | ||
811 | /* | |
812 | * Check to see if right machine type. | |
813 | */ | |
814 | if (((cpu_type_t)(header->cputype & ~CPU_ARCH_MASK) != (cpu_type() & ~CPU_ARCH_MASK)) | |
815 | ) { | |
816 | return LOAD_BADARCH; | |
817 | } | |
818 | ||
819 | if (!grade_binary(header->cputype, | |
820 | header->cpusubtype & ~CPU_SUBTYPE_MASK, | |
821 | header->cpusubtype & CPU_SUBTYPE_MASK, TRUE)) { | |
822 | return LOAD_BADARCH; | |
823 | } | |
824 | ||
825 | abi64 = ((header->cputype & CPU_ARCH_ABI64) == CPU_ARCH_ABI64); | |
826 | ||
827 | switch (header->filetype) { | |
828 | case MH_EXECUTE: | |
829 | if (depth != 1 && depth != 3) { | |
830 | return LOAD_FAILURE; | |
831 | } | |
832 | if (header->flags & MH_DYLDLINK) { | |
833 | /* Check properties of dynamic executables */ | |
834 | if (!(header->flags & MH_PIE) && pie_required(header->cputype, header->cpusubtype & ~CPU_SUBTYPE_MASK)) { | |
835 | return LOAD_FAILURE; | |
836 | } | |
837 | result->needs_dynlinker = TRUE; | |
838 | } else if (header->cputype == CPU_TYPE_X86_64) { | |
839 | /* x86_64 static binaries allowed */ | |
840 | } else { | |
841 | /* Check properties of static executables (disallowed except for development) */ | |
842 | #if !(DEVELOPMENT || DEBUG) | |
843 | return LOAD_FAILURE; | |
844 | #endif | |
845 | } | |
846 | break; | |
847 | case MH_DYLINKER: | |
848 | if (depth != 2) { | |
849 | return LOAD_FAILURE; | |
850 | } | |
851 | is_dyld = TRUE; | |
852 | break; | |
853 | ||
854 | default: | |
855 | return LOAD_FAILURE; | |
856 | } | |
857 | ||
858 | /* | |
859 | * For PIE and dyld, slide everything by the ASLR offset. | |
860 | */ | |
861 | if ((header->flags & MH_PIE) || is_dyld) { | |
862 | slide = aslr_offset; | |
863 | } | |
864 | ||
865 | /* | |
866 | * Get the pager for the file. | |
867 | */ | |
868 | control = ubc_getobject(vp, UBC_FLAGS_NONE); | |
869 | ||
870 | /* ensure header + sizeofcmds falls within the file */ | |
871 | if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) || | |
872 | (off_t)cmds_size > macho_size || | |
873 | round_page_overflow(cmds_size, &alloc_size) || | |
874 | alloc_size > INT_MAX) { | |
875 | return LOAD_BADMACHO; | |
876 | } | |
877 | ||
878 | /* | |
879 | * Map the load commands into kernel memory. | |
880 | */ | |
881 | addr = kalloc(alloc_size); | |
882 | if (addr == NULL) { | |
883 | return LOAD_NOSPACE; | |
884 | } | |
885 | ||
886 | error = vn_rdwr(UIO_READ, vp, addr, (int)alloc_size, file_offset, | |
887 | UIO_SYSSPACE, 0, vfs_context_ucred(imgp->ip_vfs_context), &resid, p); | |
888 | if (error) { | |
889 | kfree(addr, alloc_size); | |
890 | return LOAD_IOERROR; | |
891 | } | |
892 | ||
	if (resid) {
		/* We must be able to read in as much as the mach_header indicated */
		kfree(addr, alloc_size);
		return LOAD_BADMACHO;
	}
900 | ||
901 | /* | |
902 | * Scan through the commands, processing each one as necessary. | |
903 | * We parse in three passes through the headers: | |
904 | * 0: determine if TEXT and DATA boundary can be page-aligned, load platform version | |
905 | * 1: thread state, uuid, code signature | |
906 | * 2: segments | |
907 | * 3: dyld, encryption, check entry point | |
908 | */ | |
909 | ||
910 | boolean_t slide_realign = FALSE; | |
911 | #if __arm64__ | |
912 | if (!abi64) { | |
913 | slide_realign = TRUE; | |
914 | } | |
915 | #endif | |
916 | ||
917 | for (pass = 0; pass <= 3; pass++) { | |
918 | if (pass == 1) { | |
919 | #if __arm64__ | |
920 | boolean_t is_pie; | |
921 | int64_t adjust; | |
922 | ||
923 | is_pie = ((header->flags & MH_PIE) != 0); | |
924 | if (pagezero_end != 0 && | |
925 | pagezero_end < effective_page_size) { | |
926 | /* need at least 1 page for PAGEZERO */ | |
927 | adjust = effective_page_size; | |
928 | MACHO_PRINTF(("pagezero boundary at " | |
929 | "0x%llx; adjust slide from " | |
930 | "0x%llx to 0x%llx%s\n", | |
931 | (uint64_t) pagezero_end, | |
932 | slide, | |
933 | slide + adjust, | |
934 | (is_pie | |
935 | ? "" | |
936 | : " BUT NO PIE ****** :-("))); | |
937 | if (is_pie) { | |
938 | slide += adjust; | |
939 | pagezero_end += adjust; | |
940 | executable_end += adjust; | |
941 | writable_start += adjust; | |
942 | } | |
943 | } | |
944 | if (pagezero_end != 0) { | |
945 | result->has_pagezero = TRUE; | |
946 | } | |
947 | if (executable_end == writable_start && | |
948 | (executable_end & effective_page_mask) != 0 && | |
949 | (executable_end & FOURK_PAGE_MASK) == 0) { | |
950 | /* | |
951 | * The TEXT/DATA boundary is 4K-aligned but | |
952 | * not page-aligned. Adjust the slide to make | |
953 | * it page-aligned and avoid having a page | |
954 | * with both write and execute permissions. | |
955 | */ | |
956 | adjust = | |
957 | (effective_page_size - | |
958 | (executable_end & effective_page_mask)); | |
959 | MACHO_PRINTF(("page-unaligned X-W boundary at " | |
960 | "0x%llx; adjust slide from " | |
961 | "0x%llx to 0x%llx%s\n", | |
962 | (uint64_t) executable_end, | |
963 | slide, | |
964 | slide + adjust, | |
965 | (is_pie | |
966 | ? "" | |
967 | : " BUT NO PIE ****** :-("))); | |
968 | if (is_pie) { | |
969 | slide += adjust; | |
970 | } | |
971 | } | |
972 | #endif /* __arm64__ */ | |
973 | ||
974 | if (dyld_no_load_addr && binresult) { | |
975 | /* | |
976 | * The dyld Mach-O does not specify a load address. Try to locate | |
977 | * it right after the main binary. If binresult == NULL, load | |
978 | * directly to the given slide. | |
979 | */ | |
980 | mach_vm_address_t max_vm_addr = binresult->max_vm_addr; | |
981 | slide = vm_map_round_page(slide + max_vm_addr, effective_page_mask); | |
982 | } | |
983 | } | |
984 | ||
985 | /* | |
986 | * Check that the entry point is contained in an executable segment | |
987 | */ | |
988 | if ((pass == 3) && (thread != THREAD_NULL)) { | |
989 | if (depth == 1 && imgp && (imgp->ip_flags & IMGPF_DRIVER)) { | |
990 | /* Driver binaries must have driverkit platform */ | |
991 | if (result->ip_platform == PLATFORM_DRIVERKIT) { | |
992 | /* Driver binaries have no entry point */ | |
993 | ret = setup_driver_main(thread, slide, result); | |
994 | } else { | |
995 | ret = LOAD_FAILURE; | |
996 | } | |
997 | } else if (!result->using_lcmain && result->validentry == 0) { | |
998 | ret = LOAD_FAILURE; | |
999 | } | |
			if (ret != LOAD_SUCCESS) {
				thread_state_initialize(thread);
				break;
			}
		}

		/*
		 * Check that some segment maps the start of the mach-o file, which is
		 * needed by the dynamic loader to read the mach headers, etc.
		 */
		if ((pass == 3) && (found_header_segment == FALSE)) {
			ret = LOAD_BADMACHO;
			break;
		}

		/*
		 * Loop through each of the load_commands indicated by the
		 * Mach-O header; if an absurd value is provided, we just
		 * run off the end of the reserved section by incrementing
		 * the offset too far, so we are implicitly fail-safe.
		 */
		offset = mach_header_sz;
		ncmds = header->ncmds;

		while (ncmds--) {
			/* ensure enough space for a minimal load command */
			if (offset + sizeof(struct load_command) > cmds_size) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Get a pointer to the command.
			 */
			lcp = (struct load_command *)(addr + offset);
			oldoffset = offset;

			/*
			 * Perform prevalidation of the struct load_command
			 * before we attempt to use its contents. Invalid
			 * values are ones which result in an overflow, or
			 * which can not possibly be valid commands, or which
			 * straddle or exist past the reserved section at the
			 * start of the image.
			 */
			if (os_add_overflow(offset, lcp->cmdsize, &offset) ||
			    lcp->cmdsize < sizeof(struct load_command) ||
			    offset > cmds_size) {
				ret = LOAD_BADMACHO;
				break;
			}

			/*
			 * Act on struct load_command's for which kernel
			 * intervention is required.
			 * Note that each load command implementation is expected to validate
			 * that lcp->cmdsize is large enough to fit its specific struct type
			 * before dereferencing fields not covered by struct load_command.
			 */
			switch (lcp->cmd) {
			case LC_SEGMENT: {
				struct segment_command *scp = (struct segment_command *) lcp;
				if (scp->cmdsize < sizeof(*scp)) {
					ret = LOAD_BADMACHO;
					break;
				}
				if (pass == 0) {
					if (is_dyld && scp->vmaddr == 0 && scp->fileoff == 0) {
						dyld_no_load_addr = TRUE;
						if (!slide_realign) {
							/* got what we need, bail early on pass 0 */
							continue;
						}
					}

#if __arm64__
					assert(!abi64);

					if (scp->initprot == 0 && scp->maxprot == 0 && scp->vmaddr == 0) {
						/* PAGEZERO */
						if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &pagezero_end) || pagezero_end > UINT32_MAX) {
							ret = LOAD_BADMACHO;
							break;
						}
					}
					if (scp->initprot & VM_PROT_EXECUTE) {
						/* TEXT */
						if (os_add3_overflow(scp->vmaddr, scp->vmsize, slide, &executable_end) || executable_end > UINT32_MAX) {
							ret = LOAD_BADMACHO;
							break;
						}
					}
					if (scp->initprot & VM_PROT_WRITE) {
						/* DATA */
						if (os_add_overflow(scp->vmaddr, slide, &writable_start) || writable_start > UINT32_MAX) {
							ret = LOAD_BADMACHO;
							break;
						}
					}
#endif /* __arm64__ */
					break;
				}

				if (pass == 1 && !strncmp(scp->segname, "__XHDR", sizeof(scp->segname))) {
					found_xhdr = TRUE;
				}

				if (pass != 2) {
					break;
				}

				if (abi64) {
					/*
					 * Having an LC_SEGMENT command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				    header->filetype,
				    control,
				    file_offset,
				    macho_size,
				    vp,
				    map,
				    slide,
				    result,
				    imgp);
				if (ret == LOAD_SUCCESS && scp->fileoff == 0 && scp->filesize > 0) {
					/* Enforce a single segment mapping offset zero, with R+X
					 * protection. */
					if (found_header_segment ||
					    ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
						ret = LOAD_BADMACHO;
						break;
					}
					found_header_segment = TRUE;
				}

				break;
			}
			case LC_SEGMENT_64: {
				struct segment_command_64 *scp64 = (struct segment_command_64 *) lcp;
				if (scp64->cmdsize < sizeof(*scp64)) {
					ret = LOAD_BADMACHO;
					break;
				}
				if (pass == 0) {
					if (is_dyld && scp64->vmaddr == 0 && scp64->fileoff == 0) {
						dyld_no_load_addr = TRUE;
					}
					/* got what we need, bail early on pass 0 */
					continue;
				}

				if (pass == 1 && !strncmp(scp64->segname, "__XHDR", sizeof(scp64->segname))) {
					found_xhdr = TRUE;
				}

				if (pass != 2) {
					break;
				}

				if (!abi64) {
					/*
					 * Having an LC_SEGMENT_64 command for the
					 * wrong ABI is invalid <rdar://problem/11021230>
					 */
					ret = LOAD_BADMACHO;
					break;
				}

				ret = load_segment(lcp,
				    header->filetype,
				    control,
				    file_offset,
				    macho_size,
				    vp,
				    map,
				    slide,
				    result,
				    imgp);

				if (ret == LOAD_SUCCESS && scp64->fileoff == 0 && scp64->filesize > 0) {
					/* Enforce a single segment mapping offset zero, with R+X
					 * protection. */
					if (found_header_segment ||
					    ((scp64->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) != (VM_PROT_READ | VM_PROT_EXECUTE))) {
						ret = LOAD_BADMACHO;
						break;
					}
					found_header_segment = TRUE;
				}

				break;
			}
			case LC_UNIXTHREAD: {
				boolean_t is_x86_64_compat_binary = FALSE;
				if (pass != 1) {
					break;
				}
				ret = load_unixthread(
					(struct thread_command *) lcp,
					thread,
					slide,
					is_x86_64_compat_binary,
					result);
				break;
			}
			case LC_MAIN:
				if (pass != 1) {
					break;
				}
				if (depth != 1) {
					break;
				}
				ret = load_main(
					(struct entry_point_command *) lcp,
					thread,
					slide,
					result);
				break;
			case LC_LOAD_DYLINKER:
				if (pass != 3) {
					break;
				}
				if ((depth == 1) && (dlp == 0)) {
					dlp = (struct dylinker_command *)lcp;
				} else {
					ret = LOAD_FAILURE;
				}
				break;
			case LC_UUID:
				if (pass == 1 && depth == 1) {
					ret = load_uuid((struct uuid_command *) lcp,
					    (char *)addr + cmds_size,
					    result);
				}
				break;
			case LC_CODE_SIGNATURE:
				/* CODE SIGNING */
				if (pass != 1) {
					break;
				}

				/* pager -> uip ->
				 * load signatures & store in uip
				 * set VM object "signed_pages"
				 */
				ret = load_code_signature(
					(struct linkedit_data_command *) lcp,
					vp,
					file_offset,
					macho_size,
					header->cputype,
					header->cpusubtype,
					result,
					imgp);
				if (ret != LOAD_SUCCESS) {
					printf("proc %d: load code signature error %d "
					    "for file \"%s\"\n",
					    p->p_pid, ret, vp->v_name);
					/*
					 * Allow injections to be ignored on devices w/o enforcement enabled
					 */
					if (!cs_process_global_enforcement()) {
						ret = LOAD_SUCCESS; /* ignore error */
					}
				} else {
					got_code_signatures = TRUE;
				}

				if (got_code_signatures) {
					unsigned tainted = CS_VALIDATE_TAINTED;
					boolean_t valid = FALSE;
					vm_size_t off = 0;


					if (cs_debug > 10) {
						printf("validating initial pages of %s\n", vp->v_name);
					}

					while (off < alloc_size && ret == LOAD_SUCCESS) {
						tainted = CS_VALIDATE_TAINTED;

						valid = cs_validate_range(vp,
						    NULL,
						    file_offset + off,
						    addr + off,
						    MIN(PAGE_SIZE, cmds_size),
						    &tainted);
						if (!valid || (tainted & CS_VALIDATE_TAINTED)) {
							if (cs_debug) {
								printf("CODE SIGNING: %s[%d]: invalid initial page at offset %lld validated:%d tainted:%d csflags:0x%x\n",
								    vp->v_name, p->p_pid, (long long)(file_offset + off), valid, tainted, result->csflags);
							}
							if (cs_process_global_enforcement() ||
							    (result->csflags & (CS_HARD | CS_KILL | CS_ENFORCEMENT))) {
								ret = LOAD_FAILURE;
							}
							result->csflags &= ~CS_VALID;
						}
						off += PAGE_SIZE;
					}
				}

				break;
#if CONFIG_CODE_DECRYPTION
			case LC_ENCRYPTION_INFO:
			case LC_ENCRYPTION_INFO_64:
				if (pass != 3) {
					break;
				}
				ret = set_code_unprotect(
					(struct encryption_info_command *) lcp,
					addr, map, slide, vp, file_offset,
					header->cputype, header->cpusubtype);
				if (ret != LOAD_SUCCESS) {
					os_reason_t load_failure_reason = OS_REASON_NULL;
					printf("proc %d: set_code_unprotect() error %d "
					    "for file \"%s\"\n",
					    p->p_pid, ret, vp->v_name);
					/*
					 * Don't let the app run if it's
					 * encrypted but we failed to set up the
					 * decrypter. If the keys are missing it will
					 * return LOAD_DECRYPTFAIL.
					 */
					if (ret == LOAD_DECRYPTFAIL) {
						/* failed to load due to missing FP keys */
						proc_lock(p);
						p->p_lflag |= P_LTERM_DECRYPTFAIL;
						proc_unlock(p);

						KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
						    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT, 0, 0);
						load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_FAIRPLAY_DECRYPT);
					} else {
						KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
						    p->p_pid, OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT, 0, 0);
						load_failure_reason = os_reason_create(OS_REASON_EXEC, EXEC_EXIT_REASON_DECRYPT);
					}

					/*
					 * Don't signal the process if it was forked and in a partially constructed
					 * state as part of a spawn -- it will just be torn down when the exec fails.
					 */
					if (!spawn) {
						assert(load_failure_reason != OS_REASON_NULL);
						if (vfexec) {
							psignal_vfork_with_reason(p, get_threadtask(imgp->ip_new_thread), imgp->ip_new_thread, SIGKILL, load_failure_reason);
							load_failure_reason = OS_REASON_NULL;
						} else {
							psignal_with_reason(p, SIGKILL, load_failure_reason);
							load_failure_reason = OS_REASON_NULL;
						}
					} else {
						os_reason_free(load_failure_reason);
						load_failure_reason = OS_REASON_NULL;
					}
				}
				break;
#endif
			case LC_VERSION_MIN_IPHONEOS:
			case LC_VERSION_MIN_MACOSX:
			case LC_VERSION_MIN_WATCHOS:
			case LC_VERSION_MIN_TVOS: {
				struct version_min_command *vmc;

				if (depth != 1 || pass != 0) {
					break;
				}
				vmc = (struct version_min_command *) lcp;
				ret = load_version(vmc, &found_version_cmd, imgp->ip_flags, result);
#if XNU_TARGET_OS_OSX
				if (ret == LOAD_SUCCESS) {
					if (result->ip_platform == PLATFORM_IOS) {
						vm_map_mark_alien(map);
					} else {
						assert(!vm_map_is_alien(map));
					}
				}
#endif /* XNU_TARGET_OS_OSX */
				break;
			}
			case LC_BUILD_VERSION: {
				if (depth != 1 || pass != 0) {
					break;
				}
				struct build_version_command *bvc = (struct build_version_command *)lcp;
				if (bvc->cmdsize < sizeof(*bvc)) {
					ret = LOAD_BADMACHO;
					break;
				}
				if (found_version_cmd == TRUE) {
					ret = LOAD_BADMACHO;
					break;
				}
				result->ip_platform = bvc->platform;
				result->lr_sdk = bvc->sdk;
				result->lr_min_sdk = bvc->minos;
				found_version_cmd = TRUE;
#if XNU_TARGET_OS_OSX
				if (result->ip_platform == PLATFORM_IOS) {
					vm_map_mark_alien(map);
				} else {
					assert(!vm_map_is_alien(map));
				}
#endif /* XNU_TARGET_OS_OSX */
				break;
			}
			default:
				/* Other commands are ignored by the kernel */
				ret = LOAD_SUCCESS;
				break;
			}
			if (ret != LOAD_SUCCESS) {
				break;
			}
		}
		if (ret != LOAD_SUCCESS) {
			break;
		}
	}

	if (ret == LOAD_SUCCESS) {
		if (!got_code_signatures && cs_process_global_enforcement()) {
			ret = LOAD_FAILURE;
		}

		/* Make sure if we need dyld, we got it */
		if (result->needs_dynlinker && !dlp) {
			ret = LOAD_FAILURE;
		}

		if ((ret == LOAD_SUCCESS) && (dlp != 0)) {
			/*
			 * load the dylinker, and slide it by the independent DYLD ASLR
			 * offset regardless of the PIE-ness of the main binary.
			 */
			ret = load_dylinker(dlp, header->cputype, map, thread, depth,
			    dyld_aslr_offset, result, imgp);
		}


		if ((ret == LOAD_SUCCESS) && (depth == 1)) {
			if (result->thread_count == 0) {
				ret = LOAD_FAILURE;
			}
#if CONFIG_ENFORCE_SIGNED_CODE
			if (result->needs_dynlinker && !(result->csflags & CS_DYLD_PLATFORM)) {
				ret = LOAD_FAILURE;
			}
#endif
		}
	}

	if (ret == LOAD_BADMACHO && found_xhdr) {
		ret = LOAD_BADMACHO_UPX;
	}

	kfree(addr, alloc_size);

	return ret;
}

load_return_t
validate_potential_simulator_binary(
	cpu_type_t exectype __unused,
	struct image_params *imgp __unused,
	off_t file_offset __unused,
	off_t macho_size __unused)
{
#if __x86_64__
	/* Allow 32 bit exec only for simulator binaries */
	if (bootarg_no32exec && imgp != NULL && exectype == CPU_TYPE_X86) {
		if (imgp->ip_simulator_binary == IMGPF_SB_DEFAULT) {
			boolean_t simulator_binary = check_if_simulator_binary(imgp, file_offset, macho_size);
			imgp->ip_simulator_binary = simulator_binary ? IMGPF_SB_TRUE : IMGPF_SB_FALSE;
		}

		if (imgp->ip_simulator_binary != IMGPF_SB_TRUE) {
			return LOAD_BADARCH;
		}
	}
#endif
	return LOAD_SUCCESS;
}

#if __x86_64__
static boolean_t
check_if_simulator_binary(
	struct image_params *imgp,
	off_t file_offset,
	off_t macho_size)
{
	struct mach_header *header;
	char *ip_vdata = NULL;
	kauth_cred_t cred = NULL;
	uint32_t ncmds;
	struct load_command *lcp;
	boolean_t simulator_binary = FALSE;
	void *addr = NULL;
	vm_size_t alloc_size, cmds_size;
	size_t offset;
	proc_t p = current_proc(); /* XXXX */
	int error;
	int resid = 0;
	size_t mach_header_sz = sizeof(struct mach_header);


	cred = kauth_cred_proc_ref(p);

	/* Allocate page to copyin mach header */
	ip_vdata = kalloc(PAGE_SIZE);
	if (ip_vdata == NULL) {
		goto bad;
	}
	bzero(ip_vdata, PAGE_SIZE);
1521 | ||
1522 | /* Read the Mach-O header */ | |
1523 | error = vn_rdwr(UIO_READ, imgp->ip_vp, ip_vdata, | |
1524 | PAGE_SIZE, file_offset, | |
1525 | UIO_SYSSPACE, (IO_UNIT | IO_NODELOCKED), | |
1526 | cred, &resid, p); | |
1527 | if (error) { | |
1528 | goto bad; | |
1529 | } | |
1530 | ||
1531 | header = (struct mach_header *)ip_vdata; | |
1532 | ||
1533 | if (header->magic == MH_MAGIC_64 || | |
1534 | header->magic == MH_CIGAM_64) { | |
1535 | mach_header_sz = sizeof(struct mach_header_64); | |
1536 | } | |
1537 | ||
1538 | /* ensure header + sizeofcmds falls within the file */ | |
1539 | if (os_add_overflow(mach_header_sz, header->sizeofcmds, &cmds_size) || | |
1540 | (off_t)cmds_size > macho_size || | |
1541 | round_page_overflow(cmds_size, &alloc_size) || | |
1542 | alloc_size > INT_MAX) { | |
1543 | goto bad; | |
1544 | } | |
1545 | ||
1546 | /* | |
1547 | * Map the load commands into kernel memory. | |
1548 | */ | |
1549 | addr = kalloc(alloc_size); | |
1550 | if (addr == NULL) { | |
1551 | goto bad; | |
1552 | } | |
1553 | ||
1554 | error = vn_rdwr(UIO_READ, imgp->ip_vp, addr, (int)alloc_size, file_offset, | |
1555 | UIO_SYSSPACE, IO_NODELOCKED, cred, &resid, p); | |
1556 | if (error) { | |
1557 | goto bad; | |
1558 | } | |
1559 | ||
1560 | if (resid) { | |
1561 | /* We must be able to read in as much as the mach_header indicated */ | |
1562 | goto bad; | |
1563 | } | |
1564 | ||
1565 | /* | |
1566 | * Loop through each of the load_commands indicated by the | |
1567 | * Mach-O header; if an absurd value is provided, we just | |
1568 | * run off the end of the reserved section by incrementing | |
1569 | * the offset too far, so we are implicitly fail-safe. | |
1570 | */ | |
1571 | offset = mach_header_sz; | |
1572 | ncmds = header->ncmds; | |
1573 | ||
1574 | while (ncmds--) { | |
1575 | /* ensure enough space for a minimal load command */ | |
1576 | if (offset + sizeof(struct load_command) > cmds_size) { | |
1577 | break; | |
1578 | } | |
1579 | ||
1580 | /* | |
1581 | * Get a pointer to the command. | |
1582 | */ | |
1583 | lcp = (struct load_command *)(addr + offset); | |
1584 | ||
1585 | /* | |
1586 | * Perform prevalidation of the struct load_command | |
1587 | * before we attempt to use its contents. Invalid | |
1588 | * values are ones which result in an overflow, or | |
1589 | * which can not possibly be valid commands, or which | |
1590 | * straddle or exist past the reserved section at the | |
1591 | * start of the image. | |
1592 | */ | |
1593 | if (os_add_overflow(offset, lcp->cmdsize, &offset) || | |
1594 | lcp->cmdsize < sizeof(struct load_command) || | |
1595 | offset > cmds_size) { | |
1596 | break; | |
1597 | } | |
1598 | ||
		/* Check if it's a simulator binary. */
1600 | switch (lcp->cmd) { | |
1601 | case LC_VERSION_MIN_WATCHOS: | |
1602 | simulator_binary = TRUE; | |
1603 | break; | |
1604 | ||
1605 | case LC_BUILD_VERSION: { | |
1606 | struct build_version_command *bvc; | |
1607 | ||
1608 | bvc = (struct build_version_command *) lcp; | |
1609 | if (bvc->cmdsize < sizeof(*bvc)) { | |
1610 | /* unsafe to use this command struct if cmdsize | |
1611 | * validated above is too small for it to fit */ | |
1612 | break; | |
1613 | } | |
1614 | if (bvc->platform == PLATFORM_IOSSIMULATOR || | |
1615 | bvc->platform == PLATFORM_WATCHOSSIMULATOR) { | |
1616 | simulator_binary = TRUE; | |
1617 | } | |
1618 | ||
1619 | break; | |
1620 | } | |
1621 | ||
1622 | case LC_VERSION_MIN_IPHONEOS: { | |
1623 | simulator_binary = TRUE; | |
1624 | break; | |
1625 | } | |
1626 | ||
1627 | default: | |
1628 | /* ignore other load commands */ | |
1629 | break; | |
1630 | } | |
1631 | ||
1632 | if (simulator_binary == TRUE) { | |
1633 | break; | |
1634 | } | |
1635 | } | |
1636 | ||
1637 | bad: | |
1638 | if (ip_vdata) { | |
1639 | kfree(ip_vdata, PAGE_SIZE); | |
1640 | } | |
1641 | ||
1642 | if (cred) { | |
1643 | kauth_cred_unref(&cred); | |
1644 | } | |
1645 | ||
1646 | if (addr) { | |
1647 | kfree(addr, alloc_size); | |
1648 | } | |
1649 | ||
1650 | return simulator_binary; | |
1651 | } | |
1652 | #endif /* __x86_64__ */ | |
1653 | ||
1654 | #if CONFIG_CODE_DECRYPTION | |
1655 | ||
1656 | #define APPLE_UNPROTECTED_HEADER_SIZE (3 * 4096) | |
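/*
 * In other words, only bytes past the first 3 * 4096 of a
 * protected slice are run through the DSMOS transform; see
 * unprotect_dsmos_segment() below.
 */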
1657 | ||
1658 | static load_return_t | |
1659 | unprotect_dsmos_segment( | |
1660 | uint64_t file_off, | |
1661 | uint64_t file_size, | |
1662 | struct vnode *vp, | |
1663 | off_t macho_offset, | |
1664 | vm_map_t map, | |
1665 | vm_map_offset_t map_addr, | |
1666 | vm_map_size_t map_size) | |
1667 | { | |
1668 | kern_return_t kr; | |
1669 | uint64_t slice_off; | |
1670 | ||
1671 | /* | |
1672 | * The first APPLE_UNPROTECTED_HEADER_SIZE bytes (from offset 0 of | |
1673 | * this part of a Universal binary) are not protected... | |
1674 | * The rest needs to be "transformed". | |
1675 | */ | |
1676 | slice_off = file_off - macho_offset; | |
1677 | if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE && | |
1678 | slice_off + file_size <= APPLE_UNPROTECTED_HEADER_SIZE) { | |
1679 | /* it's all unprotected, nothing to do... */ | |
1680 | kr = KERN_SUCCESS; | |
1681 | } else { | |
1682 | if (slice_off <= APPLE_UNPROTECTED_HEADER_SIZE) { | |
1683 | /* | |
1684 | * We start mapping in the unprotected area. | |
1685 | * Skip the unprotected part... | |
1686 | */ | |
1687 | uint64_t delta_file; | |
1688 | vm_map_offset_t delta_map; | |
1689 | ||
1690 | delta_file = (uint64_t)APPLE_UNPROTECTED_HEADER_SIZE; | |
1691 | delta_file -= slice_off; | |
1692 | if (os_convert_overflow(delta_file, &delta_map)) { | |
1693 | return LOAD_BADMACHO; | |
1694 | } | |
1695 | if (os_add_overflow(map_addr, delta_map, &map_addr)) { | |
1696 | return LOAD_BADMACHO; | |
1697 | } | |
1698 | if (os_sub_overflow(map_size, delta_map, &map_size)) { | |
1699 | return LOAD_BADMACHO; | |
1700 | } | |
1701 | } | |
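/*
 * Worked example (illustrative values): with slice_off 0x1000,
 * the first 0x2000 bytes of this mapping fall inside the clear
 * header area, so map_addr is advanced by 0x2000 and map_size
 * shrunk by the same amount before the transform is applied to
 * the remainder.
 */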
1702 | /* ... transform the rest of the mapping. */ | |
1703 | struct pager_crypt_info crypt_info; | |
1704 | crypt_info.page_decrypt = dsmos_page_transform; | |
1705 | crypt_info.crypt_ops = NULL; | |
1706 | crypt_info.crypt_end = NULL; | |
1707 | #pragma unused(vp, macho_offset) | |
1708 | crypt_info.crypt_ops = (void *)0x2e69cf40; | |
1709 | vm_map_offset_t crypto_backing_offset; | |
1710 | crypto_backing_offset = -1; /* i.e. use map entry's offset */ | |
1711 | #if VM_MAP_DEBUG_APPLE_PROTECT | |
1712 | if (vm_map_debug_apple_protect) { | |
1713 | struct proc *p; | |
1714 | p = current_proc(); | |
1715 | printf("APPLE_PROTECT: %d[%s] map %p " | |
1716 | "[0x%llx:0x%llx] %s(%s)\n", | |
1717 | p->p_pid, p->p_comm, map, | |
1718 | (uint64_t) map_addr, | |
1719 | (uint64_t) (map_addr + map_size), | |
1720 | __FUNCTION__, vp->v_name); | |
1721 | } | |
1722 | #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ | |
1723 | ||
1724 | /* The DSMOS pager can only be used by apple signed code */ | |
1725 | struct cs_blob * blob = csvnode_get_blob(vp, file_off); | |
1726 | if (blob == NULL || !blob->csb_platform_binary || blob->csb_platform_path) { | |
1727 | return LOAD_FAILURE; | |
1728 | } | |
1729 | ||
1730 | kr = vm_map_apple_protected(map, | |
1731 | map_addr, | |
1732 | map_addr + map_size, | |
1733 | crypto_backing_offset, | |
1734 | &crypt_info, | |
1735 | CRYPTID_APP_ENCRYPTION); | |
1736 | } | |
1737 | ||
1738 | if (kr != KERN_SUCCESS) { | |
1739 | return LOAD_FAILURE; | |
1740 | } | |
1741 | return LOAD_SUCCESS; | |
1742 | } | |
1743 | #else /* CONFIG_CODE_DECRYPTION */ | |
1744 | static load_return_t | |
1745 | unprotect_dsmos_segment( | |
1746 | __unused uint64_t file_off, | |
1747 | __unused uint64_t file_size, | |
1748 | __unused struct vnode *vp, | |
1749 | __unused off_t macho_offset, | |
1750 | __unused vm_map_t map, | |
1751 | __unused vm_map_offset_t map_addr, | |
1752 | __unused vm_map_size_t map_size) | |
1753 | { | |
1754 | return LOAD_SUCCESS; | |
1755 | } | |
1756 | #endif /* CONFIG_CODE_DECRYPTION */ | |
1757 | ||
1758 | ||
1759 | /* | |
1760 | * map_segment: | |
1761 | * Maps a Mach-O segment, taking care of mis-alignment (wrt the system | |
1762 | * page size) issues. | |
1763 | * | |
1764 | * The mapping might result in 1, 2 or 3 map entries: | |
1765 | * 1. for the first page, which could overlap with the previous | |
1766 | * mapping, | |
1767 | * 2. for the center (if applicable), | |
1768 | * 3. for the last page, which could overlap with the next mapping. | |
1769 | * | |
1770 | * For each of those map entries, we might have to interpose a | |
1771 | * "fourk_pager" to deal with mis-alignment wrt the system page size, | |
1772 | * either in the mapping address and/or size or the file offset and/or | |
1773 | * size. | |
1774 | * The "fourk_pager" itself would be mapped with proper alignment | |
1775 | * wrt the system page size and would then be populated with the | |
1776 | * information about the intended mapping, with a "4KB" granularity. | |
1777 | */ | |
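/*
 * Worked example (illustrative, assuming a 16KB system page size
 * and a 4KB-aligned binary): mapping vm [0x7000, 0x21000) yields
 *   1. a fourk_pager entry for [0x7000, 0x8000),
 *   2. a middle entry for [0x8000, 0x20000), and
 *   3. a fourk_pager entry for [0x20000, 0x21000).
 */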
1778 | static kern_return_t | |
1779 | map_segment( | |
1780 | vm_map_t map, | |
1781 | vm_map_offset_t vm_start, | |
1782 | vm_map_offset_t vm_end, | |
1783 | memory_object_control_t control, | |
1784 | vm_map_offset_t file_start, | |
1785 | vm_map_offset_t file_end, | |
1786 | vm_prot_t initprot, | |
1787 | vm_prot_t maxprot, | |
1788 | load_result_t *result) | |
1789 | { | |
1790 | vm_map_offset_t cur_offset, cur_start, cur_end; | |
1791 | kern_return_t ret; | |
1792 | vm_map_offset_t effective_page_mask; | |
1793 | vm_map_kernel_flags_t vmk_flags, cur_vmk_flags; | |
1794 | ||
1795 | if (vm_end < vm_start || | |
1796 | file_end < file_start) { | |
1797 | return LOAD_BADMACHO; | |
1798 | } | |
1799 | if (vm_end == vm_start || | |
1800 | file_end == file_start) { | |
1801 | /* nothing to map... */ | |
1802 | return LOAD_SUCCESS; | |
1803 | } | |
1804 | ||
1805 | effective_page_mask = vm_map_page_mask(map); | |
1806 | ||
1807 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; | |
1808 | if (vm_map_page_aligned(vm_start, effective_page_mask) && | |
1809 | vm_map_page_aligned(vm_end, effective_page_mask) && | |
1810 | vm_map_page_aligned(file_start, effective_page_mask) && | |
1811 | vm_map_page_aligned(file_end, effective_page_mask)) { | |
1812 | /* all page-aligned and map-aligned: proceed */ | |
1813 | } else { | |
1814 | #if __arm64__ | |
1815 | /* use an intermediate "4K" pager */ | |
1816 | vmk_flags.vmkf_fourk = TRUE; | |
1817 | #else /* __arm64__ */ | |
1818 | panic("map_segment: unexpected mis-alignment " | |
1819 | "vm[0x%llx:0x%llx] file[0x%llx:0x%llx]\n", | |
1820 | (uint64_t) vm_start, | |
1821 | (uint64_t) vm_end, | |
1822 | (uint64_t) file_start, | |
1823 | (uint64_t) file_end); | |
1824 | #endif /* __arm64__ */ | |
1825 | } | |
1826 | ||
1827 | cur_offset = 0; | |
1828 | cur_start = vm_start; | |
1829 | cur_end = vm_start; | |
1830 | #if __arm64__ | |
1831 | if (!vm_map_page_aligned(vm_start, effective_page_mask)) { | |
1832 | /* one 4K pager for the 1st page */ | |
1833 | cur_end = vm_map_round_page(cur_start, effective_page_mask); | |
1834 | if (cur_end > vm_end) { | |
1835 | cur_end = vm_start + (file_end - file_start); | |
1836 | } | |
1837 | if (control != MEMORY_OBJECT_CONTROL_NULL) { | |
1838 | /* no copy-on-read for mapped binaries */ | |
1839 | vmk_flags.vmkf_no_copy_on_read = 1; | |
1840 | ret = vm_map_enter_mem_object_control( | |
1841 | map, | |
1842 | &cur_start, | |
1843 | cur_end - cur_start, | |
1844 | (mach_vm_offset_t)0, | |
1845 | VM_FLAGS_FIXED, | |
1846 | vmk_flags, | |
1847 | VM_KERN_MEMORY_NONE, | |
1848 | control, | |
1849 | file_start + cur_offset, | |
1850 | TRUE, /* copy */ | |
1851 | initprot, maxprot, | |
1852 | VM_INHERIT_DEFAULT); | |
1853 | } else { | |
1854 | ret = vm_map_enter_mem_object( | |
1855 | map, | |
1856 | &cur_start, | |
1857 | cur_end - cur_start, | |
1858 | (mach_vm_offset_t)0, | |
1859 | VM_FLAGS_FIXED, | |
1860 | vmk_flags, | |
1861 | VM_KERN_MEMORY_NONE, | |
1862 | IPC_PORT_NULL, | |
1863 | 0, /* offset */ | |
1864 | TRUE, /* copy */ | |
1865 | initprot, maxprot, | |
1866 | VM_INHERIT_DEFAULT); | |
1867 | } | |
1868 | if (ret != KERN_SUCCESS) { | |
1869 | return LOAD_NOSPACE; | |
1870 | } | |
1871 | cur_offset += cur_end - cur_start; | |
1872 | } | |
1873 | #endif /* __arm64__ */ | |
1874 | if (cur_end >= vm_start + (file_end - file_start)) { | |
1875 | /* all mapped: done */ | |
1876 | goto done; | |
1877 | } | |
1878 | if (vm_map_round_page(cur_end, effective_page_mask) >= | |
1879 | vm_map_trunc_page(vm_start + (file_end - file_start), | |
1880 | effective_page_mask)) { | |
1881 | /* no middle */ | |
1882 | } else { | |
1883 | cur_start = cur_end; | |
1884 | if ((vm_start & effective_page_mask) != | |
1885 | (file_start & effective_page_mask)) { | |
1886 | /* one 4K pager for the middle */ | |
1887 | cur_vmk_flags = vmk_flags; | |
1888 | } else { | |
1889 | /* regular mapping for the middle */ | |
1890 | cur_vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; | |
1891 | } | |
1892 | ||
1893 | #if !defined(XNU_TARGET_OS_OSX) | |
1894 | (void) result; | |
1895 | #else /* !defined(XNU_TARGET_OS_OSX) */ | |
1896 | /* | |
1897 | * This process doesn't have its new csflags (from | |
1898 | * the image being loaded) yet, so tell VM to override the | |
1899 | * current process's CS_ENFORCEMENT for this mapping. | |
1900 | */ | |
1901 | if (result->csflags & CS_ENFORCEMENT) { | |
1902 | cur_vmk_flags.vmkf_cs_enforcement = TRUE; | |
1903 | } else { | |
1904 | cur_vmk_flags.vmkf_cs_enforcement = FALSE; | |
1905 | } | |
1906 | cur_vmk_flags.vmkf_cs_enforcement_override = TRUE; | |
1907 | #endif /* !defined(XNU_TARGET_OS_OSX) */ | |
1908 | ||
1909 | if (result->is_cambria && (initprot & VM_PROT_EXECUTE) == VM_PROT_EXECUTE) { | |
1910 | cur_vmk_flags.vmkf_translated_allow_execute = TRUE; | |
1911 | } | |
1912 | ||
1913 | cur_end = vm_map_trunc_page(vm_start + (file_end - | |
1914 | file_start), | |
1915 | effective_page_mask); | |
1916 | if (control != MEMORY_OBJECT_CONTROL_NULL) { | |
1917 | /* no copy-on-read for mapped binaries */ | |
1918 | cur_vmk_flags.vmkf_no_copy_on_read = 1; | |
1919 | ret = vm_map_enter_mem_object_control( | |
1920 | map, | |
1921 | &cur_start, | |
1922 | cur_end - cur_start, | |
1923 | (mach_vm_offset_t)0, | |
1924 | VM_FLAGS_FIXED, | |
1925 | cur_vmk_flags, | |
1926 | VM_KERN_MEMORY_NONE, | |
1927 | control, | |
1928 | file_start + cur_offset, | |
1929 | TRUE, /* copy */ | |
1930 | initprot, maxprot, | |
1931 | VM_INHERIT_DEFAULT); | |
1932 | } else { | |
1933 | ret = vm_map_enter_mem_object( | |
1934 | map, | |
1935 | &cur_start, | |
1936 | cur_end - cur_start, | |
1937 | (mach_vm_offset_t)0, | |
1938 | VM_FLAGS_FIXED, | |
1939 | cur_vmk_flags, | |
1940 | VM_KERN_MEMORY_NONE, | |
1941 | IPC_PORT_NULL, | |
1942 | 0, /* offset */ | |
1943 | TRUE, /* copy */ | |
1944 | initprot, maxprot, | |
1945 | VM_INHERIT_DEFAULT); | |
1946 | } | |
1947 | if (ret != KERN_SUCCESS) { | |
1948 | return LOAD_NOSPACE; | |
1949 | } | |
1950 | cur_offset += cur_end - cur_start; | |
1951 | } | |
1952 | if (cur_end >= vm_start + (file_end - file_start)) { | |
1953 | /* all mapped: done */ | |
1954 | goto done; | |
1955 | } | |
1956 | cur_start = cur_end; | |
1957 | #if __arm64__ | |
1958 | if (!vm_map_page_aligned(vm_start + (file_end - file_start), | |
1959 | effective_page_mask)) { | |
1960 | /* one 4K pager for the last page */ | |
1961 | cur_end = vm_start + (file_end - file_start); | |
1962 | if (control != MEMORY_OBJECT_CONTROL_NULL) { | |
1963 | /* no copy-on-read for mapped binaries */ | |
1964 | vmk_flags.vmkf_no_copy_on_read = 1; | |
1965 | ret = vm_map_enter_mem_object_control( | |
1966 | map, | |
1967 | &cur_start, | |
1968 | cur_end - cur_start, | |
1969 | (mach_vm_offset_t)0, | |
1970 | VM_FLAGS_FIXED, | |
1971 | vmk_flags, | |
1972 | VM_KERN_MEMORY_NONE, | |
1973 | control, | |
1974 | file_start + cur_offset, | |
1975 | TRUE, /* copy */ | |
1976 | initprot, maxprot, | |
1977 | VM_INHERIT_DEFAULT); | |
1978 | } else { | |
1979 | ret = vm_map_enter_mem_object( | |
1980 | map, | |
1981 | &cur_start, | |
1982 | cur_end - cur_start, | |
1983 | (mach_vm_offset_t)0, | |
1984 | VM_FLAGS_FIXED, | |
1985 | vmk_flags, | |
1986 | VM_KERN_MEMORY_NONE, | |
1987 | IPC_PORT_NULL, | |
1988 | 0, /* offset */ | |
1989 | TRUE, /* copy */ | |
1990 | initprot, maxprot, | |
1991 | VM_INHERIT_DEFAULT); | |
1992 | } | |
1993 | if (ret != KERN_SUCCESS) { | |
1994 | return LOAD_NOSPACE; | |
1995 | } | |
1996 | cur_offset += cur_end - cur_start; | |
1997 | } | |
1998 | #endif /* __arm64__ */ | |
1999 | done: | |
2000 | assert(cur_end >= vm_start + (file_end - file_start)); | |
2001 | return LOAD_SUCCESS; | |
2002 | } | |
2003 | ||
2004 | static | |
2005 | load_return_t | |
2006 | load_segment( | |
2007 | struct load_command *lcp, | |
2008 | uint32_t filetype, | |
2009 | void * control, | |
2010 | off_t pager_offset, | |
2011 | off_t macho_size, | |
2012 | struct vnode *vp, | |
2013 | vm_map_t map, | |
2014 | int64_t slide, | |
2015 | load_result_t *result, | |
2016 | struct image_params *imgp) | |
2017 | { | |
2018 | struct segment_command_64 segment_command, *scp; | |
2019 | kern_return_t ret; | |
2020 | vm_map_size_t delta_size; | |
2021 | vm_prot_t initprot; | |
2022 | vm_prot_t maxprot; | |
2023 | size_t segment_command_size, total_section_size, | |
2024 | single_section_size; | |
2025 | uint64_t file_offset, file_size; | |
2026 | vm_map_offset_t vm_offset; | |
2027 | size_t vm_size; | |
2028 | vm_map_offset_t vm_start, vm_end, vm_end_aligned; | |
2029 | vm_map_offset_t file_start, file_end; | |
2030 | kern_return_t kr; | |
2031 | boolean_t verbose; | |
2032 | vm_map_size_t effective_page_size; | |
2033 | vm_map_offset_t effective_page_mask; | |
2034 | #if __arm64__ | |
2035 | vm_map_kernel_flags_t vmk_flags; | |
2036 | boolean_t fourk_align; | |
2037 | #endif /* __arm64__ */ | |
2038 | ||
2039 | (void)imgp; | |
2040 | ||
2041 | effective_page_size = vm_map_page_size(map); | |
2042 | effective_page_mask = vm_map_page_mask(map); | |
2043 | ||
2044 | verbose = FALSE; | |
2045 | if (LC_SEGMENT_64 == lcp->cmd) { | |
2046 | segment_command_size = sizeof(struct segment_command_64); | |
2047 | single_section_size = sizeof(struct section_64); | |
2048 | #if __arm64__ | |
2049 | /* 64-bit binary: should already be 16K-aligned */ | |
2050 | fourk_align = FALSE; | |
2051 | ||
2052 | if (vm_map_page_shift(map) == FOURK_PAGE_SHIFT && | |
2053 | PAGE_SHIFT != FOURK_PAGE_SHIFT) { | |
2054 | fourk_align = TRUE; | |
2055 | verbose = TRUE; | |
2056 | } | |
2057 | #endif /* __arm64__ */ | |
2058 | } else { | |
2059 | segment_command_size = sizeof(struct segment_command); | |
2060 | single_section_size = sizeof(struct section); | |
2061 | #if __arm64__ | |
2062 | /* 32-bit binary: might need 4K-alignment */ | |
2063 | if (effective_page_size != FOURK_PAGE_SIZE) { | |
2064 | /* not using 4K page size: need fourk_pager */ | |
2065 | fourk_align = TRUE; | |
2066 | verbose = TRUE; | |
2067 | } else { | |
2068 | /* using 4K page size: no need for re-alignment */ | |
2069 | fourk_align = FALSE; | |
2070 | } | |
2071 | #endif /* __arm64__ */ | |
2072 | } | |
2073 | if (lcp->cmdsize < segment_command_size) { | |
2074 | DEBUG4K_ERROR("LOAD_BADMACHO cmdsize %d < %zu\n", lcp->cmdsize, segment_command_size); | |
2075 | return LOAD_BADMACHO; | |
2076 | } | |
2077 | total_section_size = lcp->cmdsize - segment_command_size; | |
2078 | ||
2079 | if (LC_SEGMENT_64 == lcp->cmd) { | |
2080 | scp = (struct segment_command_64 *)lcp; | |
2081 | } else { | |
2082 | scp = &segment_command; | |
2083 | widen_segment_command((struct segment_command *)lcp, scp); | |
2084 | } | |
2085 | ||
2086 | if (verbose) { | |
2087 | MACHO_PRINTF(("+++ load_segment %s " | |
2088 | "vm[0x%llx:0x%llx] file[0x%llx:0x%llx] " | |
2089 | "prot %d/%d flags 0x%x\n", | |
2090 | scp->segname, | |
2091 | (uint64_t)(slide + scp->vmaddr), | |
2092 | (uint64_t)(slide + scp->vmaddr + scp->vmsize), | |
2093 | pager_offset + scp->fileoff, | |
2094 | pager_offset + scp->fileoff + scp->filesize, | |
2095 | scp->initprot, | |
2096 | scp->maxprot, | |
2097 | scp->flags)); | |
2098 | } | |
2099 | ||
2100 | /* | |
2101 | * Make sure what we get from the file is really ours (as specified | |
2102 | * by macho_size). | |
2103 | */ | |
2104 | if (scp->fileoff + scp->filesize < scp->fileoff || | |
2105 | scp->fileoff + scp->filesize > (uint64_t)macho_size) { | |
2106 | DEBUG4K_ERROR("LOAD_BADMACHO fileoff 0x%llx filesize 0x%llx macho_size 0x%llx\n", scp->fileoff, scp->filesize, (uint64_t)macho_size); | |
2107 | return LOAD_BADMACHO; | |
2108 | } | |
2109 | /* | |
2110 | * Ensure that the number of sections specified would fit | |
2111 | * within the load command size. | |
2112 | */ | |
2113 | if (total_section_size / single_section_size < scp->nsects) { | |
2114 | DEBUG4K_ERROR("LOAD_BADMACHO 0x%zx 0x%zx %d\n", total_section_size, single_section_size, scp->nsects); | |
2115 | return LOAD_BADMACHO; | |
2116 | } | |
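/*
 * Note: phrasing the check as a division rather than as
 * "scp->nsects * single_section_size > total_section_size"
 * sidesteps any multiplication overflow on a hostile nsects.
 */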
2117 | /* | |
2118 | * Make sure the segment is page-aligned in the file. | |
2119 | */ | |
2120 | if (os_add_overflow(pager_offset, scp->fileoff, &file_offset)) { | |
2121 | DEBUG4K_ERROR("LOAD_BADMACHO file_offset: 0x%llx + 0x%llx\n", pager_offset, scp->fileoff); | |
2122 | return LOAD_BADMACHO; | |
2123 | } | |
2124 | file_size = scp->filesize; | |
2125 | #if __arm64__ | |
2126 | if (fourk_align) { | |
2127 | if ((file_offset & FOURK_PAGE_MASK) != 0) { | |
2128 | /* | |
2129 | * we can't mmap() it if it's not at least 4KB-aligned | |
2130 | * in the file | |
2131 | */ | |
2132 | DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset); | |
2133 | return LOAD_BADMACHO; | |
2134 | } | |
2135 | } else | |
2136 | #endif /* __arm64__ */ | |
2137 | if ((file_offset & PAGE_MASK_64) != 0 || | |
2138 | /* we can't mmap() it if it's not page-aligned in the file */ | |
2139 | (file_offset & vm_map_page_mask(map)) != 0) { | |
2140 | /* | |
2141 | * The 1st test would have failed if the system's page size | |
2142 | * was what this process believes is the page size, so let's | |
2143 | * fail here too for the sake of consistency. | |
2144 | */ | |
2145 | DEBUG4K_ERROR("LOAD_BADMACHO file_offset 0x%llx\n", file_offset); | |
2146 | return LOAD_BADMACHO; | |
2147 | } | |
2148 | ||
2149 | /* | |
2150 | * If we have a code signature attached for this slice | |
2151 | * require that the segments are within the signed part | |
2152 | * of the file. | |
2153 | */ | |
2154 | if (result->cs_end_offset && | |
2155 | result->cs_end_offset < (off_t)scp->fileoff && | |
2156 | result->cs_end_offset - scp->fileoff < scp->filesize) { | |
2157 | if (cs_debug) { | |
2158 | printf("section outside code signature\n"); | |
2159 | } | |
2160 | DEBUG4K_ERROR("LOAD_BADMACHO end_offset 0x%llx fileoff 0x%llx filesize 0x%llx\n", result->cs_end_offset, scp->fileoff, scp->filesize); | |
2161 | return LOAD_BADMACHO; | |
2162 | } | |
2163 | ||
2164 | if (os_add_overflow(scp->vmaddr, slide, &vm_offset)) { | |
2165 | if (cs_debug) { | |
2166 | printf("vmaddr too large\n"); | |
2167 | } | |
2168 | DEBUG4K_ERROR("LOAD_BADMACHO vmaddr 0x%llx slide 0x%llx vm_offset 0x%llx\n", scp->vmaddr, slide, (uint64_t)vm_offset); | |
2169 | return LOAD_BADMACHO; | |
2170 | } | |
2171 | ||
2172 | if (scp->vmsize > SIZE_MAX) { | |
2173 | DEBUG4K_ERROR("LOAD_BADMACHO vmsize 0x%llx\n", scp->vmsize); | |
2174 | return LOAD_BADMACHO; | |
2175 | } | |
2176 | ||
2177 | vm_size = (size_t)scp->vmsize; | |
2178 | ||
2179 | if (vm_size == 0) { | |
2180 | return LOAD_SUCCESS; | |
2181 | } | |
2182 | if (scp->vmaddr == 0 && | |
2183 | file_size == 0 && | |
2184 | vm_size != 0 && | |
2185 | (scp->initprot & VM_PROT_ALL) == VM_PROT_NONE && | |
2186 | (scp->maxprot & VM_PROT_ALL) == VM_PROT_NONE) { | |
2187 | if (map == VM_MAP_NULL) { | |
2188 | return LOAD_SUCCESS; | |
2189 | } | |
2190 | ||
2191 | /* | |
2192 | * For PIE, extend page zero rather than moving it. Extending | |
2193 | * page zero keeps early allocations from falling predictably | |
2194 | * between the end of page zero and the beginning of the first | |
2195 | * slid segment. | |
2196 | */ | |
2197 | /* | |
2198 | * This is a "page zero" segment: it starts at address 0, | |
2199 | * is not mapped from the binary file and is not accessible. | |
2200 | * User-space should never be able to access that memory, so | |
2201 | * make it completely off limits by raising the VM map's | |
2202 | * minimum offset. | |
2203 | */ | |
2204 | vm_end = (vm_map_offset_t)(vm_offset + vm_size); | |
2205 | if (vm_end < vm_offset) { | |
2206 | DEBUG4K_ERROR("LOAD_BADMACHO vm_end 0x%llx vm_offset 0x%llx vm_size 0x%llx\n", (uint64_t)vm_end, (uint64_t)vm_offset, (uint64_t)vm_size); | |
2207 | return LOAD_BADMACHO; | |
2208 | } | |
2209 | ||
2210 | if (verbose) { | |
2211 | MACHO_PRINTF(("++++++ load_segment: " | |
2212 | "page_zero up to 0x%llx\n", | |
2213 | (uint64_t) vm_end)); | |
2214 | } | |
2215 | #if __arm64__ | |
2216 | if (fourk_align) { | |
2217 | /* raise min_offset as much as page-alignment allows */ | |
2218 | vm_end_aligned = vm_map_trunc_page(vm_end, | |
2219 | effective_page_mask); | |
2220 | } else | |
2221 | #endif /* __arm64__ */ | |
2222 | { | |
2223 | vm_end = vm_map_round_page(vm_end, | |
2224 | PAGE_MASK_64); | |
2225 | vm_end_aligned = vm_end; | |
2226 | } | |
2227 | ret = vm_map_raise_min_offset(map, | |
2228 | vm_end_aligned); | |
2229 | #if __arm64__ | |
2230 | if (ret == 0 && | |
2231 | vm_end > vm_end_aligned) { | |
2232 | /* use fourk_pager to map the rest of pagezero */ | |
2233 | assert(fourk_align); | |
2234 | vmk_flags = VM_MAP_KERNEL_FLAGS_NONE; | |
2235 | vmk_flags.vmkf_fourk = TRUE; | |
2236 | ret = vm_map_enter_mem_object( | |
2237 | map, | |
2238 | &vm_end_aligned, | |
2239 | vm_end - vm_end_aligned, | |
2240 | (mach_vm_offset_t) 0, /* mask */ | |
2241 | VM_FLAGS_FIXED, | |
2242 | vmk_flags, | |
2243 | VM_KERN_MEMORY_NONE, | |
2244 | IPC_PORT_NULL, | |
2245 | 0, | |
2246 | FALSE, /* copy */ | |
2247 | (scp->initprot & VM_PROT_ALL), | |
2248 | (scp->maxprot & VM_PROT_ALL), | |
2249 | VM_INHERIT_DEFAULT); | |
2250 | } | |
2251 | #endif /* __arm64__ */ | |
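/*
 * Illustrative example (16KB system pages, 4KB-aligned binary):
 * a page zero ending at 0x5000 lets vm_map_raise_min_offset()
 * cover [0, 0x4000) only; the leftover [0x4000, 0x5000) is then
 * blocked off with an inaccessible fourk_pager mapping.
 */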
2252 | ||
2253 | if (ret != KERN_SUCCESS) { | |
2254 | DEBUG4K_ERROR("LOAD_FAILURE ret 0x%x\n", ret); | |
2255 | return LOAD_FAILURE; | |
2256 | } | |
2257 | return LOAD_SUCCESS; | |
2258 | } else { | |
2259 | #if !defined(XNU_TARGET_OS_OSX) | |
2260 | /* not PAGEZERO: should not be mapped at address 0 */ | |
2261 | if (filetype != MH_DYLINKER && scp->vmaddr == 0) { | |
2262 | DEBUG4K_ERROR("LOAD_BADMACHO filetype %d vmaddr 0x%llx\n", filetype, scp->vmaddr); | |
2263 | return LOAD_BADMACHO; | |
2264 | } | |
2265 | #endif /* !defined(XNU_TARGET_OS_OSX) */ | |
2266 | } | |
2267 | ||
2268 | #if __arm64__ | |
2269 | if (fourk_align) { | |
2270 | /* 4K-align */ | |
2271 | file_start = vm_map_trunc_page(file_offset, | |
2272 | FOURK_PAGE_MASK); | |
2273 | file_end = vm_map_round_page(file_offset + file_size, | |
2274 | FOURK_PAGE_MASK); | |
2275 | vm_start = vm_map_trunc_page(vm_offset, | |
2276 | FOURK_PAGE_MASK); | |
2277 | vm_end = vm_map_round_page(vm_offset + vm_size, | |
2278 | FOURK_PAGE_MASK); | |
2279 | ||
2280 | if (file_offset - file_start > FOURK_PAGE_MASK || | |
2281 | file_end - file_offset - file_size > FOURK_PAGE_MASK) { | |
2282 | DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap " | |
2283 | "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n", | |
2284 | file_offset, | |
2285 | file_offset + file_size, | |
2286 | (uint64_t) file_start, | |
2287 | (uint64_t) file_end); | |
2288 | return LOAD_BADMACHO; | |
2289 | } | |
2290 | ||
2291 | if (!strncmp(scp->segname, "__LINKEDIT", 11) && | |
2292 | page_aligned(file_start) && | |
2293 | vm_map_page_aligned(file_start, vm_map_page_mask(map)) && | |
2294 | page_aligned(vm_start) && | |
2295 | vm_map_page_aligned(vm_start, vm_map_page_mask(map))) { | |
2296 | /* XXX last segment: ignore mis-aligned tail */ | |
2297 | file_end = vm_map_round_page(file_end, | |
2298 | effective_page_mask); | |
2299 | vm_end = vm_map_round_page(vm_end, | |
2300 | effective_page_mask); | |
2301 | } | |
2302 | } else | |
2303 | #endif /* __arm64__ */ | |
2304 | { | |
2305 | file_start = vm_map_trunc_page(file_offset, | |
2306 | effective_page_mask); | |
2307 | file_end = vm_map_round_page(file_offset + file_size, | |
2308 | effective_page_mask); | |
2309 | vm_start = vm_map_trunc_page(vm_offset, | |
2310 | effective_page_mask); | |
2311 | vm_end = vm_map_round_page(vm_offset + vm_size, | |
2312 | effective_page_mask); | |
2313 | ||
2314 | if (file_offset - file_start > effective_page_mask || | |
2315 | file_end - file_offset - file_size > effective_page_mask) { | |
2316 | DEBUG4K_ERROR("LOAD_BADMACHO file_start / file_size wrap " | |
2317 | "[0x%llx:0x%llx] -> [0x%llx:0x%llx]\n", | |
2318 | file_offset, | |
2319 | file_offset + file_size, | |
2320 | (uint64_t) file_start, | |
2321 | (uint64_t) file_end); | |
2322 | return LOAD_BADMACHO; | |
2323 | } | |
2324 | } | |
2325 | ||
2326 | if (vm_start < result->min_vm_addr) { | |
2327 | result->min_vm_addr = vm_start; | |
2328 | } | |
2329 | if (vm_end > result->max_vm_addr) { | |
2330 | result->max_vm_addr = vm_end; | |
2331 | } | |
2332 | ||
2333 | if (map == VM_MAP_NULL) { | |
2334 | return LOAD_SUCCESS; | |
2335 | } | |
2336 | ||
2337 | if (vm_size > 0) { | |
2338 | initprot = (scp->initprot) & VM_PROT_ALL; | |
2339 | maxprot = (scp->maxprot) & VM_PROT_ALL; | |
2340 | /* | |
2341 | * Map a copy of the file into the address space. | |
2342 | */ | |
2343 | if (verbose) { | |
2344 | MACHO_PRINTF(("++++++ load_segment: " | |
2345 | "mapping at vm [0x%llx:0x%llx] of " | |
2346 | "file [0x%llx:0x%llx]\n", | |
2347 | (uint64_t) vm_start, | |
2348 | (uint64_t) vm_end, | |
2349 | (uint64_t) file_start, | |
2350 | (uint64_t) file_end)); | |
2351 | } | |
2352 | ret = map_segment(map, | |
2353 | vm_start, | |
2354 | vm_end, | |
2355 | control, | |
2356 | file_start, | |
2357 | file_end, | |
2358 | initprot, | |
2359 | maxprot, | |
2360 | result); | |
2361 | if (ret) { | |
2362 | DEBUG4K_ERROR("LOAD_NOSPACE start 0x%llx end 0x%llx ret 0x%x\n", (uint64_t)vm_start, (uint64_t)vm_end, ret); | |
2363 | return LOAD_NOSPACE; | |
2364 | } | |
2365 | ||
2366 | #if FIXME | |
2367 | /* | |
2368 | * If the file didn't end on a page boundary, | |
2369 | * we need to zero the leftover. | |
2370 | */ | |
2371 | delta_size = map_size - scp->filesize; | |
2372 | if (delta_size > 0) { | |
2373 | mach_vm_offset_t tmp; | |
2374 | ||
2375 | ret = mach_vm_allocate_kernel(kernel_map, &tmp, delta_size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_BSD); | |
2376 | if (ret != KERN_SUCCESS) { | |
2377 | DEBUG4K_ERROR("LOAD_RESOURCE delta_size 0x%llx ret 0x%x\n", delta_size, ret); | |
2378 | return LOAD_RESOURCE; | |
2379 | } | |
2380 | ||
2381 | if (copyout(tmp, map_addr + scp->filesize, | |
2382 | delta_size)) { | |
2383 | (void) mach_vm_deallocate( | |
2384 | kernel_map, tmp, delta_size); | |
2385 | DEBUG4K_ERROR("LOAD_FAILURE copyout 0x%llx 0x%llx\n", map_addr + scp->filesize, delta_size); | |
2386 | return LOAD_FAILURE; | |
2387 | } | |
2388 | ||
2389 | (void) mach_vm_deallocate(kernel_map, tmp, delta_size); | |
2390 | } | |
2391 | #endif /* FIXME */ | |
2392 | } | |
2393 | ||
2394 | /* | |
2395 | * If the virtual size of the segment is greater | |
2396 | * than the size from the file, we need to allocate | |
2397 | * zero fill memory for the rest. | |
2398 | */ | |
2399 | if ((vm_end - vm_start) > (file_end - file_start)) { | |
2400 | delta_size = (vm_end - vm_start) - (file_end - file_start); | |
2401 | } else { | |
2402 | delta_size = 0; | |
2403 | } | |
2404 | if (delta_size > 0) { | |
2405 | vm_map_offset_t tmp_start; | |
2406 | vm_map_offset_t tmp_end; | |
2407 | ||
2408 | if (os_add_overflow(vm_start, file_end - file_start, &tmp_start)) { | |
2409 | DEBUG4K_ERROR("LOAD_NOSPACE tmp_start: 0x%llx + 0x%llx\n", (uint64_t)vm_start, (uint64_t)(file_end - file_start)); | |
2410 | return LOAD_NOSPACE; | |
2411 | } | |
2412 | ||
2413 | if (os_add_overflow(tmp_start, delta_size, &tmp_end)) { | |
2414 | DEBUG4K_ERROR("LOAD_NOSPACE tmp_end: 0x%llx + 0x%llx\n", (uint64_t)tmp_start, (uint64_t)delta_size); | |
2415 | return LOAD_NOSPACE; | |
2416 | } | |
2417 | ||
2418 | if (verbose) { | |
2419 | MACHO_PRINTF(("++++++ load_segment: " | |
2420 | "delta mapping vm [0x%llx:0x%llx]\n", | |
2421 | (uint64_t) tmp_start, | |
2422 | (uint64_t) tmp_end)); | |
2423 | } | |
2424 | kr = map_segment(map, | |
2425 | tmp_start, | |
2426 | tmp_end, | |
2427 | MEMORY_OBJECT_CONTROL_NULL, | |
2428 | 0, | |
2429 | delta_size, | |
2430 | scp->initprot, | |
2431 | scp->maxprot, | |
2432 | result); | |
2433 | if (kr != KERN_SUCCESS) { | |
2434 | DEBUG4K_ERROR("LOAD_NOSPACE 0x%llx 0x%llx kr 0x%x\n", (unsigned long long)tmp_start, (uint64_t)delta_size, kr); | |
2435 | return LOAD_NOSPACE; | |
2436 | } | |
2437 | } | |
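/*
 * Example (illustrative): a segment whose page-rounded file span
 * is 0x2000 bytes but whose vm span is 0x4000 bytes gets its
 * trailing 0x2000 bytes mapped as anonymous zero-fill rather
 * than from the file.
 */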
2438 | ||
2439 | if ((scp->fileoff == 0) && (scp->filesize != 0)) { | |
2440 | result->mach_header = vm_offset; | |
2441 | } | |
2442 | ||
2443 | if (scp->flags & SG_PROTECTED_VERSION_1) { | |
2444 | ret = unprotect_dsmos_segment(file_start, | |
2445 | file_end - file_start, | |
2446 | vp, | |
2447 | pager_offset, | |
2448 | map, | |
2449 | vm_start, | |
2450 | vm_end - vm_start); | |
2451 | if (ret != LOAD_SUCCESS) { | |
2452 | DEBUG4K_ERROR("unprotect 0x%llx 0x%llx ret %d \n", (uint64_t)vm_start, (uint64_t)vm_end, ret); | |
2453 | return ret; | |
2454 | } | |
2455 | } else { | |
2456 | ret = LOAD_SUCCESS; | |
2457 | } | |
2458 | ||
2459 | if (LOAD_SUCCESS == ret && | |
2460 | filetype == MH_DYLINKER && | |
2461 | result->all_image_info_addr == MACH_VM_MIN_ADDRESS) { | |
2462 | note_all_image_info_section(scp, | |
2463 | LC_SEGMENT_64 == lcp->cmd, | |
2464 | single_section_size, | |
2465 | ((const char *)lcp + | |
2466 | segment_command_size), | |
2467 | slide, | |
2468 | result); | |
2469 | } | |
2470 | ||
2471 | if (result->entry_point != MACH_VM_MIN_ADDRESS) { | |
2472 | if ((result->entry_point >= vm_offset) && (result->entry_point < (vm_offset + vm_size))) { | |
2473 | if ((scp->initprot & (VM_PROT_READ | VM_PROT_EXECUTE)) == (VM_PROT_READ | VM_PROT_EXECUTE)) { | |
2474 | result->validentry = 1; | |
2475 | } else { | |
2476 | /* right range but wrong protections, unset if previously validated */ | |
2477 | result->validentry = 0; | |
2478 | } | |
2479 | } | |
2480 | } | |
2481 | ||
2482 | if (ret != LOAD_SUCCESS && verbose) { | |
2483 | DEBUG4K_ERROR("ret %d\n", ret); | |
2484 | } | |
2485 | return ret; | |
2486 | } | |
2487 | ||
2488 | static | |
2489 | load_return_t | |
2490 | load_uuid( | |
2491 | struct uuid_command *uulp, | |
2492 | char *command_end, | |
2493 | load_result_t *result | |
2494 | ) | |
2495 | { | |
2496 | /* | |
2497 | * We need to check the following for this command: | |
2498 | * - The command size should be at least the size of struct uuid_command | |
2499 | * - The UUID part of the command should be completely within the Mach-O header | |
2500 | */ | |
2501 | ||
2502 | if ((uulp->cmdsize < sizeof(struct uuid_command)) || | |
2503 | (((char *)uulp + sizeof(struct uuid_command)) > command_end)) { | |
2504 | return LOAD_BADMACHO; | |
2505 | } | |
2506 | ||
2507 | memcpy(&result->uuid[0], &uulp->uuid[0], sizeof(result->uuid)); | |
2508 | return LOAD_SUCCESS; | |
2509 | } | |
2510 | ||
2511 | static | |
2512 | load_return_t | |
2513 | load_version( | |
2514 | struct version_min_command *vmc, | |
2515 | boolean_t *found_version_cmd, | |
2516 | int ip_flags __unused, | |
2517 | load_result_t *result | |
2518 | ) | |
2519 | { | |
2520 | uint32_t platform = 0; | |
2521 | uint32_t sdk; | |
2522 | uint32_t min_sdk; | |
2523 | ||
2524 | if (vmc->cmdsize < sizeof(*vmc)) { | |
2525 | return LOAD_BADMACHO; | |
2526 | } | |
2527 | if (*found_version_cmd == TRUE) { | |
2528 | return LOAD_BADMACHO; | |
2529 | } | |
2530 | *found_version_cmd = TRUE; | |
2531 | sdk = vmc->sdk; | |
2532 | min_sdk = vmc->version; | |
2533 | switch (vmc->cmd) { | |
2534 | case LC_VERSION_MIN_MACOSX: | |
2535 | platform = PLATFORM_MACOS; | |
2536 | break; | |
2537 | #if __x86_64__ /* __x86_64__ */ | |
2538 | case LC_VERSION_MIN_IPHONEOS: | |
2539 | platform = PLATFORM_IOSSIMULATOR; | |
2540 | break; | |
2541 | case LC_VERSION_MIN_WATCHOS: | |
2542 | platform = PLATFORM_WATCHOSSIMULATOR; | |
2543 | break; | |
2544 | case LC_VERSION_MIN_TVOS: | |
2545 | platform = PLATFORM_TVOSSIMULATOR; | |
2546 | break; | |
2547 | #else | |
2548 | case LC_VERSION_MIN_IPHONEOS: { | |
2549 | #if __arm64__ | |
2550 | extern int legacy_footprint_entitlement_mode; | |
2551 | if (vmc->sdk < (12 << 16)) { | |
2552 | /* app built with a pre-iOS12 SDK: apply legacy footprint mitigation */ | |
2553 | result->legacy_footprint = TRUE; | |
2554 | } | |
2555 | #endif /* __arm64__ */ | |
2556 | platform = PLATFORM_IOS; | |
2557 | break; | |
2558 | } | |
2559 | case LC_VERSION_MIN_WATCHOS: | |
2560 | platform = PLATFORM_WATCHOS; | |
2561 | break; | |
2562 | case LC_VERSION_MIN_TVOS: | |
2563 | platform = PLATFORM_TVOS; | |
2564 | break; | |
2565 | #endif /* __x86_64__ */ | |
2566 | /* All LC_VERSION_MIN_* load commands are legacy and we will not be adding any more */ | |
2567 | default: | |
2568 | sdk = (uint32_t)-1; | |
2569 | min_sdk = (uint32_t)-1; | |
2570 | __builtin_unreachable(); | |
2571 | } | |
2572 | result->ip_platform = platform; | |
2573 | result->lr_min_sdk = min_sdk; | |
2574 | result->lr_sdk = sdk; | |
2575 | return LOAD_SUCCESS; | |
2576 | } | |
2577 | ||
2578 | static | |
2579 | load_return_t | |
2580 | load_main( | |
2581 | struct entry_point_command *epc, | |
2582 | thread_t thread, | |
2583 | int64_t slide, | |
2584 | load_result_t *result | |
2585 | ) | |
2586 | { | |
2587 | mach_vm_offset_t addr; | |
2588 | kern_return_t ret; | |
2589 | ||
2590 | if (epc->cmdsize < sizeof(*epc)) { | |
2591 | return LOAD_BADMACHO; | |
2592 | } | |
2593 | if (result->thread_count != 0) { | |
2594 | return LOAD_FAILURE; | |
2595 | } | |
2596 | ||
2597 | if (thread == THREAD_NULL) { | |
2598 | return LOAD_SUCCESS; | |
2599 | } | |
2600 | ||
2601 | /* | |
2602 | * LC_MAIN specifies stack size but not location. | |
2603 | * Add guard page to allocation size (MAXSSIZ includes guard page). | |
2604 | */ | |
2605 | if (epc->stacksize) { | |
2606 | if (os_add_overflow(epc->stacksize, 4 * PAGE_SIZE, &result->user_stack_size)) { | |
2607 | /* | |
2608 | * We are going to immediately throw away this result, but we want | |
2609 | * to make sure we aren't loading a value that is dangerously | |
2610 | * close to overflowing, since it will have a guard page added to it | |
2611 | * and be rounded to page boundaries. | |
2612 | */ | |
2613 | return LOAD_BADMACHO; | |
2614 | } | |
2615 | result->user_stack_size = epc->stacksize; | |
2616 | if (os_add_overflow(epc->stacksize, PAGE_SIZE, &result->user_stack_alloc_size)) { | |
2617 | return LOAD_BADMACHO; | |
2618 | } | |
2619 | result->custom_stack = TRUE; | |
2620 | } else { | |
2621 | result->user_stack_alloc_size = MAXSSIZ; | |
2622 | } | |
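/*
 * At this point user_stack_alloc_size is either the requested
 * stack size plus one guard page, or MAXSSIZ, which already
 * includes its guard page.
 */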
2623 | ||
2624 | /* use default location for stack */ | |
2625 | ret = thread_userstackdefault(&addr, result->is_64bit_addr); | |
2626 | if (ret != KERN_SUCCESS) { | |
2627 | return LOAD_FAILURE; | |
2628 | } | |
2629 | ||
2630 | /* The stack slides down from the default location */ | |
2631 | result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide); | |
2632 | ||
2633 | if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { | |
2634 | /* Already processed LC_MAIN or LC_UNIXTHREAD */ | |
2635 | return LOAD_FAILURE; | |
2636 | } | |
2637 | ||
2638 | /* kernel does *not* use entryoff from LC_MAIN. Dyld uses it. */ | |
2639 | result->needs_dynlinker = TRUE; | |
2640 | result->using_lcmain = TRUE; | |
2641 | ||
2642 | ret = thread_state_initialize( thread ); | |
2643 | if (ret != KERN_SUCCESS) { | |
2644 | return LOAD_FAILURE; | |
2645 | } | |
2646 | ||
2647 | result->unixproc = TRUE; | |
2648 | result->thread_count++; | |
2649 | ||
2650 | return LOAD_SUCCESS; | |
2651 | } | |
2652 | ||
2653 | static | |
2654 | load_return_t | |
2655 | setup_driver_main( | |
2656 | thread_t thread, | |
2657 | int64_t slide, | |
2658 | load_result_t *result | |
2659 | ) | |
2660 | { | |
2661 | mach_vm_offset_t addr; | |
2662 | kern_return_t ret; | |
2663 | ||
2664 | /* Driver binaries have no LC_MAIN, use defaults */ | |
2665 | ||
2666 | if (thread == THREAD_NULL) { | |
2667 | return LOAD_SUCCESS; | |
2668 | } | |
2669 | ||
2670 | result->user_stack_alloc_size = MAXSSIZ; | |
2671 | ||
2672 | /* use default location for stack */ | |
2673 | ret = thread_userstackdefault(&addr, result->is_64bit_addr); | |
2674 | if (ret != KERN_SUCCESS) { | |
2675 | return LOAD_FAILURE; | |
2676 | } | |
2677 | ||
2678 | /* The stack slides down from the default location */ | |
2679 | result->user_stack = (user_addr_t)addr; | |
2680 | result->user_stack -= slide; | |
2681 | ||
2682 | if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { | |
2683 | /* Already processed LC_MAIN or LC_UNIXTHREAD */ | |
2684 | return LOAD_FAILURE; | |
2685 | } | |
2686 | ||
2687 | result->needs_dynlinker = TRUE; | |
2688 | ||
2689 | ret = thread_state_initialize( thread ); | |
2690 | if (ret != KERN_SUCCESS) { | |
2691 | return LOAD_FAILURE; | |
2692 | } | |
2693 | ||
2694 | result->unixproc = TRUE; | |
2695 | result->thread_count++; | |
2696 | ||
2697 | return LOAD_SUCCESS; | |
2698 | } | |
2699 | ||
2700 | static | |
2701 | load_return_t | |
2702 | load_unixthread( | |
2703 | struct thread_command *tcp, | |
2704 | thread_t thread, | |
2705 | int64_t slide, | |
2706 | boolean_t is_x86_64_compat_binary, | |
2707 | load_result_t *result | |
2708 | ) | |
2709 | { | |
2710 | load_return_t ret; | |
2711 | int customstack = 0; | |
2712 | mach_vm_offset_t addr; | |
2713 | if (tcp->cmdsize < sizeof(*tcp)) { | |
2714 | return LOAD_BADMACHO; | |
2715 | } | |
2716 | if (result->thread_count != 0) { | |
2717 | return LOAD_FAILURE; | |
2718 | } | |
2719 | ||
2720 | if (thread == THREAD_NULL) { | |
2721 | return LOAD_SUCCESS; | |
2722 | } | |
2723 | ||
2724 | ret = load_threadstack(thread, | |
2725 | (uint32_t *)(((vm_offset_t)tcp) + | |
2726 | sizeof(struct thread_command)), | |
2727 | tcp->cmdsize - sizeof(struct thread_command), | |
2728 | &addr, &customstack, is_x86_64_compat_binary, result); | |
2729 | if (ret != LOAD_SUCCESS) { | |
2730 | return ret; | |
2731 | } | |
2732 | ||
2733 | /* LC_UNIXTHREAD optionally specifies stack size and location */ | |
2734 | ||
2735 | if (customstack) { | |
2736 | result->custom_stack = TRUE; | |
2737 | } else { | |
2738 | result->user_stack_alloc_size = MAXSSIZ; | |
2739 | } | |
2740 | ||
2741 | /* The stack slides down from the default location */ | |
2742 | result->user_stack = (user_addr_t)mach_vm_trunc_page((user_addr_t)addr - slide); | |
2743 | ||
2744 | { | |
2745 | ret = load_threadentry(thread, | |
2746 | (uint32_t *)(((vm_offset_t)tcp) + | |
2747 | sizeof(struct thread_command)), | |
2748 | tcp->cmdsize - sizeof(struct thread_command), | |
2749 | &addr); | |
2750 | if (ret != LOAD_SUCCESS) { | |
2751 | return ret; | |
2752 | } | |
2753 | ||
2754 | if (result->using_lcmain || result->entry_point != MACH_VM_MIN_ADDRESS) { | |
2755 | /* Already processed LC_MAIN or LC_UNIXTHREAD */ | |
2756 | return LOAD_FAILURE; | |
2757 | } | |
2758 | ||
2759 | result->entry_point = (user_addr_t)addr; | |
2760 | result->entry_point += slide; | |
2761 | ||
2762 | ret = load_threadstate(thread, | |
2763 | (uint32_t *)(((vm_offset_t)tcp) + sizeof(struct thread_command)), | |
2764 | tcp->cmdsize - sizeof(struct thread_command), | |
2765 | result); | |
2766 | if (ret != LOAD_SUCCESS) { | |
2767 | return ret; | |
2768 | } | |
2769 | } | |
2770 | ||
2771 | result->unixproc = TRUE; | |
2772 | result->thread_count++; | |
2773 | ||
2774 | return LOAD_SUCCESS; | |
2775 | } | |
2776 | ||
2777 | static | |
2778 | load_return_t | |
2779 | load_threadstate( | |
2780 | thread_t thread, | |
2781 | uint32_t *ts, | |
2782 | uint32_t total_size, | |
2783 | load_result_t *result | |
2784 | ) | |
2785 | { | |
2786 | uint32_t size; | |
2787 | int flavor; | |
2788 | uint32_t thread_size; | |
2789 | uint32_t *local_ts = NULL; | |
2790 | uint32_t local_ts_size = 0; | |
2791 | int ret; | |
2792 | ||
2793 | (void)thread; | |
2794 | ||
2795 | if (total_size > 0) { | |
2796 | local_ts_size = total_size; | |
2797 | local_ts = kalloc(local_ts_size); | |
2798 | if (local_ts == NULL) { | |
2799 | return LOAD_FAILURE; | |
2800 | } | |
2801 | memcpy(local_ts, ts, local_ts_size); | |
2802 | ts = local_ts; | |
2803 | } | |
2804 | ||
2805 | /* | |
2806 | * Validate the new thread state; iterate through the state flavors in | |
2807 | * the Mach-O file. | |
2808 | * XXX: we should validate the machine state here, to avoid failing at | |
2809 | * activation time where we can't bail out cleanly. | |
2810 | */ | |
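/*
 * Payload layout: a sequence of
 *   { uint32_t flavor; uint32_t count; uint32_t state[count]; }
 * records, which is why each iteration below consumes
 * (count + 2) * sizeof(uint32_t) bytes.
 */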
2811 | while (total_size > 0) { | |
2812 | if (total_size < 2 * sizeof(uint32_t)) { | |
2813 | return LOAD_BADMACHO; | |
2814 | } | |
2815 | ||
2816 | flavor = *ts++; | |
2817 | size = *ts++; | |
2818 | ||
2819 | if (os_add_and_mul_overflow(size, 2, sizeof(uint32_t), &thread_size) || | |
2820 | os_sub_overflow(total_size, thread_size, &total_size)) { | |
2821 | ret = LOAD_BADMACHO; | |
2822 | goto bad; | |
2823 | } | |
2824 | ||
2825 | ts += size; /* ts is a (uint32_t *) */ | |
2826 | } | |
2827 | ||
2828 | result->threadstate = local_ts; | |
2829 | result->threadstate_sz = local_ts_size; | |
2830 | return LOAD_SUCCESS; | |
2831 | ||
2832 | bad: | |
2833 | if (local_ts) { | |
2834 | kfree(local_ts, local_ts_size); | |
2835 | } | |
2836 | return ret; | |
2837 | } | |
2838 | ||
2839 | ||
2840 | static | |
2841 | load_return_t | |
2842 | load_threadstack( | |
2843 | thread_t thread, | |
2844 | uint32_t *ts, | |
2845 | uint32_t total_size, | |
2846 | mach_vm_offset_t *user_stack, | |
2847 | int *customstack, | |
2848 | __unused boolean_t is_x86_64_compat_binary, | |
2849 | load_result_t *result | |
2850 | ) | |
2851 | { | |
2852 | kern_return_t ret; | |
2853 | uint32_t size; | |
2854 | int flavor; | |
2855 | uint32_t stack_size; | |
2856 | ||
2857 | if (total_size == 0) { | |
2858 | return LOAD_BADMACHO; | |
2859 | } | |
2860 | ||
2861 | while (total_size > 0) { | |
2862 | if (total_size < 2 * sizeof(uint32_t)) { | |
2863 | return LOAD_BADMACHO; | |
2864 | } | |
2865 | ||
2866 | flavor = *ts++; | |
2867 | size = *ts++; | |
2868 | if (UINT32_MAX - 2 < size || | |
2869 | UINT32_MAX / sizeof(uint32_t) < size + 2) { | |
2870 | return LOAD_BADMACHO; | |
2871 | } | |
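/* the two tests above guard (size + 2) * sizeof(uint32_t) against 32-bit wrap */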
2872 | stack_size = (size + 2) * sizeof(uint32_t); | |
2873 | if (stack_size > total_size) { | |
2874 | return LOAD_BADMACHO; | |
2875 | } | |
2876 | total_size -= stack_size; | |
2877 | ||
2878 | /* | |
2879 | * Third argument is a kernel space pointer; it gets cast | |
2880 | * to the appropriate type in thread_userstack() based on | |
2881 | * the value of flavor. | |
2882 | */ | |
2883 | { | |
2884 | ret = thread_userstack(thread, flavor, (thread_state_t)ts, size, user_stack, customstack, result->is_64bit_data); | |
2885 | if (ret != KERN_SUCCESS) { | |
2886 | return LOAD_FAILURE; | |
2887 | } | |
2888 | } | |
2889 | ||
2890 | ts += size; /* ts is a (uint32_t *) */ | |
2891 | } | |
2892 | return LOAD_SUCCESS; | |
2893 | } | |
2894 | ||
2895 | static | |
2896 | load_return_t | |
2897 | load_threadentry( | |
2898 | thread_t thread, | |
2899 | uint32_t *ts, | |
2900 | uint32_t total_size, | |
2901 | mach_vm_offset_t *entry_point | |
2902 | ) | |
2903 | { | |
2904 | kern_return_t ret; | |
2905 | uint32_t size; | |
2906 | int flavor; | |
2907 | uint32_t entry_size; | |
2908 | ||
2909 | /* | |
2910 | * Set the thread state. | |
2911 | */ | |
2912 | *entry_point = MACH_VM_MIN_ADDRESS; | |
2913 | while (total_size > 0) { | |
2914 | if (total_size < 2 * sizeof(uint32_t)) { | |
2915 | return LOAD_BADMACHO; | |
2916 | } | |
2917 | ||
2918 | flavor = *ts++; | |
2919 | size = *ts++; | |
2920 | if (UINT32_MAX - 2 < size || | |
2921 | UINT32_MAX / sizeof(uint32_t) < size + 2) { | |
2922 | return LOAD_BADMACHO; | |
2923 | } | |
2924 | entry_size = (size + 2) * sizeof(uint32_t); | |
2925 | if (entry_size > total_size) { | |
2926 | return LOAD_BADMACHO; | |
2927 | } | |
2928 | total_size -= entry_size; | |
2929 | /* | |
2930 | * Third argument is a kernel space pointer; it gets cast | |
2931 | * to the appropriate type in thread_entrypoint() based on | |
2932 | * the value of flavor. | |
2933 | */ | |
2934 | ret = thread_entrypoint(thread, flavor, (thread_state_t)ts, size, entry_point); | |
2935 | if (ret != KERN_SUCCESS) { | |
2936 | return LOAD_FAILURE; | |
2937 | } | |
2938 | ts += size; /* ts is a (uint32_t *) */ | |
2939 | } | |
2940 | return LOAD_SUCCESS; | |
2941 | } | |
2942 | ||
2943 | struct macho_data { | |
2944 | struct nameidata __nid; | |
2945 | union macho_vnode_header { | |
2946 | struct mach_header mach_header; | |
2947 | struct fat_header fat_header; | |
2948 | char __pad[512]; | |
2949 | } __header; | |
2950 | }; | |
2951 | ||
2952 | #define DEFAULT_DYLD_PATH "/usr/lib/dyld" | |
2953 | ||
2954 | #if (DEVELOPMENT || DEBUG) | |
2955 | extern char dyld_alt_path[]; | |
2956 | extern int use_alt_dyld; | |
2957 | #endif | |
2958 | ||
2959 | static load_return_t | |
2960 | load_dylinker( | |
2961 | struct dylinker_command *lcp, | |
2962 | cpu_type_t cputype, | |
2963 | vm_map_t map, | |
2964 | thread_t thread, | |
2965 | int depth, | |
2966 | int64_t slide, | |
2967 | load_result_t *result, | |
2968 | struct image_params *imgp | |
2969 | ) | |
2970 | { | |
2971 | const char *name; | |
2972 | struct vnode *vp = NULLVP; /* set by get_macho_vnode() */ | |
2973 | struct mach_header *header; | |
2974 | off_t file_offset = 0; /* set by get_macho_vnode() */ | |
2975 | off_t macho_size = 0; /* set by get_macho_vnode() */ | |
2976 | load_result_t *myresult; | |
2977 | kern_return_t ret; | |
2978 | struct macho_data *macho_data; | |
2979 | struct { | |
2980 | struct mach_header __header; | |
2981 | load_result_t __myresult; | |
2982 | struct macho_data __macho_data; | |
2983 | } *dyld_data; | |
2984 | ||
2985 | if (lcp->cmdsize < sizeof(*lcp) || lcp->name.offset >= lcp->cmdsize) { | |
2986 | return LOAD_BADMACHO; | |
2987 | } | |
2988 | ||
2989 | name = (const char *)lcp + lcp->name.offset; | |
2990 | ||
2991 | /* Check for a proper null terminated string. */ | |
2992 | size_t maxsz = lcp->cmdsize - lcp->name.offset; | |
2993 | size_t namelen = strnlen(name, maxsz); | |
2994 | if (namelen >= maxsz) { | |
2995 | return LOAD_BADMACHO; | |
2996 | } | |
2997 | ||
2998 | #if (DEVELOPMENT || DEBUG) | |
2999 | ||
3000 | /* | |
3001 | * rdar://23680808 | |
3002 | * If an alternate dyld has been specified via boot args, check | |
3003 | * to see if PROC_UUID_ALT_DYLD_POLICY has been set on this | |
3004 | * executable and redirect the kernel to load that linker. | |
3005 | */ | |
3006 | ||
3007 | if (use_alt_dyld) { | |
3008 | int policy_error; | |
3009 | uint32_t policy_flags = 0; | |
3010 | int32_t policy_gencount = 0; | |
3011 | ||
3012 | policy_error = proc_uuid_policy_lookup(result->uuid, &policy_flags, &policy_gencount); | |
3013 | if (policy_error == 0) { | |
3014 | if (policy_flags & PROC_UUID_ALT_DYLD_POLICY) { | |
3015 | name = dyld_alt_path; | |
3016 | } | |
3017 | } | |
3018 | } | |
3019 | #endif | |
3020 | ||
3021 | #if !(DEVELOPMENT || DEBUG) | |
3022 | if (0 != strcmp(name, DEFAULT_DYLD_PATH)) { | |
3023 | return LOAD_BADMACHO; | |
3024 | } | |
3025 | #endif | |
3026 | ||
3027 | /* Allocate wad-of-data from heap to reduce excessively deep stacks */ | |
3028 | ||
3029 | dyld_data = kheap_alloc(KHEAP_TEMP, sizeof(*dyld_data), Z_WAITOK); | |
3030 | header = &dyld_data->__header; | |
3031 | myresult = &dyld_data->__myresult; | |
3032 | macho_data = &dyld_data->__macho_data; | |
3033 | ||
3034 | { | |
3035 | cputype = (cputype & CPU_ARCH_MASK) | (cpu_type() & ~CPU_ARCH_MASK); | |
3036 | } | |
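/*
 * Note: the arch bits (CPU_ARCH_MASK) above are kept from the
 * main binary while the base CPU type is taken from the host,
 * so e.g. a 64-bit executable requests a 64-bit dyld slice for
 * this machine.
 */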
3037 | ||
3038 | ret = get_macho_vnode(name, cputype, header, | |
3039 | &file_offset, &macho_size, macho_data, &vp, imgp); | |
3040 | if (ret) { | |
3041 | goto novp_out; | |
3042 | } | |
3043 | ||
3044 | *myresult = load_result_null; | |
3045 | myresult->is_64bit_addr = result->is_64bit_addr; | |
3046 | myresult->is_64bit_data = result->is_64bit_data; | |
3047 | ||
3048 | ret = parse_machfile(vp, map, thread, header, file_offset, | |
3049 | macho_size, depth, slide, 0, myresult, result, imgp); | |
3050 | ||
3051 | if (ret == LOAD_SUCCESS) { | |
3052 | if (result->threadstate) { | |
3053 | /* don't use the app's threadstate if we have a dyld */ | |
3054 | kfree(result->threadstate, result->threadstate_sz); | |
3055 | } | |
3056 | result->threadstate = myresult->threadstate; | |
3057 | result->threadstate_sz = myresult->threadstate_sz; | |
3058 | ||
3059 | result->dynlinker = TRUE; | |
3060 | result->entry_point = myresult->entry_point; | |
3061 | result->validentry = myresult->validentry; | |
3062 | result->all_image_info_addr = myresult->all_image_info_addr; | |
3063 | result->all_image_info_size = myresult->all_image_info_size; | |
3064 | if (myresult->platform_binary) { | |
3065 | result->csflags |= CS_DYLD_PLATFORM; | |
3066 | } | |
3067 | ||
3068 | } | |
3069 | ||
3070 | struct vnode_attr *va; | |
3071 | va = kheap_alloc(KHEAP_TEMP, sizeof(*va), Z_WAITOK | Z_ZERO); | |
3072 | VATTR_INIT(va); | |
3073 | VATTR_WANTED(va, va_fsid64); | |
3074 | VATTR_WANTED(va, va_fsid); | |
3075 | VATTR_WANTED(va, va_fileid); | |
3076 | int error = vnode_getattr(vp, va, imgp->ip_vfs_context); | |
3077 | if (error == 0) { | |
3078 | imgp->ip_dyld_fsid = vnode_get_va_fsid(va); | |
3079 | imgp->ip_dyld_fsobjid = va->va_fileid; | |
3080 | } | |
3081 | ||
3082 | vnode_put(vp); | |
3083 | kheap_free(KHEAP_TEMP, va, sizeof(*va)); | |
3084 | novp_out: | |
3085 | kheap_free(KHEAP_TEMP, dyld_data, sizeof(*dyld_data)); | |
3086 | return ret; | |
3087 | } | |
3088 | ||
3089 | ||
3090 | static load_return_t | |
3091 | load_code_signature( | |
3092 | struct linkedit_data_command *lcp, | |
3093 | struct vnode *vp, | |
3094 | off_t macho_offset, | |
3095 | off_t macho_size, | |
3096 | cpu_type_t cputype, | |
3097 | cpu_subtype_t cpusubtype, | |
3098 | load_result_t *result, | |
3099 | struct image_params *imgp) | |
3100 | { | |
3101 | int ret; | |
3102 | kern_return_t kr; | |
3103 | vm_offset_t addr; | |
3104 | int resid; | |
3105 | struct cs_blob *blob; | |
3106 | int error; | |
3107 | vm_size_t blob_size; | |
3108 | uint32_t sum; | |
3109 | boolean_t anyCPU; | |
3110 | ||
3111 | addr = 0; | |
3112 | blob = NULL; | |
3113 | ||
3114 | cpusubtype &= ~CPU_SUBTYPE_MASK; | |
3115 | ||
3116 | if (lcp->cmdsize != sizeof(struct linkedit_data_command)) { | |
3117 | ret = LOAD_BADMACHO; | |
3118 | goto out; | |
3119 | } | |
3120 | ||
3121 | sum = 0; | |
3122 | if (os_add_overflow(lcp->dataoff, lcp->datasize, &sum) || sum > macho_size) { | |
3123 | ret = LOAD_BADMACHO; | |
3124 | goto out; | |
3125 | } | |
3126 | ||
3127 | blob = ubc_cs_blob_get(vp, cputype, cpusubtype, macho_offset); | |
3128 | ||
3129 | if (blob != NULL) { | |
3130 | /* we already have a blob for this vnode and cpu(sub)type */ | |
3131 | anyCPU = blob->csb_cpu_type == -1; | |
3132 | if ((blob->csb_cpu_type != cputype && | |
3133 | blob->csb_cpu_subtype != cpusubtype && !anyCPU) || | |
3134 | blob->csb_base_offset != macho_offset) { | |
3135 | /* the blob has changed for this vnode: fail ! */ | |
3136 | ret = LOAD_BADMACHO; | |
3137 | goto out; | |
3138 | } | |
3139 | ||
3140 | /* It matches the blob we want here, let's verify the version */ | |
3141 | if (!anyCPU && ubc_cs_generation_check(vp) == 0) { | |
3142 | /* No need to revalidate, we're good! */ | |
3143 | ret = LOAD_SUCCESS; | |
3144 | goto out; | |
3145 | } | |
3146 | ||
3147 | /* That blob may be stale, let's revalidate. */ | |
3148 | error = ubc_cs_blob_revalidate(vp, blob, imgp, 0, result->ip_platform); | |
3149 | if (error == 0) { | |
3150 | /* Revalidation succeeded, we're good! */ | |
3151 | /* If we were revalidating a CS blob with any CPU arch we adjust it */ | |
3152 | if (anyCPU) { | |
3153 | vnode_lock_spin(vp); | |
3154 | blob->csb_cpu_type = cputype; | |
3155 | blob->csb_cpu_subtype = cpusubtype; | |
3156 | vnode_unlock(vp); | |
3157 | } | |
3158 | ret = LOAD_SUCCESS; | |
3159 | goto out; | |
3160 | } | |
3161 | ||
3162 | if (error != EAGAIN) { | |
3163 | printf("load_code_signature: revalidation failed: %d\n", error); | |
3164 | ret = LOAD_FAILURE; | |
3165 | goto out; | |
3166 | } | |
3167 | ||
3168 | assert(error == EAGAIN); | |
3169 | ||
3170 | /* | |
3171 | * Revalidation was not possible for this blob. We just continue as if there was no blob, | |
3172 | * rereading the signature, and ubc_cs_blob_add will do the right thing. | |
3173 | */ | |
3174 | blob = NULL; | |
3175 | } | |
3176 | ||
3177 | blob_size = lcp->datasize; | |
3178 | kr = ubc_cs_blob_allocate(&addr, &blob_size); | |
3179 | if (kr != KERN_SUCCESS) { | |
3180 | ret = LOAD_NOSPACE; | |
3181 | goto out; | |
3182 | } | |
3183 | ||
3184 | resid = 0; | |
3185 | error = vn_rdwr(UIO_READ, | |
3186 | vp, | |
3187 | (caddr_t) addr, | |
3188 | lcp->datasize, | |
3189 | macho_offset + lcp->dataoff, | |
3190 | UIO_SYSSPACE, | |
3191 | 0, | |
3192 | kauth_cred_get(), | |
3193 | &resid, | |
3194 | current_proc()); | |
3195 | if (error || resid != 0) { | |
3196 | ret = LOAD_IOERROR; | |
3197 | goto out; | |
3198 | } | |
3199 | ||
3200 | if (ubc_cs_blob_add(vp, | |
3201 | result->ip_platform, | |
3202 | cputype, | |
3203 | cpusubtype, | |
3204 | macho_offset, | |
3205 | &addr, | |
3206 | lcp->datasize, | |
3207 | imgp, | |
3208 | 0, | |
3209 | &blob)) { | |
3210 | if (addr) { | |
3211 | ubc_cs_blob_deallocate(addr, blob_size); | |
3212 | addr = 0; | |
3213 | } | |
3214 | ret = LOAD_FAILURE; | |
3215 | goto out; | |
3216 | } else { | |
3217 | /* ubc_cs_blob_add() has consumed "addr" */ | |
3218 | addr = 0; | |
3219 | } | |
3220 | ||
3221 | #if CHECK_CS_VALIDATION_BITMAP | |
3222 | ubc_cs_validation_bitmap_allocate( vp ); | |
3223 | #endif | |
3224 | ||
3225 | ret = LOAD_SUCCESS; | |
3226 | out: | |
3227 | if (ret == LOAD_SUCCESS) { | |
3228 | if (blob == NULL) { | |
3229 | panic("success, but no blob!"); | |
3230 | } | |
3231 | ||
3232 | result->csflags |= blob->csb_flags; | |
3233 | result->platform_binary = blob->csb_platform_binary; | |
3234 | result->cs_end_offset = blob->csb_end_offset; | |
3235 | } | |
3236 | if (addr != 0) { | |
3237 | ubc_cs_blob_deallocate(addr, blob_size); | |
3238 | addr = 0; | |
3239 | } | |
3240 | ||
3241 | return ret; | |
3242 | } | |
3243 | ||
3244 | ||
3245 | #if CONFIG_CODE_DECRYPTION | |
3246 | ||
3247 | static load_return_t | |
3248 | set_code_unprotect( | |
3249 | struct encryption_info_command *eip, | |
3250 | caddr_t addr, | |
3251 | vm_map_t map, | |
3252 | int64_t slide, | |
3253 | struct vnode *vp, | |
3254 | off_t macho_offset, | |
3255 | cpu_type_t cputype, | |
3256 | cpu_subtype_t cpusubtype) | |
3257 | { | |
3258 | int error, len; | |
3259 | pager_crypt_info_t crypt_info; | |
3260 | const char * cryptname = 0; | |
3261 | char *vpath; | |
3262 | ||
3263 | size_t offset; | |
3264 | struct segment_command_64 *seg64; | |
3265 | struct segment_command *seg32; | |
3266 | vm_map_offset_t map_offset, map_size; | |
3267 | vm_object_offset_t crypto_backing_offset; | |
3268 | kern_return_t kr; | |
3269 | ||
3270 | if (eip->cmdsize < sizeof(*eip)) { | |
3271 | return LOAD_BADMACHO; | |
3272 | } | |
3273 | ||
3274 | switch (eip->cryptid) { | |
3275 | case 0: | |
3276 | /* not encrypted, just an empty load command */ | |
3277 | return LOAD_SUCCESS; | |
3278 | case 1: | |
3279 | cryptname = "com.apple.unfree"; | |
3280 | break; | |
3281 | case 0x10: | |
3282 | /* some random cryptid that you could manually put into | |
3283 | * your binary if you want NULL */ | |
3284 | cryptname = "com.apple.null"; | |
3285 | break; | |
3286 | default: | |
3287 | return LOAD_BADMACHO; | |
3288 | } | |
3289 | ||
3290 | if (map == VM_MAP_NULL) { | |
3291 | return LOAD_SUCCESS; | |
3292 | } | |
3293 | if (NULL == text_crypter_create) { | |
3294 | return LOAD_FAILURE; | |
3295 | } | |
3296 | ||
3297 | vpath = zalloc(ZV_NAMEI); | |
3298 | ||
3299 | len = MAXPATHLEN; | |
3300 | error = vn_getpath(vp, vpath, &len); | |
3301 | if (error) { | |
3302 | zfree(ZV_NAMEI, vpath); | |
3303 | return LOAD_FAILURE; | |
3304 | } | |
3305 | ||
3306 | /* set up decrypter first */ | |
3307 | crypt_file_data_t crypt_data = { | |
3308 | .filename = vpath, | |
3309 | .cputype = cputype, | |
3310 | .cpusubtype = cpusubtype | |
3311 | }; | |
3312 | kr = text_crypter_create(&crypt_info, cryptname, (void*)&crypt_data); | |
3313 | #if VM_MAP_DEBUG_APPLE_PROTECT | |
3314 | if (vm_map_debug_apple_protect) { | |
3315 | struct proc *p; | |
3316 | p = current_proc(); | |
3317 | printf("APPLE_PROTECT: %d[%s] map %p %s(%s) -> 0x%x\n", | |
3318 | p->p_pid, p->p_comm, map, __FUNCTION__, vpath, kr); | |
3319 | } | |
3320 | #endif /* VM_MAP_DEBUG_APPLE_PROTECT */ | |
3321 | zfree(ZV_NAMEI, vpath); | |
3322 | ||
3323 | if (kr) { | |
3324 | printf("set_code_unprotect: unable to create decrypter %s, kr=%d\n", | |
3325 | cryptname, kr); | |
3326 | if (kr == kIOReturnNotPrivileged) { | |
3327 | /* the decrypter reported a decryption failure */ | |
3328 | return LOAD_DECRYPTFAIL; | |
3329 | } else { | |
3330 | return LOAD_RESOURCE; | |
3331 | } | |
3332 | } | |
3333 | ||
3334 | /* This is unfortunate, but we have to rescan the load commands to find | |
3335 | * the virtual address of the encrypted region. This code is going to look | |
3336 | * like the dyld source one day... */ | |
3337 | struct mach_header *header = (struct mach_header *)addr; | |
3338 | size_t mach_header_sz = sizeof(struct mach_header); | |
3339 | if (header->magic == MH_MAGIC_64 || | |
3340 | header->magic == MH_CIGAM_64) { | |
3341 | mach_header_sz = sizeof(struct mach_header_64); | |
3342 | } | |
3343 | offset = mach_header_sz; | |
3344 | uint32_t ncmds = header->ncmds; | |
3345 | while (ncmds--) { | |
3346 | /* | |
3347 | * Get a pointer to the command. | |
3348 | */ | |
3349 | struct load_command *lcp = (struct load_command *)(addr + offset); | |
3350 | offset += lcp->cmdsize; | |
3351 | ||
3352 | switch (lcp->cmd) { | |
3353 | case LC_SEGMENT_64: | |
3354 | seg64 = (struct segment_command_64 *)lcp; | |
3355 | if ((seg64->fileoff <= eip->cryptoff) && | |
3356 | (seg64->fileoff + seg64->filesize >= | |
3357 | eip->cryptoff + eip->cryptsize)) { | |
3358 | map_offset = (vm_map_offset_t)(seg64->vmaddr + eip->cryptoff - seg64->fileoff + slide); | |
3359 | map_size = eip->cryptsize; | |
3360 | crypto_backing_offset = macho_offset + eip->cryptoff; | |
3361 | goto remap_now; | |
3362 | } | |
3363 | break; | |
3364 | case LC_SEGMENT: | |
3365 | seg32 = (struct segment_command *)lcp; | |
3366 | if ((seg32->fileoff <= eip->cryptoff) && | |
3367 | (seg32->fileoff + seg32->filesize >= | |
3368 | eip->cryptoff + eip->cryptsize)) { | |
3369 | map_offset = (vm_map_offset_t)(seg32->vmaddr + eip->cryptoff - seg32->fileoff + slide); | |
3370 | map_size = eip->cryptsize; | |
3371 | crypto_backing_offset = macho_offset + eip->cryptoff; | |
3372 | goto remap_now; | |
3373 | } | |
3374 | break; | |
3375 | } | |
3376 | } | |
3377 | ||
3378 | /* if we get here, no segment fully contained the encrypted range */ | |
3379 | return LOAD_BADMACHO; | |
3380 | ||
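 | /* | |
 | * Worked example of the translation above (illustrative numbers only): | |
 | * for a segment with fileoff 0x1000 and vmaddr 0x100001000, an encrypted | |
 | * range at cryptoff 0x2000 maps at | |
 | * | |
 | *	map_offset = 0x100001000 + (0x2000 - 0x1000) + slide | |
 | * | |
 | * i.e. the encrypted bytes keep their relative position within the | |
 | * segment, while crypto_backing_offset (macho_offset + cryptoff) points | |
 | * back at the same bytes in the file for the decrypting pager. | |
 | */ | |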
3381 | remap_now: | |
3382 | /* now remap using the decrypter, so pages are decrypted at fault time */ | |
3383 | MACHO_PRINTF(("+++ set_code_unprotect: vm[0x%llx:0x%llx]\n", | |
3384 | (uint64_t) map_offset, | |
3385 | (uint64_t) (map_offset + map_size))); | |
3386 | kr = vm_map_apple_protected(map, | |
3387 | map_offset, | |
3388 | map_offset + map_size, | |
3389 | crypto_backing_offset, | |
3390 | &crypt_info, | |
3391 | CRYPTID_APP_ENCRYPTION); | |
3392 | if (kr) { | |
3393 | printf("set_code_unprotect(): mapping failed with %x\n", kr); | |
3394 | return LOAD_PROTECT; | |
3395 | } | |
3396 | ||
3397 | return LOAD_SUCCESS; | |
3398 | } | |
3399 | ||
3400 | #endif | |
3401 | ||
3402 | /* | |
3403 | * This routine exists to support load_dylinker(). | |
3404 | * | |
3405 | * This routine has its own, separate, understanding of the FAT file format, | |
3406 | * which is terrifically unfortunate. | |
3407 | */ | |
3408 | static | |
3409 | load_return_t | |
3410 | get_macho_vnode( | |
3411 | const char *path, | |
3412 | cpu_type_t cputype, | |
3413 | struct mach_header *mach_header, | |
3414 | off_t *file_offset, | |
3415 | off_t *macho_size, | |
3416 | struct macho_data *data, | |
3417 | struct vnode **vpp, | |
3418 | struct image_params *imgp | |
3419 | ) | |
3420 | { | |
3421 | struct vnode *vp; | |
3422 | vfs_context_t ctx = vfs_context_current(); | |
3423 | proc_t p = vfs_context_proc(ctx); | |
3424 | kauth_cred_t kerncred; | |
3425 | struct nameidata *ndp = &data->__nid; | |
3426 | boolean_t is_fat; | |
3427 | struct fat_arch fat_arch; | |
3428 | int error; | |
3429 | int resid; | |
3430 | union macho_vnode_header *header = &data->__header; | |
3431 | off_t fsize = (off_t)0; | |
3432 | ||
3433 | /* | |
3434 | * Capture the kernel credential for the actual read of the file: | |
3435 | * the user doing the execution may have execute rights but not | |
3436 | * read rights, yet to exec something we have to either map it or | |
3437 | * read it into the new process address space, which requires read | |
3438 | * rights. This works around the lack of common credential | |
3439 | * serialization code that would treat NOCRED as "serialize 'root'". | |
3440 | */ | |
3441 | kerncred = vfs_context_ucred(vfs_context_kernel()); | |
3442 | ||
3443 | /* init the namei data to point at the user's program name */ | |
3444 | NDINIT(ndp, LOOKUP, OP_OPEN, FOLLOW | LOCKLEAF, UIO_SYSSPACE, CAST_USER_ADDR_T(path), ctx); | |
3445 | ||
3446 | if ((error = namei(ndp)) != 0) { | |
3447 | if (error == ENOENT) { | |
3448 | error = LOAD_ENOENT; | |
3449 | } else { | |
3450 | error = LOAD_FAILURE; | |
3451 | } | |
3452 | return error; | |
3453 | } | |
3454 | nameidone(ndp); | |
3455 | vp = ndp->ni_vp; | |
3456 | ||
3457 | /* check for regular file */ | |
3458 | if (vp->v_type != VREG) { | |
3459 | error = LOAD_PROTECT; | |
3460 | goto bad1; | |
3461 | } | |
3462 | ||
3463 | /* get size */ | |
3464 | if ((error = vnode_size(vp, &fsize, ctx)) != 0) { | |
3465 | error = LOAD_FAILURE; | |
3466 | goto bad1; | |
3467 | } | |
3468 | ||
3469 | /* Check mount point */ | |
3470 | if (vp->v_mount->mnt_flag & MNT_NOEXEC) { | |
3471 | error = LOAD_PROTECT; | |
3472 | goto bad1; | |
3473 | } | |
3474 | ||
3475 | /* check access */ | |
3476 | if ((error = vnode_authorize(vp, NULL, KAUTH_VNODE_EXECUTE | KAUTH_VNODE_READ_DATA, ctx)) != 0) { | |
3477 | error = LOAD_PROTECT; | |
3478 | goto bad1; | |
3479 | } | |
3480 | ||
3481 | /* try to open it */ | |
3482 | if ((error = VNOP_OPEN(vp, FREAD, ctx)) != 0) { | |
3483 | error = LOAD_PROTECT; | |
3484 | goto bad1; | |
3485 | } | |
3486 | ||
3487 | if ((error = vn_rdwr(UIO_READ, vp, (caddr_t)header, sizeof(*header), 0, | |
3488 | UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p)) != 0) { | |
3489 | error = LOAD_IOERROR; | |
3490 | goto bad2; | |
3491 | } | |
3492 | ||
3493 | if (resid) { | |
3494 | error = LOAD_BADMACHO; | |
3495 | goto bad2; | |
3496 | } | |
3497 | ||
3498 | if (header->mach_header.magic == MH_MAGIC || | |
3499 | header->mach_header.magic == MH_MAGIC_64) { | |
3500 | is_fat = FALSE; | |
3501 | } else if (OSSwapBigToHostInt32(header->fat_header.magic) == FAT_MAGIC) { | |
3502 | is_fat = TRUE; | |
3503 | } else { | |
3504 | error = LOAD_BADMACHO; | |
3505 | goto bad2; | |
3506 | } | |
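 | /* | |
 | * Note: fat headers are always stored big-endian on disk, hence the | |
 | * OSSwapBigToHostInt32() above; thin Mach-O magic is checked in host | |
 | * byte order (MH_MAGIC is 0xfeedface, MH_MAGIC_64 is 0xfeedfacf, and | |
 | * FAT_MAGIC is 0xcafebabe). | |
 | */ | |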
3507 | ||
3508 | if (is_fat) { | |
3509 | error = fatfile_validate_fatarches((vm_offset_t)(&header->fat_header), | |
3510 | sizeof(*header)); | |
3511 | if (error != LOAD_SUCCESS) { | |
3512 | goto bad2; | |
3513 | } | |
3514 | ||
3515 | /* Look up our architecture in the fat file. */ | |
3516 | error = fatfile_getbestarch_for_cputype(cputype, CPU_SUBTYPE_ANY, | |
3517 | (vm_offset_t)(&header->fat_header), sizeof(*header), imgp, &fat_arch); | |
3518 | if (error != LOAD_SUCCESS) { | |
3519 | goto bad2; | |
3520 | } | |
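 | /* | |
 | * For reference: fat_arch (<mach-o/fat.h>) describes one slice of a | |
 | * fat file; the "offset" and "size" fields used below locate the thin | |
 | * Mach-O for the chosen architecture: | |
 | * | |
 | *	struct fat_arch { | |
 | *		cpu_type_t	cputype; | |
 | *		cpu_subtype_t	cpusubtype; | |
 | *		uint32_t	offset;		// file offset of this slice | |
 | *		uint32_t	size;		// size of this slice | |
 | *		uint32_t	align;		// alignment as a power of 2 | |
 | *	}; | |
 | */ | |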
3521 | ||
3522 | /* Read the Mach-O header out of it */ | |
3523 | error = vn_rdwr(UIO_READ, vp, (caddr_t)&header->mach_header, | |
3524 | sizeof(header->mach_header), fat_arch.offset, | |
3525 | UIO_SYSSPACE, IO_NODELOCKED, kerncred, &resid, p); | |
3526 | if (error) { | |
3527 | error = LOAD_IOERROR; | |
3528 | goto bad2; | |
3529 | } | |
3530 | ||
3531 | if (resid) { | |
3532 | error = LOAD_BADMACHO; | |
3533 | goto bad2; | |
3534 | } | |
3535 | ||
3536 | /* Is this really a Mach-O? */ | |
3537 | if (header->mach_header.magic != MH_MAGIC && | |
3538 | header->mach_header.magic != MH_MAGIC_64) { | |
3539 | error = LOAD_BADMACHO; | |
3540 | goto bad2; | |
3541 | } | |
3542 | ||
3543 | *file_offset = fat_arch.offset; | |
3544 | *macho_size = fat_arch.size; | |
3545 | } else { | |
3546 | /* | |
3547 | * Force get_macho_vnode() to fail if the architecture bits | |
3548 | * do not match the expected architecture bits. This in | |
3549 | * turn causes load_dylinker() to fail for the same reason, | |
3550 | * which ensures the dynamic linker and the binary are in | |
3551 | * lock-step. This could become a problem if we ever add | |
3552 | * CPU_ARCH_* bits that are desirable but not required: the | |
3553 | * dynamic linker might work, but we would refuse to load | |
3554 | * it because of this check. | |
3555 | */ | |
3556 | if ((cpu_type_t)header->mach_header.cputype != cputype) { | |
3557 | error = LOAD_BADARCH; | |
3558 | goto bad2; | |
3559 | } | |
3560 | ||
3561 | *file_offset = 0; | |
3562 | *macho_size = fsize; | |
3563 | } | |
3564 | ||
3565 | *mach_header = header->mach_header; | |
3566 | *vpp = vp; | |
3567 | ||
3568 | ubc_setsize(vp, fsize); | |
3569 | return error; | |
3570 | ||
3571 | bad2: | |
3572 | (void) VNOP_CLOSE(vp, FREAD, ctx); | |
3573 | bad1: | |
3574 | vnode_put(vp); | |
3575 | return error; | |
3576 | } |
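 | /* | |
 | * Minimal usage sketch (hypothetical caller, for illustration only; | |
 | * load_dylinker() is the real consumer and passes state not shown here): | |
 | * | |
 | *	struct mach_header hdr; | |
 | *	off_t file_offset, macho_size; | |
 | *	struct macho_data data; | |
 | *	struct vnode *vp; | |
 | * | |
 | *	if (get_macho_vnode("/usr/lib/dyld", cputype, &hdr, &file_offset, | |
 | *	    &macho_size, &data, &vp, imgp) == LOAD_SUCCESS) { | |
 | *		// ... parse/map the image; on success the vnode is left | |
 | *		// open (FREAD) and referenced, so the caller must close | |
 | *		// and release it when done ... | |
 | *		VNOP_CLOSE(vp, FREAD, vfs_context_current()); | |
 | *		vnode_put(vp); | |
 | *	} | |
 | */ | |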