src/dyld.cpp (apple/dyld.git, commit cda4944afc6a3b6ce8e16089616fefab92722b44)
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2004-2013 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25 #include <stdint.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <errno.h>
29 #include <fcntl.h>
30 #include <dirent.h>
31 #include <pthread.h>
32 #include <libproc.h>
33 #include <sys/param.h>
34 #include <mach/mach_time.h> // mach_absolute_time()
35 #include <mach/mach_init.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <sys/syscall.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/syslog.h>
42 #include <sys/uio.h>
43 #include <mach-o/fat.h>
44 #include <mach-o/loader.h>
45 #include <mach-o/ldsyms.h>
46 #include <libkern/OSByteOrder.h>
47 #include <libkern/OSAtomic.h>
48 #include <mach/mach.h>
49 #include <sys/sysctl.h>
50 #include <sys/mman.h>
51 #include <sys/dtrace.h>
52 #include <libkern/OSAtomic.h>
53 #include <Availability.h>
54 #include <System/sys/codesign.h>
55 #include <System/sys/csr.h>
56 #include <_simple.h>
57 #include <os/lock_private.h>
58 #include <System/machine/cpu_capabilities.h>
59 #include <System/sys/reason.h>
60 #include <kern/kcdata.h>
61 #include <sandbox.h>
62 #include <sandbox/private.h>
63
64 #include <array>
65
66 #ifndef CPU_SUBTYPE_ARM_V5TEJ
67 #define CPU_SUBTYPE_ARM_V5TEJ ((cpu_subtype_t) 7)
68 #endif
69 #ifndef CPU_SUBTYPE_ARM_XSCALE
70 #define CPU_SUBTYPE_ARM_XSCALE ((cpu_subtype_t) 8)
71 #endif
72 #ifndef CPU_SUBTYPE_ARM_V7
73 #define CPU_SUBTYPE_ARM_V7 ((cpu_subtype_t) 9)
74 #endif
75 #ifndef CPU_SUBTYPE_ARM_V7F
76 #define CPU_SUBTYPE_ARM_V7F ((cpu_subtype_t) 10)
77 #endif
78 #ifndef CPU_SUBTYPE_ARM_V7S
79 #define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t) 11)
80 #endif
81 #ifndef CPU_SUBTYPE_ARM_V7K
82 #define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t) 12)
83 #endif
84 #ifndef LC_DYLD_ENVIRONMENT
85 #define LC_DYLD_ENVIRONMENT 0x27
86 #endif
87
88 #ifndef CPU_SUBTYPE_X86_64_H
89 #define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t) 8)
90 #endif
91
92 #ifndef VM_PROT_SLIDE
93 #define VM_PROT_SLIDE 0x20
94 #endif
95
96 #include <vector>
97 #include <algorithm>
98
99 #include "mach-o/dyld_gdb.h"
100
101 #include "dyld.h"
102 #include "ImageLoader.h"
103 #include "ImageLoaderMachO.h"
104 #include "dyldLibSystemInterface.h"
105 #if DYLD_SHARED_CACHE_SUPPORT
106 #include "dyld_cache_format.h"
107 #endif
108 #include "dyld_process_info_internal.h"
109 #include <coreSymbolicationDyldSupport.h>
110 #if TARGET_IPHONE_SIMULATOR
111 extern "C" void xcoresymbolication_load_notifier(void *connection, uint64_t load_timestamp, const char *image_path, const struct mach_header *mach_header);
112 extern "C" void xcoresymbolication_unload_notifier(void *connection, uint64_t unload_timestamp, const char *image_path, const struct mach_header *mach_header);
113 #define coresymbolication_load_notifier(c, t, p, h) xcoresymbolication_load_notifier(c, t, p, h)
114 #define coresymbolication_unload_notifier(c, t, p, h) xcoresymbolication_unload_notifier(c, t, p, h)
115 #endif
116
117 #if SUPPORT_ACCELERATE_TABLES
118 #include "ImageLoaderMegaDylib.h"
119 #endif
120
121 #if TARGET_IPHONE_SIMULATOR
122 extern "C" void* gSyscallHelpers;
123 #else
124 #include "dyldSyscallInterface.h"
125 #endif
126
127
128 // there is no libc header for the __sendto() syscall interface
129 extern "C" ssize_t __sendto(int, const void *, size_t, int, const struct sockaddr *, socklen_t);
130
131
132 // ARM and x86_64 are the only architectures that use cpu-sub-types
133 #define CPU_SUBTYPES_SUPPORTED ((__arm__ || __x86_64__) && !TARGET_IPHONE_SIMULATOR)
134
135 #if __LP64__
136 #define LC_SEGMENT_COMMAND LC_SEGMENT_64
137 #define LC_SEGMENT_COMMAND_WRONG LC_SEGMENT
138 #define LC_ENCRYPT_COMMAND LC_ENCRYPTION_INFO
139 #define macho_segment_command segment_command_64
140 #define macho_section section_64
141 #else
142 #define LC_SEGMENT_COMMAND LC_SEGMENT
143 #define LC_SEGMENT_COMMAND_WRONG LC_SEGMENT_64
144 #define LC_ENCRYPT_COMMAND LC_ENCRYPTION_INFO_64
145 #define macho_segment_command segment_command
146 #define macho_section section
147 #endif
148
149
150
151 #define CPU_TYPE_MASK 0x00FFFFFF /* complement of CPU_ARCH_MASK */
152
153
154 /* implemented in dyld_gdb.cpp */
155 extern void resetAllImages();
156 extern void addImagesToAllImages(uint32_t infoCount, const dyld_image_info info[]);
157 extern void removeImageFromAllImages(const mach_header* mh);
158 extern void addNonSharedCacheImageUUID(const dyld_uuid_info& info);
159 extern const char* notifyGDB(enum dyld_image_states state, uint32_t infoCount, const dyld_image_info info[]);
160 extern size_t allImagesCount();
161
162 // magic so CrashReporter logs message
163 extern "C" {
164 char error_string[1024];
165 }
166
167 // magic linker symbol for start of dyld binary
168 extern "C" const macho_header __dso_handle;
169
170
171 //
172 // This file contains the core of dyld used to get a process to main().
173 // The APIs that dyld supports are implemented in dyldAPIs.cpp.
174 //
175 //
176 //
177 //
178 //
179 namespace dyld {
180 struct RegisteredDOF { const mach_header* mh; int registrationID; };
181 struct DylibOverride { const char* installName; const char* override; };
182 }
183
184
185 VECTOR_NEVER_DESTRUCTED(ImageLoader*);
186 VECTOR_NEVER_DESTRUCTED(dyld::RegisteredDOF);
187 VECTOR_NEVER_DESTRUCTED(dyld::ImageCallback);
188 VECTOR_NEVER_DESTRUCTED(dyld::DylibOverride);
189 VECTOR_NEVER_DESTRUCTED(ImageLoader::DynamicReference);
190
191 VECTOR_NEVER_DESTRUCTED(dyld_image_state_change_handler);
192
193 namespace dyld {
194
195
196 //
197 // state of all environment variables dyld uses
198 //
199 struct EnvironmentVariables {
200 const char* const * DYLD_FRAMEWORK_PATH;
201 const char* const * DYLD_FALLBACK_FRAMEWORK_PATH;
202 const char* const * DYLD_LIBRARY_PATH;
203 const char* const * DYLD_FALLBACK_LIBRARY_PATH;
204 const char* const * DYLD_INSERT_LIBRARIES;
205 const char* const * LD_LIBRARY_PATH; // for unix conformance
206 const char* const * DYLD_VERSIONED_LIBRARY_PATH;
207 const char* const * DYLD_VERSIONED_FRAMEWORK_PATH;
208 bool DYLD_PRINT_LIBRARIES_POST_LAUNCH;
209 bool DYLD_BIND_AT_LAUNCH;
210 bool DYLD_PRINT_STATISTICS;
211 bool DYLD_PRINT_STATISTICS_DETAILS;
212 bool DYLD_PRINT_OPTS;
213 bool DYLD_PRINT_ENV;
214 bool DYLD_DISABLE_DOFS;
215 bool DYLD_PRINT_CS_NOTIFICATIONS;
216 // DYLD_SHARED_CACHE_DONT_VALIDATE ==> sSharedCacheIgnoreInodeAndTimeStamp
217 // DYLD_SHARED_CACHE_DIR ==> sSharedCacheDir
218 // DYLD_ROOT_PATH ==> gLinkContext.rootPaths
219 // DYLD_IMAGE_SUFFIX ==> gLinkContext.imageSuffix
220 // DYLD_PRINT_OPTS ==> gLinkContext.verboseOpts
221 // DYLD_PRINT_ENV ==> gLinkContext.verboseEnv
222 // DYLD_FORCE_FLAT_NAMESPACE ==> gLinkContext.bindFlat
223 // DYLD_PRINT_INITIALIZERS ==> gLinkContext.verboseInit
224 // DYLD_PRINT_SEGMENTS ==> gLinkContext.verboseMapping
225 // DYLD_PRINT_BINDINGS ==> gLinkContext.verboseBind
226 // DYLD_PRINT_WEAK_BINDINGS ==> gLinkContext.verboseWeakBind
227 // DYLD_PRINT_REBASINGS ==> gLinkContext.verboseRebase
228 // DYLD_PRINT_DOFS ==> gLinkContext.verboseDOF
229 // DYLD_PRINT_APIS ==> gLogAPIs
230 // DYLD_IGNORE_PREBINDING ==> gLinkContext.prebindUsage
231 // DYLD_PREBIND_DEBUG ==> gLinkContext.verbosePrebinding
232 // DYLD_NEW_LOCAL_SHARED_REGIONS ==> gLinkContext.sharedRegionMode
233 // DYLD_SHARED_REGION ==> gLinkContext.sharedRegionMode
234 // DYLD_PRINT_WARNINGS ==> gLinkContext.verboseWarnings
235 // DYLD_PRINT_RPATHS ==> gLinkContext.verboseRPaths
236 // DYLD_PRINT_INTERPOSING ==> gLinkContext.verboseInterposing
237 // DYLD_PRINT_LIBRARIES ==> gLinkContext.verboseLoading
238 };
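// Illustrative example (not part of dyld): the variables above are read from the
// process environment at launch; for instance, running a tool as
//     DYLD_PRINT_LIBRARIES=1 DYLD_PRINT_STATISTICS=1 ./mytool
// turns on gLinkContext.verboseLoading and sEnv.DYLD_PRINT_STATISTICS for that
// process (ignored in restricted processes). The tool name is hypothetical.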
239
240
241
242 typedef std::vector<dyld_image_state_change_handler> StateHandlers;
243
244
245 enum EnvVarMode { envNone, envPrintOnly, envAll };
246
247 // all global state
248 static const char* sExecPath = NULL;
249 static const char* sExecShortName = NULL;
250 static const macho_header* sMainExecutableMachHeader = NULL;
251 #if CPU_SUBTYPES_SUPPORTED
252 static cpu_type_t sHostCPU;
253 static cpu_subtype_t sHostCPUsubtype;
254 #endif
255 static ImageLoaderMachO* sMainExecutable = NULL;
256 static EnvVarMode sEnvMode = envNone;
257 static size_t sInsertedDylibCount = 0;
258 static std::vector<ImageLoader*> sAllImages;
259 static std::vector<ImageLoader*> sImageRoots;
260 static std::vector<ImageLoader*> sImageFilesNeedingTermination;
261 static std::vector<RegisteredDOF> sImageFilesNeedingDOFUnregistration;
262 static std::vector<ImageCallback> sAddImageCallbacks;
263 static std::vector<ImageCallback> sRemoveImageCallbacks;
264 static bool sRemoveImageCallbacksInUse = false;
265 static void* sSingleHandlers[7][3];
266 static void* sBatchHandlers[7][3];
267 static ImageLoader* sLastImageByAddressCache;
268 static EnvironmentVariables sEnv;
269 #if __MAC_OS_X_VERSION_MIN_REQUIRED
270 static const char* sFrameworkFallbackPaths[] = { "$HOME/Library/Frameworks", "/Library/Frameworks", "/Network/Library/Frameworks", "/System/Library/Frameworks", NULL };
271 static const char* sLibraryFallbackPaths[] = { "$HOME/lib", "/usr/local/lib", "/usr/lib", NULL };
272 #else
273 static const char* sFrameworkFallbackPaths[] = { "/System/Library/Frameworks", NULL };
274 static const char* sLibraryFallbackPaths[] = { "/usr/local/lib", "/usr/lib", NULL };
275 #endif
276 static const char* sRestrictedFrameworkFallbackPaths[] = { "/System/Library/Frameworks", NULL };
277 static const char* sRestrictedLibraryFallbackPaths[] = { "/usr/lib", NULL };
278 static UndefinedHandler sUndefinedHandler = NULL;
279 static ImageLoader* sBundleBeingLoaded = NULL; // hack until OFI is reworked
280 #if DYLD_SHARED_CACHE_SUPPORT
281 static const dyld_cache_header* sSharedCache = NULL;
282 static long sSharedCacheSlide = 0;
283 static bool sSharedCacheIgnoreInodeAndTimeStamp = false;
284 bool gSharedCacheOverridden = false;
285 #if __IPHONE_OS_VERSION_MIN_REQUIRED
286 static const char* sSharedCacheDir = IPHONE_DYLD_SHARED_CACHE_DIR;
287 #define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024
288 #else
289 static const char* sSharedCacheDir = MACOSX_DYLD_SHARED_CACHE_DIR;
290 #endif
291 #endif
292 ImageLoader::LinkContext gLinkContext;
293 bool gLogAPIs = false;
294 #if SUPPORT_ACCELERATE_TABLES
295 bool gLogAppAPIs = false;
296 #endif
297 const struct LibSystemHelpers* gLibSystemHelpers = NULL;
298 #if SUPPORT_OLD_CRT_INITIALIZATION
299 bool gRunInitializersOldWay = false;
300 #endif
301 static std::vector<DylibOverride> sDylibOverrides;
302 #if !TARGET_IPHONE_SIMULATOR
303 static int sLogSocket = -1;
304 #endif
305 static bool sFrameworksFoundAsDylibs = false;
306 #if __x86_64__ && DYLD_SHARED_CACHE_SUPPORT
307 static bool sHaswell = false;
308 #endif
309 static std::vector<ImageLoader::DynamicReference> sDynamicReferences;
310 static OSSpinLock sDynamicReferencesLock = 0;
311 #if !TARGET_IPHONE_SIMULATOR
312 static bool sLogToFile = false;
313 #endif
314 static char sLoadingCrashMessage[1024] = "dyld: launch, loading dependent libraries";
315
316 static _dyld_objc_notify_mapped sNotifyObjCMapped;
317 static _dyld_objc_notify_init sNotifyObjCInit;
318 static _dyld_objc_notify_unmapped sNotifyObjCUnmapped;
319
320 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
321 static bool sForceStderr = false;
322 #endif
323
324
325
326 #if SUPPORT_ACCELERATE_TABLES
327 static ImageLoaderMegaDylib* sAllCacheImagesProxy = NULL;
328 static bool sDisableAcceleratorTables = false;
329 #endif
330
331 //
332 // The MappedRanges structure is used for fast address->image lookups.
333 // The table is only updated when the dyld lock is held, so we don't
334 // need to worry about multiple writers. But readers may look at this
335 // data without holding the lock. Therefore, all updates must be done
336 // in an order that will never cause readers to see inconsistent data.
337 // The general rule is that if the image field is non-NULL then
338 // the other fields are valid.
339 //
340 struct MappedRanges
341 {
342 MappedRanges* next;
343 unsigned long count;
344 struct {
345 ImageLoader* image;
346 uintptr_t start;
347 uintptr_t end;
348 } array[1];
349 };
350
351 static MappedRanges* sMappedRangesStart;
352
353 void addMappedRange(ImageLoader* image, uintptr_t start, uintptr_t end)
354 {
355 //dyld::log("addMappedRange(0x%lX->0x%lX) for %s\n", start, end, image->getShortName());
356 for (MappedRanges* p = sMappedRangesStart; p != NULL; p = p->next) {
357 for (unsigned long i=0; i < p->count; ++i) {
358 if ( p->array[i].image == NULL ) {
359 p->array[i].start = start;
360 p->array[i].end = end;
361 // add image field last with a barrier so that any reader will see consistent records
362 OSMemoryBarrier();
363 p->array[i].image = image;
364 return;
365 }
366 }
367 }
368 // table must be full, chain another
369 #if SUPPORT_ACCELERATE_TABLES
370 unsigned count = (sAllCacheImagesProxy != NULL) ? 16 : 400;
371 #else
372 unsigned count = 400;
373 #endif
374 size_t allocationSize = sizeof(MappedRanges) + (count-1)*3*sizeof(void*);
375 MappedRanges* newRanges = (MappedRanges*)malloc(allocationSize);
376 bzero(newRanges, allocationSize);
377 newRanges->count = count;
378 newRanges->array[0].start = start;
379 newRanges->array[0].end = end;
380 newRanges->array[0].image = image;
381 OSMemoryBarrier();
382 if ( sMappedRangesStart == NULL ) {
383 sMappedRangesStart = newRanges;
384 }
385 else {
386 for (MappedRanges* p = sMappedRangesStart; p != NULL; p = p->next) {
387 if ( p->next == NULL ) {
388 OSMemoryBarrier();
389 p->next = newRanges;
390 break;
391 }
392 }
393 }
394 }
395
396 void removedMappedRanges(ImageLoader* image)
397 {
398 for (MappedRanges* p = sMappedRangesStart; p != NULL; p = p->next) {
399 for (unsigned long i=0; i < p->count; ++i) {
400 if ( p->array[i].image == image ) {
401 // clear with a barrier so that any reader will see consistent records
402 OSMemoryBarrier();
403 p->array[i].image = NULL;
404 }
405 }
406 }
407 }
408
409 ImageLoader* findMappedRange(uintptr_t target)
410 {
411 for (MappedRanges* p = sMappedRangesStart; p != NULL; p = p->next) {
412 for (unsigned long i=0; i < p->count; ++i) {
413 if ( p->array[i].image != NULL ) {
414 if ( (p->array[i].start <= target) && (target < p->array[i].end) )
415 return p->array[i].image;
416 }
417 }
418 }
419 return NULL;
420 }
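// Usage sketch (illustrative, not dyld code): because writers publish the image
// field last, behind OSMemoryBarrier(), a reader may walk the table through
// findMappedRange() without holding the dyld lock:
//
//     if ( ImageLoader* img = findMappedRange((uintptr_t)someAddress) )   // someAddress is hypothetical
//         dyld::log("address %p is inside %s\n", (void*)someAddress, img->getShortName());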
421
422
423
424 const char* mkstringf(const char* format, ...)
425 {
426 _SIMPLE_STRING buf = _simple_salloc();
427 if ( buf != NULL ) {
428 va_list list;
429 va_start(list, format);
430 _simple_vsprintf(buf, format, list);
431 va_end(list);
432 const char* t = strdup(_simple_string(buf));
433 _simple_sfree(buf);
434 if ( t != NULL )
435 return t;
436 }
437 return "mkstringf, out of memory error";
438 }
439
440
441 void throwf(const char* format, ...)
442 {
443 _SIMPLE_STRING buf = _simple_salloc();
444 if ( buf != NULL ) {
445 va_list list;
446 va_start(list, format);
447 _simple_vsprintf(buf, format, list);
448 va_end(list);
449 const char* t = strdup(_simple_string(buf));
450 _simple_sfree(buf);
451 if ( t != NULL )
452 throw t;
453 }
454 throw "throwf, out of memory error";
455 }
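// Usage sketch (illustrative): errors raised with throwf() propagate as C strings
// and are caught elsewhere in dyld as const char*, for example:
//
//     try {
//         throwf("could not load %s (errno=%d)", path, errno);   // path is hypothetical
//     }
//     catch (const char* msg) {
//         halt(msg);   // see runAllStaticTerminators() below for a real catch site
//     }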
456
457
458 #if !TARGET_IPHONE_SIMULATOR
459 static int sLogfile = STDERR_FILENO;
460 #endif
461
462 #if !TARGET_IPHONE_SIMULATOR
463 // based on CFUtilities.c: also_do_stderr()
464 static bool useSyslog()
465 {
466 // Use syslog() for processes managed by launchd
467 static bool launchdChecked = false;
468 static bool launchdOwned = false;
469 if ( !launchdChecked && gProcessInfo->libSystemInitialized ) {
470 if ( (gLibSystemHelpers != NULL) && (gLibSystemHelpers->version >= 11) ) {
471 // <rdar://problem/23520449> only call isLaunchdOwned() after libSystem is initialized
472 launchdOwned = (*gLibSystemHelpers->isLaunchdOwned)();
473 launchdChecked = true;
474 }
475 }
476 if ( launchdChecked && launchdOwned )
477 return true;
478
479 // If stderr is not available, use syslog()
480 struct stat sb;
481 int result = fstat(STDERR_FILENO, &sb);
482 if ( result < 0 )
483 return true; // file descriptor 2 is closed
484
485 return false;
486 }
487
488
489 static void socket_syslogv(int priority, const char* format, va_list list)
490 {
491 // lazily create socket and connection to syslogd
492 if ( sLogSocket == -1 ) {
493 sLogSocket = ::socket(AF_UNIX, SOCK_DGRAM, 0);
494 if (sLogSocket == -1)
495 return; // cannot log
496 ::fcntl(sLogSocket, F_SETFD, 1);
497
498 struct sockaddr_un addr;
499 addr.sun_family = AF_UNIX;
500 strncpy(addr.sun_path, _PATH_LOG, sizeof(addr.sun_path));
501 if ( ::connect(sLogSocket, (struct sockaddr *)&addr, sizeof(addr)) == -1 ) {
502 ::close(sLogSocket);
503 sLogSocket = -1;
504 return;
505 }
506 }
507
508 // format message to syslogd like: "<priority>Process[pid]: message"
509 _SIMPLE_STRING buf = _simple_salloc();
510 if ( buf == NULL )
511 return;
512 if ( _simple_sprintf(buf, "<%d>%s[%d]: ", LOG_USER|LOG_NOTICE, sExecShortName, getpid()) == 0 ) {
513 if ( _simple_vsprintf(buf, format, list) == 0 ) {
514 const char* p = _simple_string(buf);
515 ::__sendto(sLogSocket, p, strlen(p), 0, NULL, 0);
516 }
517 }
518 _simple_sfree(buf);
519 }
520
521
522
523 void vlog(const char* format, va_list list)
524 {
525 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
526 // <rdar://problem/25965832> log to console when running iOS app from Xcode
527 if ( !sLogToFile && !sForceStderr && useSyslog() )
528 #else
529 if ( !sLogToFile && useSyslog() )
530 #endif
531 socket_syslogv(LOG_ERR, format, list);
532 else {
533 _simple_vdprintf(sLogfile, format, list);
534 }
535 }
536
537 void log(const char* format, ...)
538 {
539 va_list list;
540 va_start(list, format);
541 vlog(format, list);
542 va_end(list);
543 }
544
545
546 void vwarn(const char* format, va_list list)
547 {
548 _simple_dprintf(sLogfile, "dyld: warning, ");
549 _simple_vdprintf(sLogfile, format, list);
550 }
551
552 void warn(const char* format, ...)
553 {
554 va_list list;
555 va_start(list, format);
556 vwarn(format, list);
557 va_end(list);
558 }
559
560
561 #endif // !TARGET_IPHONE_SIMULATOR
562
563
564 // <rdar://problem/8867781> control access to sAllImages through a lock
565 // because global dyld lock is not held during initialization phase of dlopen()
566 // <rdar://problem/16145518> Use OSSpinLockLock to allow yielding
567 static OSSpinLock sAllImagesLock = 0;
568
569 static void allImagesLock()
570 {
571 OSSpinLockLock(&sAllImagesLock);
572 }
573
574 static void allImagesUnlock()
575 {
576 OSSpinLockUnlock(&sAllImagesLock);
577 }
578
579
580 // utility class to ensure files are closed when an exception is thrown
581 class FileOpener {
582 public:
583 FileOpener(const char* path);
584 ~FileOpener();
585 int getFileDescriptor() { return fd; }
586 private:
587 int fd;
588 };
589
590 FileOpener::FileOpener(const char* path)
591 : fd(-1)
592 {
593 fd = my_open(path, O_RDONLY, 0);
594 }
595
596 FileOpener::~FileOpener()
597 {
598 if ( fd != -1 )
599 close(fd);
600 }
601
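// Usage sketch (illustrative): the destructor closes the descriptor even if a later
// step throws, so callers need no explicit close():
//
//     FileOpener file(path);                        // path is hypothetical
//     if ( file.getFileDescriptor() == -1 )
//         throwf("open() failed for %s", path);
//     // ... read the mach-o header from file.getFileDescriptor() ...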
602
603 static void registerDOFs(const std::vector<ImageLoader::DOFInfo>& dofs)
604 {
605 const size_t dofSectionCount = dofs.size();
606 if ( !sEnv.DYLD_DISABLE_DOFS && (dofSectionCount != 0) ) {
607 int fd = open("/dev/" DTRACEMNR_HELPER, O_RDWR);
608 if ( fd < 0 ) {
609 //dyld::warn("can't open /dev/" DTRACEMNR_HELPER " to register dtrace DOF sections\n");
610 }
611 else {
612 // allocate a buffer on the stack for the variable length dof_ioctl_data_t type
613 uint8_t buffer[sizeof(dof_ioctl_data_t) + dofSectionCount*sizeof(dof_helper_t)];
614 dof_ioctl_data_t* ioctlData = (dof_ioctl_data_t*)buffer;
615
616 // fill in buffer with one dof_helper_t per DOF section
617 ioctlData->dofiod_count = dofSectionCount;
618 for (unsigned int i=0; i < dofSectionCount; ++i) {
619 strlcpy(ioctlData->dofiod_helpers[i].dofhp_mod, dofs[i].imageShortName, DTRACE_MODNAMELEN);
620 ioctlData->dofiod_helpers[i].dofhp_dof = (uintptr_t)(dofs[i].dof);
621 ioctlData->dofiod_helpers[i].dofhp_addr = (uintptr_t)(dofs[i].dof);
622 }
623
624 // tell the kernel about all DOF sections en masse
625 // pass a pointer to ioctlData because ioctl() only copies a fixed-size amount of data into the kernel
626 user_addr_t val = (user_addr_t)(unsigned long)ioctlData;
627 if ( ioctl(fd, DTRACEHIOC_ADDDOF, &val) != -1 ) {
628 // kernel returns a unique identifier for each section in the dofiod_helpers[].dofhp_dof field.
629 for (unsigned int i=0; i < dofSectionCount; ++i) {
630 RegisteredDOF info;
631 info.mh = dofs[i].imageHeader;
632 info.registrationID = (int)(ioctlData->dofiod_helpers[i].dofhp_dof);
633 sImageFilesNeedingDOFUnregistration.push_back(info);
634 if ( gLinkContext.verboseDOF ) {
635 dyld::log("dyld: registering DOF section %p in %s with dtrace, ID=0x%08X\n",
636 dofs[i].dof, dofs[i].imageShortName, info.registrationID);
637 }
638 }
639 }
640 else {
641 //dyld::log( "dyld: ioctl to register dtrace DOF section failed\n");
642 }
643 close(fd);
644 }
645 }
646 }
647
648 static void unregisterDOF(int registrationID)
649 {
650 int fd = open("/dev/" DTRACEMNR_HELPER, O_RDWR);
651 if ( fd < 0 ) {
652 dyld::warn("can't open /dev/" DTRACEMNR_HELPER " to unregister dtrace DOF section\n");
653 }
654 else {
655 ioctl(fd, DTRACEHIOC_REMOVE, registrationID);
656 close(fd);
657 if ( gLinkContext.verboseInit )
658 dyld::warn("unregistering DOF section ID=0x%08X with dtrace\n", registrationID);
659 }
660 }
661
662
663 //
664 // _dyld_register_func_for_add_image() is implemented as part of the general image state change notification
665 //
666 static void notifyAddImageCallbacks(ImageLoader* image)
667 {
668 // use guard so that we cannot notify about the same image twice
669 if ( ! image->addFuncNotified() ) {
670 for (std::vector<ImageCallback>::iterator it=sAddImageCallbacks.begin(); it != sAddImageCallbacks.end(); it++)
671 (*it)(image->machHeader(), image->getSlide());
672 image->setAddFuncNotified();
673 }
674 }
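// Illustrative client-side example (not dyld code): callbacks invoked above were
// registered through the public API and receive each image's header and slide:
//
//     static void myAddImage(const struct mach_header* mh, intptr_t vmaddr_slide)
//     {
//         // e.g. walk mh's load commands
//     }
//     // in client code: _dyld_register_func_for_add_image(&myAddImage);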
675
676
677
678 // notify gdb about these new images
679 static const char* updateAllImages(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info info[])
680 {
681 // <rdar://problem/8812589> don't add images without paths to all-image-info-list
682 if ( info[0].imageFilePath != NULL )
683 addImagesToAllImages(infoCount, info);
684 return NULL;
685 }
686
687
688 static StateHandlers* stateToHandlers(dyld_image_states state, void* handlersArray[7][3])
689 {
690 switch ( state ) {
691 case dyld_image_state_mapped:
692 return reinterpret_cast<StateHandlers*>(&handlersArray[0]);
693
694 case dyld_image_state_dependents_mapped:
695 return reinterpret_cast<StateHandlers*>(&handlersArray[1]);
696
697 case dyld_image_state_rebased:
698 return reinterpret_cast<StateHandlers*>(&handlersArray[2]);
699
700 case dyld_image_state_bound:
701 return reinterpret_cast<StateHandlers*>(&handlersArray[3]);
702
703 case dyld_image_state_dependents_initialized:
704 return reinterpret_cast<StateHandlers*>(&handlersArray[4]);
705
706 case dyld_image_state_initialized:
707 return reinterpret_cast<StateHandlers*>(&handlersArray[5]);
708
709 case dyld_image_state_terminated:
710 return reinterpret_cast<StateHandlers*>(&handlersArray[6]);
711 }
712 return NULL;
713 }
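// Illustrative mapping (from the switch above): slot 0 = mapped, 1 = dependents_mapped,
// 2 = rebased, 3 = bound, 4 = dependents_initialized, 5 = initialized, 6 = terminated.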
714
715 #if SUPPORT_ACCELERATE_TABLES
716 static dyld_image_state_change_handler getPreInitNotifyHandler(unsigned index)
717 {
718 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(dyld_image_state_dependents_initialized, sSingleHandlers);
719 if ( index >= handlers->size() )
720 return NULL;
721 return (*handlers)[index];
722 }
723
724 static dyld_image_state_change_handler getBoundBatchHandler(unsigned index)
725 {
726 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(dyld_image_state_bound, sBatchHandlers);
727 if ( index >= handlers->size() )
728 return NULL;
729 return (*handlers)[index];
730 }
731
732 static void notifySingleFromCache(dyld_image_states state, const mach_header* mh, const char* path)
733 {
734 //dyld::log("notifySingle(state=%d, image=%s)\n", state, image->getPath());
735 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sSingleHandlers);
736 if ( handlers != NULL ) {
737 dyld_image_info info;
738 info.imageLoadAddress = mh;
739 info.imageFilePath = path;
740 info.imageFileModDate = 0;
741 for (dyld_image_state_change_handler handler : *handlers) {
742 const char* result = (*handler)(state, 1, &info);
743 if ( (result != NULL) && (state == dyld_image_state_mapped) ) {
744 //fprintf(stderr, " image rejected by handler=%p\n", *it);
745 // make copy of thrown string so that later catch clauses can free it
746 const char* str = strdup(result);
747 throw str;
748 }
749 }
750 }
751 if ( (state == dyld_image_state_dependents_initialized) && (sNotifyObjCInit != NULL) && (mh->flags & MH_HAS_OBJC) ) {
752 (*sNotifyObjCInit)(path, mh);
753 }
754 }
755 #endif
756
757 static mach_port_t sNotifyReplyPorts[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
758
759
760 static void notifyMonitoringDyld(bool unloading, unsigned portSlot, unsigned imageCount, const dyld_image_info infos[])
761 {
762 unsigned entriesSize = imageCount*sizeof(dyld_process_info_image_entry);
763 unsigned pathsSize = 0;
764 for (unsigned j=0; j < imageCount; ++j) {
765 pathsSize += (strlen(infos[j].imageFilePath) + 1);
766 }
767 unsigned totalSize = (sizeof(dyld_process_info_notify_header) + entriesSize + pathsSize + 127) & -128; // round up to a multiple of 128 bytes
768 if ( totalSize > DYLD_PROCESS_INFO_NOTIFY_MAX_BUFFER_SIZE ) {
769 // Putting all image paths into one message would make buffer too big.
770 // Instead split into two messages. Recurse as needed until paths fit in buffer.
771 unsigned imageHalfCount = imageCount/2;
772 notifyMonitoringDyld(unloading, portSlot, imageHalfCount, infos);
773 notifyMonitoringDyld(unloading, portSlot, imageCount - imageHalfCount, &infos[imageHalfCount]);
774 return;
775 }
776 uint8_t buffer[totalSize];
777 dyld_process_info_notify_header* header = (dyld_process_info_notify_header*)buffer;
778 header->version = 1;
779 header->imageCount = imageCount;
780 header->imagesOffset = sizeof(dyld_process_info_notify_header);
781 header->stringsOffset = sizeof(dyld_process_info_notify_header) + entriesSize;
782 header->timestamp = mach_absolute_time();
783 dyld_process_info_image_entry* entries = (dyld_process_info_image_entry*)&buffer[header->imagesOffset];
784 char* const pathPoolStart = (char*)&buffer[header->stringsOffset];
785 char* pathPool = pathPoolStart;
786 for (unsigned j=0; j < imageCount; ++j) {
787 strcpy(pathPool, infos[j].imageFilePath);
788 uint32_t len = (uint32_t)strlen(pathPool);
789 bzero(entries->uuid, 16);
790 const ImageLoader* image = findImageByMachHeader(infos[j].imageLoadAddress);
791 if ( image != NULL ) {
792 image->getUUID(entries->uuid);
793 }
794 #if SUPPORT_ACCELERATE_TABLES
795 else if ( sAllCacheImagesProxy != NULL ) {
796 const mach_header* mh;
797 const char* path;
798 unsigned index;
799 if ( sAllCacheImagesProxy->addressInCache(infos[j].imageLoadAddress, &mh, &path, &index) ) {
800 sAllCacheImagesProxy->getDylibUUID(index, entries->uuid);
801 }
802 }
803 #endif
804 entries->loadAddress = (uint64_t)infos[j].imageLoadAddress;
805 entries->pathStringOffset = (uint32_t)(pathPool - pathPoolStart);
806 entries->pathLength = len;
807 pathPool += (len +1);
808 ++entries;
809 }
810
811 if ( sNotifyReplyPorts[portSlot] == 0 ) {
812 if ( !mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &sNotifyReplyPorts[portSlot]) )
813 mach_port_insert_right(mach_task_self(), sNotifyReplyPorts[portSlot], sNotifyReplyPorts[portSlot], MACH_MSG_TYPE_MAKE_SEND);
814 //dyld::log("allocated reply port %d\n", sNotifyReplyPorts[portSlot]);
815 }
816 //dyld::log("found port to send to\n");
817 mach_msg_header_t* h = (mach_msg_header_t*)buffer;
818 h->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,MACH_MSG_TYPE_MAKE_SEND); // MACH_MSG_TYPE_MAKE_SEND_ONCE
819 h->msgh_id = unloading ? DYLD_PROCESS_INFO_NOTIFY_UNLOAD_ID : DYLD_PROCESS_INFO_NOTIFY_LOAD_ID;
820 h->msgh_local_port = sNotifyReplyPorts[portSlot];
821 h->msgh_remote_port = dyld::gProcessInfo->notifyPorts[portSlot];
822 h->msgh_reserved = 0;
823 h->msgh_size = (mach_msg_size_t)sizeof(buffer);
824 //dyld::log("sending to port[%d]=%d, size=%d, reply port=%d, id=0x%X\n", portSlot, dyld::gProcessInfo->notifyPorts[portSlot], h->msgh_size, sNotifyReplyPorts[portSlot], h->msgh_id);
825 kern_return_t sendResult = mach_msg(h, MACH_SEND_MSG | MACH_RCV_MSG | MACH_SEND_TIMEOUT, h->msgh_size, h->msgh_size, sNotifyReplyPorts[portSlot], 100, MACH_PORT_NULL);
826 //dyld::log("send result = 0x%X, msg_id=%d, msg_size=%d\n", sendResult, h->msgh_id, h->msgh_size);
827 if ( sendResult == MACH_SEND_INVALID_DEST ) {
828 // sender is not responding, detach
829 //dyld::log("process requesting notification gone. deallocation send port %d and receive port %d\n", dyld::gProcessInfo->notifyPorts[portSlot], sNotifyReplyPorts[portSlot]);
830 mach_port_deallocate(mach_task_self(), dyld::gProcessInfo->notifyPorts[portSlot]);
831 mach_port_deallocate(mach_task_self(), sNotifyReplyPorts[portSlot]);
832 dyld::gProcessInfo->notifyPorts[portSlot] = 0;
833 sNotifyReplyPorts[portSlot] = 0;
834 }
835 }
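// Message layout sketch (derived from the code above, not a separate spec):
//     [mach_msg_header_t / dyld_process_info_notify_header][imageCount entries][path string pool]
// The total size is rounded up to a 128-byte multiple and the buffer is sent with
// mach_msg() to the monitoring process's port in dyld::gProcessInfo->notifyPorts[portSlot].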
836
837 #define MAX_KERNEL_IMAGES_PER_CALL (100)
838
839 static void flushKernelNotifications(bool loading, bool force, std::array<dyld_kernel_image_info_t,MAX_KERNEL_IMAGES_PER_CALL>& kernelInfos, uint32_t &kernelInfoCount) {
840 if ((force && kernelInfoCount != 0) || kernelInfoCount == MAX_KERNEL_IMAGES_PER_CALL) {
841 if (loading) {
842 task_register_dyld_image_infos(mach_task_self(), kernelInfos.data(), kernelInfoCount);
843 } else {
844 task_unregister_dyld_image_infos(mach_task_self(), kernelInfos.data(), kernelInfoCount);
845 }
846 kernelInfoCount = 0;
847 }
848 }
849
850 static
851 void queueKernelNotification(const ImageLoader& image, bool loading, std::array<dyld_kernel_image_info_t,MAX_KERNEL_IMAGES_PER_CALL>& kernelInfos, uint32_t &kernelInfoCount) {
852 if ( !image.inSharedCache() ) {
853 ino_t inode = image.getInode();
854 image.getUUID(kernelInfos[kernelInfoCount].uuid);
855 memcpy(&kernelInfos[kernelInfoCount].fsobjid, &inode, 8);
856 kernelInfos[kernelInfoCount].load_addr = (uint64_t)image.machHeader();
857 // FIXME we should also be grabbing the device ID, but that is not necessary yet,
858 // and requires threading it through the ImageLoader
859 kernelInfos[kernelInfoCount].fsid.val[0] = 0;
860 kernelInfos[kernelInfoCount].fsid.val[1] = 0;
861 kernelInfoCount++;
862 }
863 flushKernelNotifications(loading, false, kernelInfos, kernelInfoCount);
864 }
865
866 void notifyKernel(const ImageLoader& image, bool loading) {
867 std::array<dyld_kernel_image_info_t,MAX_KERNEL_IMAGES_PER_CALL> kernelInfos;
868 uint32_t kernelInfoCount = 0;
869 queueKernelNotification(image, loading, kernelInfos, kernelInfoCount);
870 flushKernelNotifications(loading, true, kernelInfos, kernelInfoCount);
871 }
872
873 static void notifySingle(dyld_image_states state, const ImageLoader* image, ImageLoader::InitializerTimingList* timingInfo)
874 {
875 //dyld::log("notifySingle(state=%d, image=%s)\n", state, image->getPath());
876 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sSingleHandlers);
877 if ( handlers != NULL ) {
878 dyld_image_info info;
879 info.imageLoadAddress = image->machHeader();
880 info.imageFilePath = image->getRealPath();
881 info.imageFileModDate = image->lastModified();
882 for (std::vector<dyld_image_state_change_handler>::iterator it = handlers->begin(); it != handlers->end(); ++it) {
883 const char* result = (*it)(state, 1, &info);
884 if ( (result != NULL) && (state == dyld_image_state_mapped) ) {
885 //fprintf(stderr, " image rejected by handler=%p\n", *it);
886 // make copy of thrown string so that later catch clauses can free it
887 const char* str = strdup(result);
888 throw str;
889 }
890 }
891 }
892 if ( state == dyld_image_state_mapped ) {
893 // <rdar://problem/7008875> Save load addr + UUID for images from outside the shared cache
894 if ( !image->inSharedCache() ) {
895 dyld_uuid_info info;
896 if ( image->getUUID(info.imageUUID) ) {
897 info.imageLoadAddress = image->machHeader();
898 addNonSharedCacheImageUUID(info);
899 }
900 }
901 }
902 if ( (state == dyld_image_state_dependents_initialized) && (sNotifyObjCInit != NULL) && image->notifyObjC() ) {
903 uint64_t t0 = mach_absolute_time();
904 (*sNotifyObjCInit)(image->getRealPath(), image->machHeader());
905 uint64_t t1 = mach_absolute_time();
906 uint64_t t2 = mach_absolute_time();
907 uint64_t timeInObjC = t1-t0;
908 uint64_t emptyTime = (t2-t1)*100;
909 if ( (timeInObjC > emptyTime) && (timingInfo != NULL) ) {
910 timingInfo->addTime(image->getShortName(), timeInObjC);
911 }
912 }
913 // mach message csdlc about dynamically unloaded images
914 if ( image->addFuncNotified() && (state == dyld_image_state_terminated) ) {
915 notifyKernel(*image, false);
916
917 uint64_t loadTimestamp = mach_absolute_time();
918 if ( sEnv.DYLD_PRINT_CS_NOTIFICATIONS ) {
919 dyld::log("dyld: coresymbolication_unload_notifier(%p, 0x%016llX, %p, %s)\n",
920 dyld::gProcessInfo->coreSymbolicationShmPage, loadTimestamp, image->machHeader(), image->getPath());
921 }
922 if ( dyld::gProcessInfo->coreSymbolicationShmPage != NULL) {
923 coresymbolication_unload_notifier(dyld::gProcessInfo->coreSymbolicationShmPage, loadTimestamp, image->getPath(), image->machHeader());
924 }
925 for (int slot=0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++slot) {
926 if ( dyld::gProcessInfo->notifyPorts[slot] != 0 ) {
927 dyld_image_info info;
928 info.imageLoadAddress = image->machHeader();
929 info.imageFilePath = image->getPath();
930 info.imageFileModDate = 0;
931 notifyMonitoringDyld(true, slot, 1, &info);
932 }
933 else if ( sNotifyReplyPorts[slot] != 0 ) {
934 // monitoring process detached from this process, so release reply port
935 //dyld::log("deallocated reply port %d\n", sNotifyReplyPorts[slot]);
936 mach_port_deallocate(mach_task_self(), sNotifyReplyPorts[slot]);
937 sNotifyReplyPorts[slot] = 0;
938 }
939 }
940 }
941
942 }
943
944
945 //
946 // Normally, dyld_all_image_infos is only updated in batches after an entire
947 // graph is loaded. But if there is an error loading the initial set of
948 // dylibs needed by the main executable, dyld_all_image_infos is not yet set
949 // up, leading to unusually brief crash logs.
950 //
951 // This function manually adds the images loaded so far to dyld::gProcessInfo.
952 // It should only be called before terminating.
953 //
954 void syncAllImages()
955 {
956 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); ++it) {
957 dyld_image_info info;
958 ImageLoader* image = *it;
959 info.imageLoadAddress = image->machHeader();
960 info.imageFilePath = image->getRealPath();
961 info.imageFileModDate = image->lastModified();
962 // add to all_image_infos if not already there
963 bool found = false;
964 int existingCount = dyld::gProcessInfo->infoArrayCount;
965 const dyld_image_info* existing = dyld::gProcessInfo->infoArray;
966 if ( existing != NULL ) {
967 for (int i=0; i < existingCount; ++i) {
968 if ( existing[i].imageLoadAddress == info.imageLoadAddress ) {
969 //dyld::log("not adding %s\n", info.imageFilePath);
970 found = true;
971 break;
972 }
973 }
974 }
975 if ( ! found ) {
976 //dyld::log("adding %s\n", info.imageFilePath);
977 addImagesToAllImages(1, &info);
978 }
979 }
980 }
981
982
983 static int imageSorter(const void* l, const void* r)
984 {
985 const ImageLoader* left = *((ImageLoader**)l);
986 const ImageLoader* right= *((ImageLoader**)r);
987 return left->compare(right);
988 }
989
990 static void notifyBatchPartial(dyld_image_states state, bool orLater, dyld_image_state_change_handler onlyHandler, bool preflightOnly, bool onlyObjCMappedNotification)
991 {
992 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sBatchHandlers);
993 std::array<dyld_kernel_image_info_t,MAX_KERNEL_IMAGES_PER_CALL> kernelInfos;
994 uint32_t kernelInfoCount = 0;
995
996 if ( (handlers != NULL) || ((state == dyld_image_state_bound) && (sNotifyObjCMapped != NULL)) ) {
997 // don't use a vector because it will use malloc/free and we want notification to be low cost
998 allImagesLock();
999 dyld_image_info infos[allImagesCount()+1];
1000 ImageLoader* images[allImagesCount()+1];
1001 ImageLoader** end = images;
1002 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
1003 dyld_image_states imageState = (*it)->getState();
1004 if ( (imageState == state) || (orLater && (imageState > state)) )
1005 *end++ = *it;
1006 }
1007 if ( sBundleBeingLoaded != NULL ) {
1008 dyld_image_states imageState = sBundleBeingLoaded->getState();
1009 if ( (imageState == state) || (orLater && (imageState > state)) )
1010 *end++ = sBundleBeingLoaded;
1011 }
1012 const char* dontLoadReason = NULL;
1013 uint32_t imageCount = (uint32_t)(end-images);
1014 if ( imageCount != 0 ) {
1015 // sort bottom up
1016 qsort(images, imageCount, sizeof(ImageLoader*), &imageSorter);
1017 // build info array
1018 for (unsigned int i=0; i < imageCount; ++i) {
1019 dyld_image_info* p = &infos[i];
1020 ImageLoader* image = images[i];
1021 //dyld::log(" state=%d, name=%s\n", state, image->getPath());
1022 p->imageLoadAddress = image->machHeader();
1023 p->imageFilePath = image->getRealPath();
1024 p->imageFileModDate = image->lastModified();
1025 // get these registered with the kernel as early as possible
1026 if ( state == dyld_image_state_dependents_mapped)
1027 queueKernelNotification(*image, true, kernelInfos, kernelInfoCount);
1028 // special case for add_image hook
1029 if ( state == dyld_image_state_bound )
1030 notifyAddImageCallbacks(image);
1031 }
1032 flushKernelNotifications(true, true, kernelInfos, kernelInfoCount);
1033 }
1034 #if SUPPORT_ACCELERATE_TABLES
1035 if ( sAllCacheImagesProxy != NULL ) {
1036 unsigned cacheCount = sAllCacheImagesProxy->appendImagesToNotify(state, orLater, &infos[imageCount]);
1037 // support _dyld_register_func_for_add_image()
1038 if ( state == dyld_image_state_bound ) {
1039 for (ImageCallback callback : sAddImageCallbacks) {
1040 for (unsigned i=0; i < cacheCount; ++i)
1041 (*callback)(infos[imageCount+i].imageLoadAddress, sSharedCacheSlide);
1042 }
1043 }
1044 imageCount += cacheCount;
1045 }
1046 #endif
1047 if ( imageCount != 0 ) {
1048 if ( !onlyObjCMappedNotification ) {
1049 if ( onlyHandler != NULL ) {
1050 const char* result = NULL;
1051 if ( result == NULL ) {
1052 result = (*onlyHandler)(state, imageCount, infos);
1053 }
1054 if ( (result != NULL) && (state == dyld_image_state_dependents_mapped) ) {
1055 //fprintf(stderr, " images rejected by handler=%p\n", onlyHandler);
1056 // make copy of thrown string so that later catch clauses can free it
1057 dontLoadReason = strdup(result);
1058 }
1059 }
1060 else {
1061 // call each handler with whole array
1062 if ( handlers != NULL ) {
1063 for (std::vector<dyld_image_state_change_handler>::iterator it = handlers->begin(); it != handlers->end(); ++it) {
1064 const char* result = (*it)(state, imageCount, infos);
1065 if ( (result != NULL) && (state == dyld_image_state_dependents_mapped) ) {
1066 //fprintf(stderr, " images rejected by handler=%p\n", *it);
1067 // make copy of thrown string so that later catch clauses can free it
1068 dontLoadReason = strdup(result);
1069 break;
1070 }
1071 }
1072 }
1073 }
1074 }
1075 // tell objc about new images
1076 if ( (onlyHandler == NULL) && ((state == dyld_image_state_bound) || (orLater && (dyld_image_state_bound > state))) && (sNotifyObjCMapped != NULL) ) {
1077 const char* paths[imageCount];
1078 const mach_header* mhs[imageCount];
1079 unsigned objcImageCount = 0;
1080 for (int i=0; i < imageCount; ++i) {
1081 const ImageLoader* image = findImageByMachHeader(infos[i].imageLoadAddress);
1082 bool hasObjC = false;
1083 if ( image != NULL ) {
1084 hasObjC = image->notifyObjC();
1085 }
1086 #if SUPPORT_ACCELERATE_TABLES
1087 else if ( sAllCacheImagesProxy != NULL ) {
1088 const mach_header* mh;
1089 const char* path;
1090 unsigned index;
1091 if ( sAllCacheImagesProxy->addressInCache(infos[i].imageLoadAddress, &mh, &path, &index) ) {
1092 hasObjC = (mh->flags & MH_HAS_OBJC);
1093 }
1094 }
1095 #endif
1096 if ( hasObjC ) {
1097 paths[objcImageCount] = infos[i].imageFilePath;
1098 mhs[objcImageCount] = infos[i].imageLoadAddress;
1099 ++objcImageCount;
1100 }
1101 }
1102 if ( objcImageCount != 0 ) {
1103 uint64_t t0 = mach_absolute_time();
1104 (*sNotifyObjCMapped)(objcImageCount, paths, mhs);
1105 uint64_t t1 = mach_absolute_time();
1106 ImageLoader::fgTotalObjCSetupTime += (t1-t0);
1107 }
1108 }
1109 }
1110 allImagesUnlock();
1111 if ( dontLoadReason != NULL )
1112 throw dontLoadReason;
1113 if ( !preflightOnly && (state == dyld_image_state_dependents_mapped) ) {
1114 if ( (dyld::gProcessInfo->coreSymbolicationShmPage != NULL) || sEnv.DYLD_PRINT_CS_NOTIFICATIONS ) {
1115 // mach message csdlc about loaded images
1116 uint64_t loadTimestamp = mach_absolute_time();
1117 for (unsigned j=0; j < imageCount; ++j) {
1118 if ( sEnv.DYLD_PRINT_CS_NOTIFICATIONS ) {
1119 dyld::log("dyld: coresymbolication_load_notifier(%p, 0x%016llX, %p, %s)\n",
1120 dyld::gProcessInfo->coreSymbolicationShmPage, loadTimestamp, infos[j].imageLoadAddress, infos[j].imageFilePath);
1121 }
1122 coresymbolication_load_notifier(dyld::gProcessInfo->coreSymbolicationShmPage, loadTimestamp, infos[j].imageFilePath, infos[j].imageLoadAddress);
1123 }
1124 }
1125 for (int slot=0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++slot) {
1126 if ( dyld::gProcessInfo->notifyPorts[slot] )
1127 notifyMonitoringDyld(false, slot, imageCount, infos);
1128 }
1129 }
1130 }
1131 }
1132
1133
1134
1135 static void notifyBatch(dyld_image_states state, bool preflightOnly)
1136 {
1137 notifyBatchPartial(state, false, NULL, preflightOnly, false);
1138 }
1139
1140 // In order for _dyld_register_func_for_add_image() callbacks to be called bottom up,
1141 // we need to maintain a list of root images. The main executable is usually the
1142 // first root. Any images dynamically added are also roots (unless already loaded).
1143 // If DYLD_INSERT_LIBRARIES is used, those libraries are first.
1144 static void addRootImage(ImageLoader* image)
1145 {
1146 //dyld::log("addRootImage(%p, %s)\n", image, image->getPath());
1147 // add to list of roots
1148 sImageRoots.push_back(image);
1149 }
1150
1151
1152 static void clearAllDepths()
1153 {
1154 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++)
1155 (*it)->clearDepth();
1156 }
1157
1158 static void printAllDepths()
1159 {
1160 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++)
1161 dyld::log("%03d %s\n", (*it)->getDepth(), (*it)->getShortName());
1162 }
1163
1164
1165 static unsigned int imageCount()
1166 {
1167 allImagesLock();
1168 unsigned int result = (unsigned int)sAllImages.size();
1169 allImagesUnlock();
1170 return (result);
1171 }
1172
1173
1174 static void setNewProgramVars(const ProgramVars& newVars)
1175 {
1176 // make a copy of the pointers to program variables
1177 gLinkContext.programVars = newVars;
1178
1179 // now set each program global to their initial value
1180 *gLinkContext.programVars.NXArgcPtr = gLinkContext.argc;
1181 *gLinkContext.programVars.NXArgvPtr = gLinkContext.argv;
1182 *gLinkContext.programVars.environPtr = gLinkContext.envp;
1183 *gLinkContext.programVars.__prognamePtr = gLinkContext.progname;
1184 }
1185
1186 #if SUPPORT_OLD_CRT_INITIALIZATION
1187 static void setRunInitialzersOldWay()
1188 {
1189 gRunInitializersOldWay = true;
1190 }
1191 #endif
1192
1193 static bool sandboxBlocked(const char* path, const char* kind)
1194 {
1195 #if TARGET_IPHONE_SIMULATOR
1196 // sandbox calls not yet supported in simulator runtime
1197 return false;
1198 #else
1199 sandbox_filter_type filter = (sandbox_filter_type)(SANDBOX_FILTER_PATH | SANDBOX_CHECK_NO_REPORT);
1200 return ( sandbox_check(getpid(), kind, filter, path) > 0 );
1201 #endif
1202 }
1203
1204 bool sandboxBlockedMmap(const char* path)
1205 {
1206 return sandboxBlocked(path, "file-map-executable");
1207 }
1208
1209 bool sandboxBlockedOpen(const char* path)
1210 {
1211 return sandboxBlocked(path, "file-read-data");
1212 }
1213
1214 bool sandboxBlockedStat(const char* path)
1215 {
1216 return sandboxBlocked(path, "file-read-metadata");
1217 }
1218
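// Usage sketch (illustrative): loading code checks these before touching a path, e.g.
//
//     if ( sandboxBlockedOpen(path) )                              // path is hypothetical
//         throwf("file system sandbox blocked open() of '%s'", path);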
1219
1220 static void addDynamicReference(ImageLoader* from, ImageLoader* to) {
1221 // don't add dynamic reference if target is in the shared cache (since it can't be unloaded)
1222 if ( to->inSharedCache() )
1223 return;
1224
1225 // don't add dynamic reference if there already is a static one
1226 if ( from->dependsOn(to) )
1227 return;
1228
1229 // don't add if this combination already exists
1230 OSSpinLockLock(&sDynamicReferencesLock);
1231 for (std::vector<ImageLoader::DynamicReference>::iterator it=sDynamicReferences.begin(); it != sDynamicReferences.end(); ++it) {
1232 if ( (it->from == from) && (it->to == to) ) {
1233 OSSpinLockUnlock(&sDynamicReferencesLock);
1234 return;
1235 }
1236 }
1237
1238 //dyld::log("addDynamicReference(%s, %s\n", from->getShortName(), to->getShortName());
1239 ImageLoader::DynamicReference t;
1240 t.from = from;
1241 t.to = to;
1242 sDynamicReferences.push_back(t);
1243 OSSpinLockUnlock(&sDynamicReferencesLock);
1244 }
1245
1246 static void addImage(ImageLoader* image)
1247 {
1248 // add to master list
1249 allImagesLock();
1250 sAllImages.push_back(image);
1251 allImagesUnlock();
1252
1253 // update mapped ranges
1254 uintptr_t lastSegStart = 0;
1255 uintptr_t lastSegEnd = 0;
1256 for(unsigned int i=0, e=image->segmentCount(); i < e; ++i) {
1257 if ( image->segUnaccessible(i) )
1258 continue;
1259 uintptr_t start = image->segActualLoadAddress(i);
1260 uintptr_t end = image->segActualEndAddress(i);
1261 if ( start == lastSegEnd ) {
1262 // two segments are contiguous, just record combined segments
1263 lastSegEnd = end;
1264 }
1265 else {
1266 // non-contiguous segments, record last (if any)
1267 if ( lastSegEnd != 0 )
1268 addMappedRange(image, lastSegStart, lastSegEnd);
1269 lastSegStart = start;
1270 lastSegEnd = end;
1271 }
1272 }
1273 if ( lastSegEnd != 0 )
1274 addMappedRange(image, lastSegStart, lastSegEnd);
1275
1276
1277 if ( gLinkContext.verboseLoading || (sEnv.DYLD_PRINT_LIBRARIES_POST_LAUNCH && (sMainExecutable!=NULL) && sMainExecutable->isLinked()) ) {
1278 dyld::log("dyld: loaded: %s\n", image->getPath());
1279 }
1280
1281 }
1282
1283 //
1284 // Helper for std::remove_if
1285 //
1286 class RefUsesImage {
1287 public:
1288 RefUsesImage(ImageLoader* image) : _image(image) {}
1289 bool operator()(const ImageLoader::DynamicReference& ref) const {
1290 return ( (ref.from == _image) || (ref.to == _image) );
1291 }
1292 private:
1293 ImageLoader* _image;
1294 };
1295
1296
1297
1298 void removeImage(ImageLoader* image)
1299 {
1300 // if the image has a dtrace DOF section, tell dtrace it is going away, then remove it from sImageFilesNeedingDOFUnregistration
1301 for (std::vector<RegisteredDOF>::iterator it=sImageFilesNeedingDOFUnregistration.begin(); it != sImageFilesNeedingDOFUnregistration.end(); ) {
1302 if ( it->mh == image->machHeader() ) {
1303 unregisterDOF(it->registrationID);
1304 sImageFilesNeedingDOFUnregistration.erase(it);
1305 // don't increment iterator, the erase caused next element to be copied to where this iterator points
1306 }
1307 else {
1308 ++it;
1309 }
1310 }
1311
1312 // tell all registered remove image handlers about this
1313 // do this before removing image from internal data structures so that the callback can query dyld about the image
1314 if ( image->getState() >= dyld_image_state_bound ) {
1315 sRemoveImageCallbacksInUse = true; // This only runs inside dyld's global lock, so ok to use a global for the in-use flag.
1316 for (std::vector<ImageCallback>::iterator it=sRemoveImageCallbacks.begin(); it != sRemoveImageCallbacks.end(); it++) {
1317 (*it)(image->machHeader(), image->getSlide());
1318 }
1319 sRemoveImageCallbacksInUse = false;
1320
1321 if ( sNotifyObjCUnmapped != NULL && image->notifyObjC() )
1322 (*sNotifyObjCUnmapped)(image->getRealPath(), image->machHeader());
1323 }
1324
1325 // notify
1326 notifySingle(dyld_image_state_terminated, image, NULL);
1327
1328 // remove from mapped images table
1329 removedMappedRanges(image);
1330
1331 // remove from master list
1332 allImagesLock();
1333 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
1334 if ( *it == image ) {
1335 sAllImages.erase(it);
1336 break;
1337 }
1338 }
1339 allImagesUnlock();
1340
1341 // remove from sDynamicReferences
1342 OSSpinLockLock(&sDynamicReferencesLock);
1343 sDynamicReferences.erase(std::remove_if(sDynamicReferences.begin(), sDynamicReferences.end(), RefUsesImage(image)), sDynamicReferences.end());
1344 OSSpinLockUnlock(&sDynamicReferencesLock);
1345
1346 // flush find-by-address cache (do this after removed from master list, so there is no chance it can come back)
1347 if ( sLastImageByAddressCache == image )
1348 sLastImageByAddressCache = NULL;
1349
1350 // if in root list, pull it out
1351 for (std::vector<ImageLoader*>::iterator it=sImageRoots.begin(); it != sImageRoots.end(); it++) {
1352 if ( *it == image ) {
1353 sImageRoots.erase(it);
1354 break;
1355 }
1356 }
1357
1358 // log if requested
1359 if ( gLinkContext.verboseLoading || (sEnv.DYLD_PRINT_LIBRARIES_POST_LAUNCH && (sMainExecutable!=NULL) && sMainExecutable->isLinked()) ) {
1360 dyld::log("dyld: unloaded: %s\n", image->getPath());
1361 }
1362
1363 // tell gdb, new way
1364 removeImageFromAllImages(image->machHeader());
1365 }
1366
1367
1368 void runImageStaticTerminators(ImageLoader* image)
1369 {
1370 // if in termination list, pull it out and run terminator
1371 bool mightBeMore;
1372 do {
1373 mightBeMore = false;
1374 for (std::vector<ImageLoader*>::iterator it=sImageFilesNeedingTermination.begin(); it != sImageFilesNeedingTermination.end(); it++) {
1375 if ( *it == image ) {
1376 sImageFilesNeedingTermination.erase(it);
1377 if (gLogAPIs) dyld::log("dlclose(), running static terminators for %p %s\n", image, image->getShortName());
1378 image->doTermination(gLinkContext);
1379 mightBeMore = true;
1380 break;
1381 }
1382 }
1383 } while ( mightBeMore );
1384 }
1385
1386 static void terminationRecorder(ImageLoader* image)
1387 {
1388 sImageFilesNeedingTermination.push_back(image);
1389 }
1390
1391 const char* getExecutablePath()
1392 {
1393 return sExecPath;
1394 }
1395
1396 static void runAllStaticTerminators(void* extra)
1397 {
1398 try {
1399 const size_t imageCount = sImageFilesNeedingTermination.size();
1400 for(size_t i=imageCount; i > 0; --i){
1401 ImageLoader* image = sImageFilesNeedingTermination[i-1];
1402 image->doTermination(gLinkContext);
1403 }
1404 sImageFilesNeedingTermination.clear();
1405 notifyBatch(dyld_image_state_terminated, false);
1406 }
1407 catch (const char* msg) {
1408 halt(msg);
1409 }
1410 }
1411
1412 void initializeMainExecutable()
1413 {
1414 // record that we've reached this step
1415 gLinkContext.startedInitializingMainExecutable = true;
1416
1417 // run initializers for any inserted dylibs
1418 ImageLoader::InitializerTimingList initializerTimes[allImagesCount()];
1419 initializerTimes[0].count = 0;
1420 const size_t rootCount = sImageRoots.size();
1421 if ( rootCount > 1 ) {
1422 for(size_t i=1; i < rootCount; ++i) {
1423 sImageRoots[i]->runInitializers(gLinkContext, initializerTimes[0]);
1424 }
1425 }
1426
1427 // run initializers for main executable and everything it brings up
1428 sMainExecutable->runInitializers(gLinkContext, initializerTimes[0]);
1429
1430 // register cxa_atexit() handler to run static terminators in all loaded images when this process exits
1431 if ( gLibSystemHelpers != NULL )
1432 (*gLibSystemHelpers->cxa_atexit)(&runAllStaticTerminators, NULL, NULL);
1433
1434 // dump info if requested
1435 if ( sEnv.DYLD_PRINT_STATISTICS )
1436 ImageLoader::printStatistics((unsigned int)allImagesCount(), initializerTimes[0]);
1437 if ( sEnv.DYLD_PRINT_STATISTICS_DETAILS )
1438 ImageLoaderMachO::printStatisticsDetails((unsigned int)allImagesCount(), initializerTimes[0]);
1439 }
1440
1441 bool mainExecutablePrebound()
1442 {
1443 return sMainExecutable->usablePrebinding(gLinkContext);
1444 }
1445
1446 ImageLoader* mainExecutable()
1447 {
1448 return sMainExecutable;
1449 }
1450
1451
1452
1453
1454 #if SUPPORT_VERSIONED_PATHS
1455
1456 // forward reference
1457 static bool getDylibVersionAndInstallname(const char* dylibPath, uint32_t* version, char* installName);
1458
1459
1460 //
1461 // Examines a dylib file and if its current_version is newer than the installed
1462 // dylib at its install_name, then adds the dylib file to sDylibOverrides.
1463 //
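// Illustrative example (hypothetical paths): if /SomeRoot/usr/lib/libfoo.dylib carries
// install name /usr/lib/libfoo.dylib and a higher current_version than the dylib
// installed at /usr/lib/libfoo.dylib, then an entry
//     { installName: "/usr/lib/libfoo.dylib", override: "/SomeRoot/usr/lib/libfoo.dylib" }
// is added to (or updated in) sDylibOverrides.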
1464 static void checkDylibOverride(const char* dylibFile)
1465 {
1466 //dyld::log("checkDylibOverride('%s')\n", dylibFile);
1467 uint32_t altVersion;
1468 char sysInstallName[PATH_MAX];
1469 if ( getDylibVersionAndInstallname(dylibFile, &altVersion, sysInstallName) && (sysInstallName[0] =='/') ) {
1470 //dyld::log("%s has version 0x%08X and install name %s\n", dylibFile, altVersion, sysInstallName);
1471 uint32_t sysVersion;
1472 if ( getDylibVersionAndInstallname(sysInstallName, &sysVersion, NULL) ) {
1473 //dyld::log("%s has version 0x%08X\n", sysInstallName, sysVersion);
1474 if ( altVersion > sysVersion ) {
1475 //dyld::log("override found: %s -> %s\n", sysInstallName, dylibFile);
1476 // see if there already is an override for this dylib
1477 bool entryExists = false;
1478 for (std::vector<DylibOverride>::iterator it = sDylibOverrides.begin(); it != sDylibOverrides.end(); ++it) {
1479 if ( strcmp(it->installName, sysInstallName) == 0 ) {
1480 entryExists = true;
1481 uint32_t prevVersion;
1482 if ( getDylibVersionAndInstallname(it->override, &prevVersion, NULL) ) {
1483 if ( altVersion > prevVersion ) {
1484 // found an even newer override
1485 free((void*)(it->override));
1486 char resolvedPath[PATH_MAX];
1487 if ( realpath(dylibFile, resolvedPath) != NULL )
1488 it->override = strdup(resolvedPath);
1489 else
1490 it->override = strdup(dylibFile);
1491 break;
1492 }
1493 }
1494 }
1495 }
1496 if ( ! entryExists ) {
1497 DylibOverride entry;
1498 entry.installName = strdup(sysInstallName);
1499 char resolvedPath[PATH_MAX];
1500 if ( realpath(dylibFile, resolvedPath) != NULL )
1501 entry.override = strdup(resolvedPath);
1502 else
1503 entry.override = strdup(dylibFile);
1504 sDylibOverrides.push_back(entry);
1505 //dyld::log("added override: %s -> %s\n", entry.installName, entry.override);
1506 }
1507 }
1508 }
1509 }
1510
1511 }
1512
1513 static void checkDylibOverridesInDir(const char* dirPath)
1514 {
1515 //dyld::log("checkDylibOverridesInDir('%s')\n", dirPath);
1516 char dylibPath[PATH_MAX];
1517 long dirPathLen = strlcpy(dylibPath, dirPath, PATH_MAX-1);
1518 if ( dirPathLen >= PATH_MAX )
1519 return;
1520 DIR* dirp = opendir(dirPath);
1521 if ( dirp != NULL) {
1522 dirent entry;
1523 dirent* entp = NULL;
1524 while ( readdir_r(dirp, &entry, &entp) == 0 ) {
1525 if ( entp == NULL )
1526 break;
1527 if ( entp->d_type != DT_REG )
1528 continue;
1529 dylibPath[dirPathLen] = '/';
1530 dylibPath[dirPathLen+1] = '\0';
1531 if ( strlcat(dylibPath, entp->d_name, PATH_MAX) >= PATH_MAX )
1532 continue;
1533 checkDylibOverride(dylibPath);
1534 }
1535 closedir(dirp);
1536 }
1537 }
1538
1539
1540 static void checkFrameworkOverridesInDir(const char* dirPath)
1541 {
1542 //dyld::log("checkFrameworkOverridesInDir('%s')\n", dirPath);
1543 char frameworkPath[PATH_MAX];
1544 long dirPathLen = strlcpy(frameworkPath, dirPath, PATH_MAX-1);
1545 if ( dirPathLen >= PATH_MAX )
1546 return;
1547 DIR* dirp = opendir(dirPath);
1548 if ( dirp != NULL) {
1549 dirent entry;
1550 dirent* entp = NULL;
1551 while ( readdir_r(dirp, &entry, &entp) == 0 ) {
1552 if ( entp == NULL )
1553 break;
1554 if ( entp->d_type != DT_DIR )
1555 continue;
1556 frameworkPath[dirPathLen] = '/';
1557 frameworkPath[dirPathLen+1] = '\0';
1558 int dirNameLen = (int)strlen(entp->d_name);
1559 if ( dirNameLen < 11 )
1560 continue;
1561 if ( strcmp(&entp->d_name[dirNameLen-10], ".framework") != 0 )
1562 continue;
1563 if ( strlcat(frameworkPath, entp->d_name, PATH_MAX) >= PATH_MAX )
1564 continue;
1565 if ( strlcat(frameworkPath, "/", PATH_MAX) >= PATH_MAX )
1566 continue;
1567 if ( strlcat(frameworkPath, entp->d_name, PATH_MAX) >= PATH_MAX )
1568 continue;
1569 frameworkPath[strlen(frameworkPath)-10] = '\0';
1570 checkDylibOverride(frameworkPath);
1571 }
1572 closedir(dirp);
1573 }
1574 }
1575 #endif // SUPPORT_VERSIONED_PATHS
1576
1577
1578 //
1579 // Turns a colon separated list of strings into a NULL terminated array
1580 // of string pointers. If mainExecutableDir param is not NULL,
1581 // substitutes @loader_path with main executable's dir.
1582 //
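// Illustrative example (paths are hypothetical):
//   parseColonList("/usr/lib:@loader_path/Frameworks", "/App.app/Contents/MacOS/")
//   yields { "/usr/lib", "/App.app/Contents/MacOS/Frameworks", NULL }
//   (in restricted processes the @loader_path entry is skipped with a warning).
//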
1583 static const char** parseColonList(const char* list, const char* mainExecutableDir)
1584 {
1585 static const char* sEmptyList[] = { NULL };
1586
1587 if ( list[0] == '\0' )
1588 return sEmptyList;
1589
1590 int colonCount = 0;
1591 for(const char* s=list; *s != '\0'; ++s) {
1592 if (*s == ':')
1593 ++colonCount;
1594 }
1595
1596 int index = 0;
1597 const char* start = list;
1598 char** result = new char*[colonCount+2];
1599 for(const char* s=list; *s != '\0'; ++s) {
1600 if (*s == ':') {
1601 size_t len = s-start;
1602 if ( (mainExecutableDir != NULL) && (strncmp(start, "@loader_path/", 13) == 0) ) {
1603 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1604 if ( gLinkContext.processIsRestricted ) {
1605 dyld::log("dyld: warning: @loader_path/ ignored in restricted process\n");
1606 continue;
1607 }
1608 #endif
1609 size_t mainExecDirLen = strlen(mainExecutableDir);
1610 char* str = new char[mainExecDirLen+len+1];
1611 strcpy(str, mainExecutableDir);
1612 strlcat(str, &start[13], mainExecDirLen+len+1);
1613 str[mainExecDirLen+len-13] = '\0';
1614 start = &s[1];
1615 result[index++] = str;
1616 }
1617 else if ( (mainExecutableDir != NULL) && (strncmp(start, "@executable_path/", 17) == 0) ) {
1618 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1619 if ( gLinkContext.processIsRestricted ) {
1620 dyld::log("dyld: warning: @executable_path/ ignored in restricted process\n");
1621 continue;
1622 }
1623 #endif
1624 size_t mainExecDirLen = strlen(mainExecutableDir);
1625 char* str = new char[mainExecDirLen+len+1];
1626 strcpy(str, mainExecutableDir);
1627 strlcat(str, &start[17], mainExecDirLen+len+1);
1628 str[mainExecDirLen+len-17] = '\0';
1629 start = &s[1];
1630 result[index++] = str;
1631 }
1632 else {
1633 char* str = new char[len+1];
1634 strncpy(str, start, len);
1635 str[len] = '\0';
1636 start = &s[1];
1637 result[index++] = str;
1638 }
1639 }
1640 }
1641 size_t len = strlen(start);
1642 if ( (mainExecutableDir != NULL) && (strncmp(start, "@loader_path/", 13) == 0) ) {
1643 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1644 if ( gLinkContext.processIsRestricted ) {
1645 dyld::log("dyld: warning: @loader_path/ ignored in restricted process\n");
1646 }
1647 else
1648 #endif
1649 {
1650 size_t mainExecDirLen = strlen(mainExecutableDir);
1651 char* str = new char[mainExecDirLen+len+1];
1652 strcpy(str, mainExecutableDir);
1653 strlcat(str, &start[13], mainExecDirLen+len+1);
1654 str[mainExecDirLen+len-13] = '\0';
1655 result[index++] = str;
1656 }
1657 }
1658 else if ( (mainExecutableDir != NULL) && (strncmp(start, "@executable_path/", 17) == 0) ) {
1659 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1660 if ( gLinkContext.processIsRestricted ) {
1661 dyld::log("dyld: warning: @executable_path/ ignored in restricted process\n");
1662 }
1663 else
1664 #endif
1665 {
1666 size_t mainExecDirLen = strlen(mainExecutableDir);
1667 char* str = new char[mainExecDirLen+len+1];
1668 strcpy(str, mainExecutableDir);
1669 strlcat(str, &start[17], mainExecDirLen+len+1);
1670 str[mainExecDirLen+len-17] = '\0';
1671 result[index++] = str;
1672 }
1673 }
1674 else {
1675 char* str = new char[len+1];
1676 strcpy(str, start);
1677 result[index++] = str;
1678 }
1679 result[index] = NULL;
1680
1681 //dyld::log("parseColonList(%s)\n", list);
1682 //for(int i=0; result[i] != NULL; ++i)
1683 // dyld::log(" %s\n", result[i]);
1684 return (const char**)result;
1685 }
1686
1687 static void appendParsedColonList(const char* list, const char* mainExecutableDir, const char* const ** storage)
1688 {
1689 const char** newlist = parseColonList(list, mainExecutableDir);
1690 if ( *storage == NULL ) {
1691 // first time, just set
1692 *storage = newlist;
1693 }
1694 else {
1695 // need to append to existing list
1696 const char* const* existing = *storage;
1697 int count = 0;
1698 for(int i=0; existing[i] != NULL; ++i)
1699 ++count;
1700 for(int i=0; newlist[i] != NULL; ++i)
1701 ++count;
1702 const char** combinedList = new const char*[count+2];
1703 int index = 0;
1704 for(int i=0; existing[i] != NULL; ++i)
1705 combinedList[index++] = existing[i];
1706 for(int i=0; newlist[i] != NULL; ++i)
1707 combinedList[index++] = newlist[i];
1708 combinedList[index] = NULL;
1709 // leak old arrays
1710 *storage = combinedList;
1711 }
1712 }
1713
1714 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1715 static void paths_expand_roots(const char **paths, const char *key, const char *val)
1716 {
1717 // assert(val != NULL);
1718 // assert(paths != NULL);
1719 if(NULL != key) {
1720 size_t keyLen = strlen(key);
1721 for(int i=0; paths[i] != NULL; ++i) {
1722 if ( strncmp(paths[i], key, keyLen) == 0 ) {
1723 char* newPath = new char[strlen(val) + (strlen(paths[i]) - keyLen) + 1];
1724 strcpy(newPath, val);
1725 strcat(newPath, &paths[i][keyLen]);
1726 paths[i] = newPath;
1727 }
1728 }
1729 }
1730 return;
1731 }
1732
1733 static void removePathWithPrefix(const char* paths[], const char* prefix)
1734 {
1735 size_t prefixLen = strlen(prefix);
1736 int skip = 0;
1737 int i;
1738 for(i = 0; paths[i] != NULL; ++i) {
1739 if ( strncmp(paths[i], prefix, prefixLen) == 0 )
1740 ++skip;
1741 else
1742 paths[i-skip] = paths[i];
1743 }
1744 paths[i-skip] = NULL;
1745 }
1746 #endif
1747
1748
1749 #if 0
1750 static void paths_dump(const char **paths)
1751 {
1752 // assert(paths != NULL);
1753 const char **strs = paths;
1754 while(*strs != NULL)
1755 {
1756 dyld::log("\"%s\"\n", *strs);
1757 strs++;
1758 }
1759 return;
1760 }
1761 #endif
1762
1763 static void printOptions(const char* argv[])
1764 {
1765 uint32_t i = 0;
1766 while ( NULL != argv[i] ) {
1767 dyld::log("opt[%i] = \"%s\"\n", i, argv[i]);
1768 i++;
1769 }
1770 }
1771
1772 static void printEnvironmentVariables(const char* envp[])
1773 {
1774 while ( NULL != *envp ) {
1775 dyld::log("%s\n", *envp);
1776 envp++;
1777 }
1778 }
1779
1780 void processDyldEnvironmentVariable(const char* key, const char* value, const char* mainExecutableDir)
1781 {
1782 if ( strcmp(key, "DYLD_FRAMEWORK_PATH") == 0 ) {
1783 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_FRAMEWORK_PATH);
1784 }
1785 else if ( strcmp(key, "DYLD_FALLBACK_FRAMEWORK_PATH") == 0 ) {
1786 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_FALLBACK_FRAMEWORK_PATH);
1787 }
1788 else if ( strcmp(key, "DYLD_LIBRARY_PATH") == 0 ) {
1789 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_LIBRARY_PATH);
1790 }
1791 else if ( strcmp(key, "DYLD_FALLBACK_LIBRARY_PATH") == 0 ) {
1792 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_FALLBACK_LIBRARY_PATH);
1793 }
1794 #if SUPPORT_ROOT_PATH
1795 else if ( (strcmp(key, "DYLD_ROOT_PATH") == 0) || (strcmp(key, "DYLD_PATHS_ROOT") == 0) ) {
1796 if ( strcmp(value, "/") != 0 ) {
1797 gLinkContext.rootPaths = parseColonList(value, mainExecutableDir);
1798 for (int i=0; gLinkContext.rootPaths[i] != NULL; ++i) {
1799 if ( gLinkContext.rootPaths[i][0] != '/' ) {
1800 dyld::warn("DYLD_ROOT_PATH not used because it contains a non-absolute path\n");
1801 gLinkContext.rootPaths = NULL;
1802 break;
1803 }
1804 }
1805 }
1806 }
1807 #endif
1808 else if ( strcmp(key, "DYLD_IMAGE_SUFFIX") == 0 ) {
1809 gLinkContext.imageSuffix = value;
1810 }
1811 else if ( strcmp(key, "DYLD_INSERT_LIBRARIES") == 0 ) {
1812 sEnv.DYLD_INSERT_LIBRARIES = parseColonList(value, NULL);
1813 #if SUPPORT_ACCELERATE_TABLES
1814 sDisableAcceleratorTables = true;
1815 #endif
1816 }
1817 else if ( strcmp(key, "DYLD_PRINT_OPTS") == 0 ) {
1818 sEnv.DYLD_PRINT_OPTS = true;
1819 }
1820 else if ( strcmp(key, "DYLD_PRINT_ENV") == 0 ) {
1821 sEnv.DYLD_PRINT_ENV = true;
1822 }
1823 else if ( strcmp(key, "DYLD_DISABLE_DOFS") == 0 ) {
1824 sEnv.DYLD_DISABLE_DOFS = true;
1825 }
1826 else if ( strcmp(key, "DYLD_DISABLE_PREFETCH") == 0 ) {
1827 gLinkContext.preFetchDisabled = true;
1828 }
1829 else if ( strcmp(key, "DYLD_PRINT_LIBRARIES") == 0 ) {
1830 gLinkContext.verboseLoading = true;
1831 }
1832 else if ( strcmp(key, "DYLD_PRINT_LIBRARIES_POST_LAUNCH") == 0 ) {
1833 sEnv.DYLD_PRINT_LIBRARIES_POST_LAUNCH = true;
1834 }
1835 else if ( strcmp(key, "DYLD_BIND_AT_LAUNCH") == 0 ) {
1836 sEnv.DYLD_BIND_AT_LAUNCH = true;
1837 }
1838 else if ( strcmp(key, "DYLD_FORCE_FLAT_NAMESPACE") == 0 ) {
1839 gLinkContext.bindFlat = true;
1840 }
1841 else if ( strcmp(key, "DYLD_NEW_LOCAL_SHARED_REGIONS") == 0 ) {
1842 // ignore, no longer relevant but some scripts still set it
1843 }
1844 else if ( strcmp(key, "DYLD_NO_FIX_PREBINDING") == 0 ) {
1845 }
1846 else if ( strcmp(key, "DYLD_PREBIND_DEBUG") == 0 ) {
1847 gLinkContext.verbosePrebinding = true;
1848 }
1849 else if ( strcmp(key, "DYLD_PRINT_INITIALIZERS") == 0 ) {
1850 gLinkContext.verboseInit = true;
1851 }
1852 else if ( strcmp(key, "DYLD_PRINT_DOFS") == 0 ) {
1853 gLinkContext.verboseDOF = true;
1854 }
1855 else if ( strcmp(key, "DYLD_PRINT_STATISTICS") == 0 ) {
1856 sEnv.DYLD_PRINT_STATISTICS = true;
1857 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
1858 // <rdar://problem/26614838> DYLD_PRINT_STATISTICS no longer logs to xcode console for device apps
1859 sForceStderr = true;
1860 #endif
1861 }
1862 else if ( strcmp(key, "DYLD_PRINT_TO_STDERR") == 0 ) {
1863 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
1864 // <rdar://problem/26633440> DYLD_PRINT_STATISTICS no longer logs to xcode console for device apps
1865 sForceStderr = true;
1866 #endif
1867 }
1868 else if ( strcmp(key, "DYLD_PRINT_STATISTICS_DETAILS") == 0 ) {
1869 sEnv.DYLD_PRINT_STATISTICS_DETAILS = true;
1870 }
1871 else if ( strcmp(key, "DYLD_PRINT_SEGMENTS") == 0 ) {
1872 gLinkContext.verboseMapping = true;
1873 }
1874 else if ( strcmp(key, "DYLD_PRINT_BINDINGS") == 0 ) {
1875 gLinkContext.verboseBind = true;
1876 }
1877 else if ( strcmp(key, "DYLD_PRINT_WEAK_BINDINGS") == 0 ) {
1878 gLinkContext.verboseWeakBind = true;
1879 }
1880 else if ( strcmp(key, "DYLD_PRINT_REBASINGS") == 0 ) {
1881 gLinkContext.verboseRebase = true;
1882 }
1883 else if ( strcmp(key, "DYLD_PRINT_APIS") == 0 ) {
1884 gLogAPIs = true;
1885 }
1886 #if SUPPORT_ACCELERATE_TABLES
1887 else if ( strcmp(key, "DYLD_PRINT_APIS_APP") == 0 ) {
1888 gLogAppAPIs = true;
1889 }
1890 #endif
1891 else if ( strcmp(key, "DYLD_PRINT_WARNINGS") == 0 ) {
1892 gLinkContext.verboseWarnings = true;
1893 }
1894 else if ( strcmp(key, "DYLD_PRINT_RPATHS") == 0 ) {
1895 gLinkContext.verboseRPaths = true;
1896 }
1897 else if ( strcmp(key, "DYLD_PRINT_CS_NOTIFICATIONS") == 0 ) {
1898 sEnv.DYLD_PRINT_CS_NOTIFICATIONS = true;
1899 }
1900 else if ( strcmp(key, "DYLD_PRINT_INTERPOSING") == 0 ) {
1901 gLinkContext.verboseInterposing = true;
1902 }
1903 else if ( strcmp(key, "DYLD_PRINT_CODE_SIGNATURES") == 0 ) {
1904 gLinkContext.verboseCodeSignatures = true;
1905 }
1906 else if ( strcmp(key, "DYLD_SHARED_REGION") == 0 ) {
1907 if ( strcmp(value, "private") == 0 ) {
1908 gLinkContext.sharedRegionMode = ImageLoader::kUsePrivateSharedRegion;
1909 }
1910 else if ( strcmp(value, "avoid") == 0 ) {
1911 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
1912 }
1913 else if ( strcmp(value, "use") == 0 ) {
1914 gLinkContext.sharedRegionMode = ImageLoader::kUseSharedRegion;
1915 }
1916 else if ( value[0] == '\0' ) {
1917 gLinkContext.sharedRegionMode = ImageLoader::kUseSharedRegion;
1918 }
1919 else {
1920 dyld::warn("unknown option to DYLD_SHARED_REGION. Valid options are: use, private, avoid\n");
1921 }
1922 }
1923 #if DYLD_SHARED_CACHE_SUPPORT
1924 else if ( strcmp(key, "DYLD_SHARED_CACHE_DIR") == 0 ) {
1925 sSharedCacheDir = value;
1926 }
1927 else if ( strcmp(key, "DYLD_SHARED_CACHE_DONT_VALIDATE") == 0 ) {
1928 sSharedCacheIgnoreInodeAndTimeStamp = true;
1929 }
1930 #endif
1931 else if ( strcmp(key, "DYLD_IGNORE_PREBINDING") == 0 ) {
1932 if ( strcmp(value, "all") == 0 ) {
1933 gLinkContext.prebindUsage = ImageLoader::kUseNoPrebinding;
1934 }
1935 else if ( strcmp(value, "app") == 0 ) {
1936 gLinkContext.prebindUsage = ImageLoader::kUseAllButAppPredbinding;
1937 }
1938 else if ( strcmp(value, "nonsplit") == 0 ) {
1939 gLinkContext.prebindUsage = ImageLoader::kUseSplitSegPrebinding;
1940 }
1941 else if ( value[0] == '\0' ) {
1942 gLinkContext.prebindUsage = ImageLoader::kUseSplitSegPrebinding;
1943 }
1944 else {
1945 dyld::warn("unknown option to DYLD_IGNORE_PREBINDING. Valid options are: all, app, nonsplit\n");
1946 }
1947 }
1948 #if SUPPORT_VERSIONED_PATHS
1949 else if ( strcmp(key, "DYLD_VERSIONED_LIBRARY_PATH") == 0 ) {
1950 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_VERSIONED_LIBRARY_PATH);
1951 #if SUPPORT_ACCELERATE_TABLES
1952 sDisableAcceleratorTables = true;
1953 #endif
1954 }
1955 else if ( strcmp(key, "DYLD_VERSIONED_FRAMEWORK_PATH") == 0 ) {
1956 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_VERSIONED_FRAMEWORK_PATH);
1957 #if SUPPORT_ACCELERATE_TABLES
1958 sDisableAcceleratorTables = true;
1959 #endif
1960 }
1961 #endif
1962 #if !TARGET_IPHONE_SIMULATOR
1963 else if ( (strcmp(key, "DYLD_PRINT_TO_FILE") == 0) && (mainExecutableDir == NULL) ) {
1964 int fd = open(value, O_WRONLY | O_CREAT | O_APPEND, 0644);
1965 if ( fd != -1 ) {
1966 sLogfile = fd;
1967 sLogToFile = true;
1968 }
1969 else {
1970 dyld::log("dyld: could not open DYLD_PRINT_TO_FILE='%s', errno=%d\n", value, errno);
1971 }
1972 }
1973 #endif
1974 else {
1975 dyld::warn("unknown environment variable: %s\n", key);
1976 }
1977 }
1978
1979
1980 #if SUPPORT_LC_DYLD_ENVIRONMENT
1981 static void checkLoadCommandEnvironmentVariables()
1982 {
1983 // <rdar://problem/8440934> Support augmenting dyld environment variables in load commands
1984 const uint32_t cmd_count = sMainExecutableMachHeader->ncmds;
1985 const struct load_command* const cmds = (struct load_command*)(((char*)sMainExecutableMachHeader)+sizeof(macho_header));
1986 const struct load_command* cmd = cmds;
1987 for (uint32_t i = 0; i < cmd_count; ++i) {
1988 switch (cmd->cmd) {
1989 case LC_DYLD_ENVIRONMENT:
1990 {
1991 const struct dylinker_command* envcmd = (struct dylinker_command*)cmd;
1992 const char* keyEqualsValue = (char*)envcmd + envcmd->name.offset;
1993 char mainExecutableDir[strlen(sExecPath)+2];
1994 strcpy(mainExecutableDir, sExecPath);
1995 char* lastSlash = strrchr(mainExecutableDir, '/');
1996 if ( lastSlash != NULL)
1997 lastSlash[1] = '\0';
1998 // only process variables that start with DYLD_ and end in _PATH
1999 if ( (strncmp(keyEqualsValue, "DYLD_", 5) == 0) ) {
2000 const char* equals = strchr(keyEqualsValue, '=');
2001 if ( equals != NULL ) {
2002 if ( strncmp(&equals[-5], "_PATH", 5) == 0 ) {
2003 const char* value = &equals[1];
2004 const size_t keyLen = equals-keyEqualsValue;
2005 // <rdar://problem/22799635> don't let malformed load command overflow stack
2006 if ( keyLen < 40 ) {
2007 char key[keyLen+1];
2008 strncpy(key, keyEqualsValue, keyLen);
2009 key[keyLen] = '\0';
2010 //dyld::log("processing: %s\n", keyEqualsValue);
2011 //dyld::log("mainExecutableDir: %s\n", mainExecutableDir);
2012 processDyldEnvironmentVariable(key, value, mainExecutableDir);
2013 }
2014 }
2015 }
2016 }
2017 }
2018 break;
2019 }
2020 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
2021 }
2022 }
2023 #endif // SUPPORT_LC_DYLD_ENVIRONMENT
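// Illustrative example (hypothetical payload): an LC_DYLD_ENVIRONMENT command whose name
// string is "DYLD_VERSIONED_FRAMEWORK_PATH=@executable_path/../Frameworks" passes the
// DYLD_/_PATH filter above and is processed like the corresponding environment variable,
// with @executable_path resolved against the main executable's directory. Such commands
// are typically added at link time (e.g. via ld64's -dyld_env option).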
2024
2025
2026 static bool hasCodeSignatureLoadCommand(const macho_header* mh)
2027 {
2028 const uint32_t cmd_count = mh->ncmds;
2029 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
2030 const struct load_command* cmd = cmds;
2031 for (uint32_t i = 0; i < cmd_count; ++i) {
2032 if (cmd->cmd == LC_CODE_SIGNATURE)
2033 return true;
2034 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
2035 }
2036 return false;
2037 }
2038
2039
2040 #if SUPPORT_VERSIONED_PATHS
2041 static void checkVersionedPaths()
2042 {
2043 // search DYLD_VERSIONED_LIBRARY_PATH directories for dylibs and check if they are newer
2044 if ( sEnv.DYLD_VERSIONED_LIBRARY_PATH != NULL ) {
2045 for(const char* const* lp = sEnv.DYLD_VERSIONED_LIBRARY_PATH; *lp != NULL; ++lp) {
2046 checkDylibOverridesInDir(*lp);
2047 }
2048 }
2049
2050 // search DYLD_VERSIONED_FRAMEWORK_PATH directories for frameworks and check if they are newer
2051 if ( sEnv.DYLD_VERSIONED_FRAMEWORK_PATH != NULL ) {
2052 for(const char* const* fp = sEnv.DYLD_VERSIONED_FRAMEWORK_PATH; *fp != NULL; ++fp) {
2053 checkFrameworkOverridesInDir(*fp);
2054 }
2055 }
2056 }
2057 #endif
2058
2059
2060 #if __MAC_OS_X_VERSION_MIN_REQUIRED
2061 //
2062 // For security, setuid programs ignore DYLD_* environment variables.
2063 // Additionally, the DYLD_* environment variables are removed
2064 // from the environment, so that any child processes don't see them.
2065 //
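// Illustrative sketch of the compaction below (hypothetical values):
//   before: { "PATH=/usr/bin", "DYLD_INSERT_LIBRARIES=/tmp/x.dylib", "HOME=/var/empty", NULL, apple[0], ... }
//   after:  { "PATH=/usr/bin", "HOME=/var/empty", NULL, apple[0], ... }
// with *applep updated to point at the slid apple parameter block.
//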
2066 static void pruneEnvironmentVariables(const char* envp[], const char*** applep)
2067 {
2068 #if SUPPORT_LC_DYLD_ENVIRONMENT
2069 checkLoadCommandEnvironmentVariables();
2070 #endif
2071
2072 // delete all DYLD_* and LD_LIBRARY_PATH environment variables
2073 int removedCount = 0;
2074 const char** d = envp;
2075 for(const char** s = envp; *s != NULL; s++) {
2076 if ( (strncmp(*s, "DYLD_", 5) != 0) && (strncmp(*s, "LD_LIBRARY_PATH=", 16) != 0) ) {
2077 *d++ = *s;
2078 }
2079 else {
2080 ++removedCount;
2081 }
2082 }
2083 *d++ = NULL;
2084 // slide apple parameters
2085 if ( removedCount > 0 ) {
2086 *applep = d;
2087 do {
2088 *d = d[removedCount];
2089 } while ( *d++ != NULL );
2090 for(int i=0; i < removedCount; ++i)
2091 *d++ = NULL;
2092 }
2093
2094 // disable framework and library fallback paths for setuid binaries rdar://problem/4589305
2095 sEnv.DYLD_FALLBACK_FRAMEWORK_PATH = NULL;
2096 sEnv.DYLD_FALLBACK_LIBRARY_PATH = NULL;
2097
2098 if ( removedCount > 0 )
2099 strlcat(sLoadingCrashMessage, ", ignoring DYLD_* env vars", sizeof(sLoadingCrashMessage));
2100 }
2101 #endif
2102
2103 static void defaultUninitializedFallbackPaths(const char* envp[])
2104 {
2105 #if __MAC_OS_X_VERSION_MIN_REQUIRED
2106 if ( gLinkContext.processIsRestricted ) {
2107 sEnv.DYLD_FALLBACK_FRAMEWORK_PATH = sRestrictedFrameworkFallbackPaths;
2108 sEnv.DYLD_FALLBACK_LIBRARY_PATH = sRestrictedLibraryFallbackPaths;
2109 return;
2110 }
2111
2112 // default value for DYLD_FALLBACK_FRAMEWORK_PATH, if not set in environment
2113 const char* home = _simple_getenv(envp, "HOME");
2114 if ( sEnv.DYLD_FALLBACK_FRAMEWORK_PATH == NULL ) {
2115 const char** fpaths = sFrameworkFallbackPaths;
2116 if ( home == NULL )
2117 removePathWithPrefix(fpaths, "$HOME");
2118 else
2119 paths_expand_roots(fpaths, "$HOME", home);
2120 sEnv.DYLD_FALLBACK_FRAMEWORK_PATH = fpaths;
2121 }
2122
2123 // default value for DYLD_FALLBACK_LIBRARY_PATH, if not set in environment
2124 if ( sEnv.DYLD_FALLBACK_LIBRARY_PATH == NULL ) {
2125 const char** lpaths = sLibraryFallbackPaths;
2126 if ( home == NULL )
2127 removePathWithPrefix(lpaths, "$HOME");
2128 else
2129 paths_expand_roots(lpaths, "$HOME", home);
2130 sEnv.DYLD_FALLBACK_LIBRARY_PATH = lpaths;
2131 }
2132 #else
2133 if ( sEnv.DYLD_FALLBACK_FRAMEWORK_PATH == NULL )
2134 sEnv.DYLD_FALLBACK_FRAMEWORK_PATH = sFrameworkFallbackPaths;
2135
2136 if ( sEnv.DYLD_FALLBACK_LIBRARY_PATH == NULL )
2137 sEnv.DYLD_FALLBACK_LIBRARY_PATH = sLibraryFallbackPaths;
2138 #endif
2139 }
2140
2141
2142 static void checkEnvironmentVariables(const char* envp[])
2143 {
2144 if ( sEnvMode == envNone )
2145 return;
2146 const char** p;
2147 for(p = envp; *p != NULL; p++) {
2148 const char* keyEqualsValue = *p;
2149 if ( strncmp(keyEqualsValue, "DYLD_", 5) == 0 ) {
2150 const char* equals = strchr(keyEqualsValue, '=');
2151 if ( equals != NULL ) {
2152 strlcat(sLoadingCrashMessage, "\n", sizeof(sLoadingCrashMessage));
2153 strlcat(sLoadingCrashMessage, keyEqualsValue, sizeof(sLoadingCrashMessage));
2154 const char* value = &equals[1];
2155 const size_t keyLen = equals-keyEqualsValue;
2156 char key[keyLen+1];
2157 strncpy(key, keyEqualsValue, keyLen);
2158 key[keyLen] = '\0';
2159 if ( (sEnvMode == envPrintOnly) && (strncmp(key, "DYLD_PRINT_", 11) != 0) )
2160 continue;
2161 processDyldEnvironmentVariable(key, value, NULL);
2162 }
2163 }
2164 else if ( strncmp(keyEqualsValue, "LD_LIBRARY_PATH=", 16) == 0 ) {
2165 const char* path = &keyEqualsValue[16];
2166 sEnv.LD_LIBRARY_PATH = parseColonList(path, NULL);
2167 }
2168 }
2169
2170 #if SUPPORT_LC_DYLD_ENVIRONMENT
2171 checkLoadCommandEnvironmentVariables();
2172 #endif // SUPPORT_LC_DYLD_ENVIRONMENT
2173
2174 #if SUPPORT_ROOT_PATH
2175 // <rdar://problem/11281064> DYLD_IMAGE_SUFFIX and DYLD_ROOT_PATH cannot be used together
2176 if ( (gLinkContext.imageSuffix != NULL) && (gLinkContext.rootPaths != NULL) ) {
2177 dyld::warn("Ignoring DYLD_IMAGE_SUFFIX because DYLD_ROOT_PATH is used.\n");
2178 gLinkContext.imageSuffix = NULL;
2179 }
2180 #endif
2181 }
2182
2183 #if __x86_64__ && DYLD_SHARED_CACHE_SUPPORT
2184 static bool isGCProgram(const macho_header* mh, uintptr_t slide)
2185 {
2186 const uint32_t cmd_count = mh->ncmds;
2187 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
2188 const struct load_command* cmd = cmds;
2189 for (uint32_t i = 0; i < cmd_count; ++i) {
2190 switch (cmd->cmd) {
2191 case LC_SEGMENT_COMMAND:
2192 {
2193 const struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
2194 if (strcmp(seg->segname, "__DATA") == 0) {
2195 const struct macho_section* const sectionsStart = (struct macho_section*)((char*)seg + sizeof(struct macho_segment_command));
2196 const struct macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
2197 for (const struct macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
2198 if (strncmp(sect->sectname, "__objc_imageinfo", 16) == 0) {
2199 const uint32_t* objcInfo = (uint32_t*)(sect->addr + slide);
2200 return (objcInfo[1] & 6); // 6 = (OBJC_IMAGE_SUPPORTS_GC | OBJC_IMAGE_REQUIRES_GC)
2201 }
2202 }
2203 }
2204 }
2205 break;
2206 }
2207 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
2208 }
2209 return false;
2210 }
2211 #endif
2212
2213 static void getHostInfo(const macho_header* mainExecutableMH, uintptr_t mainExecutableSlide)
2214 {
2215 #if CPU_SUBTYPES_SUPPORTED
2216 #if __ARM_ARCH_7K__
2217 sHostCPU = CPU_TYPE_ARM;
2218 sHostCPUsubtype = CPU_SUBTYPE_ARM_V7K;
2219 #elif __ARM_ARCH_7A__
2220 sHostCPU = CPU_TYPE_ARM;
2221 sHostCPUsubtype = CPU_SUBTYPE_ARM_V7;
2222 #elif __ARM_ARCH_6K__
2223 sHostCPU = CPU_TYPE_ARM;
2224 sHostCPUsubtype = CPU_SUBTYPE_ARM_V6;
2225 #elif __ARM_ARCH_7F__
2226 sHostCPU = CPU_TYPE_ARM;
2227 sHostCPUsubtype = CPU_SUBTYPE_ARM_V7F;
2228 #elif __ARM_ARCH_7S__
2229 sHostCPU = CPU_TYPE_ARM;
2230 sHostCPUsubtype = CPU_SUBTYPE_ARM_V7S;
2231 #else
2232 struct host_basic_info info;
2233 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
2234 mach_port_t hostPort = mach_host_self();
2235 kern_return_t result = host_info(hostPort, HOST_BASIC_INFO, (host_info_t)&info, &count);
2236 if ( result != KERN_SUCCESS )
2237 throw "host_info() failed";
2238 sHostCPU = info.cpu_type;
2239 sHostCPUsubtype = info.cpu_subtype;
2240 mach_port_deallocate(mach_task_self(), hostPort);
2241 #if __x86_64__
2242 #if DYLD_SHARED_CACHE_SUPPORT
2243 sHaswell = (sHostCPUsubtype == CPU_SUBTYPE_X86_64_H);
2244 // <rdar://problem/18528074> x86_64h: Fall back to the x86_64 slice if an app requires GC.
2245 if ( sHaswell ) {
2246 if ( isGCProgram(mainExecutableMH, mainExecutableSlide) ) {
2247 // When running a GC program on a haswell machine, don't use any 'h slices
2248 sHostCPUsubtype = CPU_SUBTYPE_X86_64_ALL;
2249 sHaswell = false;
2250 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
2251 }
2252 }
2253 #endif
2254 #endif
2255 #endif
2256 #endif
2257 }
2258
2259 static void checkSharedRegionDisable()
2260 {
2261 #if __MAC_OS_X_VERSION_MIN_REQUIRED
2262 // if main executable has segments that overlap the shared region,
2263 // then disable using the shared region
2264 if ( sMainExecutable->overlapsWithAddressRange((void*)(uintptr_t)SHARED_REGION_BASE, (void*)(uintptr_t)(SHARED_REGION_BASE + SHARED_REGION_SIZE)) ) {
2265 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
2266 if ( gLinkContext.verboseMapping )
2267 dyld::warn("disabling shared region because main executable overlaps\n");
2268 }
2269 #if __i386__
2270 if ( gLinkContext.processIsRestricted ) {
2271 // <rdar://problem/15280847> use private or no shared region for suid processes
2272 gLinkContext.sharedRegionMode = ImageLoader::kUsePrivateSharedRegion;
2273 }
2274 #endif
2275 #endif
2276 // iPhoneOS cannot run without shared region
2277 }
2278
2279 bool validImage(const ImageLoader* possibleImage)
2280 {
2281 const size_t imageCount = sAllImages.size();
2282 for(size_t i=0; i < imageCount; ++i) {
2283 if ( possibleImage == sAllImages[i] ) {
2284 return true;
2285 }
2286 }
2287 return false;
2288 }
2289
2290 uint32_t getImageCount()
2291 {
2292 return (uint32_t)sAllImages.size();
2293 }
2294
2295 ImageLoader* getIndexedImage(unsigned int index)
2296 {
2297 if ( index < sAllImages.size() )
2298 return sAllImages[index];
2299 return NULL;
2300 }
2301
2302 ImageLoader* findImageByMachHeader(const struct mach_header* target)
2303 {
2304 return findMappedRange((uintptr_t)target);
2305 }
2306
2307
2308 ImageLoader* findImageContainingAddress(const void* addr)
2309 {
2310 #if SUPPORT_ACCELERATE_TABLES
2311 if ( sAllCacheImagesProxy != NULL ) {
2312 const mach_header* mh;
2313 const char* path;
2314 unsigned index;
2315 if ( sAllCacheImagesProxy->addressInCache(addr, &mh, &path, &index) )
2316 return sAllCacheImagesProxy;
2317 }
2318 #endif
2319 return findMappedRange((uintptr_t)addr);
2320 }
2321
2322
2323 ImageLoader* findImageContainingSymbol(const void* symbol)
2324 {
2325 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
2326 ImageLoader* anImage = *it;
2327 if ( anImage->containsSymbol(symbol) )
2328 return anImage;
2329 }
2330 return NULL;
2331 }
2332
2333
2334
2335 void forEachImageDo( void (*callback)(ImageLoader*, void* userData), void* userData)
2336 {
2337 const size_t imageCount = sAllImages.size();
2338 for(size_t i=0; i < imageCount; ++i) {
2339 ImageLoader* anImage = sAllImages[i];
2340 (*callback)(anImage, userData);
2341 }
2342 }
2343
2344 ImageLoader* findLoadedImage(const struct stat& stat_buf)
2345 {
2346 const size_t imageCount = sAllImages.size();
2347 for(size_t i=0; i < imageCount; ++i){
2348 ImageLoader* anImage = sAllImages[i];
2349 if ( anImage->statMatch(stat_buf) )
2350 return anImage;
2351 }
2352 return NULL;
2353 }
2354
2355 // based on ANSI-C strstr()
2356 static const char* strrstr(const char* str, const char* sub)
2357 {
2358 const size_t sublen = strlen(sub);
2359 for(const char* p = &str[strlen(str)]; p != str; --p) {
2360 if ( strncmp(p, sub, sublen) == 0 )
2361 return p;
2362 }
2363 return NULL;
2364 }
2365
2366
2367 //
2368 // Find framework path
2369 //
2370 // /path/foo.framework/foo => foo.framework/foo
2371 // /path/foo.framework/Versions/A/foo => foo.framework/Versions/A/foo
2372 // /path/foo.framework/Frameworks/bar.framework/bar => bar.framework/bar
2373 // /path/foo.framework/Libraries/bar.dylib => NULL
2374 // /path/foo.framework/bar => NULL
2375 //
2376 // Returns NULL if not a framework path
2377 //
2378 static const char* getFrameworkPartialPath(const char* path)
2379 {
2380 const char* dirDot = strrstr(path, ".framework/");
2381 if ( dirDot != NULL ) {
2382 const char* dirStart = dirDot;
2383 for ( ; dirStart >= path; --dirStart) {
2384 if ( (*dirStart == '/') || (dirStart == path) ) {
2385 const char* frameworkStart = &dirStart[1];
2386 if ( dirStart == path )
2387 --frameworkStart;
2388 size_t len = dirDot - frameworkStart;
2389 char framework[len+1];
2390 strncpy(framework, frameworkStart, len);
2391 framework[len] = '\0';
2392 const char* leaf = strrchr(path, '/');
2393 if ( leaf != NULL ) {
2394 if ( strcmp(framework, &leaf[1]) == 0 ) {
2395 return frameworkStart;
2396 }
2397 if ( gLinkContext.imageSuffix != NULL ) {
2398 // some debug frameworks have install names that end in _debug
2399 if ( strncmp(framework, &leaf[1], len) == 0 ) {
2400 if ( strcmp( gLinkContext.imageSuffix, &leaf[len+1]) == 0 )
2401 return frameworkStart;
2402 }
2403 }
2404 }
2405 }
2406 }
2407 }
2408 return NULL;
2409 }
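// A minimal illustrative sketch (not compiled; paths are hypothetical) showing the
// mappings documented in the comment above getFrameworkPartialPath().
#if 0
static void logFrameworkPartialPaths()
{
	const char* samples[] = {
		"/path/foo.framework/foo",				// => "foo.framework/foo"
		"/path/foo.framework/Versions/A/foo",	// => "foo.framework/Versions/A/foo"
		"/path/foo.framework/bar",				// => not a framework path
	};
	for (unsigned i=0; i < sizeof(samples)/sizeof(samples[0]); ++i) {
		const char* partial = getFrameworkPartialPath(samples[i]);
		dyld::log("%s => %s\n", samples[i], (partial != NULL) ? partial : "(not a framework path)");
	}
}
#endif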
2410
2411
2412 static const char* getLibraryLeafName(const char* path)
2413 {
2414 const char* start = strrchr(path, '/');
2415 if ( start != NULL )
2416 return &start[1];
2417 else
2418 return path;
2419 }
2420
2421
2422 // only for architectures that use cpu-sub-types
2423 #if CPU_SUBTYPES_SUPPORTED
2424
2425 const cpu_subtype_t CPU_SUBTYPE_END_OF_LIST = -1;
2426
2427
2428 //
2429 // A fat file may contain multiple sub-images for the same CPU type.
2430 // In that case, dyld picks which sub-image to use by scanning a table
2431 // of preferred cpu-sub-types for the running cpu.
2432 //
2433 // There is one row in the table for each cpu-sub-type on which dyld might run.
2434 // The first entry in a row is that cpu-sub-type. It is followed by all
2435 // cpu-sub-types that can run on that cpu, in preferred order. Each row ends with
2436 // a "SUBTYPE_ALL" (to denote that images written to run on any cpu-sub-type are usable),
2437 // followed by one or more CPU_SUBTYPE_END_OF_LIST to pad out this row.
2438 //
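// For example, the armv7s row below reads: on an armv7s cpu, prefer an armv7s slice,
// then v7, v7f, v6, v5, v4, and finally a slice built to run on all ARM variants.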
2439
2440
2441 #if __arm__
2442 //
2443 // ARM sub-type lists
2444 //
2445 const int kARM_RowCount = 8;
2446 static const cpu_subtype_t kARM[kARM_RowCount][9] = {
2447
2448 // armv7f can run: v7f, v7, v6, v5, and v4
2449 { CPU_SUBTYPE_ARM_V7F, CPU_SUBTYPE_ARM_V7, CPU_SUBTYPE_ARM_V6, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST },
2450
2451 // armv7k can run: v7k
2452 { CPU_SUBTYPE_ARM_V7K, CPU_SUBTYPE_END_OF_LIST },
2453
2454 // armv7s can run: v7s, v7, v7f, v7k, v6, v5, and v4
2455 { CPU_SUBTYPE_ARM_V7S, CPU_SUBTYPE_ARM_V7, CPU_SUBTYPE_ARM_V7F, CPU_SUBTYPE_ARM_V6, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST },
2456
2457 // armv7 can run: v7, v6, v5, and v4
2458 { CPU_SUBTYPE_ARM_V7, CPU_SUBTYPE_ARM_V6, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST },
2459
2460 // armv6 can run: v6, v5, and v4
2461 { CPU_SUBTYPE_ARM_V6, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST },
2462
2463 // xscale can run: xscale, v5, and v4
2464 { CPU_SUBTYPE_ARM_XSCALE, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST },
2465
2466 // armv5 can run: v5 and v4
2467 { CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST },
2468
2469 // armv4 can run: v4
2470 { CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST },
2471 };
2472 #endif
2473
2474 #if __x86_64__
2475 //
2476 // x86_64 sub-type lists
2477 //
2478 const int kX86_64_RowCount = 2;
2479 static const cpu_subtype_t kX86_64[kX86_64_RowCount][5] = {
2480
2481 // x86_64h can run: x86_64h, x86_64h(lib), x86_64(lib), and x86_64
2482 { CPU_SUBTYPE_X86_64_H, CPU_SUBTYPE_LIB64|CPU_SUBTYPE_X86_64_H, CPU_SUBTYPE_LIB64|CPU_SUBTYPE_X86_64_ALL, CPU_SUBTYPE_X86_64_ALL, CPU_SUBTYPE_END_OF_LIST },
2483
2484 // x86_64 can run: x86_64(lib) and x86_64
2485 { CPU_SUBTYPE_X86_64_ALL, CPU_SUBTYPE_LIB64|CPU_SUBTYPE_X86_64_ALL, CPU_SUBTYPE_END_OF_LIST },
2486
2487 };
2488 #endif
2489
2490
2491 // scan the tables above to find the cpu-sub-type-list for this machine
2492 static const cpu_subtype_t* findCPUSubtypeList(cpu_type_t cpu, cpu_subtype_t subtype)
2493 {
2494 switch (cpu) {
2495 #if __arm__
2496 case CPU_TYPE_ARM:
2497 for (int i=0; i < kARM_RowCount ; ++i) {
2498 if ( kARM[i][0] == subtype )
2499 return kARM[i];
2500 }
2501 break;
2502 #endif
2503 #if __x86_64__
2504 case CPU_TYPE_X86_64:
2505 for (int i=0; i < kX86_64_RowCount ; ++i) {
2506 if ( kX86_64[i][0] == subtype )
2507 return kX86_64[i];
2508 }
2509 break;
2510 #endif
2511 }
2512 return NULL;
2513 }
2514
2515
2516
2517
2518 // scan fat table-of-contents for the most preferred subtype
2519 static bool fatFindBestFromOrderedList(cpu_type_t cpu, const cpu_subtype_t list[], const fat_header* fh, uint64_t* offset, uint64_t* len)
2520 {
2521 const fat_arch* const archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2522 for (uint32_t subTypeIndex=0; list[subTypeIndex] != CPU_SUBTYPE_END_OF_LIST; ++subTypeIndex) {
2523 for(uint32_t fatIndex=0; fatIndex < OSSwapBigToHostInt32(fh->nfat_arch); ++fatIndex) {
2524 if ( ((cpu_type_t)OSSwapBigToHostInt32(archs[fatIndex].cputype) == cpu)
2525 && (list[subTypeIndex] == (cpu_subtype_t)OSSwapBigToHostInt32(archs[fatIndex].cpusubtype)) ) {
2526 *offset = OSSwapBigToHostInt32(archs[fatIndex].offset);
2527 *len = OSSwapBigToHostInt32(archs[fatIndex].size);
2528 return true;
2529 }
2530 }
2531 }
2532 return false;
2533 }
2534
2535 // scan fat table-of-contents for exact match of cpu and cpu-sub-type
2536 static bool fatFindExactMatch(cpu_type_t cpu, cpu_subtype_t subtype, const fat_header* fh, uint64_t* offset, uint64_t* len)
2537 {
2538 const fat_arch* archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2539 for(uint32_t i=0; i < OSSwapBigToHostInt32(fh->nfat_arch); ++i) {
2540 if ( ((cpu_type_t)OSSwapBigToHostInt32(archs[i].cputype) == cpu)
2541 && ((cpu_subtype_t)OSSwapBigToHostInt32(archs[i].cpusubtype) == subtype) ) {
2542 *offset = OSSwapBigToHostInt32(archs[i].offset);
2543 *len = OSSwapBigToHostInt32(archs[i].size);
2544 return true;
2545 }
2546 }
2547 return false;
2548 }
2549
2550 // scan fat table-of-contents for image with matching cpu-type and runs-on-all-sub-types
2551 static bool fatFindRunsOnAllCPUs(cpu_type_t cpu, const fat_header* fh, uint64_t* offset, uint64_t* len)
2552 {
2553 const fat_arch* archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2554 for(uint32_t i=0; i < OSSwapBigToHostInt32(fh->nfat_arch); ++i) {
2555 if ( (cpu_type_t)OSSwapBigToHostInt32(archs[i].cputype) == cpu) {
2556 switch (cpu) {
2557 #if __arm__
2558 case CPU_TYPE_ARM:
2559 if ( (cpu_subtype_t)OSSwapBigToHostInt32(archs[i].cpusubtype) == CPU_SUBTYPE_ARM_ALL ) {
2560 *offset = OSSwapBigToHostInt32(archs[i].offset);
2561 *len = OSSwapBigToHostInt32(archs[i].size);
2562 return true;
2563 }
2564 break;
2565 #endif
2566 #if __x86_64__
2567 case CPU_TYPE_X86_64:
2568 if ( (cpu_subtype_t)OSSwapBigToHostInt32(archs[i].cpusubtype) == CPU_SUBTYPE_X86_64_ALL ) {
2569 *offset = OSSwapBigToHostInt32(archs[i].offset);
2570 *len = OSSwapBigToHostInt32(archs[i].size);
2571 return true;
2572 }
2573 break;
2574 #endif
2575 }
2576 }
2577 }
2578 return false;
2579 }
2580
2581 #endif // CPU_SUBTYPES_SUPPORTED
2582
2583
2584 //
2585 // Validate the fat_header and fat_arch array:
2586 //
2587 // 1) arch count would not cause array to extend past 4096 byte read buffer
2588 // 2) no slice overlaps the fat_header and arch array
2589 // 3) arch list does not contain duplicate cputype/cpusubtype tuples
2590 // 4) arch list does not have two overlapping slices.
2591 //
2592 static bool fatValidate(const fat_header* fh)
2593 {
2594 if ( fh->magic != OSSwapBigToHostInt32(FAT_MAGIC) )
2595 return false;
2596
2597 // since only the first 4096 bytes of the file are read, we can only handle up to 204 slices ((4096 - sizeof(fat_header)) / sizeof(fat_arch)).
2598 const uint32_t sliceCount = OSSwapBigToHostInt32(fh->nfat_arch);
2599 if ( sliceCount > 204 )
2600 return false;
2601
2602 // compare all slices looking for conflicts
2603 const fat_arch* archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2604 for (uint32_t i=0; i < sliceCount; ++i) {
2605 uint32_t i_offset = OSSwapBigToHostInt32(archs[i].offset);
2606 uint32_t i_size = OSSwapBigToHostInt32(archs[i].size);
2607 uint32_t i_cputype = OSSwapBigToHostInt32(archs[i].cputype);
2608 uint32_t i_cpusubtype = OSSwapBigToHostInt32(archs[i].cpusubtype);
2609 uint32_t i_end = i_offset + i_size;
2610 // slice cannot overlap with header
2611 if ( i_offset < 4096 )
2612 return false;
2613 // slice size cannot overflow
2614 if ( i_end < i_offset )
2615 return false;
2616 for (uint32_t j=i+1; j < sliceCount; ++j) {
2617 uint32_t j_offset = OSSwapBigToHostInt32(archs[j].offset);
2618 uint32_t j_size = OSSwapBigToHostInt32(archs[j].size);
2619 uint32_t j_cputype = OSSwapBigToHostInt32(archs[j].cputype);
2620 uint32_t j_cpusubtype = OSSwapBigToHostInt32(archs[j].cpusubtype);
2621 uint32_t j_end = j_offset + j_size;
2622 // duplicate slice types not allowed
2623 if ( (i_cputype == j_cputype) && (i_cpusubtype == j_cpusubtype) )
2624 return false;
2625 // slice size cannot overflow
2626 if ( j_end < j_offset )
2627 return false;
2628 // check for overlap of slices
2629 if ( i_offset <= j_offset ) {
2630 if ( j_offset < i_end )
2631 return false; // j overlaps end of i
2632 }
2633 else {
2634 // j starts before i
2635 if ( i_offset < j_end )
2636 return false; // i overlaps end of j
2637 }
2638 }
2639 }
2640 return true;
2641 }
2642
2643 //
2644 // A fat file may contain multiple sub-images for the same cpu-type,
2645 // each optimized for a different cpu-sub-type (e.g G3 or G5).
2646 // This routine picks the optimal sub-image.
2647 //
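// For example (hypothetical fat file): on an x86_64h machine, a file containing both
// x86_64 and x86_64h slices yields the x86_64h slice; on a plain x86_64 machine the
// same file yields the x86_64 slice, because x86_64h is not in that cpu's subtype list.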
2648 static bool fatFindBest(const fat_header* fh, uint64_t* offset, uint64_t* len)
2649 {
2650 if ( !fatValidate(fh) )
2651 return false;
2652
2653 #if CPU_SUBTYPES_SUPPORTED
2654 // assume all dylibs loaded must have same cpu type as main executable
2655 const cpu_type_t cpu = sMainExecutableMachHeader->cputype;
2656
2657 // We only know the subtype to use if the main executable cpu type matches the host
2658 if ( (cpu & CPU_TYPE_MASK) == sHostCPU ) {
2659 // get preference ordered list of subtypes
2660 const cpu_subtype_t* subTypePreferenceList = findCPUSubtypeList(cpu, sHostCPUsubtype);
2661
2662 // use ordered list to find best sub-image in fat file
2663 if ( subTypePreferenceList != NULL ) {
2664 if ( fatFindBestFromOrderedList(cpu, subTypePreferenceList, fh, offset, len) )
2665 return true;
2666 }
2667
2668 // if running cpu is not in list, try for an exact match
2669 if ( fatFindExactMatch(cpu, sHostCPUsubtype, fh, offset, len) )
2670 return true;
2671 }
2672
2673 // running on an unknown cpu, can only load generic code
2674 return fatFindRunsOnAllCPUs(cpu, fh, offset, len);
2675 #else
2676 // just find first slice with matching architecture
2677 const fat_arch* archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2678 for(uint32_t i=0; i < OSSwapBigToHostInt32(fh->nfat_arch); ++i) {
2679 if ( (cpu_type_t)OSSwapBigToHostInt32(archs[i].cputype) == sMainExecutableMachHeader->cputype) {
2680 *offset = OSSwapBigToHostInt32(archs[i].offset);
2681 *len = OSSwapBigToHostInt32(archs[i].size);
2682 return true;
2683 }
2684 }
2685 return false;
2686 #endif
2687 }
2688
2689
2690
2691 //
2692 // This is used to validate if a non-fat (aka thin or raw) mach-o file can be used
2693 // on the current processor.
2694 bool isCompatibleMachO(const uint8_t* firstPage, const char* path)
2695 {
2696 #if CPU_SUBTYPES_SUPPORTED
2697 // It is deemed compatible if any of the following are true:
2698 // 1) mach_header subtype is in list of compatible subtypes for running processor
2699 // 2) mach_header subtype is same as running processor subtype
2700 // 3) mach_header subtype runs on all processor variants
2701 const mach_header* mh = (mach_header*)firstPage;
2702 if ( mh->magic == sMainExecutableMachHeader->magic ) {
2703 if ( mh->cputype == sMainExecutableMachHeader->cputype ) {
2704 if ( (mh->cputype & CPU_TYPE_MASK) == sHostCPU ) {
2705 // get preference ordered list of subtypes that this machine can use
2706 const cpu_subtype_t* subTypePreferenceList = findCPUSubtypeList(mh->cputype, sHostCPUsubtype);
2707 if ( subTypePreferenceList != NULL ) {
2708 // if image's subtype is in the list, it is compatible
2709 for (const cpu_subtype_t* p = subTypePreferenceList; *p != CPU_SUBTYPE_END_OF_LIST; ++p) {
2710 if ( *p == mh->cpusubtype )
2711 return true;
2712 }
2713 // have list and not in list, so not compatible
2714 throwf("incompatible cpu-subtype: 0x%08X in %s", mh->cpusubtype, path);
2715 }
2716 // unknown cpu sub-type, but if exact match for current subtype then ok to use
2717 if ( mh->cpusubtype == sHostCPUsubtype )
2718 return true;
2719 }
2720
2721 // cpu type has no ordered list of subtypes
2722 switch (mh->cputype) {
2723 case CPU_TYPE_I386:
2724 case CPU_TYPE_X86_64:
2725 // subtypes are not used for these architectures
2726 return true;
2727 }
2728 }
2729 }
2730 #else
2731 // For architectures that don't support cpu-sub-types
2732 // this just checks the cpu type.
2733 const mach_header* mh = (mach_header*)firstPage;
2734 if ( mh->magic == sMainExecutableMachHeader->magic ) {
2735 if ( mh->cputype == sMainExecutableMachHeader->cputype ) {
2736 return true;
2737 }
2738 }
2739 #endif
2740 return false;
2741 }
2742
2743
2744
2745
2746 // The kernel maps in the main executable before dyld gets control. We need to
2747 // make an ImageLoader* for the already mapped in main executable.
2748 static ImageLoaderMachO* instantiateFromLoadedImage(const macho_header* mh, uintptr_t slide, const char* path)
2749 {
2750 // try mach-o loader
2751 if ( isCompatibleMachO((const uint8_t*)mh, path) ) {
2752 ImageLoader* image = ImageLoaderMachO::instantiateMainExecutable(mh, slide, path, gLinkContext);
2753 addImage(image);
2754 return (ImageLoaderMachO*)image;
2755 }
2756
2757 throw "main executable not a known format";
2758 }
2759
2760 #if DYLD_SHARED_CACHE_SUPPORT
2761
2762 #if __IPHONE_OS_VERSION_MIN_REQUIRED
2763 static bool dylibsCanOverrideCache()
2764 {
2765 uint32_t devFlags = *((uint32_t*)_COMM_PAGE_DEV_FIRM);
2766 if ( (devFlags & 1) == 0 )
2767 return false;
2768 return ( (sSharedCache != NULL) && (sSharedCache->cacheType == kDyldSharedCacheTypeDevelopment) );
2769 }
2770 #endif
2771
2772 static bool findInSharedCacheImage(const char* path, bool searchByPath, const struct stat* stat_buf, const macho_header** mh, const char** pathInCache, long* slide)
2773 {
2774 if ( sSharedCache != NULL ) {
2775 #if __MAC_OS_X_VERSION_MIN_REQUIRED
2776 // Mac OS X always requires inode/mtime to validate the cache
2777 // if stat() not done yet, do it now
2778 struct stat statb;
2779 if ( stat_buf == NULL ) {
2780 if ( my_stat(path, &statb) == -1 )
2781 return false;
2782 stat_buf = &statb;
2783 }
2784 #endif
2785 #if __IPHONE_OS_VERSION_MIN_REQUIRED
2786 uint64_t hash = 0;
2787 for (const char* s=path; *s != '\0'; ++s)
2788 hash += hash*4 + *s;
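			// (equivalent to hash = hash*5 + *s; when the cache stores path hashes, detected
			// below via modTime == 0, the image's inode field appears to hold this value)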
2789 #endif
2790
2791 // walk shared cache to see if there is a cached image that matches the inode/mtime/path desired
2792 const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)sSharedCache + sSharedCache->imagesOffset);
2793 const dyld_cache_image_info* const end = &start[sSharedCache->imagesCount];
2794 #if __IPHONE_OS_VERSION_MIN_REQUIRED
2795 const bool cacheHasHashInfo = (start->modTime == 0);
2796 #endif
2797 for( const dyld_cache_image_info* p = start; p != end; ++p) {
2798 #if __IPHONE_OS_VERSION_MIN_REQUIRED
2799 // just check path
2800 const char* aPath = (char*)sSharedCache + p->pathFileOffset;
2801 if ( cacheHasHashInfo && (p->inode != hash) )
2802 continue;
2803 if ( strcmp(path, aPath) == 0 ) {
2804 // found image in cache
2805 *mh = (macho_header*)(p->address+sSharedCacheSlide);
2806 *pathInCache = aPath;
2807 *slide = sSharedCacheSlide;
2808 if ( aPath < (char*)(*mh) ) {
2809 // <rdar://problem/22056997> found alias, rescan list to get canonical name
2810 for (const dyld_cache_image_info* p2 = start; p2 != end; ++p2) {
2811 if ( p2->address == p->address ) {
2812 *pathInCache = (char*)sSharedCache + p2->pathFileOffset;
2813 break;
2814 }
2815 }
2816 }
2817 return true;
2818 }
2819 #elif __MAC_OS_X_VERSION_MIN_REQUIRED
2820 // check mtime and inode first because it is fast
2821 bool inodeMatch = ( ((time_t)p->modTime == stat_buf->st_mtime) && ((ino_t)p->inode == stat_buf->st_ino) );
2822 if ( searchByPath || sSharedCacheIgnoreInodeAndTimeStamp || inodeMatch ) {
2823 // mod-time and inode match an image in the shared cache, now check path
2824 const char* aPath = (char*)sSharedCache + p->pathFileOffset;
2825 bool cacheHit = (strcmp(path, aPath) == 0);
2826 if ( inodeMatch && !cacheHit ) {
2827 // path does not match install name of dylib in cache, but inode and mtime do match
2828 // perhaps path is a symlink to the cached dylib
2829 struct stat pathInCacheStatBuf;
2830 if ( my_stat(aPath, &pathInCacheStatBuf) != -1 )
2831 cacheHit = ( (pathInCacheStatBuf.st_dev == stat_buf->st_dev) && (pathInCacheStatBuf.st_ino == stat_buf->st_ino) );
2832 }
2833 if ( cacheHit ) {
2834 // found image in cache, return info
2835 *mh = (macho_header*)(p->address+sSharedCacheSlide);
2836 //dyld::log("findInSharedCacheImage(), mh=%p, p->address=0x%0llX, slid=0x%0lX, path=%s\n",
2837 // *mh, p->address, sSharedCacheSlide, aPath);
2838 *pathInCache = aPath;
2839 *slide = sSharedCacheSlide;
2840 return true;
2841 }
2842 }
2843 #endif
2844 }
2845 }
2846 return false;
2847 }
2848
2849 bool inSharedCache(const char* path)
2850 {
2851 const macho_header* mhInCache;
2852 const char* pathInCache;
2853 long slide;
2854 return findInSharedCacheImage(path, true, NULL, &mhInCache, &pathInCache, &slide);
2855 }
2856
2857 #endif
2858
2859 static ImageLoader* checkandAddImage(ImageLoader* image, const LoadContext& context)
2860 {
2861 // now sanity check that this loaded image does not have the same install path as any existing image
2862 const char* loadedImageInstallPath = image->getInstallPath();
2863 if ( image->isDylib() && (loadedImageInstallPath != NULL) && (loadedImageInstallPath[0] == '/') ) {
2864 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
2865 ImageLoader* anImage = *it;
2866 const char* installPath = anImage->getInstallPath();
2867 if ( installPath != NULL) {
2868 if ( strcmp(loadedImageInstallPath, installPath) == 0 ) {
2869 //dyld::log("duplicate(%s) => %p\n", installPath, anImage);
2870 removeImage(image);
2871 ImageLoader::deleteImage(image);
2872 return anImage;
2873 }
2874 }
2875 }
2876 }
2877
2878 // some APIs restrict what they can load
2879 if ( context.mustBeBundle && !image->isBundle() )
2880 throw "not a bundle";
2881 if ( context.mustBeDylib && !image->isDylib() )
2882 throw "not a dylib";
2883
2884 // regular main executables cannot be loaded
2885 if ( image->isExecutable() ) {
2886 if ( !context.canBePIE || !image->isPositionIndependentExecutable() )
2887 throw "can't load a main executable";
2888 }
2889
2890 // don't add bundles to the global list; they can be loaded but not linked. When linked, they will be added to the list
2891 if ( ! image->isBundle() )
2892 addImage(image);
2893
2894 return image;
2895 }
2896
2897 #if TARGET_IPHONE_SIMULATOR
2898 static bool isSimulatorBinary(const uint8_t* firstPages, const char* path)
2899 {
2900 const macho_header* mh = (macho_header*)firstPages;
2901 const uint32_t cmd_count = mh->ncmds;
2902 const load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
2903 const load_command* const cmdsEnd = (load_command*)((char*)cmds + mh->sizeofcmds);
2904 const struct load_command* cmd = cmds;
2905 for (uint32_t i = 0; i < cmd_count; ++i) {
2906 switch (cmd->cmd) {
2907 #if TARGET_OS_WATCH
2908 case LC_VERSION_MIN_WATCHOS:
2909 return true;
2910 #elif TARGET_OS_TV
2911 case LC_VERSION_MIN_TVOS:
2912 return true;
2913 #elif TARGET_OS_IOS
2914 case LC_VERSION_MIN_IPHONEOS:
2915 return true;
2916 #endif
2917 case LC_VERSION_MIN_MACOSX:
2918 // grandfather in a few libSystem dylibs
2919 if ((strcmp(path, "/usr/lib/system/libsystem_kernel.dylib") == 0) ||
2920 (strcmp(path, "/usr/lib/system/libsystem_platform.dylib") == 0) ||
2921 (strcmp(path, "/usr/lib/system/libsystem_pthread.dylib") == 0))
2922 return true;
2923 return false;
2924 }
2925 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
2926 if ( cmd > cmdsEnd )
2927 return false;
2928 }
2929 return false;
2930 }
2931 #endif
2932
2933 // map in file and instantiate an ImageLoader
2934 static ImageLoader* loadPhase6(int fd, const struct stat& stat_buf, const char* path, const LoadContext& context)
2935 {
2936 //dyld::log("%s(%s)\n", __func__ , path);
2937 uint64_t fileOffset = 0;
2938 uint64_t fileLength = stat_buf.st_size;
2939
2940 // validate it is a file (not directory)
2941 if ( (stat_buf.st_mode & S_IFMT) != S_IFREG )
2942 throw "not a file";
2943
2944 uint8_t firstPages[MAX_MACH_O_HEADER_AND_LOAD_COMMANDS_SIZE];
2945 bool shortPage = false;
2946
2947 // min mach-o file is 4K
2948 if ( fileLength < 4096 ) {
2949 if ( pread(fd, firstPages, fileLength, 0) != (ssize_t)fileLength )
2950 throwf("pread of short file failed: %d", errno);
2951 shortPage = true;
2952 }
2953 else {
2954 // optimistically read only first 4KB
2955 if ( pread(fd, firstPages, 4096, 0) != 4096 )
2956 throwf("pread of first 4K failed: %d", errno);
2957 }
2958
2959 // if fat wrapper, find usable sub-file
2960 const fat_header* fileStartAsFat = (fat_header*)firstPages;
2961 if ( fileStartAsFat->magic == OSSwapBigToHostInt32(FAT_MAGIC) ) {
2962 if ( OSSwapBigToHostInt32(fileStartAsFat->nfat_arch) > ((4096 - sizeof(fat_header)) / sizeof(fat_arch)) )
2963 throwf("fat header too large: %u entries", OSSwapBigToHostInt32(fileStartAsFat->nfat_arch));
2964 if ( fatFindBest(fileStartAsFat, &fileOffset, &fileLength) ) {
2965 if ( (fileOffset+fileLength) > (uint64_t)(stat_buf.st_size) )
2966 throwf("truncated fat file. file length=%llu, but needed slice goes to %llu", stat_buf.st_size, fileOffset+fileLength);
2967 if (pread(fd, firstPages, 4096, fileOffset) != 4096)
2968 throwf("pread of fat file failed: %d", errno);
2969 }
2970 else {
2971 throw "no matching architecture in universal wrapper";
2972 }
2973 }
2974
2975 // try mach-o loader
2976 if ( shortPage )
2977 throw "file too short";
2978 if ( isCompatibleMachO(firstPages, path) ) {
2979
2980 // only MH_BUNDLE, MH_DYLIB, and some MH_EXECUTE can be dynamically loaded
2981 const mach_header* mh = (mach_header*)firstPages;
2982 switch ( mh->filetype ) {
2983 case MH_EXECUTE:
2984 case MH_DYLIB:
2985 case MH_BUNDLE:
2986 break;
2987 default:
2988 throw "mach-o, but wrong filetype";
2989 }
2990
2991 uint32_t headerAndLoadCommandsSize = sizeof(macho_header) + mh->sizeofcmds;
2992 if ( headerAndLoadCommandsSize > MAX_MACH_O_HEADER_AND_LOAD_COMMANDS_SIZE )
2993 throwf("malformed mach-o: load commands size (%u) > %u", headerAndLoadCommandsSize, MAX_MACH_O_HEADER_AND_LOAD_COMMANDS_SIZE);
2994
2995 if ( headerAndLoadCommandsSize > fileLength )
2996 dyld::throwf("malformed mach-o: load commands size (%u) > mach-o file size (%llu)", headerAndLoadCommandsSize, fileLength);
2997
2998 if ( headerAndLoadCommandsSize > 4096 ) {
2999 // read more pages
3000 unsigned readAmount = headerAndLoadCommandsSize - 4096;
3001 if ( pread(fd, &firstPages[4096], readAmount, fileOffset+4096) != readAmount )
3002 throwf("pread of extra load commands past 4KB failed: %d", errno);
3003 }
3004
3005 #if TARGET_IPHONE_SIMULATOR
3006 // <rdar://problem/14168872> dyld_sim should restrict loading osx binaries
3007 if ( !isSimulatorBinary(firstPages, path) ) {
3008 #if TARGET_OS_WATCH
3009 throw "mach-o, but not built for watchOS simulator";
3010 #elif TARGET_OS_TV
3011 throw "mach-o, but not built for tvOS simulator";
3012 #else
3013 throw "mach-o, but not built for iOS simulator";
3014 #endif
3015 }
3016 #endif
3017
3018 // instantiate an image
3019 ImageLoader* image = ImageLoaderMachO::instantiateFromFile(path, fd, firstPages, headerAndLoadCommandsSize, fileOffset, fileLength, stat_buf, gLinkContext);
3020
3021 // validate
3022 return checkandAddImage(image, context);
3023 }
3024
3025 // try other file formats here...
3026
3027
3028 // throw error about what was found
3029 switch (*(uint32_t*)firstPages) {
3030 case MH_MAGIC:
3031 case MH_CIGAM:
3032 case MH_MAGIC_64:
3033 case MH_CIGAM_64:
3034 throw "mach-o, but wrong architecture";
3035 default:
3036 throwf("unknown file type, first eight bytes: 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X",
3037 firstPages[0], firstPages[1], firstPages[2], firstPages[3], firstPages[4], firstPages[5], firstPages[6],firstPages[7]);
3038 }
3039 }
3040
3041
3042 static ImageLoader* loadPhase5open(const char* path, const LoadContext& context, const struct stat& stat_buf, std::vector<const char*>* exceptions)
3043 {
3044 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3045
3046 // open file (automagically closed when this function exits)
3047 FileOpener file(path);
3048
3049 // just return NULL if file not found, but record any other errors
3050 if ( file.getFileDescriptor() == -1 ) {
3051 int err = errno;
3052 if ( err != ENOENT ) {
3053 const char* newMsg;
3054 if ( (err == EPERM) && sandboxBlockedOpen(path) )
3055 newMsg = dyld::mkstringf("file system sandbox blocked open() of '%s'", path);
3056 else
3057 newMsg = dyld::mkstringf("%s: open() failed with errno=%d", path, err);
3058 exceptions->push_back(newMsg);
3059 }
3060 return NULL;
3061 }
3062
3063 try {
3064 return loadPhase6(file.getFileDescriptor(), stat_buf, path, context);
3065 }
3066 catch (const char* msg) {
3067 const char* newMsg = dyld::mkstringf("%s: %s", path, msg);
3068 exceptions->push_back(newMsg);
3069 free((void*)msg);
3070 return NULL;
3071 }
3072 }
3073
3074
3075 #if __MAC_OS_X_VERSION_MIN_REQUIRED
3076 static ImageLoader* loadPhase5load(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3077 {
3078 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3079 ImageLoader* image = NULL;
3080
3081 #if SUPPORT_ACCELERATE_TABLES
3082 if ( sAllCacheImagesProxy != NULL ) {
3083 unsigned index;
3084 if ( sAllCacheImagesProxy->hasDylib(path, &index) )
3085 return sAllCacheImagesProxy;
3086 }
3087 #endif
3088
3089 // just return NULL if file not found, but record any other errors
3090 struct stat stat_buf;
3091 if ( my_stat(path, &stat_buf) == -1 ) {
3092 int err = errno;
3093 if ( err != ENOENT ) {
3094 if ( (err == EPERM) && sandboxBlockedStat(path) )
3095 exceptions->push_back(dyld::mkstringf("%s: file system sandbox blocked stat()", path));
3096 else
3097 exceptions->push_back(dyld::mkstringf("%s: stat() failed with errno=%d", path, err));
3098 }
3099 return NULL;
3100 }
3101
3102 // in case image was renamed or found via symlinks, check for inode match
3103 image = findLoadedImage(stat_buf);
3104 if ( image != NULL )
3105 return image;
3106
3107 // do nothing if not already loaded and if RTLD_NOLOAD or NSADDIMAGE_OPTION_RETURN_ONLY_IF_LOADED
3108 if ( context.dontLoad )
3109 return NULL;
3110
3111 #if DYLD_SHARED_CACHE_SUPPORT
3112 // see if this image is in shared cache
3113 const macho_header* mhInCache;
3114 const char* pathInCache;
3115 long slideInCache;
3116 if ( findInSharedCacheImage(path, false, &stat_buf, &mhInCache, &pathInCache, &slideInCache) ) {
3117 image = ImageLoaderMachO::instantiateFromCache(mhInCache, pathInCache, slideInCache, stat_buf, gLinkContext);
3118 return checkandAddImage(image, context);
3119 }
3120 #endif
3121 // file exists and is not in dyld shared cache, so open it
3122 return loadPhase5open(path, context, stat_buf, exceptions);
3123 }
3124 #endif // __MAC_OS_X_VERSION_MIN_REQUIRED
3125
3126
3127
3128 #if __IPHONE_OS_VERSION_MIN_REQUIRED
3129 static ImageLoader* loadPhase5stat(const char* path, const LoadContext& context, struct stat* stat_buf,
3130 int* statErrNo, bool* imageFound, std::vector<const char*>* exceptions)
3131 {
3132 ImageLoader* image = NULL;
3133 *imageFound = false;
3134 *statErrNo = 0;
3135 if ( my_stat(path, stat_buf) == 0 ) {
3136 // in case image was renamed or found via symlinks, check for inode match
3137 image = findLoadedImage(*stat_buf);
3138 if ( image != NULL ) {
3139 *imageFound = true;
3140 return image;
3141 }
3142 // do nothing if not already loaded and if RTLD_NOLOAD
3143 if ( context.dontLoad ) {
3144 *imageFound = true;
3145 return NULL;
3146 }
3147 image = loadPhase5open(path, context, *stat_buf, exceptions);
3148 if ( image != NULL ) {
3149 *imageFound = true;
3150 return image;
3151 }
3152 }
3153 else {
3154 *statErrNo = errno;
3155 }
3156 return NULL;
3157 }
3158
3159 // try to open file
3160 static ImageLoader* loadPhase5load(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3161 {
3162 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3163 struct stat stat_buf;
3164 bool imageFound;
3165 int statErrNo;
3166 ImageLoader* image;
3167 #if DYLD_SHARED_CACHE_SUPPORT
3168 #if SUPPORT_ACCELERATE_TABLES
3169 if ( sAllCacheImagesProxy != NULL ) {
3170 if ( sAllCacheImagesProxy->hasDylib(path, &cacheIndex) )
3171 return sAllCacheImagesProxy;
3172 }
3173 #endif
3174 if ( dylibsCanOverrideCache() ) {
3175 // flag is set that allows installed framework roots to override dyld shared cache
3176 image = loadPhase5stat(path, context, &stat_buf, &statErrNo, &imageFound, exceptions);
3177 if ( imageFound )
3178 return image;
3179 }
3180 // see if this image is in shared cache
3181 const macho_header* mhInCache;
3182 const char* pathInCache;
3183 long slideInCache;
3184 if ( findInSharedCacheImage(path, true, NULL, &mhInCache, &pathInCache, &slideInCache) ) {
3185 // see if this image in the cache was already loaded via a different path
3186 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); ++it) {
3187 ImageLoader* anImage = *it;
3188 if ( (const macho_header*)anImage->machHeader() == mhInCache )
3189 return anImage;
3190 }
3191 // do nothing if not already loaded and if RTLD_NOLOAD
3192 if ( context.dontLoad )
3193 return NULL;
3194 // nope, so instantiate a new image from dyld shared cache
3195 // <rdar://problem/7014995> zero out stat buffer so mtime, etc are zero for items from the shared cache
3196 bzero(&stat_buf, sizeof(stat_buf));
3197 image = ImageLoaderMachO::instantiateFromCache(mhInCache, pathInCache, slideInCache, stat_buf, gLinkContext);
3198 return checkandAddImage(image, context);
3199 }
3200
3201 if ( !dylibsCanOverrideCache() ) {
3202 // flag is not set, and image is not in the cache, so try opening the file
3203 image = loadPhase5stat(path, context, &stat_buf, &statErrNo, &imageFound, exceptions);
3204 if ( imageFound )
3205 return image;
3206 }
3207 #else
3208 image = loadPhase5stat(path, context, &stat_buf, &statErrNo, &imageFound, exceptions);
3209 if ( imageFound )
3210 return image;
3211 #endif
3212 // just return NULL if file not found, but record any other errors
3213 if ( (statErrNo != ENOENT) && (statErrNo != 0) ) {
3214 if ( (statErrNo == EPERM) && sandboxBlockedStat(path) )
3215 exceptions->push_back(dyld::mkstringf("%s: file system sandbox blocked stat()", path));
3216 else
3217 exceptions->push_back(dyld::mkstringf("%s: stat() failed with errno=%d", path, statErrNo));
3218 }
3219 return NULL;
3220 }
3221 #endif // __IPHONE_OS_VERSION_MIN_REQUIRED
3222
3223
3224 // look for path match with existing loaded images
3225 static ImageLoader* loadPhase5check(const char* path, const char* orgPath, const LoadContext& context)
3226 {
3227 //dyld::log("%s(%s, %s)\n", __func__ , path, orgPath);
3228 // search path against load-path and install-path of all already loaded images
3229 uint32_t hash = ImageLoader::hash(path);
3230 //dyld::log("check() hash=%d, path=%s\n", hash, path);
3231 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
3232 ImageLoader* anImage = *it;
3233 // check hash first to cut down on strcmp calls
3234 //dyld::log(" check() hash=%d, path=%s\n", anImage->getPathHash(), anImage->getPath());
3235 if ( anImage->getPathHash() == hash ) {
3236 if ( strcmp(path, anImage->getPath()) == 0 ) {
3237 // if we are looking for a dylib don't return something else
3238 if ( !context.mustBeDylib || anImage->isDylib() )
3239 return anImage;
3240 }
3241 }
3242 if ( context.matchByInstallName || anImage->matchInstallPath() ) {
3243 const char* installPath = anImage->getInstallPath();
3244 if ( installPath != NULL) {
3245 if ( strcmp(path, installPath) == 0 ) {
3246 // if we are looking for a dylib don't return something else
3247 if ( !context.mustBeDylib || anImage->isDylib() )
3248 return anImage;
3249 }
3250 }
3251 }
3252 // an install name starting with @rpath should match by install name, not just real path
3253 if ( (orgPath[0] == '@') && (strncmp(orgPath, "@rpath/", 7) == 0) ) {
3254 const char* installPath = anImage->getInstallPath();
3255 if ( installPath != NULL) {
3256 if ( !context.mustBeDylib || anImage->isDylib() ) {
3257 if ( strcmp(orgPath, installPath) == 0 )
3258 return anImage;
3259 }
3260 }
3261 }
3262 }
3263
3264 //dyld::log("%s(%s) => NULL\n", __func__, path);
3265 return NULL;
3266 }
3267
3268
3269 // open or check existing
3270 static ImageLoader* loadPhase5(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3271 {
3272 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3273
3274 // check for specific dylib overrides
3275 for (std::vector<DylibOverride>::iterator it = sDylibOverrides.begin(); it != sDylibOverrides.end(); ++it) {
3276 if ( strcmp(it->installName, path) == 0 ) {
3277 path = it->override;
3278 break;
3279 }
3280 }
3281
3282 if ( exceptions != NULL )
3283 return loadPhase5load(path, orgPath, context, cacheIndex, exceptions);
3284 else
3285 return loadPhase5check(path, orgPath, context);
3286 }
3287
3288 // try with and without image suffix
3289 static ImageLoader* loadPhase4(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3290 {
3291 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3292 ImageLoader* image = NULL;
3293 if ( gLinkContext.imageSuffix != NULL ) {
3294 char pathWithSuffix[strlen(path)+strlen( gLinkContext.imageSuffix)+2];
3295 ImageLoader::addSuffix(path, gLinkContext.imageSuffix, pathWithSuffix);
3296 image = loadPhase5(pathWithSuffix, orgPath, context, cacheIndex, exceptions);
3297 }
3298 if ( image == NULL )
3299 image = loadPhase5(path, orgPath, context, cacheIndex, exceptions);
3300 return image;
3301 }
3302
3303 static ImageLoader* loadPhase2(const char* path, const char* orgPath, const LoadContext& context,
3304 const char* const frameworkPaths[], const char* const libraryPaths[],
3305 unsigned& cacheIndex, std::vector<const char*>* exceptions); // forward reference
3306
3307
3308 // expand @ variables
3309 static ImageLoader* loadPhase3(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3310 {
3311 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3312 ImageLoader* image = NULL;
3313 if ( strncmp(path, "@executable_path/", 17) == 0 ) {
3314 #if __MAC_OS_X_VERSION_MIN_REQUIRED
3315 // @executable_path cannot be used in any binary in a setuid process rdar://problem/4589305
3316 if ( gLinkContext.processIsRestricted )
3317 throwf("unsafe use of @executable_path in %s with restricted binary", context.origin);
3318 #endif
3319 // handle @executable_path path prefix
3320 const char* executablePath = sExecPath;
3321 char newPath[strlen(executablePath) + strlen(path)];
3322 strcpy(newPath, executablePath);
3323 char* addPoint = strrchr(newPath,'/');
3324 if ( addPoint != NULL )
3325 strcpy(&addPoint[1], &path[17]);
3326 else
3327 strcpy(newPath, &path[17]);
3328 image = loadPhase4(newPath, orgPath, context, cacheIndex, exceptions);
3329 if ( image != NULL )
3330 return image;
3331
3332 // perhaps main executable path is a sym link, find realpath and retry
3333 char resolvedPath[PATH_MAX];
3334 if ( realpath(sExecPath, resolvedPath) != NULL ) {
3335 char newRealPath[strlen(resolvedPath) + strlen(path)];
3336 strcpy(newRealPath, resolvedPath);
3337 addPoint = strrchr(newRealPath,'/');
3338 if ( addPoint != NULL )
3339 strcpy(&addPoint[1], &path[17]);
3340 else
3341 strcpy(newRealPath, &path[17]);
3342 image = loadPhase4(newRealPath, orgPath, context, cacheIndex, exceptions);
3343 if ( image != NULL )
3344 return image;
3345 }
3346 }
3347 else if ( (strncmp(path, "@loader_path/", 13) == 0) && (context.origin != NULL) ) {
3348 #if __MAC_OS_X_VERSION_MIN_REQUIRED
3349 // @loader_path cannot be used from the main executable of a setuid process rdar://problem/4589305
3350 if ( gLinkContext.processIsRestricted && (strcmp(context.origin, sExecPath) == 0) )
3351 throwf("unsafe use of @loader_path in %s with restricted binary", context.origin);
3352 #endif
3353 // handle @loader_path path prefix
3354 char newPath[strlen(context.origin) + strlen(path)];
3355 strcpy(newPath, context.origin);
3356 char* addPoint = strrchr(newPath,'/');
3357 if ( addPoint != NULL )
3358 strcpy(&addPoint[1], &path[13]);
3359 else
3360 strcpy(newPath, &path[13]);
3361 image = loadPhase4(newPath, orgPath, context, cacheIndex, exceptions);
3362 if ( image != NULL )
3363 return image;
3364
3365 // perhaps loader path is a sym link, find realpath and retry
3366 char resolvedPath[PATH_MAX];
3367 if ( realpath(context.origin, resolvedPath) != NULL ) {
3368 char newRealPath[strlen(resolvedPath) + strlen(path)];
3369 strcpy(newRealPath, resolvedPath);
3370 addPoint = strrchr(newRealPath,'/');
3371 if ( addPoint != NULL )
3372 strcpy(&addPoint[1], &path[13]);
3373 else
3374 strcpy(newRealPath, &path[13]);
3375 image = loadPhase4(newRealPath, orgPath, context, cacheIndex, exceptions);
3376 if ( image != NULL )
3377 return image;
3378 }
3379 }
3380 else if ( context.implicitRPath || (strncmp(path, "@rpath/", 7) == 0) ) {
3381 const char* trailingPath = (strncmp(path, "@rpath/", 7) == 0) ? &path[7] : path;
3382 // substitute @rpath with all -rpath paths up the load chain
3383 for(const ImageLoader::RPathChain* rp=context.rpath; rp != NULL; rp=rp->next) {
3384 if (rp->paths != NULL ) {
3385 for(std::vector<const char*>::iterator it=rp->paths->begin(); it != rp->paths->end(); ++it) {
3386 const char* anRPath = *it;
3387 char newPath[strlen(anRPath) + strlen(trailingPath)+2];
3388 strcpy(newPath, anRPath);
3389 if ( newPath[strlen(newPath)-1] != '/' )
3390 strcat(newPath, "/");
3391 strcat(newPath, trailingPath);
3392 image = loadPhase4(newPath, orgPath, context, cacheIndex, exceptions);
3393 if ( gLinkContext.verboseRPaths && (exceptions != NULL) ) {
3394 if ( image != NULL )
3395 dyld::log("RPATH successful expansion of %s to: %s\n", orgPath, newPath);
3396 else
3397 dyld::log("RPATH failed to expanding %s to: %s\n", orgPath, newPath);
3398 }
3399 if ( image != NULL )
3400 return image;
3401 }
3402 }
3403 }
3404
3405 // substitute @rpath with LD_LIBRARY_PATH
3406 if ( sEnv.LD_LIBRARY_PATH != NULL ) {
3407 image = loadPhase2(trailingPath, orgPath, context, NULL, sEnv.LD_LIBRARY_PATH, cacheIndex, exceptions);
3408 if ( image != NULL )
3409 return image;
3410 }
3411
3412 // if this is the "open" pass, don't try to open @rpath/... as a relative path
3413 if ( (exceptions != NULL) && (trailingPath != path) )
3414 return NULL;
3415 }
3416 #if __MAC_OS_X_VERSION_MIN_REQUIRED
3417 else if ( gLinkContext.processIsRestricted && (path[0] != '/' ) ) {
3418 throwf("unsafe use of relative rpath %s in %s with restricted binary", path, context.origin);
3419 }
3420 #endif
3421
3422 return loadPhase4(path, orgPath, context, cacheIndex, exceptions);
3423 }
3424
3425
3426 // try search paths
3427 static ImageLoader* loadPhase2(const char* path, const char* orgPath, const LoadContext& context,
3428 const char* const frameworkPaths[], const char* const libraryPaths[],
3429 unsigned& cacheIndex, std::vector<const char*>* exceptions)
3430 {
3431 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3432 ImageLoader* image = NULL;
3433 const char* frameworkPartialPath = getFrameworkPartialPath(path);
3434 if ( frameworkPaths != NULL ) {
3435 if ( frameworkPartialPath != NULL ) {
3436 const size_t frameworkPartialPathLen = strlen(frameworkPartialPath);
3437 for(const char* const* fp = frameworkPaths; *fp != NULL; ++fp) {
3438 char npath[strlen(*fp)+frameworkPartialPathLen+8];
3439 strcpy(npath, *fp);
3440 strcat(npath, "/");
3441 strcat(npath, frameworkPartialPath);
3442 //dyld::log("dyld: fallback framework path used: %s() -> loadPhase4(\"%s\", ...)\n", __func__, npath);
3443 image = loadPhase4(npath, orgPath, context, cacheIndex, exceptions);
3444 if ( image != NULL )
3445 return image;
3446 }
3447 }
3448 }
3449 // <rdar://problem/12649639> An executable with the same name as a framework & DYLD_LIBRARY_PATH pointing to it gets loaded twice
3450 // <rdar://problem/14160846> Some apps depend on frameworks being found via library paths
3451 if ( (libraryPaths != NULL) && ((frameworkPartialPath == NULL) || sFrameworksFoundAsDylibs) ) {
3452 const char* libraryLeafName = getLibraryLeafName(path);
3453 const size_t libraryLeafNameLen = strlen(libraryLeafName);
3454 for(const char* const* lp = libraryPaths; *lp != NULL; ++lp) {
3455 char libpath[strlen(*lp)+libraryLeafNameLen+8];
3456 strcpy(libpath, *lp);
3457 strcat(libpath, "/");
3458 strcat(libpath, libraryLeafName);
3459 //dyld::log("dyld: fallback library path used: %s() -> loadPhase4(\"%s\", ...)\n", __func__, libpath);
3460 image = loadPhase4(libpath, orgPath, context, cacheIndex, exceptions);
3461 if ( image != NULL )
3462 return image;
3463 }
3464 }
3465 return NULL;
3466 }
3467
3468 // try search overrides and fallbacks
3469 static ImageLoader* loadPhase1(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3470 {
3471 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3472 ImageLoader* image = NULL;
3473
3474 // handle LD_LIBRARY_PATH environment variables that force searching
3475 if ( context.useLdLibraryPath && (sEnv.LD_LIBRARY_PATH != NULL) ) {
3476 image = loadPhase2(path, orgPath, context, NULL, sEnv.LD_LIBRARY_PATH, cacheIndex,exceptions);
3477 if ( image != NULL )
3478 return image;
3479 }
3480
3481 // handle DYLD_ environment variables that force searching
3482 if ( context.useSearchPaths && ((sEnv.DYLD_FRAMEWORK_PATH != NULL) || (sEnv.DYLD_LIBRARY_PATH != NULL)) ) {
3483 image = loadPhase2(path, orgPath, context, sEnv.DYLD_FRAMEWORK_PATH, sEnv.DYLD_LIBRARY_PATH, cacheIndex, exceptions);
3484 if ( image != NULL )
3485 return image;
3486 }
3487
3488 // try raw path
3489 image = loadPhase3(path, orgPath, context, cacheIndex, exceptions);
3490 if ( image != NULL )
3491 return image;
3492
3493 // try fallback paths only during the second pass (the one that opens files)
3494 const char* const* fallbackLibraryPaths = sEnv.DYLD_FALLBACK_LIBRARY_PATH;
3495 if ( (fallbackLibraryPaths != NULL) && !context.useFallbackPaths )
3496 fallbackLibraryPaths = NULL;
3497 if ( !context.dontLoad && (exceptions != NULL) && ((sEnv.DYLD_FALLBACK_FRAMEWORK_PATH != NULL) || (fallbackLibraryPaths != NULL)) ) {
3498 image = loadPhase2(path, orgPath, context, sEnv.DYLD_FALLBACK_FRAMEWORK_PATH, fallbackLibraryPaths, cacheIndex, exceptions);
3499 if ( image != NULL )
3500 return image;
3501 }
3502
3503 return NULL;
3504 }
3505
3506 // try root substitutions
3507 static ImageLoader* loadPhase0(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3508 {
3509 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3510
3511 #if SUPPORT_ROOT_PATH
3512 // handle DYLD_ROOT_PATH which forces absolute paths to use a new root
3513 if ( (gLinkContext.rootPaths != NULL) && (path[0] == '/') ) {
3514 for(const char* const* rootPath = gLinkContext.rootPaths ; *rootPath != NULL; ++rootPath) {
3515 char newPath[strlen(*rootPath) + strlen(path)+2];
3516 strcpy(newPath, *rootPath);
3517 strcat(newPath, path);
3518 ImageLoader* image = loadPhase1(newPath, orgPath, context, cacheIndex, exceptions);
3519 if ( image != NULL )
3520 return image;
3521 }
3522 }
3523 #endif
3524
3525 // try raw path
3526 return loadPhase1(path, orgPath, context, cacheIndex, exceptions);
3527 }
3528
3529 #if DYLD_SHARED_CACHE_SUPPORT
3530 static bool cacheablePath(const char* path) {
3531 if (strncmp(path, "/usr/lib/", 9) == 0)
3532 return true;
3533 if (strncmp(path, "/System/Library/", 16) == 0)
3534 return true;
3535 return false;
3536 }
3537 #endif
3538
3539 //
3540 // Given all the DYLD_ environment variables, the general case for loading libraries
3541 // is that any given path expands into a list of possible locations to load. We
3542 // also must take care to ensure two copies of the "same" library are never loaded.
3543 //
3544 // The algorithm used here is that there is a separate function for each "phase" of the
3545 // path expansion. Each phase function calls the next phase with each possible expansion
3546 // of that phase. The result is that the last phase is called with all possible paths.
3547 //
3548 // To catch duplicates the algorithm is run twice. The first time, the last phase checks
3549 // the path against all loaded images. The second time, the last phase calls open() on
3550 // the path. Either time, if an image is found, the phases all unwind without checking
3551 // for other paths.
3552 //
3553 ImageLoader* load(const char* path, const LoadContext& context, unsigned& cacheIndex)
3554 {
3555 CRSetCrashLogMessage2(path);
3556 const char* orgPath = path;
3557 cacheIndex = UINT32_MAX;
3558
3559 //dyld::log("%s(%s)\n", __func__ , path);
3560 char realPath[PATH_MAX];
3561 // when DYLD_IMAGE_SUFFIX is in use, do a realpath(), otherwise a load of "Foo.framework/Foo" will not match
3562 if ( context.useSearchPaths && ( gLinkContext.imageSuffix != NULL) ) {
3563 if ( realpath(path, realPath) != NULL )
3564 path = realPath;
3565 }
3566
3567 // try all path permutations and check against existing loaded images
3568
3569 ImageLoader* image = loadPhase0(path, orgPath, context, cacheIndex, NULL);
3570 if ( image != NULL ) {
3571 CRSetCrashLogMessage2(NULL);
3572 return image;
3573 }
3574
3575 // try all path permutations and try open() until first success
3576 std::vector<const char*> exceptions;
3577 image = loadPhase0(path, orgPath, context, cacheIndex, &exceptions);
3578 #if __IPHONE_OS_VERSION_MIN_REQUIRED && DYLD_SHARED_CACHE_SUPPORT && !TARGET_IPHONE_SIMULATOR
3579 // <rdar://problem/16704628> support symlinks on disk to a path in dyld shared cache
3580 if ( (image == NULL) && cacheablePath(path) && !context.dontLoad ) {
3581 char resolvedPath[PATH_MAX];
3582 realpath(path, resolvedPath);
3583 int myerr = errno;
3584 // If realpath() resolves to a path which does not exist on disk, errno is set to ENOENT
3585 if ( (myerr == ENOENT) || (myerr == 0) )
3586 {
3587 // see if this image is in shared cache
3588 const macho_header* mhInCache;
3589 const char* pathInCache;
3590 long slideInCache;
3591 if ( findInSharedCacheImage(resolvedPath, false, NULL, &mhInCache, &pathInCache, &slideInCache) ) {
3592 struct stat stat_buf;
3593 bzero(&stat_buf, sizeof(stat_buf));
3594 try {
3595 image = ImageLoaderMachO::instantiateFromCache(mhInCache, pathInCache, slideInCache, stat_buf, gLinkContext);
3596 image = checkandAddImage(image, context);
3597 }
3598 catch (...) {
3599 image = NULL;
3600 }
3601 }
3602 }
3603 }
3604 #endif
3605 CRSetCrashLogMessage2(NULL);
3606 if ( image != NULL ) {
3607 // <rdar://problem/6916014> leak in dyld during dlopen when using DYLD_ variables
3608 for (std::vector<const char*>::iterator it = exceptions.begin(); it != exceptions.end(); ++it) {
3609 free((void*)(*it));
3610 }
3611 #if DYLD_SHARED_CACHE_SUPPORT
3612 // if loaded image is not from cache, but original path is in cache
3613 // set gSharedCacheOverridden flag to disable some ObjC optimizations
3614 if ( !gSharedCacheOverridden && !image->inSharedCache() && image->isDylib() && cacheablePath(path) && inSharedCache(path) ) {
3615 gSharedCacheOverridden = true;
3616 }
3617 #endif
3618 return image;
3619 }
3620 else if ( exceptions.size() == 0 ) {
3621 if ( context.dontLoad ) {
3622 return NULL;
3623 }
3624 else
3625 throw "image not found";
3626 }
3627 else {
3628 const char* msgStart = "no suitable image found. Did find:";
3629 const char* delim = "\n\t";
3630 size_t allsizes = strlen(msgStart)+8;
3631 for (size_t i=0; i < exceptions.size(); ++i)
3632 allsizes += (strlen(exceptions[i]) + strlen(delim));
3633 char* fullMsg = new char[allsizes];
3634 strcpy(fullMsg, msgStart);
3635 for (size_t i=0; i < exceptions.size(); ++i) {
3636 strcat(fullMsg, delim);
3637 strcat(fullMsg, exceptions[i]);
3638 free((void*)exceptions[i]);
3639 }
3640 throw (const char*)fullMsg;
3641 }
3642 }
3643
3644
3645
3646 #if DYLD_SHARED_CACHE_SUPPORT
3647
3648
3649
3650 #if __i386__
3651 #define ARCH_NAME "i386"
3652 #define ARCH_CACHE_MAGIC "dyld_v1 i386"
3653 #elif __x86_64__
3654 #define ARCH_NAME "x86_64"
3655 #define ARCH_CACHE_MAGIC "dyld_v1 x86_64"
3656 #define ARCH_NAME_H "x86_64h"
3657 #define ARCH_CACHE_MAGIC_H "dyld_v1 x86_64h"
3658 #elif __ARM_ARCH_5TEJ__
3659 #define ARCH_NAME "armv5"
3660 #define ARCH_CACHE_MAGIC "dyld_v1 armv5"
3661 #elif __ARM_ARCH_6K__
3662 #define ARCH_NAME "armv6"
3663 #define ARCH_CACHE_MAGIC "dyld_v1 armv6"
3664 #elif __ARM_ARCH_7F__
3665 #define ARCH_NAME "armv7f"
3666 #define ARCH_CACHE_MAGIC "dyld_v1 armv7f"
3667 #elif __ARM_ARCH_7K__
3668 #define ARCH_NAME "armv7k"
3669 #define ARCH_CACHE_MAGIC "dyld_v1 armv7k"
3670 #elif __ARM_ARCH_7A__
3671 #define ARCH_NAME "armv7"
3672 #define ARCH_CACHE_MAGIC "dyld_v1 armv7"
3673 #elif __ARM_ARCH_7S__
3674 #define ARCH_NAME "armv7s"
3675 #define ARCH_CACHE_MAGIC "dyld_v1 armv7s"
3676 #elif __arm64__
3677 #define ARCH_NAME "arm64"
3678 #define ARCH_CACHE_MAGIC "dyld_v1 arm64"
3679 #endif
3680
3681
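// Asks the kernel whether a shared cache is already mapped into this process's shared
// region; on success the cache's base address is returned through start_address.
// Returns -1 when this process is not using the shared region at all.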
3682 static int __attribute__((noinline)) _shared_region_check_np(uint64_t* start_address)
3683 {
3684 if ( gLinkContext.sharedRegionMode == ImageLoader::kUseSharedRegion )
3685 return syscall(294, start_address);
3686 return -1;
3687 }
3688
3689
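// Applies version 2 slide info to one chain of pointers within a page of the cache's
// __DATA region. The high bits of each pointer (delta_mask) encode the offset to the
// next pointer in the chain (a delta of zero ends the chain); the remaining bits hold
// the value, which gets value_add plus the slide applied when non-zero.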
3690 static void rebaseChain(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info2* slideInfo)
3691 {
3692 const uintptr_t deltaMask = (uintptr_t)(slideInfo->delta_mask);
3693 const uintptr_t valueMask = ~deltaMask;
3694 const uintptr_t valueAdd = (uintptr_t)(slideInfo->value_add);
3695 const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
3696
3697 uint32_t pageOffset = startOffset;
3698 uint32_t delta = 1;
3699 while ( delta != 0 ) {
3700 uint8_t* loc = pageContent + pageOffset;
3701 uintptr_t rawValue = *((uintptr_t*)loc);
3702 delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
3703 uintptr_t value = (rawValue & valueMask);
3704 if ( value != 0 ) {
3705 value += valueAdd;
3706 value += slideAmount;
3707 }
3708 *((uintptr_t*)loc) = value;
3709 //dyld::log(" pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
3710 pageOffset += delta;
3711 }
3712 }
3713
3714
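// Registers the cache file's code signature with the kernel (F_ADDFILESIGS_RETURN),
// verifies that every requested mapping lies within the signed range, then mmap()s the
// first pages of the file and compares them to the bytes already read as a sanity check
// that the signed file can be mapped and read.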
3715 static void loadAndCheckCodeSignature(int fd, uint32_t count, const shared_file_mapping_np mappings[],
3716 off_t codeSignatureOffset, size_t codeSignatureSize,
3717 const void *firstPages, size_t firstPagesSize)
3718 {
3719 // register code signature blob for whole dyld cache
3720 fsignatures_t siginfo;
3721 siginfo.fs_file_start = 0; // cache always starts at beginning of file
3722 siginfo.fs_blob_start = (void*)codeSignatureOffset;
3723 siginfo.fs_blob_size = codeSignatureSize;
3724
3725 int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
3726 // <rdar://problem/12891874> don't warn in chrooted case because mapping syscall is about to fail too
3727 if ( result == -1 ) {
3728 #if __IPHONE_OS_VERSION_MIN_REQUIRED
3729 throwf("code signature registration for shared cache failed with errno=%d\n", errno);
3730 #else
3731 if ( gLinkContext.verboseMapping )
3732 dyld::log("dyld: code signature registration for shared cache failed with errno=%d\n", errno);
3733 #endif
3734 }
3735 uint64_t codeSignedLength = siginfo.fs_file_start;
3736 for (uint32_t i = 0; i < count; ++i) {
3737 if ( (mappings[i].sfm_size > codeSignedLength) || (mappings[i].sfm_file_offset > (codeSignedLength - mappings[i].sfm_size)) )
3738 throw "dyld shared cache mapping not covered by code signature";
3739 }
3740
3741 void *fdata = xmmap(NULL, firstPagesSize, PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
3742 if ( fdata == MAP_FAILED )
3743 throwf("mmap() errno=%d validating first page of shared cache", errno);
3744 if ( memcmp(fdata, firstPages, firstPagesSize) != 0 )
3745 throwf("mmap() page compare failed for shared cache");
3746 munmap(fdata, firstPagesSize);
3747 }
3748
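// Maps the shared cache. In the normal case this is a single syscall that places the
// cache into the region shared across all processes. When shared-region use is disabled,
// the shared region sub-map is torn down and the cache is mmap()ed privately into this
// process, with the slide-info rebasing applied by dyld itself below.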
3749 static int __attribute__((noinline)) _shared_region_map_and_slide_np(int fd, uint32_t count, const shared_file_mapping_np mappings[],
3750 long slide, void* slideInfo, unsigned long slideInfoSize)
3751 {
3752 if ( gLinkContext.sharedRegionMode == ImageLoader::kUseSharedRegion ) {
3753 return syscall(438, fd, count, mappings, slide, slideInfo, slideInfoSize);
3754 }
3755
3756 // remove the shared region sub-map
3757 vm_deallocate(mach_task_self(), (vm_address_t)SHARED_REGION_BASE, SHARED_REGION_SIZE);
3758
3759 // notify gdb or other lurkers that this process is no longer using the shared region
3760 dyld::gProcessInfo->processDetachedFromSharedRegion = true;
3761
3762 // map cache just for this process with mmap()
3763 const shared_file_mapping_np* const start = mappings;
3764 const shared_file_mapping_np* const end = &mappings[count];
3765 for (const shared_file_mapping_np* p = start; p < end; ++p ) {
3766 void* mmapAddress = (void*)(uintptr_t)(p->sfm_address);
3767 size_t size = p->sfm_size;
3768 //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
3769 int protection = 0;
3770 if ( p->sfm_init_prot & VM_PROT_EXECUTE )
3771 protection |= PROT_EXEC;
3772 if ( p->sfm_init_prot & VM_PROT_READ )
3773 protection |= PROT_READ;
3774 if ( p->sfm_init_prot & VM_PROT_WRITE )
3775 protection |= PROT_WRITE;
3776 off_t offset = p->sfm_file_offset;
3777 if ( mmap(mmapAddress, size, protection, MAP_FIXED | MAP_PRIVATE, fd, offset) != mmapAddress ) {
3778 // failed to map some chunk of this shared cache file
3779 // clear shared region
3780 vm_deallocate(mach_task_self(), (vm_address_t)SHARED_REGION_BASE, SHARED_REGION_SIZE);
3781 // go back to not using shared region at all
3782 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
3783 if ( gLinkContext.verboseMapping ) {
3784 dyld::log("dyld: shared cached region cannot be mapped at address %p with size 0x%08lX\n",
3785 mmapAddress, size);
3786 }
3787 // return failure
3788 return -1;
3789 }
3790 }
3791
3792 // update all __DATA pages with slide info
3793 const dyld_cache_slide_info* slideInfoHeader = (dyld_cache_slide_info*)slideInfo;
3794 if ( slideInfoHeader->version == 2 ) {
3795 const dyld_cache_slide_info2* slideHeader = (dyld_cache_slide_info2*)slideInfo;
3796 const uint32_t page_size = slideHeader->page_size;
3797 const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
3798 const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
3799 const uintptr_t dataPagesStart = mappings[1].sfm_address;
3800 for (int i=0; i < slideHeader->page_starts_count; ++i) {
3801 uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
3802 uint16_t pageEntry = page_starts[i];
3803 //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
3804 if ( pageEntry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE )
3805 continue;
3806 if ( pageEntry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
3807 uint16_t chainIndex = (pageEntry & 0x3FFF);
3808 bool done = false;
3809 while ( !done ) {
3810 uint16_t info = page_extras[chainIndex];
3811 uint16_t pageStartOffset = (info & 0x3FFF)*4;
3812 //dyld::log(" chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
3813 rebaseChain(page, pageStartOffset, slide, slideHeader);
3814 done = (info & DYLD_CACHE_SLIDE_PAGE_ATTR_END);
3815 ++chainIndex;
3816 }
3817 }
3818 else {
3819 uint32_t pageOffset = pageEntry * 4;
3820 //dyld::log(" start pageOffset=0x%03X\n", pageOffset);
3821 rebaseChain(page, pageOffset, slide, slideHeader);
3822 }
3823 }
3824 }
3825 else if ( slide != 0 ) {
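// version 1 slide info: a table of contents indexes per-page bitmaps; each set bit
// marks a 4-byte-aligned location in that page that needs the slide added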
3826 const uintptr_t dataPagesStart = mappings[1].sfm_address;
3827 const uint16_t* toc = (uint16_t*)((long)(slideInfoHeader) + slideInfoHeader->toc_offset);
3828 const uint8_t* entries = (uint8_t*)((long)(slideInfoHeader) + slideInfoHeader->entries_offset);
3829 for(uint32_t i=0; i < slideInfoHeader->toc_count; ++i) {
3830 const uint8_t* entry = &entries[toc[i]*slideInfoHeader->entries_size];
3831 const uint8_t* page = (uint8_t*)(long)(dataPagesStart + (4096*i));
3832 //dyld::log("page=%p toc[%d]=%d entries=%p\n", page, i, toc[i], entry);
3833 for(int j=0; j < 128; ++j) {
3834 uint8_t b = entry[j];
3835 //dyld::log(" entry[%d] = 0x%02X\n", j, b);
3836 if ( b != 0 ) {
3837 for(int k=0; k < 8; ++k) {
3838 if ( b & (1<<k) ) {
3839 uintptr_t* p = (uintptr_t*)(page + j*8*4 + k*4);
3840 uintptr_t value = *p;
3841 //dyld::log(" *%p was 0x%lX will be 0x%lX\n", p, value, value+sSharedCacheSlide);
3842 *p = value + slide;
3843 }
3844 }
3845 }
3846 }
3847 }
3848 }
3849
3850 // successfully mapped shared cache for just this process
3851 gLinkContext.sharedRegionMode = ImageLoader::kUsePrivateSharedRegion;
3852
3853 return 0;
3854 }
3855
3856
3857 const void* imMemorySharedCacheHeader()
3858 {
3859 return sSharedCache;
3860 }
3861
3862 const char* getStandardSharedCacheFilePath()
3863 {
3864 #if __IPHONE_OS_VERSION_MIN_REQUIRED
3865 return IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME;
3866 #else
3867 #if __x86_64__
3868 if ( sHaswell ) {
3869 const char* path2 = MACOSX_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H;
3870 struct stat statBuf;
3871 if ( my_stat(path2, &statBuf) == 0 )
3872 return path2;
3873 }
3874 #endif
3875 return MACOSX_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME;
3876 #endif
3877 }
3878
3879 int openSharedCacheFile()
3880 {
3881 char path[MAXPATHLEN];
3882 strlcpy(path, sSharedCacheDir, MAXPATHLEN);
3883 strlcat(path, "/", MAXPATHLEN);
3884 #if __x86_64__
3885 if ( sHaswell ) {
3886 strlcat(path, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H, MAXPATHLEN);
3887 int fd = my_open(path, O_RDONLY, 0);
3888 if ( fd != -1 ) {
3889 if ( gLinkContext.verboseMapping )
3890 dyld::log("dyld: Mapping%s shared cache from %s\n", (gLinkContext.sharedRegionMode == ImageLoader::kUsePrivateSharedRegion) ? " private": "", path);
3891 return fd;
3892 }
3893 strlcpy(path, sSharedCacheDir, MAXPATHLEN);
3894 }
3895 #endif
3896 strlcat(path, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, MAXPATHLEN);
3897 #if __IPHONE_OS_VERSION_MIN_REQUIRED
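// Prefer the development cache variant when the enable-dylibs-to-override-cache marker
// file exists (and is below the size limit) and a development cache is present on disk,
// or when no production cache exists at all.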
3898 struct stat enableStatBuf;
3899 struct stat devCacheStatBuf;
3900 struct stat prodCacheStatBuf;
3901 if ( ((my_stat(IPHONE_DYLD_SHARED_CACHE_DIR "enable-dylibs-to-override-cache", &enableStatBuf) == 0)
3902 && (enableStatBuf.st_size < ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE)
3903 && (my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT, &devCacheStatBuf) == 0))
3904 || (my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &prodCacheStatBuf) != 0))
3905 strlcat(path, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, MAXPATHLEN);
3906 #endif
3907 if ( gLinkContext.verboseMapping )
3908 dyld::log("dyld: Mapping%s shared cache from %s\n", (gLinkContext.sharedRegionMode == ImageLoader::kUsePrivateSharedRegion) ? " private": "", path);
3909 return my_open(path, O_RDONLY, 0);
3910 }
3911
3912
3913 static void getCacheBounds(uint32_t mappingsCount, const shared_file_mapping_np mappings[], uint64_t& lowAddress, uint64_t& highAddress)
3914 {
3915 lowAddress = 0;
3916 highAddress = 0;
3917 for(uint32_t i=0; i < mappingsCount; ++i) {
3918 if ( lowAddress == 0 ) {
3919 lowAddress = mappings[i].sfm_address;
3920 highAddress = mappings[i].sfm_address + mappings[i].sfm_size;
3921 }
3922 else {
3923 if ( mappings[i].sfm_address < lowAddress )
3924 lowAddress = mappings[i].sfm_address;
3925 if ( (mappings[i].sfm_address + mappings[i].sfm_size) > highAddress )
3926 highAddress = mappings[i].sfm_address + mappings[i].sfm_size;
3927 }
3928 }
3929 }
3930
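// Picks a random ASLR slide for the shared cache such that all mappings still fit
// within the shared region, and adds the slide to each mapping's address.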
3931 static long pickCacheSlide(uint32_t mappingsCount, shared_file_mapping_np mappings[])
3932 {
3933 #if __x86_64__
3934 // x86_64 has two memory regions:
3935 // 256MB at 0x00007FFF70000000
3936 // 1024MB at 0x00007FFF80000000
3937 // Some old shared caches have r/w region after rx region, so all regions slide within 1GB range
3938 // Newer shared caches have r/w region based at 0x7FFF70000000 and r/o regions at 0x7FFF80000000, so each part has max slide
3939 if ( (mappingsCount >= 3) && (mappings[1].sfm_init_prot == (VM_PROT_READ|VM_PROT_WRITE)) && (mappings[1].sfm_address == 0x00007FFF70000000) ) {
3940 const uint64_t rwSize = mappings[1].sfm_size;
3941 const uint64_t rwSlop = 0x10000000ULL - rwSize;
3942 const uint64_t roSize = (mappings[2].sfm_address + mappings[2].sfm_size) - mappings[0].sfm_address;
3943 const uint64_t roSlop = 0x40000000ULL - roSize;
3944 const uint64_t space = (rwSlop < roSlop) ? rwSlop : roSlop;
3945
3946 // choose new random slide
3947 long slide = (arc4random() % space) & (-4096);
3948 //dyld::log("rwSlop=0x%0llX, roSlop=0x%0llX\n", rwSlop, roSlop);
3949 //dyld::log("space=0x%0llX, slide=0x%0lX\n", space, slide);
3950
3951 // update mappings
3952 for(uint32_t i=0; i < mappingsCount; ++i) {
3953 mappings[i].sfm_address += slide;
3954 }
3955
3956 return slide;
3957 }
3958 // else fall through to handle old style cache
3959 #endif
3960 // get bounds of cache
3961 uint64_t lowAddress;
3962 uint64_t highAddress;
3963 getCacheBounds(mappingsCount, mappings, lowAddress, highAddress);
3964
3965 // find slop space
3966 const uint64_t space = (SHARED_REGION_BASE + SHARED_REGION_SIZE) - highAddress;
3967
3968 // choose new random slide
3969 #if __arm__
3970 // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
3971 long slide = ((arc4random() % space) & (-16384));
3972 #else
3973 long slide = dyld_page_trunc(arc4random() % space);
3974 #endif
3975 //dyld::log("slideSpace=0x%0llX\n", space);
3976 //dyld::log("slide=0x%0lX\n", slide);
3977
3978 // update mappings
3979 for(uint32_t i=0; i < mappingsCount; ++i) {
3980 mappings[i].sfm_address += slide;
3981 }
3982
3983 return slide;
3984 }
3985
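// Maps the dyld shared cache for this process: reuse a cache already mapped into the
// shared region if one is present and compatible; otherwise open the cache file,
// validate its mappings and code signature, pick a slide, map it, and publish the
// mapping ranges for gdb and the kernel.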
3986 static void mapSharedCache()
3987 {
3988 uint64_t cacheBaseAddress = 0;
3989 // quick check if a cache is already mapped into shared region
3990 if ( _shared_region_check_np(&cacheBaseAddress) == 0 ) {
3991 sSharedCache = (dyld_cache_header*)cacheBaseAddress;
3992 // if we don't understand the currently mapped shared cache, then ignore
3993 #if __x86_64__
3994 const char* magic = (sHaswell ? ARCH_CACHE_MAGIC_H : ARCH_CACHE_MAGIC);
3995 #else
3996 const char* magic = ARCH_CACHE_MAGIC;
3997 #endif
3998 if ( strcmp(sSharedCache->magic, magic) != 0 ) {
3999 sSharedCache = NULL;
4000 if ( gLinkContext.verboseMapping )
4001 dyld::log("dyld: existing shared cache in memory is not compatible\n");
4002 // bail out unconditionally; falling through would dereference a NULL sSharedCache below
4003 return;
4004 }
4005 // check if cache file is slidable
4006 const dyld_cache_header* header = sSharedCache;
4007 if ( (header->mappingOffset >= 0x48) && (header->slideInfoSize != 0) ) {
4008 // solve for slide by comparing loaded address to address of first region
4009 const uint8_t* loadedAddress = (uint8_t*)sSharedCache;
4010 const dyld_cache_mapping_info* const mappings = (dyld_cache_mapping_info*)(loadedAddress+header->mappingOffset);
4011 const uint8_t* preferedLoadAddress = (uint8_t*)(long)(mappings[0].address);
4012 sSharedCacheSlide = loadedAddress - preferedLoadAddress;
4013 dyld::gProcessInfo->sharedCacheSlide = sSharedCacheSlide;
4014 dyld::gProcessInfo->sharedCacheBaseAddress = cacheBaseAddress;
4015 //dyld::log("sSharedCacheSlide=0x%08lX, loadedAddress=%p, preferedLoadAddress=%p\n", sSharedCacheSlide, loadedAddress, preferedLoadAddress);
4016 }
4017 // if cache has a uuid, copy it
4018 if ( header->mappingOffset >= 0x68 ) {
4019 memcpy(dyld::gProcessInfo->sharedCacheUUID, header->uuid, 16);
4020 }
4021 // verbose logging
4022 if ( gLinkContext.verboseMapping ) {
4023 dyld::log("dyld: re-using existing %s shared cache mapping\n", (header->cacheType == kDyldSharedCacheTypeDevelopment ? "development" : "production"));
4024 }
4025 if (header->mappingOffset >= 0x68) {
4026 dyld_kernel_image_info_t kernelCacheInfo;
4027 memcpy(&kernelCacheInfo.uuid[0], &sSharedCache->uuid[0], sizeof(uuid_t));
4028 kernelCacheInfo.load_addr = (uint64_t)sSharedCache;
4029 kernelCacheInfo.fsobjid.fid_objno = 0;
4030 kernelCacheInfo.fsobjid.fid_generation = 0;
4031 kernelCacheInfo.fsid.val[0] = 0;
4032 kernelCacheInfo.fsid.val[1] = 0;
4033 task_register_dyld_shared_cache_image_info(mach_task_self(), kernelCacheInfo, false, false);
4034 }
4035 }
4036 else {
4037 #if __i386__ || __x86_64__
4038 // <rdar://problem/5925940> Safe Boot should disable dyld shared cache
4039 // if we are in safe-boot mode and the cache was not made during this boot cycle,
4040 // delete the cache file
4041 uint32_t safeBootValue = 0;
4042 size_t safeBootValueSize = sizeof(safeBootValue);
4043 if ( (sysctlbyname("kern.safeboot", &safeBootValue, &safeBootValueSize, NULL, 0) == 0) && (safeBootValue != 0) ) {
4044 // user booted machine in safe-boot mode
4045 struct stat dyldCacheStatInfo;
4046 // Don't use a custom DYLD_SHARED_CACHE_DIR even if provided; use the standard path
4047 if ( my_stat(MACOSX_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &dyldCacheStatInfo) == 0 ) {
4048 struct timeval bootTimeValue;
4049 size_t bootTimeValueSize = sizeof(bootTimeValue);
4050 if ( (sysctlbyname("kern.boottime", &bootTimeValue, &bootTimeValueSize, NULL, 0) == 0) && (bootTimeValue.tv_sec != 0) ) {
4051 // if the cache file was created before this boot, then throw it away and let it rebuild itself
4052 if ( dyldCacheStatInfo.st_mtime < bootTimeValue.tv_sec ) {
4053 ::unlink(MACOSX_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME);
4054 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
4055 return;
4056 }
4057 }
4058 }
4059 }
4060 #endif
4061 // map in shared cache to shared region
4062 int fd = openSharedCacheFile();
4063 if ( fd != -1 ) {
4064 uint8_t firstPages[8192];
4065 if ( ::read(fd, firstPages, 8192) == 8192 ) {
4066 dyld_cache_header* header = (dyld_cache_header*)firstPages;
4067 #if __x86_64__
4068 const char* magic = (sHaswell ? ARCH_CACHE_MAGIC_H : ARCH_CACHE_MAGIC);
4069 #else
4070 const char* magic = ARCH_CACHE_MAGIC;
4071 #endif
4072 if ( strcmp(header->magic, magic) == 0 ) {
4073 const dyld_cache_mapping_info* const fileMappingsStart = (dyld_cache_mapping_info*)&firstPages[header->mappingOffset];
4074 const dyld_cache_mapping_info* const fileMappingsEnd = &fileMappingsStart[header->mappingCount];
4075 #if __IPHONE_OS_VERSION_MIN_REQUIRED
4076 if ( (header->mappingCount != 3)
4077 || (header->mappingOffset > 256)
4078 || (fileMappingsStart[0].fileOffset != 0)
4079 || (fileMappingsStart[0].address != SHARED_REGION_BASE)
4080 || ((fileMappingsStart[0].address + fileMappingsStart[0].size) > fileMappingsStart[1].address)
4081 || ((fileMappingsStart[1].address + fileMappingsStart[1].size) > fileMappingsStart[2].address)
4082 || ((fileMappingsStart[0].fileOffset + fileMappingsStart[0].size) != fileMappingsStart[1].fileOffset)
4083 || ((fileMappingsStart[1].fileOffset + fileMappingsStart[1].size) != fileMappingsStart[2].fileOffset) )
4084 throw "dyld shared cache file is invalid";
4085 #endif
4086 shared_file_mapping_np mappings[header->mappingCount];
4087 unsigned int mappingCount = header->mappingCount;
4088 int readWriteMappingIndex = -1;
4089 int readOnlyMappingIndex = -1;
4090 // validate that the cache file has not been truncated
4091 bool goodCache = false;
4092 struct stat stat_buf;
4093 if ( fstat(fd, &stat_buf) == 0 ) {
4094 goodCache = true;
4095 int i=0;
4096 for (const dyld_cache_mapping_info* p = fileMappingsStart; p < fileMappingsEnd; ++p, ++i) {
4097 mappings[i].sfm_address = p->address;
4098 mappings[i].sfm_size = p->size;
4099 mappings[i].sfm_file_offset = p->fileOffset;
4100 mappings[i].sfm_max_prot = p->maxProt;
4101 mappings[i].sfm_init_prot = p->initProt;
4102 // rdar://problem/5694507 old update_dyld_shared_cache tool could make a cache file
4103 // that is not page aligned, but otherwise ok.
4104 if ( p->fileOffset+p->size > (uint64_t)(stat_buf.st_size+4095 & (-4096)) ) {
4105 dyld::log("dyld: shared cached file is corrupt: %s" DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME "\n", sSharedCacheDir);
4106 goodCache = false;
4107 }
4108 if ( (mappings[i].sfm_init_prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE) ) {
4109 readWriteMappingIndex = i;
4110 }
4111 if ( mappings[i].sfm_init_prot == VM_PROT_READ ) {
4112 readOnlyMappingIndex = i;
4113 }
4114 }
4115 // if shared cache is code signed, add a mapping for the code signature
4116 uint64_t signatureSize = header->codeSignatureSize;
4117 // zero size in header means signature runs to end-of-file
4118 if ( signatureSize == 0 )
4119 signatureSize = stat_buf.st_size - header->codeSignatureOffset;
4120 if ( signatureSize != 0 ) {
4121 #if __arm__ || __arm64__
4122 size_t alignedSignatureSize = (signatureSize+16383) & (-16384);
4123 #else
4124 size_t alignedSignatureSize = (signatureSize+4095) & (-4096);
4125 #endif
4126 // <rdar://problem/23188073> validate code signature covers entire shared cache
4127 loadAndCheckCodeSignature(fd, mappingCount, mappings, header->codeSignatureOffset, alignedSignatureSize, firstPages, sizeof(firstPages));
4128 }
4129 #if __IPHONE_OS_VERSION_MIN_REQUIRED
4130 else {
4131 throw "dyld shared cache file not code signed";
4132 }
4133 #endif
4134 }
4135 #if __MAC_OS_X_VERSION_MIN_REQUIRED
4136 // sanity check that /usr/lib/libSystem.B.dylib stat() info matches cache
4137 if ( header->imagesCount * sizeof(dyld_cache_image_info) + header->imagesOffset < 8192 ) {
4138 bool foundLibSystem = false;
4139 if ( my_stat("/usr/lib/libSystem.B.dylib", &stat_buf) == 0 ) {
4140 const dyld_cache_image_info* images = (dyld_cache_image_info*)&firstPages[header->imagesOffset];
4141 const dyld_cache_image_info* const imagesEnd = &images[header->imagesCount];
4142 for (const dyld_cache_image_info* p = images; p < imagesEnd; ++p) {
4143 if ( ((time_t)p->modTime == stat_buf.st_mtime) && ((ino_t)p->inode == stat_buf.st_ino) ) {
4144 foundLibSystem = true;
4145 break;
4146 }
4147 }
4148 }
4149 if ( !sSharedCacheIgnoreInodeAndTimeStamp && !foundLibSystem ) {
4150 dyld::log("dyld: shared cached file was built against a different libSystem.dylib, ignoring cache.\n"
4151 "to update dyld shared cache run: 'sudo update_dyld_shared_cache' then reboot.\n");
4152 goodCache = false;
4153 }
4154 }
4155 #endif
4156 #if __IPHONE_OS_VERSION_MIN_REQUIRED
4157 {
4158 uint64_t lowAddress;
4159 uint64_t highAddress;
4160 getCacheBounds(mappingCount, mappings, lowAddress, highAddress);
4161 if ( (highAddress-lowAddress) > SHARED_REGION_SIZE )
4162 throw "dyld shared cache is too big to fit in shared region";
4163 }
4164 #endif
4165
4166 if ( goodCache && (readWriteMappingIndex == -1) ) {
4167 dyld::log("dyld: shared cached file is missing read/write mapping: %s" DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME "\n", sSharedCacheDir);
4168 goodCache = false;
4169 }
4170 if ( goodCache && (readOnlyMappingIndex == -1) ) {
4171 dyld::log("dyld: shared cached file is missing read-only mapping: %s" DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME "\n", sSharedCacheDir);
4172 goodCache = false;
4173 }
4174 if ( goodCache ) {
4175 long cacheSlide = 0;
4176 void* slideInfo = (void*)(long)(mappings[readOnlyMappingIndex].sfm_address + (header->slideInfoOffset - mappings[readOnlyMappingIndex].sfm_file_offset));
4177 uint64_t slideInfoSize = header->slideInfoSize;
4178 // check if shared cache contains slid info
4179 if ( slideInfoSize != 0 ) {
4180 // <rdar://problem/8611968> don't slide shared cache if ASLR disabled (main executable didn't slide)
4181 if ( sMainExecutable->isPositionIndependentExecutable() && (sMainExecutable->getSlide() == 0) ) {
4182 cacheSlide = 0;
4183 }
4184 else {
4185 // generate random slide amount
4186 cacheSlide = pickCacheSlide(mappingCount, mappings);
4187 }
4188
4189 slideInfo = (void*)((uint8_t*)slideInfo + cacheSlide);
4190 // add VM_PROT_SLIDE bit to __DATA area of cache
4191 mappings[readWriteMappingIndex].sfm_max_prot |= VM_PROT_SLIDE;
4192 mappings[readWriteMappingIndex].sfm_init_prot |= VM_PROT_SLIDE;
4193 }
4194 if ( gLinkContext.verboseMapping ) {
4195 dyld::log("dyld: calling _shared_region_map_and_slide_np() with regions:\n");
4196 for (int i=0; i < mappingCount; ++i) {
4197 dyld::log(" address=0x%08llX, size=0x%08llX, fileOffset=0x%08llX\n", mappings[i].sfm_address, mappings[i].sfm_size, mappings[i].sfm_file_offset);
4198 }
4199 }
4200
4201 if (_shared_region_map_and_slide_np(fd, mappingCount, mappings, cacheSlide, slideInfo, slideInfoSize) == 0) {
4202 // successfully mapped cache into shared region
4203 sSharedCache = (dyld_cache_header*)mappings[0].sfm_address;
4204 sSharedCacheSlide = cacheSlide;
4205 dyld::gProcessInfo->sharedCacheSlide = cacheSlide;
4206 dyld::gProcessInfo->sharedCacheBaseAddress = mappings[0].sfm_address;
4207 //dyld::log("sSharedCache=%p sSharedCacheSlide=0x%08lX\n", sSharedCache, sSharedCacheSlide);
4208 // if cache has a uuid, copy it
4209 if ( header->mappingOffset >= 0x68 ) {
4210 const bool privateSharedCache = gLinkContext.sharedRegionMode == ImageLoader::kUsePrivateSharedRegion;
4211 memcpy(dyld::gProcessInfo->sharedCacheUUID, header->uuid, 16);
4212 dyld_kernel_image_info_t kernelCacheInfo;
4213 memcpy(&kernelCacheInfo.uuid[0], &sSharedCache->uuid[0], sizeof(uuid_t));
4214 kernelCacheInfo.load_addr = (uint64_t)sSharedCache;
4215 kernelCacheInfo.fsobjid.fid_objno = 0;
4216 kernelCacheInfo.fsobjid.fid_generation = 0;
4217 kernelCacheInfo.fsid.val[0] = 0;
4218 kernelCacheInfo.fsid.val[1] = 0;
4219 if (privateSharedCache) {
4220 kernelCacheInfo.fsobjid = *(fsobj_id_t*)(&stat_buf.st_ino);
4221 struct statfs statfs_buf;
4222 if ( fstatfs(fd, &statfs_buf) == 0 ) {
4223 kernelCacheInfo.fsid = statfs_buf.f_fsid;
4224 }
4225 }
4226 task_register_dyld_shared_cache_image_info(mach_task_self(), kernelCacheInfo, false, privateSharedCache);
4227 }
4228 }
4229 else {
4230 #if __IPHONE_OS_VERSION_MIN_REQUIRED
4231 throwf("dyld shared cache could not be mapped. errno=%d, slide=0x%08lX, slideInfo=%p, slideInfoSize=0x%08llX, mappingCount=%u, "
4232 "address/size/off/init/max [0]=0x%0llX/0x%0llX/0x%0llX/0x%02X/0x%02X, [1]=0x%0llX/0x%0llX/0x%0llX/0x%02X/0x%02X, [2]=0x%0llX/0x%0llX/0x%0llX/0x%02X/0x%02X",
4233 errno, cacheSlide, slideInfo, slideInfoSize, mappingCount,
4234 mappings[0].sfm_address, mappings[0].sfm_size, mappings[0].sfm_file_offset, mappings[0].sfm_init_prot, mappings[0].sfm_max_prot,
4235 mappings[1].sfm_address, mappings[1].sfm_size, mappings[1].sfm_file_offset, mappings[1].sfm_init_prot, mappings[1].sfm_max_prot,
4236 mappings[2].sfm_address, mappings[2].sfm_size, mappings[2].sfm_file_offset, mappings[2].sfm_init_prot, mappings[2].sfm_max_prot);
4237 #endif
4238 if ( gLinkContext.verboseMapping )
4239 dyld::log("dyld: shared cached file could not be mapped\n");
4240 }
4241 }
4242 }
4243 else {
4244 if ( gLinkContext.verboseMapping )
4245 dyld::log("dyld: shared cached file is invalid\n");
4246 }
4247 }
4248 else {
4249 if ( gLinkContext.verboseMapping )
4250 dyld::log("dyld: shared cached file cannot be read\n");
4251 }
4252 close(fd);
4253 }
4254 else {
4255 if ( gLinkContext.verboseMapping )
4256 dyld::log("dyld: shared cached file cannot be opened\n");
4257 }
4258 }
4259
4260 // remember if dyld loaded at same address as when cache built
4261 if ( sSharedCache != NULL ) {
4262 gLinkContext.dyldLoadedAtSameAddressNeededBySharedCache = ((uintptr_t)(sSharedCache->dyldBaseAddress) == (uintptr_t)&_mh_dylinker_header);
4263 }
4264
4265 // tell gdb where the shared cache is
4266 if ( sSharedCache != NULL ) {
4267 const dyld_cache_mapping_info* const start = (dyld_cache_mapping_info*)((uint8_t*)sSharedCache + sSharedCache->mappingOffset);
4268 dyld_shared_cache_ranges.sharedRegionsCount = sSharedCache->mappingCount;
4269 // only room to tell gdb about first four regions
4270 if ( dyld_shared_cache_ranges.sharedRegionsCount > 4 )
4271 dyld_shared_cache_ranges.sharedRegionsCount = 4;
4272 const dyld_cache_mapping_info* const end = &start[dyld_shared_cache_ranges.sharedRegionsCount];
4273 int index = 0;
4274 for (const dyld_cache_mapping_info* p = start; p < end; ++p, ++index ) {
4275 dyld_shared_cache_ranges.ranges[index].start = p->address+sSharedCacheSlide;
4276 dyld_shared_cache_ranges.ranges[index].length = p->size;
4277 if ( gLinkContext.verboseMapping ) {
4278 dyld::log(" 0x%08llX->0x%08llX %s%s%s init=%x, max=%x\n",
4279 p->address+sSharedCacheSlide, p->address+sSharedCacheSlide+p->size-1,
4280 ((p->initProt & VM_PROT_READ) ? "read " : ""),
4281 ((p->initProt & VM_PROT_WRITE) ? "write " : ""),
4282 ((p->initProt & VM_PROT_EXECUTE) ? "execute " : ""), p->initProt, p->maxProt);
4283 }
4284 #if __i386__
4285 // If a non-writable, executable region is found in the R/W shared region, these are __IMPORT segments
4286 // from an old cache. Make them writable; dyld no longer supports turning W on and off as it binds.
4287 if ( (p->initProt == (VM_PROT_READ|VM_PROT_EXECUTE)) && ((p->address & 0xF0000000) == 0xA0000000) ) {
4288 if ( p->size != 0 ) {
4289 vm_prot_t prot = VM_PROT_EXECUTE | VM_PROT_READ | VM_PROT_WRITE;
4290 vm_protect(mach_task_self(), p->address, p->size, false, prot);
4291 if ( gLinkContext.verboseMapping ) {
4292 dyld::log("%18s at 0x%08llX->0x%08llX altered permissions to %c%c%c\n", "", p->address,
4293 p->address+p->size-1,
4294 (prot & PROT_READ) ? 'r' : '.', (prot & PROT_WRITE) ? 'w' : '.', (prot & PROT_EXEC) ? 'x' : '.' );
4295 }
4296 }
4297 }
4298 #endif
4299 }
4300 if ( gLinkContext.verboseMapping ) {
4301 // list the code blob
4302 dyld_cache_header* header = (dyld_cache_header*)sSharedCache;
4303 uint64_t signatureSize = header->codeSignatureSize;
4304 // zero size in header means signature runs to end-of-file
4305 if ( signatureSize == 0 ) {
4306 struct stat stat_buf;
4307 // FIXME: need size of cache file actually used
4308 if ( my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &stat_buf) == 0 )
4309 signatureSize = stat_buf.st_size - header->codeSignatureOffset;
4310 }
4311 if ( signatureSize != 0 ) {
4312 const dyld_cache_mapping_info* const last = &start[dyld_shared_cache_ranges.sharedRegionsCount-1];
4313 uint64_t codeBlobStart = last->address + last->size;
4314 dyld::log(" 0x%08llX->0x%08llX (code signature)\n", codeBlobStart, codeBlobStart+signatureSize);
4315 }
4316 }
4317 #if SUPPORT_ACCELERATE_TABLES
4318 if ( !dylibsCanOverrideCache() && !sDisableAcceleratorTables && (sSharedCache->mappingOffset > 0x80) && (sSharedCache->accelerateInfoAddr != 0) ) {
4319 sAllCacheImagesProxy = ImageLoaderMegaDylib::makeImageLoaderMegaDylib(sSharedCache, sSharedCacheSlide, gLinkContext);
4320 }
4321 #endif
4322 }
4323 }
4324 #endif // #if DYLD_SHARED_CACHE_SUPPORT
4325
4326
4327
4328 // created when NSLinkModule is called a second time on a bundle
4329 ImageLoader* cloneImage(ImageLoader* image)
4330 {
4331 // open file (automagically closed when this function exits)
4332 FileOpener file(image->getPath());
4333
4334 struct stat stat_buf;
4335 if ( fstat(file.getFileDescriptor(), &stat_buf) == -1)
4336 throw "stat error";
4337
4338 dyld::LoadContext context;
4339 context.useSearchPaths = false;
4340 context.useFallbackPaths = false;
4341 context.useLdLibraryPath = false;
4342 context.implicitRPath = false;
4343 context.matchByInstallName = false;
4344 context.dontLoad = false;
4345 context.mustBeBundle = true;
4346 context.mustBeDylib = false;
4347 context.canBePIE = false;
4348 context.origin = NULL;
4349 context.rpath = NULL;
4350 return loadPhase6(file.getFileDescriptor(), stat_buf, image->getPath(), context);
4351 }
4352
4353
4354 ImageLoader* loadFromMemory(const uint8_t* mem, uint64_t len, const char* moduleName)
4355 {
4356 // if fat wrapper, find usable sub-file
4357 const fat_header* memStartAsFat = (fat_header*)mem;
4358 uint64_t fileOffset = 0;
4359 uint64_t fileLength = len;
4360 if ( memStartAsFat->magic == OSSwapBigToHostInt32(FAT_MAGIC) ) {
4361 if ( fatFindBest(memStartAsFat, &fileOffset, &fileLength) ) {
4362 mem = &mem[fileOffset];
4363 len = fileLength;
4364 }
4365 else {
4366 throw "no matching architecture in universal wrapper";
4367 }
4368 }
4369
4370 // try each loader
4371 if ( isCompatibleMachO(mem, moduleName) ) {
4372 ImageLoader* image = ImageLoaderMachO::instantiateFromMemory(moduleName, (macho_header*)mem, len, gLinkContext);
4373 // don't add bundles to the global list; they can be loaded but not linked. When linked, the bundle will be added to the list
4374 if ( ! image->isBundle() )
4375 addImage(image);
4376 return image;
4377 }
4378
4379 // try other file formats here...
4380
4381 // throw error about what was found
4382 switch (*(uint32_t*)mem) {
4383 case MH_MAGIC:
4384 case MH_CIGAM:
4385 case MH_MAGIC_64:
4386 case MH_CIGAM_64:
4387 throw "mach-o, but wrong architecture";
4388 default:
4389 throwf("unknown file type, first eight bytes: 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X",
4390 mem[0], mem[1], mem[2], mem[3], mem[4], mem[5], mem[6],mem[7]);
4391 }
4392 }
4393
4394
4395 void registerAddCallback(ImageCallback func)
4396 {
4397 // now add to list to get notified when any more images are added
4398 sAddImageCallbacks.push_back(func);
4399
4400 // call callback with all existing images
4401 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
4402 ImageLoader* image = *it;
4403 if ( image->getState() >= dyld_image_state_bound && image->getState() < dyld_image_state_terminated )
4404 (*func)(image->machHeader(), image->getSlide());
4405 }
4406 #if SUPPORT_ACCELERATE_TABLES
4407 if ( sAllCacheImagesProxy != NULL ) {
4408 dyld_image_info infos[allImagesCount()+1];
4409 unsigned cacheCount = sAllCacheImagesProxy->appendImagesToNotify(dyld_image_state_bound, true, infos);
4410 for (unsigned i=0; i < cacheCount; ++i) {
4411 (*func)(infos[i].imageLoadAddress, sSharedCacheSlide);
4412 }
4413 }
4414 #endif
4415 }
4416
4417 void registerRemoveCallback(ImageCallback func)
4418 {
4419 // <rdar://problem/15025198> ignore calls to register a notification during a notification
4420 if ( sRemoveImageCallbacksInUse )
4421 return;
4422 sRemoveImageCallbacks.push_back(func);
4423 }
4424
4425 void clearErrorMessage()
4426 {
4427 error_string[0] = '\0';
4428 }
4429
4430 void setErrorMessage(const char* message)
4431 {
4432 // save off error message in global buffer for CrashReporter to find
4433 strlcpy(error_string, message, sizeof(error_string));
4434 }
4435
4436 const char* getErrorMessage()
4437 {
4438 return error_string;
4439 }
4440
4441 void halt(const char* message)
4442 {
4443 dyld::log("dyld: %s\n", message);
4444 setErrorMessage(message);
4445 dyld::gProcessInfo->errorMessage = error_string;
4446 if ( !gLinkContext.startedInitializingMainExecutable )
4447 dyld::gProcessInfo->terminationFlags = 1;
4448 else
4449 dyld::gProcessInfo->terminationFlags = 0;
4450
4451 char payloadBuffer[EXIT_REASON_PAYLOAD_MAX_LEN];
4452 dyld_abort_payload* payload = (dyld_abort_payload*)payloadBuffer;
4453 payload->version = 1;
4454 payload->flags = gLinkContext.startedInitializingMainExecutable ? 0 : 1;
4455 payload->targetDylibPathOffset = 0;
4456 payload->clientPathOffset = 0;
4457 payload->symbolOffset = 0;
4458 int payloadSize = sizeof(dyld_abort_payload);
4459
4460 if ( dyld::gProcessInfo->errorTargetDylibPath != NULL ) {
4461 payload->targetDylibPathOffset = payloadSize;
4462 payloadSize += strlcpy(&payloadBuffer[payloadSize], dyld::gProcessInfo->errorTargetDylibPath, sizeof(payloadBuffer)-payloadSize) + 1;
4463 }
4464 if ( dyld::gProcessInfo->errorClientOfDylibPath != NULL ) {
4465 payload->clientPathOffset = payloadSize;
4466 payloadSize += strlcpy(&payloadBuffer[payloadSize], dyld::gProcessInfo->errorClientOfDylibPath, sizeof(payloadBuffer)-payloadSize) + 1;
4467 }
4468 if ( dyld::gProcessInfo->errorSymbol != NULL ) {
4469 payload->symbolOffset = payloadSize;
4470 payloadSize += strlcpy(&payloadBuffer[payloadSize], dyld::gProcessInfo->errorSymbol, sizeof(payloadBuffer)-payloadSize) + 1;
4471 }
4472 char truncMessage[EXIT_REASON_USER_DESC_MAX_LEN];
4473 strlcpy(truncMessage, message, EXIT_REASON_USER_DESC_MAX_LEN);
4474 abort_with_payload(OS_REASON_DYLD, dyld::gProcessInfo->errorKind ? dyld::gProcessInfo->errorKind : DYLD_EXIT_REASON_OTHER, payloadBuffer, payloadSize, truncMessage, 0);
4475 }
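// Illustrative layout of the payload built above (derived from this code): the header
// offsets are byte offsets from the start of payloadBuffer and are 0 when the
// corresponding string is absent.
//
//     [ dyld_abort_payload header ][ targetDylibPath\0 ][ clientPath\0 ][ symbol\0 ]
//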
4476
4477 static void setErrorStrings(unsigned errorCode, const char* errorClientOfDylibPath,
4478 const char* errorTargetDylibPath, const char* errorSymbol)
4479 {
4480 dyld::gProcessInfo->errorKind = errorCode;
4481 dyld::gProcessInfo->errorClientOfDylibPath = errorClientOfDylibPath;
4482 dyld::gProcessInfo->errorTargetDylibPath = errorTargetDylibPath;
4483 dyld::gProcessInfo->errorSymbol = errorSymbol;
4484 }
4485
4486
4487 uintptr_t bindLazySymbol(const mach_header* mh, uintptr_t* lazyPointer)
4488 {
4489 uintptr_t result = 0;
4490 // acquire read-lock on dyld's data structures
4491 #if 0 // rdar://problem/3811777 turn off locking until deadlock is resolved
4492 if ( gLibSystemHelpers != NULL )
4493 (*gLibSystemHelpers->lockForReading)();
4494 #endif
4495 // lookup and bind lazy pointer and get target address
4496 try {
4497 ImageLoader* target;
4498 #if __i386__
4499 // fast stubs pass NULL for mh and the image is instead found via the location of the stub (aka lazyPointer)
4500 if ( mh == NULL )
4501 target = dyld::findImageContainingAddress(lazyPointer);
4502 else
4503 target = dyld::findImageByMachHeader(mh);
4504 #else
4505 // note: target should always be mach-o, because only the mach-o lazy handler is wired up to this
4506 target = dyld::findImageByMachHeader(mh);
4507 #endif
4508 if ( target == NULL )
4509 throwf("image not found for lazy pointer at %p", lazyPointer);
4510 result = target->doBindLazySymbol(lazyPointer, gLinkContext);
4511 }
4512 catch (const char* message) {
4513 dyld::log("dyld: lazy symbol binding failed: %s\n", message);
4514 halt(message);
4515 }
4516 // release read-lock on dyld's data structures
4517 #if 0
4518 if ( gLibSystemHelpers != NULL )
4519 (*gLibSystemHelpers->unlockForReading)();
4520 #endif
4521 // return target address to glue which jumps to it with real parameters restored
4522 return result;
4523 }
4524
4525
4526 uintptr_t fastBindLazySymbol(ImageLoader** imageLoaderCache, uintptr_t lazyBindingInfoOffset)
4527 {
4528 uintptr_t result = 0;
4529 // get image
4530 if ( *imageLoaderCache == NULL ) {
4531 // save in cache
4532 *imageLoaderCache = dyld::findMappedRange((uintptr_t)imageLoaderCache);
4533 if ( *imageLoaderCache == NULL ) {
4534 #if SUPPORT_ACCELERATE_TABLES
4535 if ( sAllCacheImagesProxy != NULL ) {
4536 const mach_header* mh;
4537 const char* path;
4538 unsigned index;
4539 if ( sAllCacheImagesProxy->addressInCache(imageLoaderCache, &mh, &path, &index) ) {
4540 result = sAllCacheImagesProxy->bindLazy(lazyBindingInfoOffset, gLinkContext, mh, index);
4541 if ( result == 0 ) {
4542 halt("dyld: lazy symbol binding failed for image in dyld shared\n");
4543 }
4544 return result;
4545 }
4546 }
4547 #endif
4548 const char* message = "fast lazy binding from unknown image";
4549 dyld::log("dyld: %s\n", message);
4550 halt(message);
4551 }
4552 }
4553
4554 // bind lazy pointer and return it
4555 try {
4556 result = (*imageLoaderCache)->doBindFastLazySymbol((uint32_t)lazyBindingInfoOffset, gLinkContext,
4557 (dyld::gLibSystemHelpers != NULL) ? dyld::gLibSystemHelpers->acquireGlobalDyldLock : NULL,
4558 (dyld::gLibSystemHelpers != NULL) ? dyld::gLibSystemHelpers->releaseGlobalDyldLock : NULL);
4559 }
4560 catch (const char* message) {
4561 dyld::log("dyld: lazy symbol binding failed: %s\n", message);
4562 halt(message);
4563 }
4564
4565 // return target address to glue which jumps to it with real parameters restored
4566 return result;
4567 }
4568
4569
4570
4571 void registerUndefinedHandler(UndefinedHandler handler)
4572 {
4573 sUndefinedHandler = handler;
4574 }
4575
4576 static void undefinedHandler(const char* symbolName)
4577 {
4578 if ( sUndefinedHandler != NULL ) {
4579 (*sUndefinedHandler)(symbolName);
4580 }
4581 }
4582
4583 static bool findExportedSymbol(const char* name, bool onlyInCoalesced, const ImageLoader::Symbol** sym, const ImageLoader** image)
4584 {
4585 // search all images in order
4586 const ImageLoader* firstWeakImage = NULL;
4587 const ImageLoader::Symbol* firstWeakSym = NULL;
4588 const size_t imageCount = sAllImages.size();
4589 for(size_t i=0; i < imageCount; ++i) {
4590 ImageLoader* anImage = sAllImages[i];
4591 // the use of inserted libraries alters search order
4592 // so that inserted libraries are found before the main executable
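// for example (illustrative): with sAllImages = { main, insertedA, insertedB, libX }
// and sInsertedDylibCount == 2, the loop visits insertedA, insertedB, main, libX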
4593 if ( sInsertedDylibCount > 0 ) {
4594 if ( i < sInsertedDylibCount )
4595 anImage = sAllImages[i+1];
4596 else if ( i == sInsertedDylibCount )
4597 anImage = sAllImages[0];
4598 }
4599 if ( ! anImage->hasHiddenExports() && (!onlyInCoalesced || anImage->hasCoalescedExports()) ) {
4600 *sym = anImage->findExportedSymbol(name, false, image);
4601 if ( *sym != NULL ) {
4602 // if weak definition found, record first one found
4603 if ( ((*image)->getExportedSymbolInfo(*sym) & ImageLoader::kWeakDefinition) != 0 ) {
4604 if ( firstWeakImage == NULL ) {
4605 firstWeakImage = *image;
4606 firstWeakSym = *sym;
4607 }
4608 }
4609 else {
4610 // found non-weak, so immediately return with it
4611 return true;
4612 }
4613 }
4614 }
4615 }
4616 if ( firstWeakSym != NULL ) {
4617 // found a weak definition, but no non-weak, so return first weak found
4618 *sym = firstWeakSym;
4619 *image = firstWeakImage;
4620 return true;
4621 }
4622 #if SUPPORT_ACCELERATE_TABLES
4623 if ( sAllCacheImagesProxy != NULL ) {
4624 if ( sAllCacheImagesProxy->flatFindSymbol(name, onlyInCoalesced, sym, image) )
4625 return true;
4626 }
4627 #endif
4628
4629 return false;
4630 }
4631
4632 bool flatFindExportedSymbol(const char* name, const ImageLoader::Symbol** sym, const ImageLoader** image)
4633 {
4634 return findExportedSymbol(name, false, sym, image);
4635 }
4636
4637 bool findCoalescedExportedSymbol(const char* name, const ImageLoader::Symbol** sym, const ImageLoader** image)
4638 {
4639 return findExportedSymbol(name, true, sym, image);
4640 }
4641
4642
4643 bool flatFindExportedSymbolWithHint(const char* name, const char* librarySubstring, const ImageLoader::Symbol** sym, const ImageLoader** image)
4644 {
4645 // search all images in order
4646 const size_t imageCount = sAllImages.size();
4647 for(size_t i=0; i < imageCount; ++i){
4648 ImageLoader* anImage = sAllImages[i];
4649 // only look at images whose paths contain the hint string (NULL hint string is wildcard)
4650 if ( ! anImage->isBundle() && ((librarySubstring==NULL) || (strstr(anImage->getPath(), librarySubstring) != NULL)) ) {
4651 *sym = anImage->findExportedSymbol(name, false, image);
4652 if ( *sym != NULL ) {
4653 return true;
4654 }
4655 }
4656 }
4657 return false;
4658 }
4659
4660
4661 unsigned int getCoalescedImages(ImageLoader* images[], unsigned imageIndex[])
4662 {
4663 unsigned int count = 0;
4664 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
4665 ImageLoader* image = *it;
4666 if ( image->participatesInCoalescing() ) {
4667 images[count] = *it;
4668 imageIndex[count] = 0;
4669 ++count;
4670 }
4671 }
4672 #if SUPPORT_ACCELERATE_TABLES
4673 if ( sAllCacheImagesProxy != NULL ) {
4674 sAllCacheImagesProxy->appendImagesNeedingCoalescing(images, imageIndex, count);
4675 }
4676 #endif
4677 return count;
4678 }
4679
4680
4681 static ImageLoader::MappedRegion* getMappedRegions(ImageLoader::MappedRegion* regions)
4682 {
4683 ImageLoader::MappedRegion* end = regions;
4684 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
4685 (*it)->getMappedRegions(end);
4686 }
4687 return end;
4688 }
4689
4690 void registerImageStateSingleChangeHandler(dyld_image_states state, dyld_image_state_change_handler handler)
4691 {
4692 // mark the image that the handler is in as never-unload because dyld has a reference into it
4693 ImageLoader* handlerImage = findImageContainingAddress((void*)handler);
4694 if ( handlerImage != NULL )
4695 handlerImage->setNeverUnload();
4696
4697 // add to list of handlers
4698 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sSingleHandlers);
4699 if ( handlers != NULL ) {
4700 // <rdar://problem/10332417> need updateAllImages() to be last in dyld_image_state_mapped list
4701 // so that if ObjC adds a handler that prevents a load, it happens before the gdb list is updated
4702 if ( state == dyld_image_state_mapped )
4703 handlers->insert(handlers->begin(), handler);
4704 else
4705 handlers->push_back(handler);
4706
4707 // call callback with all existing images
4708 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
4709 ImageLoader* image = *it;
4710 dyld_image_info info;
4711 info.imageLoadAddress = image->machHeader();
4712 info.imageFilePath = image->getRealPath();
4713 info.imageFileModDate = image->lastModified();
4714 // should only call handler if state == image->state
4715 if ( image->getState() == state )
4716 (*handler)(state, 1, &info);
4717 // ignore returned string, too late to do anything
4718 }
4719 }
4720 }
4721
4722 void registerImageStateBatchChangeHandler(dyld_image_states state, dyld_image_state_change_handler handler)
4723 {
4724 // mark the image that the handler is in as never-unload because dyld has a reference into it
4725 ImageLoader* handlerImage = findImageContainingAddress((void*)handler);
4726 if ( handlerImage != NULL )
4727 handlerImage->setNeverUnload();
4728
4729 // add to list of handlers
4730 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sBatchHandlers);
4731 if ( handlers != NULL ) {
4732 // insert at front, so that gdb handler is always last
4733 handlers->insert(handlers->begin(), handler);
4734
4735 // call callback with all existing images
4736 try {
4737 notifyBatchPartial(state, true, handler, false, false);
4738 }
4739 catch (const char* msg) {
4740 // ignore request to abort during registration
4741 }
4742 }
4743 }
4744
4745
4746 void registerObjCNotifiers(_dyld_objc_notify_mapped mapped, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmapped)
4747 {
4748 // record functions to call
4749 sNotifyObjCMapped = mapped;
4750 sNotifyObjCInit = init;
4751 sNotifyObjCUnmapped = unmapped;
4752
4753 // call 'mapped' function with all images mapped so far
4754 try {
4755 notifyBatchPartial(dyld_image_state_bound, true, NULL, false, true);
4756 }
4757 catch (const char* msg) {
4758 // ignore request to abort during registration
4759 }
4760 }
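// Note (illustrative, happens outside this file): the Objective-C runtime is the
// expected caller here; at its own load time libobjc registers its handlers through
// the libdyld entry point _dyld_objc_notify_register(mapped, init, unmapped), which
// forwards to this function, so 'mapped' fires for everything already loaded.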
4761
4762 bool sharedCacheUUID(uuid_t uuid)
4763 {
4764 #if DYLD_SHARED_CACHE_SUPPORT
4765 if ( sSharedCache == NULL )
4766 return false;
4767
4768 memcpy(uuid, sSharedCache->uuid, 16);
4769 return true;
4770 #else
4771 return false;
4772 #endif
4773 }
4774
4775 #if SUPPORT_ACCELERATE_TABLES
4776
4777 bool dlopenFromCache(const char* path, int mode, void** handle)
4778 {
4779 if ( sAllCacheImagesProxy == NULL )
4780 return false;
4781 bool result = sAllCacheImagesProxy->dlopenFromCache(gLinkContext, path, mode, handle);
4782 if ( !result && (strchr(path, '/') == NULL) ) {
4783 // POSIX says you can call dlopen() with a leaf name (e.g. dlopen("libz.dylib"))
4784 char fallbackPath[PATH_MAX];
4785 strcpy(fallbackPath, "/usr/lib/");
4786 strlcat(fallbackPath, path, PATH_MAX);
4787 result = sAllCacheImagesProxy->dlopenFromCache(gLinkContext, fallbackPath, mode, handle);
4788 }
4789 return result;
4790 }
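// Example of the leaf-name fallback above (illustrative): dlopen("libz.dylib", RTLD_LAZY)
// contains no '/', so the cache proxy is first asked for "libz.dylib"; if that misses,
// the lookup is retried as "/usr/lib/libz.dylib".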
4791
4792 bool makeCacheHandle(ImageLoader* image, unsigned cacheIndex, int mode, void** result)
4793 {
4794 if ( sAllCacheImagesProxy == NULL )
4795 return false;
4796 return sAllCacheImagesProxy->makeCacheHandle(gLinkContext, cacheIndex, mode, result);
4797 }
4798
4799 bool isCacheHandle(void* handle)
4800 {
4801 if ( sAllCacheImagesProxy == NULL )
4802 return false;
4803 return sAllCacheImagesProxy->isCacheHandle(handle, NULL, NULL);
4804 }
4805
4806 bool isPathInCache(const char* path)
4807 {
4808 if ( sAllCacheImagesProxy == NULL )
4809 return false;
4810 unsigned index;
4811 return sAllCacheImagesProxy->hasDylib(path, &index);
4812 }
4813
4814 const char* getPathFromIndex(unsigned cacheIndex)
4815 {
4816 if ( sAllCacheImagesProxy == NULL )
4817 return NULL;
4818 return sAllCacheImagesProxy->getIndexedPath(cacheIndex);
4819 }
4820
4821 void* dlsymFromCache(void* handle, const char* symName, unsigned index)
4822 {
4823 if ( sAllCacheImagesProxy == NULL )
4824 return NULL;
4825 return sAllCacheImagesProxy->dlsymFromCache(gLinkContext, handle, symName, index);
4826 }
4827
4828 bool addressInCache(const void* address, const mach_header** mh, const char** path, unsigned* index)
4829 {
4830 if ( sAllCacheImagesProxy == NULL )
4831 return false;
4832 unsigned ignore;
4833 return sAllCacheImagesProxy->addressInCache(address, mh, path, index ? index : &ignore);
4834 }
4835
4836 bool findUnwindSections(const void* addr, dyld_unwind_sections* info)
4837 {
4838 if ( sAllCacheImagesProxy == NULL )
4839 return false;
4840 return sAllCacheImagesProxy->findUnwindSections(addr, info);
4841 }
4842
4843 bool dladdrFromCache(const void* address, Dl_info* info)
4844 {
4845 if ( sAllCacheImagesProxy == NULL )
4846 return false;
4847 return sAllCacheImagesProxy->dladdrFromCache(address, info);
4848 }
4849 #endif
4850
4851 static ImageLoader* libraryLocator(const char* libraryName, bool search, const char* origin, const ImageLoader::RPathChain* rpaths, unsigned& cacheIndex)
4852 {
4853 dyld::LoadContext context;
4854 context.useSearchPaths = search;
4855 context.useFallbackPaths = search;
4856 context.useLdLibraryPath = false;
4857 context.implicitRPath = false;
4858 context.matchByInstallName = false;
4859 context.dontLoad = false;
4860 context.mustBeBundle = false;
4861 context.mustBeDylib = true;
4862 context.canBePIE = false;
4863 context.origin = origin;
4864 context.rpath = rpaths;
4865 return load(libraryName, context, cacheIndex);
4866 }
4867
4868 static const char* basename(const char* path)
4869 {
4870 const char* last = path;
4871 for (const char* s = path; *s != '\0'; s++) {
4872 if (*s == '/')
4873 last = s+1;
4874 }
4875 return last;
4876 }
4877
4878 static void setContext(const macho_header* mainExecutableMH, int argc, const char* argv[], const char* envp[], const char* apple[])
4879 {
4880 gLinkContext.loadLibrary = &libraryLocator;
4881 gLinkContext.terminationRecorder = &terminationRecorder;
4882 gLinkContext.flatExportFinder = &flatFindExportedSymbol;
4883 gLinkContext.coalescedExportFinder = &findCoalescedExportedSymbol;
4884 gLinkContext.getCoalescedImages = &getCoalescedImages;
4885 gLinkContext.undefinedHandler = &undefinedHandler;
4886 gLinkContext.getAllMappedRegions = &getMappedRegions;
4887 gLinkContext.bindingHandler = NULL;
4888 gLinkContext.notifySingle = &notifySingle;
4889 gLinkContext.notifyBatch = &notifyBatch;
4890 gLinkContext.removeImage = &removeImage;
4891 gLinkContext.registerDOFs = &registerDOFs;
4892 gLinkContext.clearAllDepths = &clearAllDepths;
4893 gLinkContext.printAllDepths = &printAllDepths;
4894 gLinkContext.imageCount = &imageCount;
4895 gLinkContext.setNewProgramVars = &setNewProgramVars;
4896 #if DYLD_SHARED_CACHE_SUPPORT
4897 gLinkContext.inSharedCache = &inSharedCache;
4898 #endif
4899 gLinkContext.setErrorStrings = &setErrorStrings;
4900 #if SUPPORT_OLD_CRT_INITIALIZATION
4901 gLinkContext.setRunInitialzersOldWay= &setRunInitialzersOldWay;
4902 #endif
4903 gLinkContext.findImageContainingAddress = &findImageContainingAddress;
4904 gLinkContext.addDynamicReference = &addDynamicReference;
4905 #if SUPPORT_ACCELERATE_TABLES
4906 gLinkContext.notifySingleFromCache = &notifySingleFromCache;
4907 gLinkContext.getPreInitNotifyHandler= &getPreInitNotifyHandler;
4908 gLinkContext.getBoundBatchHandler = &getBoundBatchHandler;
4909 #endif
4910 gLinkContext.bindingOptions = ImageLoader::kBindingNone;
4911 gLinkContext.argc = argc;
4912 gLinkContext.argv = argv;
4913 gLinkContext.envp = envp;
4914 gLinkContext.apple = apple;
4915 gLinkContext.progname = (argv[0] != NULL) ? basename(argv[0]) : "";
4916 gLinkContext.programVars.mh = mainExecutableMH;
4917 gLinkContext.programVars.NXArgcPtr = &gLinkContext.argc;
4918 gLinkContext.programVars.NXArgvPtr = &gLinkContext.argv;
4919 gLinkContext.programVars.environPtr = &gLinkContext.envp;
4920 gLinkContext.programVars.__prognamePtr=&gLinkContext.progname;
4921 gLinkContext.mainExecutable = NULL;
4922 gLinkContext.imageSuffix = NULL;
4923 gLinkContext.dynamicInterposeArray = NULL;
4924 gLinkContext.dynamicInterposeCount = 0;
4925 gLinkContext.prebindUsage = ImageLoader::kUseAllPrebinding;
4926 #if TARGET_IPHONE_SIMULATOR
4927 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
4928 #else
4929 gLinkContext.sharedRegionMode = ImageLoader::kUseSharedRegion;
4930 #endif
4931 }
4932
4933
4934
4935 //
4936 // Look for a special segment in the mach header.
4937 // Its presence means that the binary wants to have dyld ignore
4938 // DYLD_ environment variables.
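//
// A binary typically gets such a segment from a linker section-create flag
// (an assumption about common usage, not something dyld itself mandates), e.g.:
//
//     clang main.c -Wl,-sectcreate,__RESTRICT,__restrict,/dev/null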
4939 //
4940 static bool hasRestrictedSegment(const macho_header* mh)
4941 {
4942 const uint32_t cmd_count = mh->ncmds;
4943 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
4944 const struct load_command* cmd = cmds;
4945 for (uint32_t i = 0; i < cmd_count; ++i) {
4946 switch (cmd->cmd) {
4947 case LC_SEGMENT_COMMAND:
4948 {
4949 const struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
4950
4951 //dyld::log("seg name: %s\n", seg->segname);
4952 if (strcmp(seg->segname, "__RESTRICT") == 0) {
4953 const struct macho_section* const sectionsStart = (struct macho_section*)((char*)seg + sizeof(struct macho_segment_command));
4954 const struct macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
4955 for (const struct macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
4956 if (strcmp(sect->sectname, "__restrict") == 0)
4957 return true;
4958 }
4959 }
4960 }
4961 break;
4962 }
4963 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
4964 }
4965
4966 return false;
4967 }
4968
4969 #if __IPHONE_OS_VERSION_MIN_REQUIRED
4970 static bool isFairPlayEncrypted(const macho_header* mh)
4971 {
4972 const uint32_t cmd_count = mh->ncmds;
4973 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
4974 const struct load_command* cmd = cmds;
4975 for (uint32_t i = 0; i < cmd_count; ++i) {
4976 if ( cmd->cmd == LC_ENCRYPT_COMMAND ) {
4977 const encryption_info_command* enc = (encryption_info_command*)cmd;
4978 return (enc->cryptid != 0);
4979 }
4980 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
4981 }
4982
4983 return false;
4984 }
4985 #endif
4986
4987 #if SUPPORT_VERSIONED_PATHS
4988
4989 static bool readFirstPage(const char* dylibPath, uint8_t firstPage[4096])
4990 {
4991 firstPage[0] = 0;
4992 // open file (automagically closed when this function exits)
4993 FileOpener file(dylibPath);
4994
4995 if ( file.getFileDescriptor() == -1 )
4996 return false;
4997
4998 if ( pread(file.getFileDescriptor(), firstPage, 4096, 0) != 4096 )
4999 return false;
5000
5001 // if fat wrapper, find usable sub-file
5002 const fat_header* fileStartAsFat = (fat_header*)firstPage;
5003 if ( fileStartAsFat->magic == OSSwapBigToHostInt32(FAT_MAGIC) ) {
5004 uint64_t fileOffset;
5005 uint64_t fileLength;
5006 if ( fatFindBest(fileStartAsFat, &fileOffset, &fileLength) ) {
5007 if ( pread(file.getFileDescriptor(), firstPage, 4096, fileOffset) != 4096 )
5008 return false;
5009 }
5010 else {
5011 return false;
5012 }
5013 }
5014
5015 return true;
5016 }
5017
5018 //
5019 // Peeks at a dylib file and returns its current_version and install_name.
5020 // Returns false on error.
5021 //
5022 static bool getDylibVersionAndInstallname(const char* dylibPath, uint32_t* version, char* installName)
5023 {
5024 uint8_t firstPage[4096];
5025 const macho_header* mh = (macho_header*)firstPage;
5026 if ( !readFirstPage(dylibPath, firstPage) ) {
5027 #if DYLD_SHARED_CACHE_SUPPORT
5028 // If file cannot be read, check to see if path is in shared cache
5029 const macho_header* mhInCache;
5030 const char* pathInCache;
5031 long slideInCache;
5032 if ( !findInSharedCacheImage(dylibPath, true, NULL, &mhInCache, &pathInCache, &slideInCache) )
5033 return false;
5034 mh = mhInCache;
5035 #else
5036 return false;
5037 #endif
5038 }
5039
5040 // check mach-o header
5041 if ( mh->magic != sMainExecutableMachHeader->magic )
5042 return false;
5043 if ( mh->cputype != sMainExecutableMachHeader->cputype )
5044 return false;
5045
5046 // scan load commands for LC_ID_DYLIB
5047 const uint32_t cmd_count = mh->ncmds;
5048 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
5049 const struct load_command* const cmdsReadEnd = (struct load_command*)(((char*)mh)+4096);
5050 const struct load_command* cmd = cmds;
5051 for (uint32_t i = 0; i < cmd_count; ++i) {
5052 switch (cmd->cmd) {
5053 case LC_ID_DYLIB:
5054 {
5055 const struct dylib_command* id = (struct dylib_command*)cmd;
5056 *version = id->dylib.current_version;
5057 if ( installName != NULL )
5058 strlcpy(installName, (char *)id + id->dylib.name.offset, PATH_MAX);
5059 return true;
5060 }
5061 break;
5062 }
5063 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5064 if ( cmd > cmdsReadEnd )
5065 return false;
5066 }
5067
5068 return false;
5069 }
5070 #endif // SUPPORT_VERSIONED_PATHS
5071
5072
5073 #if 0
5074 static void printAllImages()
5075 {
5076 dyld::log("printAllImages()\n");
5077 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5078 ImageLoader* image = *it;
5079 dyld_image_states imageState = image->getState();
5080 dyld::log(" state=%d, dlopen-count=%d, never-unload=%d, in-use=%d, name=%s\n",
5081 imageState, image->dlopenCount(), image->neverUnload(), image->isMarkedInUse(), image->getShortName());
5082 }
5083 }
5084 #endif
5085
5086 void link(ImageLoader* image, bool forceLazysBound, bool neverUnload, const ImageLoader::RPathChain& loaderRPaths, unsigned cacheIndex)
5087 {
5088 // add to list of known images. This did not happen at creation time for bundles
5089 if ( image->isBundle() && !image->isLinked() )
5090 addImage(image);
5091
5092 // we detect root images as those not linked in yet
5093 if ( !image->isLinked() )
5094 addRootImage(image);
5095
5096 // process images
5097 try {
5098 const char* path = image->getPath();
5099 #if SUPPORT_ACCELERATE_TABLES
5100 if ( image == sAllCacheImagesProxy )
5101 path = sAllCacheImagesProxy->getIndexedPath(cacheIndex);
5102 #endif
5103 image->link(gLinkContext, forceLazysBound, false, neverUnload, loaderRPaths, path);
5104 }
5105 catch (const char* msg) {
5106 garbageCollectImages();
5107 throw;
5108 }
5109 }
5110
5111
5112 void runInitializers(ImageLoader* image)
5113 {
5114 // do bottom up initialization
5115 ImageLoader::InitializerTimingList initializerTimes[allImagesCount()];
5116 initializerTimes[0].count = 0;
5117 image->runInitializers(gLinkContext, initializerTimes[0]);
5118 }
5119
5120 // This function is called at the end of dlclose() when the reference count goes to zero.
5121 // The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
5122 // Those dependent dylibs need to be unloaded, but only if they are not referenced by
5123 // something else. We use a standard mark and sweep garbage collection.
5124 //
5125 // The tricky part is that when a dylib is unloaded it may have a termination function that
5126 // can run and itself call dlclose() on yet another dylib. The problem is that this
5127 // sort of garbage collection is not re-entrant. Instead a terminator's call to dlclose()
5128 // which calls garbageCollectImages() will just set a flag to re-do the garbage collection
5129 // when the current pass is done.
5130 //
5131 // Also note that this is done within the dyld global lock, so it is always single threaded.
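//
// For example (illustrative): if dlopen() loaded B and B depends on C, then on the
// final dlclose(B) every image is first marked not-in-use, everything reachable from
// a still-dlopened, never-unload, or main-executable image is re-marked in-use, and
// B and C are torn down only if nothing re-marked them during that sweep.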
5132 //
5133 void garbageCollectImages()
5134 {
5135 static bool sDoingGC = false;
5136 static bool sRedo = false;
5137
5138 if ( sDoingGC ) {
5139 // GC is currently being run, just set a flag to have it run again.
5140 sRedo = true;
5141 return;
5142 }
5143
5144 sDoingGC = true;
5145 do {
5146 sRedo = false;
5147
5148 // mark phase: mark all images not-in-use
5149 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5150 ImageLoader* image = *it;
5151 //dyld::log("gc: neverUnload=%d name=%s\n", image->neverUnload(), image->getShortName());
5152 image->markNotUsed();
5153 }
5154
5155 // sweep phase: mark as in-use, images reachable from never-unload or in-use image
5156 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5157 ImageLoader* image = *it;
5158 if ( (image->dlopenCount() != 0) || image->neverUnload() || (image == sMainExecutable) ) {
5159 OSSpinLockLock(&sDynamicReferencesLock);
5160 image->markedUsedRecursive(sDynamicReferences);
5161 OSSpinLockUnlock(&sDynamicReferencesLock);
5162 }
5163 }
5164
5165 // collect phase: build array of images not marked in-use
5166 ImageLoader* deadImages[sAllImages.size()];
5167 unsigned deadCount = 0;
5168 int maxRangeCount = 0;
5169 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5170 ImageLoader* image = *it;
5171 if ( ! image->isMarkedInUse() ) {
5172 deadImages[deadCount++] = image;
5173 if (gLogAPIs) dyld::log("dlclose(), found unused image %p %s\n", image, image->getShortName());
5174 maxRangeCount += image->segmentCount();
5175 }
5176 }
5177
5178 // collect phase: run termination routines for images not marked in-use
5179 __cxa_range_t ranges[maxRangeCount];
5180 int rangeCount = 0;
5181 for (unsigned i=0; i < deadCount; ++i) {
5182 ImageLoader* image = deadImages[i];
5183 for (unsigned int j=0; j < image->segmentCount(); ++j) {
5184 if ( !image->segExecutable(j) )
5185 continue;
5186 if ( rangeCount < maxRangeCount ) {
5187 ranges[rangeCount].addr = (const void*)image->segActualLoadAddress(j);
5188 ranges[rangeCount].length = image->segSize(j);
5189 ++rangeCount;
5190 }
5191 }
5192 try {
5193 runImageStaticTerminators(image);
5194 }
5195 catch (const char* msg) {
5196 dyld::warn("problem running terminators for image: %s\n", msg);
5197 }
5198 }
5199
5200 // <rdar://problem/14718598> dyld should call __cxa_finalize_ranges()
5201 if ( (rangeCount > 0) && (gLibSystemHelpers != NULL) && (gLibSystemHelpers->version >= 13) )
5202 (*gLibSystemHelpers->cxa_finalize_ranges)(ranges, rangeCount);
5203
5204 // collect phase: delete all images which are not marked in-use
5205 bool mightBeMore;
5206 do {
5207 mightBeMore = false;
5208 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5209 ImageLoader* image = *it;
5210 if ( ! image->isMarkedInUse() ) {
5211 try {
5212 if (gLogAPIs) dyld::log("dlclose(), deleting %p %s\n", image, image->getShortName());
5213 removeImage(image);
5214 ImageLoader::deleteImage(image);
5215 mightBeMore = true;
5216 break; // iterator is invalidated by this removal
5217 }
5218 catch (const char* msg) {
5219 dyld::warn("problem deleting image: %s\n", msg);
5220 }
5221 }
5222 }
5223 } while ( mightBeMore );
5224 } while (sRedo);
5225 sDoingGC = false;
5226
5227 //printAllImages();
5228
5229 }
5230
5231
5232 static void preflight_finally(ImageLoader* image)
5233 {
5234 if ( image->isBundle() ) {
5235 removeImageFromAllImages(image->machHeader());
5236 ImageLoader::deleteImage(image);
5237 }
5238 sBundleBeingLoaded = NULL;
5239 dyld::garbageCollectImages();
5240 }
5241
5242
5243 void preflight(ImageLoader* image, const ImageLoader::RPathChain& loaderRPaths, unsigned cacheIndex)
5244 {
5245 try {
5246 if ( image->isBundle() )
5247 sBundleBeingLoaded = image; // hack
5248 const char* path = image->getPath();
5249 #if SUPPORT_ACCELERATE_TABLES
5250 if ( image == sAllCacheImagesProxy )
5251 path = sAllCacheImagesProxy->getIndexedPath(cacheIndex);
5252 #endif
5253 image->link(gLinkContext, false, true, false, loaderRPaths, path);
5254 }
5255 catch (const char* msg) {
5256 preflight_finally(image);
5257 throw;
5258 }
5259 preflight_finally(image);
5260 }
5261
5262 static void loadInsertedDylib(const char* path)
5263 {
5264 ImageLoader* image = NULL;
5265 unsigned cacheIndex;
5266 try {
5267 LoadContext context;
5268 context.useSearchPaths = false;
5269 context.useFallbackPaths = false;
5270 context.useLdLibraryPath = false;
5271 context.implicitRPath = false;
5272 context.matchByInstallName = false;
5273 context.dontLoad = false;
5274 context.mustBeBundle = false;
5275 context.mustBeDylib = true;
5276 context.canBePIE = false;
5277 context.origin = NULL; // can't use @loader_path with DYLD_INSERT_LIBRARIES
5278 context.rpath = NULL;
5279 image = load(path, context, cacheIndex);
5280 }
5281 catch (const char* msg) {
5282 #if TARGET_IPHONE_SIMULATOR
5283 dyld::log("dyld: warning: could not load inserted library '%s' because %s\n", path, msg);
5284 #else
5285 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5286 if ( gLinkContext.processUsingLibraryValidation )
5287 dyld::log("dyld: warning: could not load inserted library '%s' into library validated process because %s\n", path, msg);
5288 else
5289 #endif
5290 halt(dyld::mkstringf("could not load inserted library '%s' because %s\n", path, msg));
5291 #endif
5292 }
5293 catch (...) {
5294 halt(dyld::mkstringf("could not load inserted library '%s'\n", path));
5295 }
5296 }
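// Usage sketch (illustrative): the paths handed to loadInsertedDylib() come from the
// colon-separated DYLD_INSERT_LIBRARIES environment variable, e.g.:
//
//     DYLD_INSERT_LIBRARIES=/usr/lib/libgmalloc.dylib ./a.out
//
// For restricted processes the variable is pruned or ignored earlier, so this
// function is never reached for them.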
5297
5298
5299 //
5300 // Sets:
5301 // sEnvMode
5302 // gLinkContext.requireCodeSignature
5303 // gLinkContext.processIsRestricted // Mac OS X only
5304 // gLinkContext.processUsingLibraryValidation // Mac OS X only
5305 //
5306 static void configureProcessRestrictions(const macho_header* mainExecutableMH)
5307 {
5308 uint32_t flags;
5309 #if TARGET_IPHONE_SIMULATOR
5310 sEnvMode = envAll;
5311 gLinkContext.requireCodeSignature = true;
5312 #elif __IPHONE_OS_VERSION_MIN_REQUIRED
5313 sEnvMode = envNone;
5314 gLinkContext.requireCodeSignature = true;
5315 if ( csops(0, CS_OPS_STATUS, &flags, sizeof(flags)) != -1 ) {
5316 if ( flags & CS_ENFORCEMENT ) {
5317 if ( flags & CS_GET_TASK_ALLOW ) {
5318 // Xcode built app for Debug allowed to use DYLD_* variables
5319 sEnvMode = envAll;
5320 }
5321 else {
5322 // Development kernel can use DYLD_PRINT_* variables on any FairPlay encrypted app
5323 uint32_t secureValue = 0;
5324 size_t secureValueSize = sizeof(secureValue);
5325 if ( (sysctlbyname("kern.secure_kernel", &secureValue, &secureValueSize, NULL, 0) == 0) && (secureValue == 0) && isFairPlayEncrypted(mainExecutableMH) ) {
5326 sEnvMode = envPrintOnly;
5327 }
5328 }
5329 }
5330 else {
5331 // Development kernel can run unsigned code
5332 sEnvMode = envAll;
5333 gLinkContext.requireCodeSignature = false;
5334 }
5335 }
5336 if ( issetugid() ) {
5337 sEnvMode = envNone;
5338 }
5339 #elif __MAC_OS_X_VERSION_MIN_REQUIRED
5340 sEnvMode = envAll;
5341 gLinkContext.requireCodeSignature = false;
5342 gLinkContext.processIsRestricted = false;
5343 gLinkContext.processUsingLibraryValidation = false;
5344 // any processes with setuid or setgid bit set or with __RESTRICT segment is restricted
5345 if ( issetugid() || hasRestrictedSegment(mainExecutableMH) ) {
5346 gLinkContext.processIsRestricted = true;
5347 }
5348 if ( csops(0, CS_OPS_STATUS, &flags, sizeof(flags)) != -1 ) {
5349 // On OS X CS_RESTRICT means the program was signed with entitlements
5350 if ( ((flags & CS_RESTRICT) == CS_RESTRICT) && (csr_check(CSR_ALLOW_TASK_FOR_PID) != 0) ) {
5351 gLinkContext.processIsRestricted = true;
5352 }
5353 // Library Validation loosens searching but requires everything to be code signed
5354 if ( flags & CS_REQUIRE_LV ) {
5355 gLinkContext.processIsRestricted = false;
5356 //gLinkContext.requireCodeSignature = true;
5357 gLinkContext.processUsingLibraryValidation = true;
5358 }
5359 }
5360 #endif
5361 }
5362
5363
5364 bool processIsRestricted()
5365 {
5366 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5367 return gLinkContext.processIsRestricted;
5368 #else
5369 return false;
5370 #endif
5371 }
5372
5373
5374 // <rdar://problem/10583252> Add dyld to uuidArray to enable symbolication of stackshots
5375 static void addDyldImageToUUIDList()
5376 {
5377 const struct macho_header* mh = (macho_header*)&__dso_handle;
5378 const uint32_t cmd_count = mh->ncmds;
5379 const struct load_command* const cmds = (struct load_command*)((char*)mh + sizeof(macho_header));
5380 const struct load_command* cmd = cmds;
5381 for (uint32_t i = 0; i < cmd_count; ++i) {
5382 switch (cmd->cmd) {
5383 case LC_UUID: {
5384 uuid_command* uc = (uuid_command*)cmd;
5385 dyld_uuid_info info;
5386 info.imageLoadAddress = (mach_header*)mh;
5387 memcpy(info.imageUUID, uc->uuid, 16);
5388 addNonSharedCacheImageUUID(info);
5389 return;
5390 }
5391 }
5392 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5393 }
5394 }
5395
5396 void notifyKernelAboutDyld()
5397 {
5398 const struct macho_header* mh = (macho_header*)&__dso_handle;
5399 const uint32_t cmd_count = mh->ncmds;
5400 const struct load_command* const cmds = (struct load_command*)((char*)mh + sizeof(macho_header));
5401 const struct load_command* cmd = cmds;
5402 for (uint32_t i = 0; i < cmd_count; ++i) {
5403 switch (cmd->cmd) {
5404 case LC_UUID: {
5405 // Add dyld to the kernel image info
5406 uuid_command* uc = (uuid_command*)cmd;
5407 dyld_kernel_image_info_t kernelInfo;
5408 memcpy(kernelInfo.uuid, uc->uuid, 16);
5409 kernelInfo.load_addr = (uint64_t)mh;
5410 kernelInfo.fsobjid.fid_objno = 0;
5411 kernelInfo.fsobjid.fid_generation = 0;
5412 kernelInfo.fsid.val[0] = 0;
5413 kernelInfo.fsid.val[1] = 0;
5414 task_register_dyld_image_infos(mach_task_self(), &kernelInfo, 1);
5415 return;
5416 }
5417 }
5418 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5419 }
5420 }
5421
5422 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5423 typedef int (*open_proc_t)(const char*, int, int);
5424 typedef int (*fcntl_proc_t)(int, int, void*);
5425 typedef int (*ioctl_proc_t)(int, unsigned long, void*);
5426 static void* getProcessInfo() { return dyld::gProcessInfo; }
5427 static SyscallHelpers sSysCalls = {
5428 7,
5429 // added in version 1
5430 (open_proc_t)&open,
5431 &close,
5432 &pread,
5433 &write,
5434 &mmap,
5435 &munmap,
5436 &madvise,
5437 &stat,
5438 (fcntl_proc_t)&fcntl,
5439 (ioctl_proc_t)&ioctl,
5440 &issetugid,
5441 &getcwd,
5442 &realpath,
5443 &vm_allocate,
5444 &vm_deallocate,
5445 &vm_protect,
5446 &vlog,
5447 &vwarn,
5448 &pthread_mutex_lock,
5449 &pthread_mutex_unlock,
5450 &mach_thread_self,
5451 &mach_port_deallocate,
5452 &task_self_trap,
5453 &mach_timebase_info,
5454 &OSAtomicCompareAndSwapPtrBarrier,
5455 &OSMemoryBarrier,
5456 &getProcessInfo,
5457 &__error,
5458 &mach_absolute_time,
5459 // added in version 2
5460 &thread_switch,
5461 // added in version 3
5462 &opendir,
5463 &readdir_r,
5464 &closedir,
5465 // added in version 4
5466 &coresymbolication_load_notifier,
5467 &coresymbolication_unload_notifier,
5468 // Added in version 5
5469 &proc_regionfilename,
5470 &getpid,
5471 &mach_port_insert_right,
5472 &mach_port_allocate,
5473 &mach_msg,
5474 // Added in version 6
5475 &abort_with_payload,
5476 // Added in version 7
5477 &task_register_dyld_image_infos,
5478 &task_unregister_dyld_image_infos,
5479 &task_get_dyld_image_infos,
5480 &task_register_dyld_shared_cache_image_info,
5481 &task_register_dyld_set_dyld_state,
5482 &task_register_dyld_get_process_state
5483 };
5484
5485 __attribute__((noinline))
5486 static const char* useSimulatorDyld(int fd, const macho_header* mainExecutableMH, const char* dyldPath,
5487 int argc, const char* argv[], const char* envp[], const char* apple[],
5488 uintptr_t* startGlue, uintptr_t* mainAddr)
5489 {
5490 *startGlue = 0;
5491 *mainAddr = 0;
5492
5493 // <rdar://problem/25311921> simulator does not support restricted processes
5494 uint32_t flags;
5495 if ( csops(0, CS_OPS_STATUS, &flags, sizeof(flags)) == -1 )
5496 return "csops() failed";
5497 if ( (flags & CS_RESTRICT) == CS_RESTRICT )
5498 return "dyld_sim cannot be loaded in a restricted process";
5499 if ( issetugid() )
5500 return "dyld_sim cannot be loaded in a setuid process";
5501 if ( hasRestrictedSegment(mainExecutableMH) )
5502 return "dyld_sim cannot be loaded in a restricted process";
5503
5504 // get file size of dyld_sim
5505 struct stat sb;
5506 if ( fstat(fd, &sb) == -1 )
5507 return "stat(dyld_sim) failed";
5508
5509 // read first page of dyld_sim file
5510 uint8_t firstPage[4096];
5511 if ( pread(fd, firstPage, 4096, 0) != 4096 )
5512 return "pread(dyld_sim) failed";
5513
5514 // if fat file, pick matching slice
5515 uint64_t fileOffset = 0;
5516 uint64_t fileLength = sb.st_size;
5517 const fat_header* fileStartAsFat = (fat_header*)firstPage;
5518 if ( fileStartAsFat->magic == OSSwapBigToHostInt32(FAT_MAGIC) ) {
5519 if ( !fatFindBest(fileStartAsFat, &fileOffset, &fileLength) )
5520 return "no matching arch in dyld_sim";
5521 // re-read buffer from start of mach-o slice in fat file
5522 if ( pread(fd, firstPage, 4096, fileOffset) != 4096 )
5523 return "pread(dyld_sim) failed";
5524 }
5525 else if ( !isCompatibleMachO(firstPage, dyldPath) ) {
5526 return "dyld_sim not compatible mach-o";
5527 }
5528
5529 // calculate total size of dyld segments
5530 const macho_header* mh = (const macho_header*)firstPage;
5531 struct macho_segment_command* lastSeg = NULL;
5532 struct macho_segment_command* firstSeg = NULL;
5533 uintptr_t mappingSize = 0;
5534 uintptr_t preferredLoadAddress = 0;
5535 const uint32_t cmd_count = mh->ncmds;
5536 if ( mh->sizeofcmds > 4096 )
5537 return "dyld_sim load commands to large";
5538 if ( (sizeof(macho_header) + mh->sizeofcmds) > 4096 )
5539 return "dyld_sim load commands to large";
5540 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
5541 const struct load_command* const endCmds = (struct load_command*)(((char*)mh) + sizeof(macho_header) + mh->sizeofcmds);
5542 const struct load_command* cmd = cmds;
5543 for (uint32_t i = 0; i < cmd_count; ++i) {
5544 uint32_t cmdLength = cmd->cmdsize;
5545 if ( cmdLength < 8 )
5546 return "dyld_sim load command too small";
5547 const struct load_command* const nextCmd = (const struct load_command*)(((char*)cmd)+cmdLength);
5548 if ( (nextCmd > endCmds) || (nextCmd < cmd) )
5549 return "dyld_sim load command too large";
5550 switch (cmd->cmd) {
5551 case LC_SEGMENT_COMMAND:
5552 {
5553 struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
5554 if ( seg->vmaddr + seg->vmsize < seg->vmaddr )
5555 return "dyld_sim seg wraps address space";
5556 if ( seg->vmsize < seg->filesize )
5557 return "dyld_sim seg vmsize too small";
5558 if ( (seg->fileoff + seg->filesize) < seg->fileoff )
5559 return "dyld_sim seg size wraps address space";
5560 if ( lastSeg == NULL ) {
5561 // first segment must be __TEXT and start at beginning of file/slice
5562 firstSeg = seg;
5563 if ( strcmp(seg->segname, "__TEXT") != 0 )
5564 return "dyld_sim first segment not __TEXT";
5565 if ( seg->fileoff != 0 )
5566 return "dyld_sim first segment not at file offset zero";
5567 if ( seg->filesize < (sizeof(macho_header) + mh->sizeofcmds) )
5568 return "dyld_sim first segment smaller than load commands";
5569 preferredLoadAddress = seg->vmaddr;
5570 }
5571 else {
5572 // other segments must be contiguous with the previous segment and not executable
5573 if ( lastSeg->fileoff + lastSeg->filesize != seg->fileoff )
5574 return "dyld_sim segments not contiguous";
5575 if ( lastSeg->vmaddr + lastSeg->vmsize != seg->vmaddr )
5576 return "dyld_sim segments not address contiguous";
5577 if ( (seg->initprot & VM_PROT_EXECUTE) != 0 )
5578 return "dyld_sim non-first segment is executable";
5579 }
5580 mappingSize += seg->vmsize;
5581 lastSeg = seg;
5582 }
5583 break;
5584 case LC_SEGMENT_COMMAND_WRONG:
5585 return "dyld_sim wrong load segment load command";
5586 }
5587 cmd = nextCmd;
5588 }
5589 // last segment must be named __LINKEDIT and not writable
5590 if ( strcmp(lastSeg->segname, "__LINKEDIT") != 0 )
5591 return "dyld_sim last segment not __LINKEDIT";
5592 if ( lastSeg->initprot & VM_PROT_WRITE )
5593 return "dyld_sim __LINKEDIT segment writable";
5594
5595 // reserve space, then mmap each segment
5596 vm_address_t loadAddress = 0;
5597 if ( ::vm_allocate(mach_task_self(), &loadAddress, mappingSize, VM_FLAGS_ANYWHERE) != 0 )
5598 return "dyld_sim cannot allocate space";
5599 cmd = cmds;
5600 struct linkedit_data_command* codeSigCmd = NULL;
5601 struct source_version_command* dyldVersionCmd = NULL;
5602 for (uint32_t i = 0; i < cmd_count; ++i) {
5603 switch (cmd->cmd) {
5604 case LC_SEGMENT_COMMAND:
5605 {
5606 struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
5607 uintptr_t requestedLoadAddress = seg->vmaddr - preferredLoadAddress + loadAddress;
5608 void* segAddress = ::mmap((void*)requestedLoadAddress, seg->filesize, seg->initprot, MAP_FIXED | MAP_PRIVATE, fd, fileOffset + seg->fileoff);
5609 //dyld::log("dyld_sim %s mapped at %p\n", seg->segname, segAddress);
5610 if ( segAddress == (void*)(-1) )
5611 return "dyld_sim mmap() of segment failed";
5612 if ( ((uintptr_t)segAddress < loadAddress) || ((uintptr_t)segAddress+seg->filesize > loadAddress+mappingSize) )
5613 return "dyld_sim mmap() to wrong location";
5614 }
5615 break;
5616 case LC_CODE_SIGNATURE:
5617 codeSigCmd = (struct linkedit_data_command*)cmd;
5618 break;
5619 case LC_SOURCE_VERSION:
5620 dyldVersionCmd = (struct source_version_command*)cmd;
5621 break;
5622 }
5623 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5624 }
5625
5626 // must have code signature which is contained within LINKEDIT segment
5627 if ( codeSigCmd == NULL )
5628 return "dyld_sim not code signed";
5629 if ( codeSigCmd->dataoff < lastSeg->fileoff )
5630 return "dyld_sim code signature not in __LINKEDIT";
5631 if ( (codeSigCmd->dataoff + codeSigCmd->datasize) < codeSigCmd->dataoff )
5632 return "dyld_sim code signature size wraps";
5633 if ( (codeSigCmd->dataoff + codeSigCmd->datasize) > (lastSeg->fileoff + lastSeg->filesize) )
5634 return "dyld_sim code signature extends beyond __LINKEDIT";
5635
5636 fsignatures_t siginfo;
5637 siginfo.fs_file_start=fileOffset; // start of mach-o slice in fat file
5638 siginfo.fs_blob_start=(void*)(long)(codeSigCmd->dataoff); // start of code-signature in mach-o file
5639 siginfo.fs_blob_size=codeSigCmd->datasize; // size of code-signature
5640 int result = fcntl(fd, F_ADDFILESIGS_FOR_DYLD_SIM, &siginfo);
5641 if ( result == -1 ) {
5642 return mkstringf("dyld_sim fcntl(F_ADDFILESIGS_FOR_DYLD_SIM) failed with errno=%d", errno);
5643 }
5644 close(fd);
5645 // file range covered by code signature must extend up to code signature itself
5646 if ( siginfo.fs_file_start < codeSigCmd->dataoff )
5647 return mkstringf("dyld_sim code signature does not cover all of dyld_sim. Signature covers up to 0x%08lX. Signature starts at 0x%08X", (unsigned long)siginfo.fs_file_start, codeSigCmd->dataoff);
5648
5649
5650 // walk newly mapped dyld_sim __TEXT load commands to find entry point
5651 uintptr_t entry = 0;
5652 cmd = (struct load_command*)(((char*)loadAddress)+sizeof(macho_header));
5653 const uint32_t count = ((macho_header*)(loadAddress))->ncmds;
5654 for (uint32_t i = 0; i < count; ++i) {
5655 if (cmd->cmd == LC_UNIXTHREAD) {
5656 #if __i386__
5657 const i386_thread_state_t* registers = (i386_thread_state_t*)(((char*)cmd) + 16);
5658 // entry point must be in first segment
5659 if ( registers->__eip < firstSeg->vmaddr )
5660 return "dyld_sim entry point not in __TEXT segment";
5661 if ( registers->__eip > (firstSeg->vmaddr + firstSeg->vmsize) )
5662 return "dyld_sim entry point not in __TEXT segment";
5663 entry = (registers->__eip + loadAddress - preferredLoadAddress);
5664 #elif __x86_64__
5665 const x86_thread_state64_t* registers = (x86_thread_state64_t*)(((char*)cmd) + 16);
5666 // entry point must be in first segment
5667 if ( registers->__rip < firstSeg->vmaddr )
5668 return "dyld_sim entry point not in __TEXT segment";
5669 if ( registers->__rip > (firstSeg->vmaddr + firstSeg->vmsize) )
5670 return "dyld_sim entry point not in __TEXT segment";
5671 entry = (registers->__rip + loadAddress - preferredLoadAddress);
5672 #endif
5673 }
5674 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5675 }
5676
5677 // notify debugger that dyld_sim is loaded
5678 dyld_image_info info;
5679 info.imageLoadAddress = (mach_header*)loadAddress;
5680 info.imageFilePath = strdup(dyldPath);
5681 info.imageFileModDate = sb.st_mtime;
5682 addImagesToAllImages(1, &info);
5683 dyld::gProcessInfo->notification(dyld_image_adding, 1, &info);
5684
5685 const char** appleParams = apple;
5686 // jump into new simulator dyld
5687 typedef uintptr_t (*sim_entry_proc_t)(int argc, const char* argv[], const char* envp[], const char* apple[],
5688 const macho_header* mainExecutableMH, const macho_header* dyldMH, uintptr_t dyldSlide,
5689 const dyld::SyscallHelpers* vtable, uintptr_t* startGlue);
5690 sim_entry_proc_t newDyld = (sim_entry_proc_t)entry;
5691 *mainAddr = (*newDyld)(argc, argv, envp, appleParams, mainExecutableMH, (macho_header*)loadAddress,
5692 loadAddress - preferredLoadAddress,
5693 &sSysCalls, startGlue);
5694 return NULL;
5695 }
5696 #endif
5697
5698
5699 //
5700 // Entry point for dyld. The kernel loads dyld and jumps to __dyld_start which
5701 // sets up some registers and call this function.
5702 //
5703 // Returns address of main() in target program which __dyld_start jumps to
5704 //
5705 uintptr_t
5706 _main(const macho_header* mainExecutableMH, uintptr_t mainExecutableSlide,
5707 int argc, const char* argv[], const char* envp[], const char* apple[],
5708 uintptr_t* startGlue)
5709 {
5710 uintptr_t result = 0;
5711 sMainExecutableMachHeader = mainExecutableMH;
5712 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5713 // if this is host dyld, check to see if iOS simulator is being run
5714 const char* rootPath = _simple_getenv(envp, "DYLD_ROOT_PATH");
5715 if ( rootPath != NULL ) {
5716 // Add dyld to the kernel image info before we jump to the sim
5717 notifyKernelAboutDyld();
5718
5719 // look to see if simulator has its own dyld
5720 char simDyldPath[PATH_MAX];
5721 strlcpy(simDyldPath, rootPath, PATH_MAX);
5722 strlcat(simDyldPath, "/usr/lib/dyld_sim", PATH_MAX);
5723 int fd = my_open(simDyldPath, O_RDONLY, 0);
5724 if ( fd != -1 ) {
5725 const char* errMessage = useSimulatorDyld(fd, mainExecutableMH, simDyldPath, argc, argv, envp, apple, startGlue, &result);
5726 if ( errMessage != NULL )
5727 halt(errMessage);
5728 return result;
5729 }
5730 }
5731 #endif
5732
5733 CRSetCrashLogMessage("dyld: launch started");
5734
5735 setContext(mainExecutableMH, argc, argv, envp, apple);
5736
5737 // Pickup the pointer to the exec path.
5738 sExecPath = _simple_getenv(apple, "executable_path");
5739
5740 // <rdar://problem/13868260> Remove interim apple[0] transition code from dyld
5741 if (!sExecPath) sExecPath = apple[0];
5742
5743 if ( sExecPath[0] != '/' ) {
5744 // have relative path, use cwd to make absolute
5745 char cwdbuff[MAXPATHLEN];
5746 if ( getcwd(cwdbuff, MAXPATHLEN) != NULL ) {
5747 // maybe use static buffer to avoid calling malloc so early...
5748 char* s = new char[strlen(cwdbuff) + strlen(sExecPath) + 2];
5749 strcpy(s, cwdbuff);
5750 strcat(s, "/");
5751 strcat(s, sExecPath);
5752 sExecPath = s;
5753 }
5754 }
5755 // Remember short name of process for later logging
5756 sExecShortName = ::strrchr(sExecPath, '/');
5757 if ( sExecShortName != NULL )
5758 ++sExecShortName;
5759 else
5760 sExecShortName = sExecPath;
5761
5762 configureProcessRestrictions(mainExecutableMH);
5763
5764 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5765 if ( gLinkContext.processIsRestricted ) {
5766 pruneEnvironmentVariables(envp, &apple);
5767 // set again because envp and apple may have changed or moved
5768 setContext(mainExecutableMH, argc, argv, envp, apple);
5769 }
5770 else
5771 #endif
5772 {
5773 checkEnvironmentVariables(envp);
5774 defaultUninitializedFallbackPaths(envp);
5775 }
5776 if ( sEnv.DYLD_PRINT_OPTS )
5777 printOptions(argv);
5778 if ( sEnv.DYLD_PRINT_ENV )
5779 printEnvironmentVariables(envp);
5780 getHostInfo(mainExecutableMH, mainExecutableSlide);
5781 // install gdb notifier
5782 stateToHandlers(dyld_image_state_dependents_mapped, sBatchHandlers)->push_back(notifyGDB);
5783 stateToHandlers(dyld_image_state_mapped, sSingleHandlers)->push_back(updateAllImages);
5784 // make initial allocations large enough that it is unlikely to need to be re-alloced
5785 sImageRoots.reserve(16);
5786 sAddImageCallbacks.reserve(4);
5787 sRemoveImageCallbacks.reserve(4);
5788 sImageFilesNeedingTermination.reserve(16);
5789 sImageFilesNeedingDOFUnregistration.reserve(8);
5790
5791 #if !TARGET_IPHONE_SIMULATOR
5792 #ifdef WAIT_FOR_SYSTEM_ORDER_HANDSHAKE
5793 // <rdar://problem/6849505> Add gating mechanism to dyld support system order file generation process
5794 WAIT_FOR_SYSTEM_ORDER_HANDSHAKE(dyld::gProcessInfo->systemOrderFlag);
5795 #endif
5796 #endif
5797
5798
5799 try {
5800 // add dyld itself to UUID list
5801 addDyldImageToUUIDList();
5802 notifyKernelAboutDyld();
5803
5804 #if SUPPORT_ACCELERATE_TABLES
5805 bool mainExecutableAlreadyRebased = false;
5806
5807 reloadAllImages:
5808 #endif
5809
5810 CRSetCrashLogMessage(sLoadingCrashMessage);
5811 // instantiate ImageLoader for main executable
5812 sMainExecutable = instantiateFromLoadedImage(mainExecutableMH, mainExecutableSlide, sExecPath);
5813 gLinkContext.mainExecutable = sMainExecutable;
5814 gLinkContext.mainExecutableCodeSigned = hasCodeSignatureLoadCommand(mainExecutableMH);
5815
5816 #if TARGET_IPHONE_SIMULATOR
5817 // check main executable is not too new for this OS
5818 {
5819 if ( ! isSimulatorBinary((uint8_t*)mainExecutableMH, sExecPath) ) {
5820 throwf("program was built for a platform that is not supported by this runtime");
5821 }
5822 uint32_t mainMinOS = sMainExecutable->minOSVersion();
5823
5824 // dyld is always built for the current OS, so we can get the current OS version
5825 // from the load command in dyld itself.
5826 uint32_t dyldMinOS = ImageLoaderMachO::minOSVersion((const mach_header*)&__dso_handle);
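// min-OS versions are packed as (major << 16) | (minor << 8) | patch,
// e.g. 0x000A0200 == 10.2.0, which is why the messages below shift by 16 and 8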
5827 if ( mainMinOS > dyldMinOS ) {
5828 #if TARGET_OS_WATCH
5829 throwf("app was built for watchOS %d.%d which is newer than this simulator %d.%d",
5830 mainMinOS >> 16, ((mainMinOS >> 8) & 0xFF),
5831 dyldMinOS >> 16, ((dyldMinOS >> 8) & 0xFF));
5832 #elif TARGET_OS_TV
5833 throwf("app was built for tvOS %d.%d which is newer than this simulator %d.%d",
5834 mainMinOS >> 16, ((mainMinOS >> 8) & 0xFF),
5835 dyldMinOS >> 16, ((dyldMinOS >> 8) & 0xFF));
5836 #else
5837 throwf("app was built for iOS %d.%d which is newer than this simulator %d.%d",
5838 mainMinOS >> 16, ((mainMinOS >> 8) & 0xFF),
5839 dyldMinOS >> 16, ((dyldMinOS >> 8) & 0xFF));
5840 #endif
5841 }
5842 }
5843 #endif
5844
5845
5846 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5847 // <rdar://problem/22805519> be less strict about old mach-o binaries
5848 uint32_t mainSDK = sMainExecutable->sdkVersion();
5849 gLinkContext.strictMachORequired = (mainSDK >= DYLD_MACOSX_VERSION_10_12) || gLinkContext.processUsingLibraryValidation;
5850 #else
5851 // simulators, iOS, tvOS, and watchOS are always strict
5852 gLinkContext.strictMachORequired = true;
5853 #endif
5854
5855 // load shared cache
5856 checkSharedRegionDisable();
5857 #if DYLD_SHARED_CACHE_SUPPORT
5858 if ( gLinkContext.sharedRegionMode != ImageLoader::kDontUseSharedRegion ) {
5859 mapSharedCache();
5860 } else {
5861 dyld_kernel_image_info_t kernelCacheInfo;
5862 bzero(&kernelCacheInfo.uuid[0], sizeof(uuid_t));
5863 kernelCacheInfo.load_addr = 0;
5864 kernelCacheInfo.fsobjid.fid_objno = 0;
5865 kernelCacheInfo.fsobjid.fid_generation = 0;
5866 kernelCacheInfo.fsid.val[0] = 0;
5867 kernelCacheInfo.fsid.val[1] = 0;
5868 task_register_dyld_shared_cache_image_info(mach_task_self(), kernelCacheInfo, true, false);
5869 }
5870 #endif
5871
5872 #if SUPPORT_ACCELERATE_TABLES
5873 sAllImages.reserve((sAllCacheImagesProxy != NULL) ? 16 : INITIAL_IMAGE_COUNT);
5874 #else
5875 sAllImages.reserve(INITIAL_IMAGE_COUNT);
5876 #endif
5877
5878 // Now that the shared cache is loaded, set up any versioned dylib overrides
5879 #if SUPPORT_VERSIONED_PATHS
5880 checkVersionedPaths();
5881 #endif
5882
5883
5884 // dyld_all_image_infos image list does not contain dyld
5885 // add it as dyldPath field in dyld_all_image_infos
5886 // for simulator, dyld_sim is in image list, need host dyld added
5887 #if TARGET_IPHONE_SIMULATOR
5888 // get path of host dyld from table of syscall vectors in host dyld
5889 void* addressInDyld = gSyscallHelpers;
5890 #else
5891 // get path of dyld itself
5892 void* addressInDyld = (void*)&__dso_handle;
5893 #endif
5894 char dyldPathBuffer[MAXPATHLEN+1];
5895 int len = proc_regionfilename(getpid(), (uint64_t)(long)addressInDyld, dyldPathBuffer, MAXPATHLEN);
5896 if ( (len != 0) && (strcmp(dyldPathBuffer, gProcessInfo->dyldPath) != 0) ) {
5897 gProcessInfo->dyldPath = strdup(dyldPathBuffer);
5898 }
5899
5900 // load any inserted libraries
5901 if ( sEnv.DYLD_INSERT_LIBRARIES != NULL ) {
5902 for (const char* const* lib = sEnv.DYLD_INSERT_LIBRARIES; *lib != NULL; ++lib)
5903 loadInsertedDylib(*lib);
5904 }
5905 // record count of inserted libraries so that a flat search will look at
5906 // inserted libraries, then main, then others.
5907 sInsertedDylibCount = sAllImages.size()-1;
5908
5909 // link main executable
5910 gLinkContext.linkingMainExecutable = true;
5911 #if SUPPORT_ACCELERATE_TABLES
5912 if ( mainExecutableAlreadyRebased ) {
5913 // previous link() on main executable has already adjusted its internal pointers for ASLR
5914 // work around that by rebasing by inverse amount
5915 sMainExecutable->rebase(gLinkContext, -mainExecutableSlide);
5916 }
5917 #endif
5918 link(sMainExecutable, sEnv.DYLD_BIND_AT_LAUNCH, true, ImageLoader::RPathChain(NULL, NULL), -1);
5919 sMainExecutable->setNeverUnloadRecursive();
5920 if ( sMainExecutable->forceFlat() ) {
5921 gLinkContext.bindFlat = true;
5922 gLinkContext.prebindUsage = ImageLoader::kUseNoPrebinding;
5923 }
5924
5925 // link any inserted libraries
5926 // do this after linking main executable so that any dylibs pulled in by inserted
5927 // dylibs (e.g. libSystem) will not be in front of dylibs the program uses
5928 if ( sInsertedDylibCount > 0 ) {
5929 for(unsigned int i=0; i < sInsertedDylibCount; ++i) {
5930 ImageLoader* image = sAllImages[i+1];
5931 link(image, sEnv.DYLD_BIND_AT_LAUNCH, true, ImageLoader::RPathChain(NULL, NULL), -1);
5932 image->setNeverUnloadRecursive();
5933 }
5934 // only INSERTED libraries can interpose
5935 // register interposing info after all inserted libraries are bound so chaining works
5936 for(unsigned int i=0; i < sInsertedDylibCount; ++i) {
5937 ImageLoader* image = sAllImages[i+1];
5938 image->registerInterposing();
5939 }
5940 }
5941
5942 // <rdar://problem/19315404> dyld should support interposition even without DYLD_INSERT_LIBRARIES
5943 for (long i=sInsertedDylibCount+1; i < sAllImages.size(); ++i) {
5944 ImageLoader* image = sAllImages[i];
5945 if ( image->inSharedCache() )
5946 continue;
5947 image->registerInterposing();
5948 }
5949 #if SUPPORT_ACCELERATE_TABLES
5950 if ( (sAllCacheImagesProxy != NULL) && ImageLoader::haveInterposingTuples() ) {
5951 // Accelerator tables cannot be used with implicit interposing, so reload all images with accelerator tables disabled
5952 ImageLoader::clearInterposingTuples();
5953 // unmap all loaded dylibs (but not main executable)
5954 for (long i=1; i < sAllImages.size(); ++i) {
5955 ImageLoader* image = sAllImages[i];
5956 if ( image == sMainExecutable )
5957 continue;
5958 if ( image == sAllCacheImagesProxy )
5959 continue;
5960 image->setCanUnload();
5961 ImageLoader::deleteImage(image);
5962 }
5963 // note: we don't need to worry about inserted images because if DYLD_INSERT_LIBRARIES was set we would not be using the accelerator table
5964 sAllImages.clear();
5965 sImageRoots.clear();
5966 sImageFilesNeedingTermination.clear();
5967 sImageFilesNeedingDOFUnregistration.clear();
5968 sAddImageCallbacks.clear();
5969 sRemoveImageCallbacks.clear();
5970 sDisableAcceleratorTables = true;
5971 sAllCacheImagesProxy = NULL;
5972 sMappedRangesStart = NULL;
5973 mainExecutableAlreadyRebased = true;
5974 gLinkContext.linkingMainExecutable = false;
5975 resetAllImages();
5976 goto reloadAllImages;
5977 }
5978 #endif
5979
5980 // apply interposing to initial set of images
5981 for(int i=0; i < sImageRoots.size(); ++i) {
5982 sImageRoots[i]->applyInterposing(gLinkContext);
5983 }
5984 gLinkContext.linkingMainExecutable = false;
5985
5986 // <rdar://problem/12186933> do weak binding only after all inserted images linked
5987 sMainExecutable->weakBind(gLinkContext);
5988
5989 #if DYLD_SHARED_CACHE_SUPPORT
5990 // If cache has branch island dylibs, tell debugger about them
5991 if ( (sSharedCache != NULL) && (sSharedCache->mappingOffset >= 0x78) && (sSharedCache->branchPoolsOffset != 0) ) {
5992 uint32_t count = sSharedCache->branchPoolsCount;
5993 dyld_image_info info[count];
5994 const uint64_t* poolAddress = (uint64_t*)((char*)sSharedCache + sSharedCache->branchPoolsOffset);
5995 // <rdar://problem/20799203> empty branch pools can be in development cache
5996 if ( ((mach_header*)poolAddress)->magic == sMainExecutableMachHeader->magic ) {
5997 for (int poolIndex=0; poolIndex < count; ++poolIndex) {
5998 uint64_t poolAddr = poolAddress[poolIndex] + sSharedCacheSlide;
5999 info[poolIndex].imageLoadAddress = (mach_header*)(long)poolAddr;
6000 info[poolIndex].imageFilePath = "dyld_shared_cache_branch_islands";
6001 info[poolIndex].imageFileModDate = 0;
6002 }
6003 // add to all_images list
6004 addImagesToAllImages(count, info);
6005 // tell gdb about new branch island images
6006 gProcessInfo->notification(dyld_image_adding, count, info);
6007 }
6008 }
6009 #endif
6010
6011 CRSetCrashLogMessage("dyld: launch, running initializers");
6012 #if SUPPORT_OLD_CRT_INITIALIZATION
6013 // Old way is to run initializers via a callback from crt1.o
6014 if ( ! gRunInitializersOldWay )
6015 initializeMainExecutable();
6016 #else
6017 // run all initializers
6018 initializeMainExecutable();
6019 #endif
6020 // find entry point for main executable
6021 result = (uintptr_t)sMainExecutable->getThreadPC();
6022 if ( result != 0 ) {
6023 // main executable uses LC_MAIN, needs to return to glue in libdyld.dylib
6024 if ( (gLibSystemHelpers != NULL) && (gLibSystemHelpers->version >= 9) )
6025 *startGlue = (uintptr_t)gLibSystemHelpers->startGlueToCallExit;
6026 else
6027 halt("libdyld.dylib support not present for LC_MAIN");
6028 }
6029 else {
6030 // main executable uses LC_UNIXTHREAD, dyld needs to let "start" in program set up for main()
6031 result = (uintptr_t)sMainExecutable->getMain();
6032 *startGlue = 0;
6033 }
6034 }
6035 catch(const char* message) {
6036 syncAllImages();
6037 halt(message);
6038 }
6039 catch(...) {
6040 dyld::log("dyld: launch failed\n");
6041 }
6042
6043 CRSetCrashLogMessage(NULL);
6044
6045 return result;
6046 }
6047
6048
6049
6050 } // namespace
6051
6052
6053