[apple/dyld.git] / src / dyld.cpp (blob 4aff26a8ce56ef723a5ef973c55ecc47dcc51ced)
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2004-2013 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25 #include <stdint.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <errno.h>
29 #include <fcntl.h>
30 #include <dirent.h>
31 #include <pthread.h>
32 #include <libproc.h>
33 #include <sys/param.h>
34 #include <mach/mach_time.h> // mach_absolute_time()
35 #include <mach/mach_init.h>
36 #include <sys/types.h>
37 #include <sys/stat.h>
38 #include <sys/syscall.h>
39 #include <sys/socket.h>
40 #include <sys/un.h>
41 #include <sys/syslog.h>
42 #include <sys/uio.h>
43 #include <mach-o/fat.h>
44 #include <mach-o/loader.h>
45 #include <mach-o/ldsyms.h>
46 #include <libkern/OSByteOrder.h>
47 #include <libkern/OSAtomic.h>
48 #include <mach/mach.h>
49 #include <sys/sysctl.h>
50 #include <sys/mman.h>
51 #include <sys/dtrace.h>
52 #include <libkern/OSAtomic.h>
53 #include <Availability.h>
54 #include <System/sys/codesign.h>
55 #include <System/sys/csr.h>
56 #include <_simple.h>
57 #include <os/lock_private.h>
58 #include <System/machine/cpu_capabilities.h>
59 #include <System/sys/reason.h>
60 #include <kern/kcdata.h>
61 #include <sandbox.h>
62 #include <sandbox/private.h>
63
64 #include <array>
65
66 #ifndef CPU_SUBTYPE_ARM_V5TEJ
67 #define CPU_SUBTYPE_ARM_V5TEJ ((cpu_subtype_t) 7)
68 #endif
69 #ifndef CPU_SUBTYPE_ARM_XSCALE
70 #define CPU_SUBTYPE_ARM_XSCALE ((cpu_subtype_t) 8)
71 #endif
72 #ifndef CPU_SUBTYPE_ARM_V7
73 #define CPU_SUBTYPE_ARM_V7 ((cpu_subtype_t) 9)
74 #endif
75 #ifndef CPU_SUBTYPE_ARM_V7F
76 #define CPU_SUBTYPE_ARM_V7F ((cpu_subtype_t) 10)
77 #endif
78 #ifndef CPU_SUBTYPE_ARM_V7S
79 #define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t) 11)
80 #endif
81 #ifndef CPU_SUBTYPE_ARM_V7K
82 #define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t) 12)
83 #endif
84 #ifndef LC_DYLD_ENVIRONMENT
85 #define LC_DYLD_ENVIRONMENT 0x27
86 #endif
87
88 #ifndef CPU_SUBTYPE_X86_64_H
89 #define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t) 8)
90 #endif
91
92 #ifndef VM_PROT_SLIDE
93 #define VM_PROT_SLIDE 0x20
94 #endif
95
96 #include <vector>
97 #include <algorithm>
98
99 #include "mach-o/dyld_gdb.h"
100
101 #include "dyld.h"
102 #include "ImageLoader.h"
103 #include "ImageLoaderMachO.h"
104 #include "dyldLibSystemInterface.h"
105 #if DYLD_SHARED_CACHE_SUPPORT
106 #include "dyld_cache_format.h"
107 #endif
108 #include "dyld_process_info_internal.h"
109 #include <coreSymbolicationDyldSupport.h>
110 #if TARGET_IPHONE_SIMULATOR
111 extern "C" void xcoresymbolication_load_notifier(void *connection, uint64_t load_timestamp, const char *image_path, const struct mach_header *mach_header);
112 extern "C" void xcoresymbolication_unload_notifier(void *connection, uint64_t unload_timestamp, const char *image_path, const struct mach_header *mach_header);
113 #define coresymbolication_load_notifier(c, t, p, h) xcoresymbolication_load_notifier(c, t, p, h)
114 #define coresymbolication_unload_notifier(c, t, p, h) xcoresymbolication_unload_notifier(c, t, p, h)
115 #endif
116
117 #if SUPPORT_ACCELERATE_TABLES
118 #include "ImageLoaderMegaDylib.h"
119 #endif
120
121 #if TARGET_IPHONE_SIMULATOR
122 extern "C" void* gSyscallHelpers;
123 #else
124 #include "dyldSyscallInterface.h"
125 #endif
126
127
128 // there is no libc header for the send() syscall interface
129 extern "C" ssize_t __sendto(int, const void *, size_t, int, const struct sockaddr *, socklen_t);
130
131
132 // ARM and x86_64 are the only architectures that use cpu sub-types
133 #define CPU_SUBTYPES_SUPPORTED ((__arm__ || __x86_64__) && !TARGET_IPHONE_SIMULATOR)
134
135 #if __LP64__
136 #define LC_SEGMENT_COMMAND LC_SEGMENT_64
137 #define LC_SEGMENT_COMMAND_WRONG LC_SEGMENT
138 #define LC_ENCRYPT_COMMAND LC_ENCRYPTION_INFO
139 #define macho_segment_command segment_command_64
140 #define macho_section section_64
141 #else
142 #define LC_SEGMENT_COMMAND LC_SEGMENT
143 #define LC_SEGMENT_COMMAND_WRONG LC_SEGMENT_64
144 #define LC_ENCRYPT_COMMAND LC_ENCRYPTION_INFO_64
145 #define macho_segment_command segment_command
146 #define macho_section section
147 #endif
148
149
150
151 #define CPU_TYPE_MASK 0x00FFFFFF /* complement of CPU_ARCH_MASK */
152
153
154 /* implemented in dyld_gdb.cpp */
155 extern void resetAllImages();
156 extern void addImagesToAllImages(uint32_t infoCount, const dyld_image_info info[]);
157 extern void removeImageFromAllImages(const mach_header* mh);
158 extern void addNonSharedCacheImageUUID(const dyld_uuid_info& info);
159 extern const char* notifyGDB(enum dyld_image_states state, uint32_t infoCount, const dyld_image_info info[]);
160 extern size_t allImagesCount();
161
162 // magic so CrashReporter logs message
163 extern "C" {
164 char error_string[1024];
165 }
166
167 // magic linker symbol for start of dyld binary
168 extern "C" const macho_header __dso_handle;
169
170
171 //
172 // This file contains the core of dyld used to get a process to main().
173 // The APIs that dyld supports are implemented in dyldAPIs.cpp.
174 //
175 //
176 //
177 //
178 //
179 namespace dyld {
180 struct RegisteredDOF { const mach_header* mh; int registrationID; };
181 struct DylibOverride { const char* installName; const char* override; };
182 }
183
184
185 VECTOR_NEVER_DESTRUCTED(ImageLoader*);
186 VECTOR_NEVER_DESTRUCTED(dyld::RegisteredDOF);
187 VECTOR_NEVER_DESTRUCTED(dyld::ImageCallback);
188 VECTOR_NEVER_DESTRUCTED(dyld::DylibOverride);
189 VECTOR_NEVER_DESTRUCTED(ImageLoader::DynamicReference);
190
191 VECTOR_NEVER_DESTRUCTED(dyld_image_state_change_handler);
192
193 namespace dyld {
194
195
196 //
197 // state of all environment variables dyld uses
198 //
199 struct EnvironmentVariables {
200 const char* const * DYLD_FRAMEWORK_PATH;
201 const char* const * DYLD_FALLBACK_FRAMEWORK_PATH;
202 const char* const * DYLD_LIBRARY_PATH;
203 const char* const * DYLD_FALLBACK_LIBRARY_PATH;
204 const char* const * DYLD_INSERT_LIBRARIES;
205 const char* const * LD_LIBRARY_PATH; // for unix conformance
206 const char* const * DYLD_VERSIONED_LIBRARY_PATH;
207 const char* const * DYLD_VERSIONED_FRAMEWORK_PATH;
208 bool DYLD_PRINT_LIBRARIES_POST_LAUNCH;
209 bool DYLD_BIND_AT_LAUNCH;
210 bool DYLD_PRINT_STATISTICS;
211 bool DYLD_PRINT_STATISTICS_DETAILS;
212 bool DYLD_PRINT_OPTS;
213 bool DYLD_PRINT_ENV;
214 bool DYLD_DISABLE_DOFS;
215 bool DYLD_PRINT_CS_NOTIFICATIONS;
216 // DYLD_SHARED_CACHE_DONT_VALIDATE ==> sSharedCacheIgnoreInodeAndTimeStamp
217 // DYLD_SHARED_CACHE_DIR ==> sSharedCacheDir
218 // DYLD_ROOT_PATH ==> gLinkContext.rootPaths
219 // DYLD_IMAGE_SUFFIX ==> gLinkContext.imageSuffix
220 // DYLD_PRINT_OPTS ==> gLinkContext.verboseOpts
221 // DYLD_PRINT_ENV ==> gLinkContext.verboseEnv
222 // DYLD_FORCE_FLAT_NAMESPACE ==> gLinkContext.bindFlat
223 // DYLD_PRINT_INITIALIZERS ==> gLinkContext.verboseInit
224 // DYLD_PRINT_SEGMENTS ==> gLinkContext.verboseMapping
225 // DYLD_PRINT_BINDINGS ==> gLinkContext.verboseBind
226 // DYLD_PRINT_WEAK_BINDINGS ==> gLinkContext.verboseWeakBind
227 // DYLD_PRINT_REBASINGS ==> gLinkContext.verboseRebase
228 // DYLD_PRINT_DOFS ==> gLinkContext.verboseDOF
229 // DYLD_PRINT_APIS ==> gLogAPIs
230 // DYLD_IGNORE_PREBINDING ==> gLinkContext.prebindUsage
231 // DYLD_PREBIND_DEBUG ==> gLinkContext.verbosePrebinding
232 // DYLD_NEW_LOCAL_SHARED_REGIONS ==> gLinkContext.sharedRegionMode
233 // DYLD_SHARED_REGION ==> gLinkContext.sharedRegionMode
234 // DYLD_PRINT_WARNINGS ==> gLinkContext.verboseWarnings
235 // DYLD_PRINT_RPATHS ==> gLinkContext.verboseRPaths
236 // DYLD_PRINT_INTERPOSING ==> gLinkContext.verboseInterposing
237 // DYLD_PRINT_LIBRARIES ==> gLinkContext.verboseLoading
238 };
239
240
241
242 typedef std::vector<dyld_image_state_change_handler> StateHandlers;
243
244
245 enum EnvVarMode { envNone, envPrintOnly, envAll };
246
247 // all global state
248 static const char* sExecPath = NULL;
249 static const char* sExecShortName = NULL;
250 static const macho_header* sMainExecutableMachHeader = NULL;
251 #if CPU_SUBTYPES_SUPPORTED
252 static cpu_type_t sHostCPU;
253 static cpu_subtype_t sHostCPUsubtype;
254 #endif
255 static ImageLoaderMachO* sMainExecutable = NULL;
256 static EnvVarMode sEnvMode = envNone;
257 static size_t sInsertedDylibCount = 0;
258 static std::vector<ImageLoader*> sAllImages;
259 static std::vector<ImageLoader*> sImageRoots;
260 static std::vector<ImageLoader*> sImageFilesNeedingTermination;
261 static std::vector<RegisteredDOF> sImageFilesNeedingDOFUnregistration;
262 static std::vector<ImageCallback> sAddImageCallbacks;
263 static std::vector<ImageCallback> sRemoveImageCallbacks;
264 static bool sRemoveImageCallbacksInUse = false;
265 static void* sSingleHandlers[7][3];
266 static void* sBatchHandlers[7][3];
267 static ImageLoader* sLastImageByAddressCache;
268 static EnvironmentVariables sEnv;
269 #if __MAC_OS_X_VERSION_MIN_REQUIRED
270 static const char* sFrameworkFallbackPaths[] = { "$HOME/Library/Frameworks", "/Library/Frameworks", "/Network/Library/Frameworks", "/System/Library/Frameworks", NULL };
271 static const char* sLibraryFallbackPaths[] = { "$HOME/lib", "/usr/local/lib", "/usr/lib", NULL };
272 #else
273 static const char* sFrameworkFallbackPaths[] = { "/System/Library/Frameworks", NULL };
274 static const char* sLibraryFallbackPaths[] = { "/usr/local/lib", "/usr/lib", NULL };
275 #endif
276 static const char* sRestrictedFrameworkFallbackPaths[] = { "/System/Library/Frameworks", NULL };
277 static const char* sRestrictedLibraryFallbackPaths[] = { "/usr/lib", NULL };
278 static UndefinedHandler sUndefinedHandler = NULL;
279 static ImageLoader* sBundleBeingLoaded = NULL; // hack until OFI is reworked
280 #if DYLD_SHARED_CACHE_SUPPORT
281 static const dyld_cache_header* sSharedCache = NULL;
282 static long sSharedCacheSlide = 0;
283 static bool sSharedCacheIgnoreInodeAndTimeStamp = false;
284 bool gSharedCacheOverridden = false;
285 #if __IPHONE_OS_VERSION_MIN_REQUIRED
286 static const char* sSharedCacheDir = IPHONE_DYLD_SHARED_CACHE_DIR;
287 #define ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE 1024
288 #else
289 static const char* sSharedCacheDir = MACOSX_DYLD_SHARED_CACHE_DIR;
290 #endif
291 #endif
292 ImageLoader::LinkContext gLinkContext;
293 bool gLogAPIs = false;
294 #if SUPPORT_ACCELERATE_TABLES
295 bool gLogAppAPIs = false;
296 #endif
297 const struct LibSystemHelpers* gLibSystemHelpers = NULL;
298 #if SUPPORT_OLD_CRT_INITIALIZATION
299 bool gRunInitializersOldWay = false;
300 #endif
301 static std::vector<DylibOverride> sDylibOverrides;
302 #if !TARGET_IPHONE_SIMULATOR
303 static int sLogSocket = -1;
304 #endif
305 static bool sFrameworksFoundAsDylibs = false;
306 #if __x86_64__ && DYLD_SHARED_CACHE_SUPPORT
307 static bool sHaswell = false;
308 #endif
309 static std::vector<ImageLoader::DynamicReference> sDynamicReferences;
310 static OSSpinLock sDynamicReferencesLock = 0;
311 #if !TARGET_IPHONE_SIMULATOR
312 static bool sLogToFile = false;
313 #endif
314 static char sLoadingCrashMessage[1024] = "dyld: launch, loading dependent libraries";
315 static bool sSafeMode = false;
316 static _dyld_objc_notify_mapped sNotifyObjCMapped;
317 static _dyld_objc_notify_init sNotifyObjCInit;
318 static _dyld_objc_notify_unmapped sNotifyObjCUnmapped;
319
320 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
321 static bool sForceStderr = false;
322 #endif
323
324
325
326 #if SUPPORT_ACCELERATE_TABLES
327 static ImageLoaderMegaDylib* sAllCacheImagesProxy = NULL;
328 static bool sDisableAcceleratorTables = false;
329 #endif
330
331 //
332 // The MappedRanges structure is used for fast address->image lookups.
333 // The table is only updated when the dyld lock is held, so we don't
334 // need to worry about multiple writers. But readers may look at this
335 // data without holding the lock. Therefore, all updates must be done
336 // in an order that will never cause readers to see inconsistent data.
337 // The general rule is that if the image field is non-NULL then
338 // the other fields are valid.
339 //
340 struct MappedRanges
341 {
342 MappedRanges* next;
343 unsigned long count;
344 struct {
345 ImageLoader* image;
346 uintptr_t start;
347 uintptr_t end;
348 } array[1];
349 };
350
351 static MappedRanges* sMappedRangesStart;
352
353 void addMappedRange(ImageLoader* image, uintptr_t start, uintptr_t end)
354 {
355 //dyld::log("addMappedRange(0x%lX->0x%lX) for %s\n", start, end, image->getShortName());
356 for (MappedRanges* p = sMappedRangesStart; p != NULL; p = p->next) {
357 for (unsigned long i=0; i < p->count; ++i) {
358 if ( p->array[i].image == NULL ) {
359 p->array[i].start = start;
360 p->array[i].end = end;
361 // add image field last with a barrier so that any reader will see consistent records
362 OSMemoryBarrier();
363 p->array[i].image = image;
364 return;
365 }
366 }
367 }
368 // table must be full, chain another
369 #if SUPPORT_ACCELERATE_TABLES
370 unsigned count = (sAllCacheImagesProxy != NULL) ? 16 : 400;
371 #else
372 unsigned count = 400;
373 #endif
374 size_t allocationSize = sizeof(MappedRanges) + (count-1)*3*sizeof(void*);
375 MappedRanges* newRanges = (MappedRanges*)malloc(allocationSize);
376 bzero(newRanges, allocationSize);
377 newRanges->count = count;
378 newRanges->array[0].start = start;
379 newRanges->array[0].end = end;
380 newRanges->array[0].image = image;
381 OSMemoryBarrier();
382 if ( sMappedRangesStart == NULL ) {
383 sMappedRangesStart = newRanges;
384 }
385 else {
386 for (MappedRanges* p = sMappedRangesStart; p != NULL; p = p->next) {
387 if ( p->next == NULL ) {
388 OSMemoryBarrier();
389 p->next = newRanges;
390 break;
391 }
392 }
393 }
394 }
395
396 void removedMappedRanges(ImageLoader* image)
397 {
398 for (MappedRanges* p = sMappedRangesStart; p != NULL; p = p->next) {
399 for (unsigned long i=0; i < p->count; ++i) {
400 if ( p->array[i].image == image ) {
401 // clear with a barrier so that any reader will see consistent records
402 OSMemoryBarrier();
403 p->array[i].image = NULL;
404 }
405 }
406 }
407 }
408
409 ImageLoader* findMappedRange(uintptr_t target)
410 {
411 for (MappedRanges* p = sMappedRangesStart; p != NULL; p = p->next) {
412 for (unsigned long i=0; i < p->count; ++i) {
413 if ( p->array[i].image != NULL ) {
414 if ( (p->array[i].start <= target) && (target < p->array[i].end) )
415 return p->array[i].image;
416 }
417 }
418 }
419 return NULL;
420 }
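//
// The three functions above are an instance of the publish-with-barrier pattern
// described in the MappedRanges comment: the single writer (which holds the dyld
// lock) fills in start/end first, then publishes the entry by storing a non-NULL
// image pointer after a barrier, so lock-free readers that see a non-NULL image
// always see valid bounds. Below is a minimal standalone sketch of the same idea,
// using hypothetical names and C++11 atomics instead of OSMemoryBarrier().
//
#if 0 // illustrative sketch only, not part of dyld
#include <atomic>
#include <cstdint>

struct Range {
    uintptr_t           start;
    uintptr_t           end;
    std::atomic<void*>  image { nullptr };   // non-NULL means start/end are valid
};

static Range gRanges[400];

// writer: runs with the dyld lock held, so there is only ever one writer
static void publishRange(void* image, uintptr_t start, uintptr_t end)
{
    for (Range& r : gRanges) {
        if ( r.image.load(std::memory_order_acquire) == nullptr ) {
            r.start = start;
            r.end   = end;
            // release store: any reader that sees this image also sees start/end
            r.image.store(image, std::memory_order_release);
            return;
        }
    }
}

// reader: may run without holding the lock
static void* lookupRange(uintptr_t target)
{
    for (const Range& r : gRanges) {
        void* img = r.image.load(std::memory_order_acquire);
        if ( (img != nullptr) && (r.start <= target) && (target < r.end) )
            return img;
    }
    return nullptr;
}
#endif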
421
422
423
424 const char* mkstringf(const char* format, ...)
425 {
426 _SIMPLE_STRING buf = _simple_salloc();
427 if ( buf != NULL ) {
428 va_list list;
429 va_start(list, format);
430 _simple_vsprintf(buf, format, list);
431 va_end(list);
432 const char* t = strdup(_simple_string(buf));
433 _simple_sfree(buf);
434 if ( t != NULL )
435 return t;
436 }
437 return "mkstringf, out of memory error";
438 }
439
440
441 void throwf(const char* format, ...)
442 {
443 _SIMPLE_STRING buf = _simple_salloc();
444 if ( buf != NULL ) {
445 va_list list;
446 va_start(list, format);
447 _simple_vsprintf(buf, format, list);
448 va_end(list);
449 const char* t = strdup(_simple_string(buf));
450 _simple_sfree(buf);
451 if ( t != NULL )
452 throw t;
453 }
454 throw "throwf, out of memory error";
455 }
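//
// throwf() formats its message into a heap-allocated C string and throws the
// pointer; callers catch 'const char*'. A hypothetical calling sketch (in dyld
// proper, the top-level catch passes the message to halt()):
//
#if 0 // illustrative sketch only, not part of dyld
static void loadSomething(const char* path)
{
    if ( path == NULL )
        throwf("no path supplied");
    // ... attempt the load, calling throwf() on any failure ...
}

static void example(const char* path)
{
    try {
        loadSomething(path);
    }
    catch (const char* msg) {
        log("dyld: %s\n", msg);
    }
}
#endif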
456
457
458 #if !TARGET_IPHONE_SIMULATOR
459 static int sLogfile = STDERR_FILENO;
460 #endif
461
462 #if !TARGET_IPHONE_SIMULATOR
463 // based on CFUtilities.c: also_do_stderr()
464 static bool useSyslog()
465 {
466 // Use syslog() for processes managed by launchd
467 static bool launchdChecked = false;
468 static bool launchdOwned = false;
469 if ( !launchdChecked && gProcessInfo->libSystemInitialized ) {
470 if ( (gLibSystemHelpers != NULL) && (gLibSystemHelpers->version >= 11) ) {
471 // <rdar://problem/23520449> only call isLaunchdOwned() after libSystem is initialized
472 launchdOwned = (*gLibSystemHelpers->isLaunchdOwned)();
473 launchdChecked = true;
474 }
475 }
476 if ( launchdChecked && launchdOwned )
477 return true;
478
479 // If stderr is not available, use syslog()
480 struct stat sb;
481 int result = fstat(STDERR_FILENO, &sb);
482 if ( result < 0 )
483 return true; // file descriptor 2 is closed
484
485 return false;
486 }
487
488
489 static void socket_syslogv(int priority, const char* format, va_list list)
490 {
491 // lazily create socket and connection to syslogd
492 if ( sLogSocket == -1 ) {
493 sLogSocket = ::socket(AF_UNIX, SOCK_DGRAM, 0);
494 if (sLogSocket == -1)
495 return; // cannot log
496 ::fcntl(sLogSocket, F_SETFD, 1);
497
498 struct sockaddr_un addr;
499 addr.sun_family = AF_UNIX;
500 strncpy(addr.sun_path, _PATH_LOG, sizeof(addr.sun_path));
501 if ( ::connect(sLogSocket, (struct sockaddr *)&addr, sizeof(addr)) == -1 ) {
502 ::close(sLogSocket);
503 sLogSocket = -1;
504 return;
505 }
506 }
507
508 // format message to syslogd like: "<priority>Process[pid]: message"
509 _SIMPLE_STRING buf = _simple_salloc();
510 if ( buf == NULL )
511 return;
512 if ( _simple_sprintf(buf, "<%d>%s[%d]: ", LOG_USER|LOG_NOTICE, sExecShortName, getpid()) == 0 ) {
513 if ( _simple_vsprintf(buf, format, list) == 0 ) {
514 const char* p = _simple_string(buf);
515 ::__sendto(sLogSocket, p, strlen(p), 0, NULL, 0);
516 }
517 }
518 _simple_sfree(buf);
519 }
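// Example of the resulting datagram (hypothetical process name "MyApp", pid 123):
//     <13>MyApp[123]: dyld: some message
// where 13 is LOG_USER|LOG_NOTICE (8|5), the priority formatted above.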
520
521
522
523 void vlog(const char* format, va_list list)
524 {
525 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
526 // <rdar://problem/25965832> log to console when running iOS app from Xcode
527 if ( !sLogToFile && !sForceStderr && useSyslog() )
528 #else
529 if ( !sLogToFile && useSyslog() )
530 #endif
531 socket_syslogv(LOG_ERR, format, list);
532 else {
533 _simple_vdprintf(sLogfile, format, list);
534 }
535 }
536
537 void log(const char* format, ...)
538 {
539 va_list list;
540 va_start(list, format);
541 vlog(format, list);
542 va_end(list);
543 }
544
545
546 void vwarn(const char* format, va_list list)
547 {
548 _simple_dprintf(sLogfile, "dyld: warning, ");
549 _simple_vdprintf(sLogfile, format, list);
550 }
551
552 void warn(const char* format, ...)
553 {
554 va_list list;
555 va_start(list, format);
556 vwarn(format, list);
557 va_end(list);
558 }
559
560
561 #endif // !TARGET_IPHONE_SIMULATOR
562
563
564 // <rdar://problem/8867781> control access to sAllImages through a lock
565 // because global dyld lock is not held during initialization phase of dlopen()
566 // <rdar://problem/16145518> Use OSSpinLockLock to allow yielding
567 static OSSpinLock sAllImagesLock = 0;
568
569 static void allImagesLock()
570 {
571 OSSpinLockLock(&sAllImagesLock);
572 }
573
574 static void allImagesUnlock()
575 {
576 OSSpinLockUnlock(&sAllImagesLock);
577 }
578
579
580 // utility class to ensure files are closed when an exception is thrown
581 class FileOpener {
582 public:
583 FileOpener(const char* path);
584 ~FileOpener();
585 int getFileDescriptor() { return fd; }
586 private:
587 int fd;
588 };
589
590 FileOpener::FileOpener(const char* path)
591 : fd(-1)
592 {
593 fd = my_open(path, O_RDONLY, 0);
594 }
595
596 FileOpener::~FileOpener()
597 {
598 if ( fd != -1 )
599 close(fd);
600 }
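//
// Hypothetical use of the RAII guard above: the descriptor is closed when 'opener'
// goes out of scope, including when throwf() raises an exception mid-way.
//
#if 0 // illustrative sketch only, not part of dyld
static void readHeader(const char* path, void* buffer, size_t size)
{
    FileOpener opener(path);
    int fd = opener.getFileDescriptor();
    if ( fd == -1 )
        throwf("cannot open file %s, errno=%d", path, errno);
    if ( pread(fd, buffer, size, 0) != (ssize_t)size )
        throwf("cannot read file %s", path);
}   // ~FileOpener() closes fd here, even if throwf() was called
#endif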
601
602
603 static void registerDOFs(const std::vector<ImageLoader::DOFInfo>& dofs)
604 {
605 const size_t dofSectionCount = dofs.size();
606 if ( !sEnv.DYLD_DISABLE_DOFS && (dofSectionCount != 0) ) {
607 int fd = open("/dev/" DTRACEMNR_HELPER, O_RDWR);
608 if ( fd < 0 ) {
609 //dyld::warn("can't open /dev/" DTRACEMNR_HELPER " to register dtrace DOF sections\n");
610 }
611 else {
612 // allocate a buffer on the stack for the variable length dof_ioctl_data_t type
613 uint8_t buffer[sizeof(dof_ioctl_data_t) + dofSectionCount*sizeof(dof_helper_t)];
614 dof_ioctl_data_t* ioctlData = (dof_ioctl_data_t*)buffer;
615
616 // fill in buffer with one dof_helper_t per DOF section
617 ioctlData->dofiod_count = dofSectionCount;
618 for (unsigned int i=0; i < dofSectionCount; ++i) {
619 strlcpy(ioctlData->dofiod_helpers[i].dofhp_mod, dofs[i].imageShortName, DTRACE_MODNAMELEN);
620 ioctlData->dofiod_helpers[i].dofhp_dof = (uintptr_t)(dofs[i].dof);
621 ioctlData->dofiod_helpers[i].dofhp_addr = (uintptr_t)(dofs[i].dof);
622 }
623
624 // tell the kernel about all DOF sections en masse
625 // pass a pointer to ioctlData because ioctl() only copies a fixed-size amount of data into the kernel
626 user_addr_t val = (user_addr_t)(unsigned long)ioctlData;
627 if ( ioctl(fd, DTRACEHIOC_ADDDOF, &val) != -1 ) {
628 // kernel returns a unique identifier for each section in the dofiod_helpers[].dofhp_dof field.
629 for (unsigned int i=0; i < dofSectionCount; ++i) {
630 RegisteredDOF info;
631 info.mh = dofs[i].imageHeader;
632 info.registrationID = (int)(ioctlData->dofiod_helpers[i].dofhp_dof);
633 sImageFilesNeedingDOFUnregistration.push_back(info);
634 if ( gLinkContext.verboseDOF ) {
635 dyld::log("dyld: registering DOF section %p in %s with dtrace, ID=0x%08X\n",
636 dofs[i].dof, dofs[i].imageShortName, info.registrationID);
637 }
638 }
639 }
640 else {
641 //dyld::log( "dyld: ioctl to register dtrace DOF section failed\n");
642 }
643 close(fd);
644 }
645 }
646 }
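//
// The stack buffer above uses a common C idiom for variable-length ioctl payloads:
// a header struct whose last member is a one-element array, allocated as
// sizeof(header) + (n-1)*sizeof(element). A minimal standalone sketch with
// hypothetical types:
//
#if 0 // illustrative sketch only, not part of dyld
#include <cstdint>

struct Element { uint64_t value; };
struct Payload { uint32_t count; Element elems[1]; };   // trailing variable-length array

static void buildPayload(const uint64_t* values, unsigned n)
{
    if ( n == 0 )
        return;
    // room for the header plus (n-1) elements beyond the one declared in Payload
    uint8_t buffer[sizeof(Payload) + (n-1)*sizeof(Element)];
    Payload* p = (Payload*)buffer;
    p->count = n;
    for (unsigned i = 0; i < n; ++i)
        p->elems[i].value = values[i];
    // ... hand 'p' to an ioctl()-style call that consumes 'count' elements ...
}
#endif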
647
648 static void unregisterDOF(int registrationID)
649 {
650 int fd = open("/dev/" DTRACEMNR_HELPER, O_RDWR);
651 if ( fd < 0 ) {
652 dyld::warn("can't open /dev/" DTRACEMNR_HELPER " to unregister dtrace DOF section\n");
653 }
654 else {
655 ioctl(fd, DTRACEHIOC_REMOVE, registrationID);
656 close(fd);
657 if ( gLinkContext.verboseInit )
658 dyld::warn("unregistering DOF section ID=0x%08X with dtrace\n", registrationID);
659 }
660 }
661
662
663 //
664 // _dyld_register_func_for_add_image() is implemented as part of the general image state change notification
665 //
666 static void notifyAddImageCallbacks(ImageLoader* image)
667 {
668 // use guard so that we cannot notify about the same image twice
669 if ( ! image->addFuncNotified() ) {
670 for (std::vector<ImageCallback>::iterator it=sAddImageCallbacks.begin(); it != sAddImageCallbacks.end(); it++)
671 (*it)(image->machHeader(), image->getSlide());
672 image->setAddFuncNotified();
673 }
674 }
675
676
677
678 // notify gdb about these new images
679 static const char* updateAllImages(enum dyld_image_states state, uint32_t infoCount, const struct dyld_image_info info[])
680 {
681 // <rdar://problem/8812589> don't add images without paths to all-image-info-list
682 if ( info[0].imageFilePath != NULL )
683 addImagesToAllImages(infoCount, info);
684 return NULL;
685 }
686
687
688 static StateHandlers* stateToHandlers(dyld_image_states state, void* handlersArray[7][3])
689 {
690 switch ( state ) {
691 case dyld_image_state_mapped:
692 return reinterpret_cast<StateHandlers*>(&handlersArray[0]);
693
694 case dyld_image_state_dependents_mapped:
695 return reinterpret_cast<StateHandlers*>(&handlersArray[1]);
696
697 case dyld_image_state_rebased:
698 return reinterpret_cast<StateHandlers*>(&handlersArray[2]);
699
700 case dyld_image_state_bound:
701 return reinterpret_cast<StateHandlers*>(&handlersArray[3]);
702
703 case dyld_image_state_dependents_initialized:
704 return reinterpret_cast<StateHandlers*>(&handlersArray[4]);
705
706 case dyld_image_state_initialized:
707 return reinterpret_cast<StateHandlers*>(&handlersArray[5]);
708
709 case dyld_image_state_terminated:
710 return reinterpret_cast<StateHandlers*>(&handlersArray[6]);
711 }
712 return NULL;
713 }
714
715 #if SUPPORT_ACCELERATE_TABLES
716 static dyld_image_state_change_handler getPreInitNotifyHandler(unsigned index)
717 {
718 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(dyld_image_state_dependents_initialized, sSingleHandlers);
719 if ( index >= handlers->size() )
720 return NULL;
721 return (*handlers)[index];
722 }
723
724 static dyld_image_state_change_handler getBoundBatchHandler(unsigned index)
725 {
726 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(dyld_image_state_bound, sBatchHandlers);
727 if ( index >= handlers->size() )
728 return NULL;
729 return (*handlers)[index];
730 }
731
732 static void notifySingleFromCache(dyld_image_states state, const mach_header* mh, const char* path)
733 {
734 //dyld::log("notifySingle(state=%d, image=%s)\n", state, image->getPath());
735 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sSingleHandlers);
736 if ( handlers != NULL ) {
737 dyld_image_info info;
738 info.imageLoadAddress = mh;
739 info.imageFilePath = path;
740 info.imageFileModDate = 0;
741 for (dyld_image_state_change_handler handler : *handlers) {
742 const char* result = (*handler)(state, 1, &info);
743 if ( (result != NULL) && (state == dyld_image_state_mapped) ) {
744 //fprintf(stderr, " image rejected by handler=%p\n", *it);
745 // make copy of thrown string so that later catch clauses can free it
746 const char* str = strdup(result);
747 throw str;
748 }
749 }
750 }
751 if ( (state == dyld_image_state_dependents_initialized) && (sNotifyObjCInit != NULL) && (mh->flags & MH_HAS_OBJC) ) {
752 (*sNotifyObjCInit)(path, mh);
753 }
754 }
755 #endif
756
757 static mach_port_t sNotifyReplyPorts[DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT];
758
759
760 static void notifyMonitoringDyld(bool unloading, unsigned portSlot, unsigned imageCount, const dyld_image_info infos[])
761 {
762 unsigned entriesSize = imageCount*sizeof(dyld_process_info_image_entry);
763 unsigned pathsSize = 0;
764 for (unsigned j=0; j < imageCount; ++j) {
765 pathsSize += (strlen(infos[j].imageFilePath) + 1);
766 }
767 unsigned totalSize = (sizeof(dyld_process_info_notify_header) + entriesSize + pathsSize + 127) & -128; // align
768 if ( totalSize > DYLD_PROCESS_INFO_NOTIFY_MAX_BUFFER_SIZE ) {
769 // Putting all image paths into one message would make buffer too big.
770 // Instead split into two messages. Recurse as needed until paths fit in buffer.
771 unsigned imageHalfCount = imageCount/2;
772 notifyMonitoringDyld(unloading, portSlot, imageHalfCount, infos);
773 notifyMonitoringDyld(unloading, portSlot, imageCount - imageHalfCount, &infos[imageHalfCount]);
774 return;
775 }
776 uint8_t buffer[totalSize];
777 dyld_process_info_notify_header* header = (dyld_process_info_notify_header*)buffer;
778 header->version = 1;
779 header->imageCount = imageCount;
780 header->imagesOffset = sizeof(dyld_process_info_notify_header);
781 header->stringsOffset = sizeof(dyld_process_info_notify_header) + entriesSize;
782 header->timestamp = dyld::gProcessInfo->infoArrayChangeTimestamp;
783 dyld_process_info_image_entry* entries = (dyld_process_info_image_entry*)&buffer[header->imagesOffset];
784 char* const pathPoolStart = (char*)&buffer[header->stringsOffset];
785 char* pathPool = pathPoolStart;
786 for (unsigned j=0; j < imageCount; ++j) {
787 strcpy(pathPool, infos[j].imageFilePath);
788 uint32_t len = (uint32_t)strlen(pathPool);
789 bzero(entries->uuid, 16);
790 const ImageLoader* image = findImageByMachHeader(infos[j].imageLoadAddress);
791 if ( image != NULL ) {
792 image->getUUID(entries->uuid);
793 }
794 #if SUPPORT_ACCELERATE_TABLES
795 else if ( sAllCacheImagesProxy != NULL ) {
796 const mach_header* mh;
797 const char* path;
798 unsigned index;
799 if ( sAllCacheImagesProxy->addressInCache(infos[j].imageLoadAddress, &mh, &path, &index) ) {
800 sAllCacheImagesProxy->getDylibUUID(index, entries->uuid);
801 }
802 }
803 #endif
804 entries->loadAddress = (uint64_t)infos[j].imageLoadAddress;
805 entries->pathStringOffset = (uint32_t)(pathPool - pathPoolStart);
806 entries->pathLength = len;
807 pathPool += (len +1);
808 ++entries;
809 }
810
811 if ( sNotifyReplyPorts[portSlot] == 0 ) {
812 if ( !mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &sNotifyReplyPorts[portSlot]) )
813 mach_port_insert_right(mach_task_self(), sNotifyReplyPorts[portSlot], sNotifyReplyPorts[portSlot], MACH_MSG_TYPE_MAKE_SEND);
814 //dyld::log("allocated reply port %d\n", sNotifyReplyPorts[portSlot]);
815 }
816 //dyld::log("found port to send to\n");
817 mach_msg_header_t* h = (mach_msg_header_t*)buffer;
818 h->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,MACH_MSG_TYPE_MAKE_SEND); // MACH_MSG_TYPE_MAKE_SEND_ONCE
819 h->msgh_id = unloading ? DYLD_PROCESS_INFO_NOTIFY_UNLOAD_ID : DYLD_PROCESS_INFO_NOTIFY_LOAD_ID;
820 h->msgh_local_port = sNotifyReplyPorts[portSlot];
821 h->msgh_remote_port = dyld::gProcessInfo->notifyPorts[portSlot];
822 h->msgh_reserved = 0;
823 h->msgh_size = (mach_msg_size_t)sizeof(buffer);
824 //dyld::log("sending to port[%d]=%d, size=%d, reply port=%d, id=0x%X\n", portSlot, dyld::gProcessInfo->notifyPorts[portSlot], h->msgh_size, sNotifyReplyPorts[portSlot], h->msgh_id);
825 kern_return_t sendResult = mach_msg(h, MACH_SEND_MSG | MACH_RCV_MSG | MACH_SEND_TIMEOUT, h->msgh_size, h->msgh_size, sNotifyReplyPorts[portSlot], 100, MACH_PORT_NULL);
826 //dyld::log("send result = 0x%X, msg_id=%d, msg_size=%d\n", sendResult, h->msgh_id, h->msgh_size);
827 if ( sendResult == MACH_SEND_INVALID_DEST ) {
828 // sender is not responding, detach
829 //dyld::log("process requesting notification gone. deallocating send port %d and receive port %d\n", dyld::gProcessInfo->notifyPorts[portSlot], sNotifyReplyPorts[portSlot]);
830 mach_port_deallocate(mach_task_self(), dyld::gProcessInfo->notifyPorts[portSlot]);
831 mach_port_deallocate(mach_task_self(), sNotifyReplyPorts[portSlot]);
832 dyld::gProcessInfo->notifyPorts[portSlot] = 0;
833 sNotifyReplyPorts[portSlot] = 0;
834 }
835 }
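//
// The message built above is laid out as a single buffer:
//
//     [ dyld_process_info_notify_header ][ imageCount image entries ][ NUL-terminated path pool ]
//
// rounded up to a multiple of 128 bytes; the same buffer is then reused as the
// outgoing mach message (a mach_msg_header_t is overlaid at offset 0). A sketch of
// just the size computation (hypothetical helper name; mirrors the arithmetic above):
//
#if 0 // illustrative sketch only, not part of dyld
static unsigned notifyBufferSize(unsigned imageCount, const dyld_image_info infos[])
{
    unsigned entriesSize = imageCount * sizeof(dyld_process_info_image_entry);
    unsigned pathsSize   = 0;
    for (unsigned j = 0; j < imageCount; ++j)
        pathsSize += (unsigned)strlen(infos[j].imageFilePath) + 1;    // +1 for the NUL
    // (x + 127) & -128 rounds x up to the next multiple of 128
    return (unsigned)((sizeof(dyld_process_info_notify_header) + entriesSize + pathsSize + 127) & -128);
}
#endif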
836
837 static void notifyMonitoringDyldMain()
838 {
839 for (int slot=0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++slot) {
840 if ( dyld::gProcessInfo->notifyPorts[slot] != 0 ) {
841 if ( sNotifyReplyPorts[slot] == 0 ) {
842 if ( !mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &sNotifyReplyPorts[slot]) )
843 mach_port_insert_right(mach_task_self(), sNotifyReplyPorts[slot], sNotifyReplyPorts[slot], MACH_MSG_TYPE_MAKE_SEND);
844 //dyld::log("allocated reply port %d\n", sNotifyReplyPorts[slot]);
845 }
846 //dyld::log("found port to send to\n");
847 mach_msg_header_t h;
848 h.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,MACH_MSG_TYPE_MAKE_SEND); // MACH_MSG_TYPE_MAKE_SEND_ONCE
849 h.msgh_id = DYLD_PROCESS_INFO_NOTIFY_MAIN_ID;
850 h.msgh_local_port = sNotifyReplyPorts[slot];
851 h.msgh_remote_port = dyld::gProcessInfo->notifyPorts[slot];
852 h.msgh_reserved = 0;
853 h.msgh_size = (mach_msg_size_t)sizeof(mach_msg_header_t);
854 //dyld::log("sending to port[%d]=%d, size=%d, reply port=%d, id=0x%X\n", slot, dyld::gProcessInfo->notifyPorts[slot], h.msgh_size, sNotifyReplyPorts[slot], h.msgh_id);
855 kern_return_t sendResult = mach_msg(&h, MACH_SEND_MSG | MACH_RCV_MSG | MACH_SEND_TIMEOUT, h.msgh_size, h.msgh_size, sNotifyReplyPorts[slot], 100, MACH_PORT_NULL);
856 //dyld::log("send result = 0x%X, msg_id=%d, msg_size=%d\n", sendResult, h.msgh_id, h.msgh_size);
857 if ( sendResult == MACH_SEND_INVALID_DEST ) {
858 // sender is not responding, detach
859 //dyld::log("process requesting notification gone. deallocating send port %d and receive port %d\n", dyld::gProcessInfo->notifyPorts[slot], sNotifyReplyPorts[slot]);
860 mach_port_deallocate(mach_task_self(), dyld::gProcessInfo->notifyPorts[slot]);
861 mach_port_deallocate(mach_task_self(), sNotifyReplyPorts[slot]);
862 dyld::gProcessInfo->notifyPorts[slot] = 0;
863 sNotifyReplyPorts[slot] = 0;
864 }
865 }
866 }
867 }
868
869 #define MAX_KERNEL_IMAGES_PER_CALL (100)
870
871 static void flushKernelNotifications(bool loading, bool force, std::array<dyld_kernel_image_info_t,MAX_KERNEL_IMAGES_PER_CALL>& kernelInfos, uint32_t &kernelInfoCount) {
872 if ((force && kernelInfoCount != 0) || kernelInfoCount == MAX_KERNEL_IMAGES_PER_CALL) {
873 if (loading) {
874 task_register_dyld_image_infos(mach_task_self(), kernelInfos.data(), kernelInfoCount);
875 } else {
876 task_unregister_dyld_image_infos(mach_task_self(), kernelInfos.data(), kernelInfoCount);
877 }
878 kernelInfoCount = 0;
879 }
880 }
881
882 static
883 void queueKernelNotification(const ImageLoader& image, bool loading, std::array<dyld_kernel_image_info_t,MAX_KERNEL_IMAGES_PER_CALL>& kernelInfos, uint32_t &kernelInfoCount) {
884 if ( !image.inSharedCache() ) {
885 ino_t inode = image.getInode();
886 image.getUUID(kernelInfos[kernelInfoCount].uuid);
887 memcpy(&kernelInfos[kernelInfoCount].fsobjid, &inode, 8);
888 kernelInfos[kernelInfoCount].load_addr = (uint64_t)image.machHeader();
889 // FIXME we should also be grabbing the device ID, but that is not necessary yet,
890 // and requires threading it through the ImageLoader
891 kernelInfos[kernelInfoCount].fsid.val[0] = 0;
892 kernelInfos[kernelInfoCount].fsid.val[1] = 0;
893 kernelInfoCount++;
894 }
895 flushKernelNotifications(loading, false, kernelInfos, kernelInfoCount);
896 }
897
898 void notifyKernel(const ImageLoader& image, bool loading) {
899 std::array<dyld_kernel_image_info_t,MAX_KERNEL_IMAGES_PER_CALL> kernelInfos;
900 uint32_t kernelInfoCount = 0;
901 queueKernelNotification(image, loading, kernelInfos, kernelInfoCount);
902 flushKernelNotifications(loading, true, kernelInfos, kernelInfoCount);
903 }
904
905 static void notifySingle(dyld_image_states state, const ImageLoader* image, ImageLoader::InitializerTimingList* timingInfo)
906 {
907 //dyld::log("notifySingle(state=%d, image=%s)\n", state, image->getPath());
908 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sSingleHandlers);
909 if ( handlers != NULL ) {
910 dyld_image_info info;
911 info.imageLoadAddress = image->machHeader();
912 info.imageFilePath = image->getRealPath();
913 info.imageFileModDate = image->lastModified();
914 for (std::vector<dyld_image_state_change_handler>::iterator it = handlers->begin(); it != handlers->end(); ++it) {
915 const char* result = (*it)(state, 1, &info);
916 if ( (result != NULL) && (state == dyld_image_state_mapped) ) {
917 //fprintf(stderr, " image rejected by handler=%p\n", *it);
918 // make copy of thrown string so that later catch clauses can free it
919 const char* str = strdup(result);
920 throw str;
921 }
922 }
923 }
924 if ( state == dyld_image_state_mapped ) {
925 // <rdar://problem/7008875> Save load addr + UUID for images from outside the shared cache
926 if ( !image->inSharedCache() ) {
927 dyld_uuid_info info;
928 if ( image->getUUID(info.imageUUID) ) {
929 info.imageLoadAddress = image->machHeader();
930 addNonSharedCacheImageUUID(info);
931 }
932 }
933 }
934 if ( (state == dyld_image_state_dependents_initialized) && (sNotifyObjCInit != NULL) && image->notifyObjC() ) {
935 uint64_t t0 = mach_absolute_time();
936 (*sNotifyObjCInit)(image->getRealPath(), image->machHeader());
937 uint64_t t1 = mach_absolute_time();
938 uint64_t t2 = mach_absolute_time();
939 uint64_t timeInObjC = t1-t0;
940 uint64_t emptyTime = (t2-t1)*100;
941 if ( (timeInObjC > emptyTime) && (timingInfo != NULL) ) {
942 timingInfo->addTime(image->getShortName(), timeInObjC);
943 }
944 }
945 // send a mach message to csdlc about dynamically unloaded images
946 if ( image->addFuncNotified() && (state == dyld_image_state_terminated) ) {
947 notifyKernel(*image, false);
948
949 uint64_t loadTimestamp = mach_absolute_time();
950 if ( sEnv.DYLD_PRINT_CS_NOTIFICATIONS ) {
951 dyld::log("dyld: coresymbolication_unload_notifier(%p, 0x%016llX, %p, %s)\n",
952 dyld::gProcessInfo->coreSymbolicationShmPage, loadTimestamp, image->machHeader(), image->getPath());
953 }
954 if ( dyld::gProcessInfo->coreSymbolicationShmPage != NULL) {
955 coresymbolication_unload_notifier(dyld::gProcessInfo->coreSymbolicationShmPage, loadTimestamp, image->getPath(), image->machHeader());
956 }
957 for (int slot=0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++slot) {
958 if ( dyld::gProcessInfo->notifyPorts[slot] != 0 ) {
959 dyld_image_info info;
960 info.imageLoadAddress = image->machHeader();
961 info.imageFilePath = image->getPath();
962 info.imageFileModDate = 0;
963 notifyMonitoringDyld(true, slot, 1, &info);
964 }
965 else if ( sNotifyReplyPorts[slot] != 0 ) {
966 // monitoring process detached from this process, so release reply port
967 //dyld::log("deallocated reply port %d\n", sNotifyReplyPorts[slot]);
968 mach_port_deallocate(mach_task_self(), sNotifyReplyPorts[slot]);
969 sNotifyReplyPorts[slot] = 0;
970 }
971 }
972 }
973
974 }
975
976
977 //
978 // Normally, dyld_all_image_infos is only updated in batches after an entire
979 // graph is loaded. But if there is an error loading the initial set of
980 // dylibs needed by the main executable, dyld_all_image_infos is not yet set
981 // up, which usually leads to brief, unhelpful crash logs.
982 //
983 // This function manually adds the images loaded so far to dyld::gProcessInfo.
984 // It should only be called before terminating.
985 //
986 void syncAllImages()
987 {
988 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); ++it) {
989 dyld_image_info info;
990 ImageLoader* image = *it;
991 info.imageLoadAddress = image->machHeader();
992 info.imageFilePath = image->getRealPath();
993 info.imageFileModDate = image->lastModified();
994 // add to all_image_infos if not already there
995 bool found = false;
996 int existingCount = dyld::gProcessInfo->infoArrayCount;
997 const dyld_image_info* existing = dyld::gProcessInfo->infoArray;
998 if ( existing != NULL ) {
999 for (int i=0; i < existingCount; ++i) {
1000 if ( existing[i].imageLoadAddress == info.imageLoadAddress ) {
1001 //dyld::log("not adding %s\n", info.imageFilePath);
1002 found = true;
1003 break;
1004 }
1005 }
1006 }
1007 if ( ! found ) {
1008 //dyld::log("adding %s\n", info.imageFilePath);
1009 addImagesToAllImages(1, &info);
1010 }
1011 }
1012 }
1013
1014
1015 static int imageSorter(const void* l, const void* r)
1016 {
1017 const ImageLoader* left = *((ImageLoader**)l);
1018 const ImageLoader* right= *((ImageLoader**)r);
1019 return left->compare(right);
1020 }
1021
1022 static void notifyBatchPartial(dyld_image_states state, bool orLater, dyld_image_state_change_handler onlyHandler, bool preflightOnly, bool onlyObjCMappedNotification)
1023 {
1024 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sBatchHandlers);
1025 std::array<dyld_kernel_image_info_t,MAX_KERNEL_IMAGES_PER_CALL> kernelInfos;
1026 uint32_t kernelInfoCount = 0;
1027
1028 if ( (handlers != NULL) || ((state == dyld_image_state_bound) && (sNotifyObjCMapped != NULL)) ) {
1029 // don't use a vector because it would use malloc/free and we want notification to be low cost
1030 allImagesLock();
1031 dyld_image_info infos[allImagesCount()+1];
1032 ImageLoader* images[allImagesCount()+1];
1033 ImageLoader** end = images;
1034 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
1035 dyld_image_states imageState = (*it)->getState();
1036 if ( (imageState == state) || (orLater && (imageState > state)) )
1037 *end++ = *it;
1038 }
1039 if ( sBundleBeingLoaded != NULL ) {
1040 dyld_image_states imageState = sBundleBeingLoaded->getState();
1041 if ( (imageState == state) || (orLater && (imageState > state)) )
1042 *end++ = sBundleBeingLoaded;
1043 }
1044 const char* dontLoadReason = NULL;
1045 uint32_t imageCount = (uint32_t)(end-images);
1046 if ( imageCount != 0 ) {
1047 // sort bottom up
1048 qsort(images, imageCount, sizeof(ImageLoader*), &imageSorter);
1049 // build info array
1050 for (unsigned int i=0; i < imageCount; ++i) {
1051 dyld_image_info* p = &infos[i];
1052 ImageLoader* image = images[i];
1053 //dyld::log(" state=%d, name=%s\n", state, image->getPath());
1054 p->imageLoadAddress = image->machHeader();
1055 p->imageFilePath = image->getRealPath();
1056 p->imageFileModDate = image->lastModified();
1057 // get these registered with the kernel as early as possible
1058 if ( state == dyld_image_state_dependents_mapped)
1059 queueKernelNotification(*image, true, kernelInfos, kernelInfoCount);
1060 // special case for add_image hook
1061 if ( state == dyld_image_state_bound )
1062 notifyAddImageCallbacks(image);
1063 }
1064 flushKernelNotifications(true, true, kernelInfos, kernelInfoCount);
1065 }
1066 #if SUPPORT_ACCELERATE_TABLES
1067 if ( sAllCacheImagesProxy != NULL ) {
1068 unsigned cacheCount = sAllCacheImagesProxy->appendImagesToNotify(state, orLater, &infos[imageCount]);
1069 // support _dyld_register_func_for_add_image()
1070 if ( state == dyld_image_state_bound ) {
1071 for (ImageCallback callback : sAddImageCallbacks) {
1072 for (unsigned i=0; i < cacheCount; ++i)
1073 (*callback)(infos[imageCount+i].imageLoadAddress, sSharedCacheSlide);
1074 }
1075 }
1076 imageCount += cacheCount;
1077 }
1078 #endif
1079 if ( imageCount != 0 ) {
1080 if ( !onlyObjCMappedNotification ) {
1081 if ( onlyHandler != NULL ) {
1082 const char* result = NULL;
1083 if ( result == NULL ) {
1084 result = (*onlyHandler)(state, imageCount, infos);
1085 }
1086 if ( (result != NULL) && (state == dyld_image_state_dependents_mapped) ) {
1087 //fprintf(stderr, " images rejected by handler=%p\n", onlyHandler);
1088 // make copy of thrown string so that later catch clauses can free it
1089 dontLoadReason = strdup(result);
1090 }
1091 }
1092 else {
1093 // call each handler with whole array
1094 if ( handlers != NULL ) {
1095 for (std::vector<dyld_image_state_change_handler>::iterator it = handlers->begin(); it != handlers->end(); ++it) {
1096 const char* result = (*it)(state, imageCount, infos);
1097 if ( (result != NULL) && (state == dyld_image_state_dependents_mapped) ) {
1098 //fprintf(stderr, " images rejected by handler=%p\n", *it);
1099 // make copy of thrown string so that later catch clauses can free it
1100 dontLoadReason = strdup(result);
1101 break;
1102 }
1103 }
1104 }
1105 }
1106 }
1107 // tell objc about new images
1108 if ( (onlyHandler == NULL) && ((state == dyld_image_state_bound) || (orLater && (dyld_image_state_bound > state))) && (sNotifyObjCMapped != NULL) ) {
1109 const char* paths[imageCount];
1110 const mach_header* mhs[imageCount];
1111 unsigned objcImageCount = 0;
1112 for (int i=0; i < imageCount; ++i) {
1113 const ImageLoader* image = findImageByMachHeader(infos[i].imageLoadAddress);
1114 bool hasObjC = false;
1115 if ( image != NULL ) {
1116 hasObjC = image->notifyObjC();
1117 }
1118 #if SUPPORT_ACCELERATE_TABLES
1119 else if ( sAllCacheImagesProxy != NULL ) {
1120 const mach_header* mh;
1121 const char* path;
1122 unsigned index;
1123 if ( sAllCacheImagesProxy->addressInCache(infos[i].imageLoadAddress, &mh, &path, &index) ) {
1124 hasObjC = (mh->flags & MH_HAS_OBJC);
1125 }
1126 }
1127 #endif
1128 if ( hasObjC ) {
1129 paths[objcImageCount] = infos[i].imageFilePath;
1130 mhs[objcImageCount] = infos[i].imageLoadAddress;
1131 ++objcImageCount;
1132 }
1133 }
1134 if ( objcImageCount != 0 ) {
1135 uint64_t t0 = mach_absolute_time();
1136 (*sNotifyObjCMapped)(objcImageCount, paths, mhs);
1137 uint64_t t1 = mach_absolute_time();
1138 ImageLoader::fgTotalObjCSetupTime += (t1-t0);
1139 }
1140 }
1141 }
1142 allImagesUnlock();
1143 if ( dontLoadReason != NULL )
1144 throw dontLoadReason;
1145 if ( !preflightOnly && (state == dyld_image_state_dependents_mapped) ) {
1146 if ( (dyld::gProcessInfo->coreSymbolicationShmPage != NULL) || sEnv.DYLD_PRINT_CS_NOTIFICATIONS ) {
1147 // send a mach message to csdlc about loaded images
1148 uint64_t loadTimestamp = mach_absolute_time();
1149 for (unsigned j=0; j < imageCount; ++j) {
1150 if ( sEnv.DYLD_PRINT_CS_NOTIFICATIONS ) {
1151 dyld::log("dyld: coresymbolication_load_notifier(%p, 0x%016llX, %p, %s)\n",
1152 dyld::gProcessInfo->coreSymbolicationShmPage, loadTimestamp, infos[j].imageLoadAddress, infos[j].imageFilePath);
1153 }
1154 coresymbolication_load_notifier(dyld::gProcessInfo->coreSymbolicationShmPage, loadTimestamp, infos[j].imageFilePath, infos[j].imageLoadAddress);
1155 }
1156 }
1157 for (int slot=0; slot < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; ++slot) {
1158 if ( dyld::gProcessInfo->notifyPorts[slot] )
1159 notifyMonitoringDyld(false, slot, imageCount, infos);
1160 }
1161 }
1162 }
1163 }
1164
1165
1166
1167 static void notifyBatch(dyld_image_states state, bool preflightOnly)
1168 {
1169 notifyBatchPartial(state, false, NULL, preflightOnly, false);
1170 }
1171
1172 // In order for register_func_for_add_image() callbacks to be called bottom up,
1173 // we need to maintain a list of root images. The main executable is usually the
1174 // first root. Any images dynamically added are also roots (unless already loaded).
1175 // If DYLD_INSERT_LIBRARIES is used, those libraries are first.
1176 static void addRootImage(ImageLoader* image)
1177 {
1178 //dyld::log("addRootImage(%p, %s)\n", image, image->getPath());
1179 // add to list of roots
1180 sImageRoots.push_back(image);
1181 }
1182
1183
1184 static void clearAllDepths()
1185 {
1186 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++)
1187 (*it)->clearDepth();
1188 }
1189
1190 static void printAllDepths()
1191 {
1192 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++)
1193 dyld::log("%03d %s\n", (*it)->getDepth(), (*it)->getShortName());
1194 }
1195
1196
1197 static unsigned int imageCount()
1198 {
1199 allImagesLock();
1200 unsigned int result = (unsigned int)sAllImages.size();
1201 allImagesUnlock();
1202 return (result);
1203 }
1204
1205
1206 static void setNewProgramVars(const ProgramVars& newVars)
1207 {
1208 // make a copy of the pointers to program variables
1209 gLinkContext.programVars = newVars;
1210
1211 // now set each program global to their initial value
1212 *gLinkContext.programVars.NXArgcPtr = gLinkContext.argc;
1213 *gLinkContext.programVars.NXArgvPtr = gLinkContext.argv;
1214 *gLinkContext.programVars.environPtr = gLinkContext.envp;
1215 *gLinkContext.programVars.__prognamePtr = gLinkContext.progname;
1216 }
1217
1218 #if SUPPORT_OLD_CRT_INITIALIZATION
1219 static void setRunInitialzersOldWay()
1220 {
1221 gRunInitializersOldWay = true;
1222 }
1223 #endif
1224
1225 static bool sandboxBlocked(const char* path, const char* kind)
1226 {
1227 #if TARGET_IPHONE_SIMULATOR
1228 // sandbox calls not yet supported in simulator runtime
1229 return false;
1230 #else
1231 sandbox_filter_type filter = (sandbox_filter_type)(SANDBOX_FILTER_PATH | SANDBOX_CHECK_NO_REPORT);
1232 return ( sandbox_check(getpid(), kind, filter, path) > 0 );
1233 #endif
1234 }
1235
1236 bool sandboxBlockedMmap(const char* path)
1237 {
1238 return sandboxBlocked(path, "file-map-executable");
1239 }
1240
1241 bool sandboxBlockedOpen(const char* path)
1242 {
1243 return sandboxBlocked(path, "file-read-data");
1244 }
1245
1246 bool sandboxBlockedStat(const char* path)
1247 {
1248 return sandboxBlocked(path, "file-read-metadata");
1249 }
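// Hypothetical usage: before mapping or reading a candidate dylib at 'path', the
// loading code can consult the sandbox first, e.g.
//     if ( sandboxBlockedMmap(path) )
//         throwf("file system sandbox blocked mmap() of '%s'", path);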
1250
1251
1252 static void addDynamicReference(ImageLoader* from, ImageLoader* to) {
1253 // don't add dynamic reference if target is in the shared cache (since it can't be unloaded)
1254 if ( to->inSharedCache() )
1255 return;
1256
1257 // don't add dynamic reference if there already is a static one
1258 if ( from->dependsOn(to) )
1259 return;
1260
1261 // don't add if this combination already exists
1262 OSSpinLockLock(&sDynamicReferencesLock);
1263 for (std::vector<ImageLoader::DynamicReference>::iterator it=sDynamicReferences.begin(); it != sDynamicReferences.end(); ++it) {
1264 if ( (it->from == from) && (it->to == to) ) {
1265 OSSpinLockUnlock(&sDynamicReferencesLock);
1266 return;
1267 }
1268 }
1269
1270 //dyld::log("addDynamicReference(%s, %s\n", from->getShortName(), to->getShortName());
1271 ImageLoader::DynamicReference t;
1272 t.from = from;
1273 t.to = to;
1274 sDynamicReferences.push_back(t);
1275 OSSpinLockUnlock(&sDynamicReferencesLock);
1276 }
1277
1278 static void addImage(ImageLoader* image)
1279 {
1280 // add to master list
1281 allImagesLock();
1282 sAllImages.push_back(image);
1283 allImagesUnlock();
1284
1285 // update mapped ranges
1286 uintptr_t lastSegStart = 0;
1287 uintptr_t lastSegEnd = 0;
1288 for(unsigned int i=0, e=image->segmentCount(); i < e; ++i) {
1289 if ( image->segUnaccessible(i) )
1290 continue;
1291 uintptr_t start = image->segActualLoadAddress(i);
1292 uintptr_t end = image->segActualEndAddress(i);
1293 if ( start == lastSegEnd ) {
1294 // two segments are contiguous, just record combined segments
1295 lastSegEnd = end;
1296 }
1297 else {
1298 // non-contiguous segments, record last (if any)
1299 if ( lastSegEnd != 0 )
1300 addMappedRange(image, lastSegStart, lastSegEnd);
1301 lastSegStart = start;
1302 lastSegEnd = end;
1303 }
1304 }
1305 if ( lastSegEnd != 0 )
1306 addMappedRange(image, lastSegStart, lastSegEnd);
1307
1308
1309 if ( gLinkContext.verboseLoading || (sEnv.DYLD_PRINT_LIBRARIES_POST_LAUNCH && (sMainExecutable!=NULL) && sMainExecutable->isLinked()) ) {
1310 dyld::log("dyld: loaded: %s\n", image->getPath());
1311 }
1312
1313 }
1314
1315 //
1316 // Helper for std::remove_if
1317 //
1318 class RefUsesImage {
1319 public:
1320 RefUsesImage(ImageLoader* image) : _image(image) {}
1321 bool operator()(const ImageLoader::DynamicReference& ref) const {
1322 return ( (ref.from == _image) || (ref.to == _image) );
1323 }
1324 private:
1325 ImageLoader* _image;
1326 };
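//
// RefUsesImage is the predicate for the erase/remove_if idiom used in removeImage()
// below to drop every dynamic reference that mentions an image. A minimal standalone
// sketch of the idiom (hypothetical types):
//
#if 0 // illustrative sketch only, not part of dyld
#include <algorithm>
#include <vector>

struct Ref { int from; int to; };

static void dropRefsMentioning(std::vector<Ref>& refs, int node)
{
    // std::remove_if moves the elements to keep to the front and returns the new
    // logical end; erase() then discards the leftover tail in one pass.
    refs.erase(std::remove_if(refs.begin(), refs.end(),
                              [node](const Ref& r) { return (r.from == node) || (r.to == node); }),
               refs.end());
}
#endif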
1327
1328
1329
1330 void removeImage(ImageLoader* image)
1331 {
1332 // if has dtrace DOF section, tell dtrace it is going away, then remove from sImageFilesNeedingDOFUnregistration
1333 for (std::vector<RegisteredDOF>::iterator it=sImageFilesNeedingDOFUnregistration.begin(); it != sImageFilesNeedingDOFUnregistration.end(); ) {
1334 if ( it->mh == image->machHeader() ) {
1335 unregisterDOF(it->registrationID);
1336 sImageFilesNeedingDOFUnregistration.erase(it);
1337 // don't increment iterator, the erase caused next element to be copied to where this iterator points
1338 }
1339 else {
1340 ++it;
1341 }
1342 }
1343
1344 // tell all registered remove image handlers about this
1345 // do this before removing image from internal data structures so that the callback can query dyld about the image
1346 if ( image->getState() >= dyld_image_state_bound ) {
1347 sRemoveImageCallbacksInUse = true; // This only runs inside dyld's global lock, so ok to use a global for the in-use flag.
1348 for (std::vector<ImageCallback>::iterator it=sRemoveImageCallbacks.begin(); it != sRemoveImageCallbacks.end(); it++) {
1349 (*it)(image->machHeader(), image->getSlide());
1350 }
1351 sRemoveImageCallbacksInUse = false;
1352
1353 if ( sNotifyObjCUnmapped != NULL && image->notifyObjC() )
1354 (*sNotifyObjCUnmapped)(image->getRealPath(), image->machHeader());
1355 }
1356
1357 // notify
1358 notifySingle(dyld_image_state_terminated, image, NULL);
1359
1360 // remove from mapped images table
1361 removedMappedRanges(image);
1362
1363 // remove from master list
1364 allImagesLock();
1365 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
1366 if ( *it == image ) {
1367 sAllImages.erase(it);
1368 break;
1369 }
1370 }
1371 allImagesUnlock();
1372
1373 // remove from sDynamicReferences
1374 OSSpinLockLock(&sDynamicReferencesLock);
1375 sDynamicReferences.erase(std::remove_if(sDynamicReferences.begin(), sDynamicReferences.end(), RefUsesImage(image)), sDynamicReferences.end());
1376 OSSpinLockUnlock(&sDynamicReferencesLock);
1377
1378 // flush find-by-address cache (do this after removed from master list, so there is no chance it can come back)
1379 if ( sLastImageByAddressCache == image )
1380 sLastImageByAddressCache = NULL;
1381
1382 // if in root list, pull it out
1383 for (std::vector<ImageLoader*>::iterator it=sImageRoots.begin(); it != sImageRoots.end(); it++) {
1384 if ( *it == image ) {
1385 sImageRoots.erase(it);
1386 break;
1387 }
1388 }
1389
1390 // log if requested
1391 if ( gLinkContext.verboseLoading || (sEnv.DYLD_PRINT_LIBRARIES_POST_LAUNCH && (sMainExecutable!=NULL) && sMainExecutable->isLinked()) ) {
1392 dyld::log("dyld: unloaded: %s\n", image->getPath());
1393 }
1394
1395 // tell gdb, new way
1396 removeImageFromAllImages(image->machHeader());
1397 }
1398
1399
1400 void runImageStaticTerminators(ImageLoader* image)
1401 {
1402 // if in termination list, pull it out and run terminator
1403 bool mightBeMore;
1404 do {
1405 mightBeMore = false;
1406 for (std::vector<ImageLoader*>::iterator it=sImageFilesNeedingTermination.begin(); it != sImageFilesNeedingTermination.end(); it++) {
1407 if ( *it == image ) {
1408 sImageFilesNeedingTermination.erase(it);
1409 if (gLogAPIs) dyld::log("dlclose(), running static terminators for %p %s\n", image, image->getShortName());
1410 image->doTermination(gLinkContext);
1411 mightBeMore = true;
1412 break;
1413 }
1414 }
1415 } while ( mightBeMore );
1416 }
1417
1418 static void terminationRecorder(ImageLoader* image)
1419 {
1420 sImageFilesNeedingTermination.push_back(image);
1421 }
1422
1423 const char* getExecutablePath()
1424 {
1425 return sExecPath;
1426 }
1427
1428 static void runAllStaticTerminators(void* extra)
1429 {
1430 try {
1431 const size_t imageCount = sImageFilesNeedingTermination.size();
1432 for(size_t i=imageCount; i > 0; --i){
1433 ImageLoader* image = sImageFilesNeedingTermination[i-1];
1434 image->doTermination(gLinkContext);
1435 }
1436 sImageFilesNeedingTermination.clear();
1437 notifyBatch(dyld_image_state_terminated, false);
1438 }
1439 catch (const char* msg) {
1440 halt(msg);
1441 }
1442 }
1443
1444 void initializeMainExecutable()
1445 {
1446 // record that we've reached this step
1447 gLinkContext.startedInitializingMainExecutable = true;
1448
1449 // run initializers for any inserted dylibs
1450 ImageLoader::InitializerTimingList initializerTimes[allImagesCount()];
1451 initializerTimes[0].count = 0;
1452 const size_t rootCount = sImageRoots.size();
1453 if ( rootCount > 1 ) {
1454 for(size_t i=1; i < rootCount; ++i) {
1455 sImageRoots[i]->runInitializers(gLinkContext, initializerTimes[0]);
1456 }
1457 }
1458
1459 // run initializers for main executable and everything it brings up
1460 sMainExecutable->runInitializers(gLinkContext, initializerTimes[0]);
1461
1462 // register cxa_atexit() handler to run static terminators in all loaded images when this process exits
1463 if ( gLibSystemHelpers != NULL )
1464 (*gLibSystemHelpers->cxa_atexit)(&runAllStaticTerminators, NULL, NULL);
1465
1466 // dump info if requested
1467 if ( sEnv.DYLD_PRINT_STATISTICS )
1468 ImageLoader::printStatistics((unsigned int)allImagesCount(), initializerTimes[0]);
1469 if ( sEnv.DYLD_PRINT_STATISTICS_DETAILS )
1470 ImageLoaderMachO::printStatisticsDetails((unsigned int)allImagesCount(), initializerTimes[0]);
1471 }
1472
1473 bool mainExecutablePrebound()
1474 {
1475 return sMainExecutable->usablePrebinding(gLinkContext);
1476 }
1477
1478 ImageLoader* mainExecutable()
1479 {
1480 return sMainExecutable;
1481 }
1482
1483
1484
1485
1486 #if SUPPORT_VERSIONED_PATHS
1487
1488 // forward reference
1489 static bool getDylibVersionAndInstallname(const char* dylibPath, uint32_t* version, char* installName);
1490
1491
1492 //
1493 // Examines a dylib file and, if its current_version is newer than that of the installed
1494 // dylib at its install_name, adds the dylib file to sDylibOverrides.
1495 //
1496 static void checkDylibOverride(const char* dylibFile)
1497 {
1498 //dyld::log("checkDylibOverride('%s')\n", dylibFile);
1499 uint32_t altVersion;
1500 char sysInstallName[PATH_MAX];
1501 if ( getDylibVersionAndInstallname(dylibFile, &altVersion, sysInstallName) && (sysInstallName[0] =='/') ) {
1502 //dyld::log("%s has version 0x%08X and install name %s\n", dylibFile, altVersion, sysInstallName);
1503 uint32_t sysVersion;
1504 if ( getDylibVersionAndInstallname(sysInstallName, &sysVersion, NULL) ) {
1505 //dyld::log("%s has version 0x%08X\n", sysInstallName, sysVersion);
1506 if ( altVersion > sysVersion ) {
1507 //dyld::log("override found: %s -> %s\n", sysInstallName, dylibFile);
1508 // see if there already is an override for this dylib
1509 bool entryExists = false;
1510 for (std::vector<DylibOverride>::iterator it = sDylibOverrides.begin(); it != sDylibOverrides.end(); ++it) {
1511 if ( strcmp(it->installName, sysInstallName) == 0 ) {
1512 entryExists = true;
1513 uint32_t prevVersion;
1514 if ( getDylibVersionAndInstallname(it->override, &prevVersion, NULL) ) {
1515 if ( altVersion > prevVersion ) {
1516 // found an even newer override
1517 free((void*)(it->override));
1518 char resolvedPath[PATH_MAX];
1519 if ( realpath(dylibFile, resolvedPath) != NULL )
1520 it->override = strdup(resolvedPath);
1521 else
1522 it->override = strdup(dylibFile);
1523 break;
1524 }
1525 }
1526 }
1527 }
1528 if ( ! entryExists ) {
1529 DylibOverride entry;
1530 entry.installName = strdup(sysInstallName);
1531 char resolvedPath[PATH_MAX];
1532 if ( realpath(dylibFile, resolvedPath) != NULL )
1533 entry.override = strdup(resolvedPath);
1534 else
1535 entry.override = strdup(dylibFile);
1536 sDylibOverrides.push_back(entry);
1537 //dyld::log("added override: %s -> %s\n", entry.installName, entry.override);
1538 }
1539 }
1540 }
1541 }
1542
1543 }
1544
1545 static void checkDylibOverridesInDir(const char* dirPath)
1546 {
1547 //dyld::log("checkDylibOverridesInDir('%s')\n", dirPath);
1548 char dylibPath[PATH_MAX];
1549 long dirPathLen = strlcpy(dylibPath, dirPath, PATH_MAX-1);
1550 if ( dirPathLen >= PATH_MAX-1 )	// also catches strlcpy() truncation and leaves room for the appended '/'
1551 return;
1552 DIR* dirp = opendir(dirPath);
1553 if ( dirp != NULL) {
1554 dirent entry;
1555 dirent* entp = NULL;
1556 while ( readdir_r(dirp, &entry, &entp) == 0 ) {
1557 if ( entp == NULL )
1558 break;
1559 if ( entp->d_type != DT_REG )
1560 continue;
1561 dylibPath[dirPathLen] = '/';
1562 dylibPath[dirPathLen+1] = '\0';
1563 if ( strlcat(dylibPath, entp->d_name, PATH_MAX) >= PATH_MAX )
1564 continue;
1565 checkDylibOverride(dylibPath);
1566 }
1567 closedir(dirp);
1568 }
1569 }
1570
1571
1572 static void checkFrameworkOverridesInDir(const char* dirPath)
1573 {
1574 //dyld::log("checkFrameworkOverridesInDir('%s')\n", dirPath);
1575 char frameworkPath[PATH_MAX];
1576 long dirPathLen = strlcpy(frameworkPath, dirPath, PATH_MAX-1);
1577 if ( dirPathLen >= PATH_MAX-1 )	// also catches strlcpy() truncation and leaves room for the appended '/'
1578 return;
1579 DIR* dirp = opendir(dirPath);
1580 if ( dirp != NULL) {
1581 dirent entry;
1582 dirent* entp = NULL;
1583 while ( readdir_r(dirp, &entry, &entp) == 0 ) {
1584 if ( entp == NULL )
1585 break;
1586 if ( entp->d_type != DT_DIR )
1587 continue;
1588 frameworkPath[dirPathLen] = '/';
1589 frameworkPath[dirPathLen+1] = '\0';
1590 int dirNameLen = (int)strlen(entp->d_name);
1591 if ( dirNameLen < 11 )
1592 continue;
1593 if ( strcmp(&entp->d_name[dirNameLen-10], ".framework") != 0 )
1594 continue;
1595 if ( strlcat(frameworkPath, entp->d_name, PATH_MAX) >= PATH_MAX )
1596 continue;
1597 if ( strlcat(frameworkPath, "/", PATH_MAX) >= PATH_MAX )
1598 continue;
1599 if ( strlcat(frameworkPath, entp->d_name, PATH_MAX) >= PATH_MAX )
1600 continue;
1601 frameworkPath[strlen(frameworkPath)-10] = '\0';
1602 checkDylibOverride(frameworkPath);
1603 }
1604 closedir(dirp);
1605 }
1606 }
1607 #endif // SUPPORT_VERSIONED_PATHS
1608
1609
1610 //
1611 // Turns a colon separated list of strings into a NULL terminated array
1612 // of string pointers. If mainExecutableDir param is not NULL,
1613 // substitutes @loader_path/ and @executable_path/ with the main executable's directory.
1614 //
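// Illustrative example (hypothetical input, not from the original source):
// parseColonList("/usr/lib:@loader_path/Frameworks", "/Applications/Foo.app/Contents/MacOS/")
// returns { "/usr/lib", "/Applications/Foo.app/Contents/MacOS/Frameworks", NULL }.
// On Mac OS X, restricted processes log a warning instead of expanding @loader_path/ or
// @executable_path/ entries.
//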
1615 static const char** parseColonList(const char* list, const char* mainExecutableDir)
1616 {
1617 static const char* sEmptyList[] = { NULL };
1618
1619 if ( list[0] == '\0' )
1620 return sEmptyList;
1621
1622 int colonCount = 0;
1623 for(const char* s=list; *s != '\0'; ++s) {
1624 if (*s == ':')
1625 ++colonCount;
1626 }
1627
1628 int index = 0;
1629 const char* start = list;
1630 char** result = new char*[colonCount+2];
1631 for(const char* s=list; *s != '\0'; ++s) {
1632 if (*s == ':') {
1633 size_t len = s-start;
1634 if ( (mainExecutableDir != NULL) && (strncmp(start, "@loader_path/", 13) == 0) ) {
1635 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1636 if ( gLinkContext.processIsRestricted ) {
1637 dyld::log("dyld: warning: @loader_path/ ignored in restricted process\n");
1638 continue;
1639 }
1640 #endif
1641 size_t mainExecDirLen = strlen(mainExecutableDir);
1642 char* str = new char[mainExecDirLen+len+1];
1643 strcpy(str, mainExecutableDir);
1644 strlcat(str, &start[13], mainExecDirLen+len+1);
1645 str[mainExecDirLen+len-13] = '\0';
1646 start = &s[1];
1647 result[index++] = str;
1648 }
1649 else if ( (mainExecutableDir != NULL) && (strncmp(start, "@executable_path/", 17) == 0) ) {
1650 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1651 if ( gLinkContext.processIsRestricted ) {
1652 dyld::log("dyld: warning: @executable_path/ ignored in restricted process\n");
1653 continue;
1654 }
1655 #endif
1656 size_t mainExecDirLen = strlen(mainExecutableDir);
1657 char* str = new char[mainExecDirLen+len+1];
1658 strcpy(str, mainExecutableDir);
1659 strlcat(str, &start[17], mainExecDirLen+len+1);
1660 str[mainExecDirLen+len-17] = '\0';
1661 start = &s[1];
1662 result[index++] = str;
1663 }
1664 else {
1665 char* str = new char[len+1];
1666 strncpy(str, start, len);
1667 str[len] = '\0';
1668 start = &s[1];
1669 result[index++] = str;
1670 }
1671 }
1672 }
1673 size_t len = strlen(start);
1674 if ( (mainExecutableDir != NULL) && (strncmp(start, "@loader_path/", 13) == 0) ) {
1675 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1676 if ( gLinkContext.processIsRestricted ) {
1677 dyld::log("dyld: warning: @loader_path/ ignored in restricted process\n");
1678 }
1679 else
1680 #endif
1681 {
1682 size_t mainExecDirLen = strlen(mainExecutableDir);
1683 char* str = new char[mainExecDirLen+len+1];
1684 strcpy(str, mainExecutableDir);
1685 strlcat(str, &start[13], mainExecDirLen+len+1);
1686 str[mainExecDirLen+len-13] = '\0';
1687 result[index++] = str;
1688 }
1689 }
1690 else if ( (mainExecutableDir != NULL) && (strncmp(start, "@executable_path/", 17) == 0) ) {
1691 #if __MAC_OS_X_VERSION_MIN_REQUIRED
1692 if ( gLinkContext.processIsRestricted ) {
1693 dyld::log("dyld: warning: @executable_path/ ignored in restricted process\n");
1694 }
1695 else
1696 #endif
1697 {
1698 size_t mainExecDirLen = strlen(mainExecutableDir);
1699 char* str = new char[mainExecDirLen+len+1];
1700 strcpy(str, mainExecutableDir);
1701 strlcat(str, &start[17], mainExecDirLen+len+1);
1702 str[mainExecDirLen+len-17] = '\0';
1703 result[index++] = str;
1704 }
1705 }
1706 else {
1707 char* str = new char[len+1];
1708 strcpy(str, start);
1709 result[index++] = str;
1710 }
1711 result[index] = NULL;
1712
1713 //dyld::log("parseColonList(%s)\n", list);
1714 //for(int i=0; result[i] != NULL; ++i)
1715 // dyld::log(" %s\n", result[i]);
1716 return (const char**)result;
1717 }
1718
1719 static void appendParsedColonList(const char* list, const char* mainExecutableDir, const char* const ** storage)
1720 {
1721 const char** newlist = parseColonList(list, mainExecutableDir);
1722 if ( *storage == NULL ) {
1723 // first time, just set
1724 *storage = newlist;
1725 }
1726 else {
1727 // need to append to existing list
1728 const char* const* existing = *storage;
1729 int count = 0;
1730 for(int i=0; existing[i] != NULL; ++i)
1731 ++count;
1732 for(int i=0; newlist[i] != NULL; ++i)
1733 ++count;
1734 const char** combinedList = new const char*[count+2];
1735 int index = 0;
1736 for(int i=0; existing[i] != NULL; ++i)
1737 combinedList[index++] = existing[i];
1738 for(int i=0; newlist[i] != NULL; ++i)
1739 combinedList[index++] = newlist[i];
1740 combinedList[index] = NULL;
1741 // leak old arrays
1742 *storage = combinedList;
1743 }
1744 }
1745
1746 #if __MAC_OS_X_VERSION_MIN_REQUIRED
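// Replaces a leading key (e.g. "$HOME") in each path with the given value, so with
// HOME=/Users/me (illustrative value) "$HOME/Library/Frameworks" becomes
// "/Users/me/Library/Frameworks".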
1747 static void paths_expand_roots(const char **paths, const char *key, const char *val)
1748 {
1749 // assert(val != NULL);
1750 // assert(paths != NULL);
1751 if(NULL != key) {
1752 size_t keyLen = strlen(key);
1753 for(int i=0; paths[i] != NULL; ++i) {
1754 if ( strncmp(paths[i], key, keyLen) == 0 ) {
1755 char* newPath = new char[strlen(val) + (strlen(paths[i]) - keyLen) + 1];
1756 strcpy(newPath, val);
1757 strcat(newPath, &paths[i][keyLen]);
1758 paths[i] = newPath;
1759 }
1760 }
1761 }
1762 return;
1763 }
1764
1765 static void removePathWithPrefix(const char* paths[], const char* prefix)
1766 {
1767 size_t prefixLen = strlen(prefix);
1768 int skip = 0;
1769 int i;
1770 for(i = 0; paths[i] != NULL; ++i) {
1771 if ( strncmp(paths[i], prefix, prefixLen) == 0 )
1772 ++skip;
1773 else
1774 paths[i-skip] = paths[i];
1775 }
1776 paths[i-skip] = NULL;
1777 }
1778 #endif
1779
1780
1781 #if 0
1782 static void paths_dump(const char **paths)
1783 {
1784 // assert(paths != NULL);
1785 const char **strs = paths;
1786 while(*strs != NULL)
1787 {
1788 dyld::log("\"%s\"\n", *strs);
1789 strs++;
1790 }
1791 return;
1792 }
1793 #endif
1794
1795 static void printOptions(const char* argv[])
1796 {
1797 uint32_t i = 0;
1798 while ( NULL != argv[i] ) {
1799 dyld::log("opt[%i] = \"%s\"\n", i, argv[i]);
1800 i++;
1801 }
1802 }
1803
1804 static void printEnvironmentVariables(const char* envp[])
1805 {
1806 while ( NULL != *envp ) {
1807 dyld::log("%s\n", *envp);
1808 envp++;
1809 }
1810 }
1811
1812 void processDyldEnvironmentVariable(const char* key, const char* value, const char* mainExecutableDir)
1813 {
1814 if ( strcmp(key, "DYLD_FRAMEWORK_PATH") == 0 ) {
1815 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_FRAMEWORK_PATH);
1816 }
1817 else if ( strcmp(key, "DYLD_FALLBACK_FRAMEWORK_PATH") == 0 ) {
1818 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_FALLBACK_FRAMEWORK_PATH);
1819 }
1820 else if ( strcmp(key, "DYLD_LIBRARY_PATH") == 0 ) {
1821 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_LIBRARY_PATH);
1822 }
1823 else if ( strcmp(key, "DYLD_FALLBACK_LIBRARY_PATH") == 0 ) {
1824 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_FALLBACK_LIBRARY_PATH);
1825 }
1826 #if SUPPORT_ROOT_PATH
1827 else if ( (strcmp(key, "DYLD_ROOT_PATH") == 0) || (strcmp(key, "DYLD_PATHS_ROOT") == 0) ) {
1828 if ( strcmp(value, "/") != 0 ) {
1829 gLinkContext.rootPaths = parseColonList(value, mainExecutableDir);
1830 for (int i=0; gLinkContext.rootPaths[i] != NULL; ++i) {
1831 if ( gLinkContext.rootPaths[i][0] != '/' ) {
1832 dyld::warn("DYLD_ROOT_PATH not used because it contains a non-absolute path\n");
1833 gLinkContext.rootPaths = NULL;
1834 break;
1835 }
1836 }
1837 }
1838 }
1839 #endif
1840 else if ( strcmp(key, "DYLD_IMAGE_SUFFIX") == 0 ) {
1841 gLinkContext.imageSuffix = value;
1842 }
1843 else if ( strcmp(key, "DYLD_INSERT_LIBRARIES") == 0 ) {
1844 sEnv.DYLD_INSERT_LIBRARIES = parseColonList(value, NULL);
1845 #if SUPPORT_ACCELERATE_TABLES
1846 sDisableAcceleratorTables = true;
1847 #endif
1848 }
1849 else if ( strcmp(key, "DYLD_PRINT_OPTS") == 0 ) {
1850 sEnv.DYLD_PRINT_OPTS = true;
1851 }
1852 else if ( strcmp(key, "DYLD_PRINT_ENV") == 0 ) {
1853 sEnv.DYLD_PRINT_ENV = true;
1854 }
1855 else if ( strcmp(key, "DYLD_DISABLE_DOFS") == 0 ) {
1856 sEnv.DYLD_DISABLE_DOFS = true;
1857 }
1858 else if ( strcmp(key, "DYLD_DISABLE_PREFETCH") == 0 ) {
1859 gLinkContext.preFetchDisabled = true;
1860 }
1861 else if ( strcmp(key, "DYLD_PRINT_LIBRARIES") == 0 ) {
1862 gLinkContext.verboseLoading = true;
1863 }
1864 else if ( strcmp(key, "DYLD_PRINT_LIBRARIES_POST_LAUNCH") == 0 ) {
1865 sEnv.DYLD_PRINT_LIBRARIES_POST_LAUNCH = true;
1866 }
1867 else if ( strcmp(key, "DYLD_BIND_AT_LAUNCH") == 0 ) {
1868 sEnv.DYLD_BIND_AT_LAUNCH = true;
1869 }
1870 else if ( strcmp(key, "DYLD_FORCE_FLAT_NAMESPACE") == 0 ) {
1871 gLinkContext.bindFlat = true;
1872 }
1873 else if ( strcmp(key, "DYLD_NEW_LOCAL_SHARED_REGIONS") == 0 ) {
1874 // ignore, no longer relevant but some scripts still set it
1875 }
1876 else if ( strcmp(key, "DYLD_NO_FIX_PREBINDING") == 0 ) {
1877 }
1878 else if ( strcmp(key, "DYLD_PREBIND_DEBUG") == 0 ) {
1879 gLinkContext.verbosePrebinding = true;
1880 }
1881 else if ( strcmp(key, "DYLD_PRINT_INITIALIZERS") == 0 ) {
1882 gLinkContext.verboseInit = true;
1883 }
1884 else if ( strcmp(key, "DYLD_PRINT_DOFS") == 0 ) {
1885 gLinkContext.verboseDOF = true;
1886 }
1887 else if ( strcmp(key, "DYLD_PRINT_STATISTICS") == 0 ) {
1888 sEnv.DYLD_PRINT_STATISTICS = true;
1889 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
1890 // <rdar://problem/26614838> DYLD_PRINT_STATISTICS no longer logs to xcode console for device apps
1891 sForceStderr = true;
1892 #endif
1893 }
1894 else if ( strcmp(key, "DYLD_PRINT_TO_STDERR") == 0 ) {
1895 #if __IPHONE_OS_VERSION_MIN_REQUIRED && !TARGET_IPHONE_SIMULATOR
1896 // <rdar://problem/26633440> DYLD_PRINT_STATISTICS no longer logs to xcode console for device apps
1897 sForceStderr = true;
1898 #endif
1899 }
1900 else if ( strcmp(key, "DYLD_PRINT_STATISTICS_DETAILS") == 0 ) {
1901 sEnv.DYLD_PRINT_STATISTICS_DETAILS = true;
1902 }
1903 else if ( strcmp(key, "DYLD_PRINT_SEGMENTS") == 0 ) {
1904 gLinkContext.verboseMapping = true;
1905 }
1906 else if ( strcmp(key, "DYLD_PRINT_BINDINGS") == 0 ) {
1907 gLinkContext.verboseBind = true;
1908 }
1909 else if ( strcmp(key, "DYLD_PRINT_WEAK_BINDINGS") == 0 ) {
1910 gLinkContext.verboseWeakBind = true;
1911 }
1912 else if ( strcmp(key, "DYLD_PRINT_REBASINGS") == 0 ) {
1913 gLinkContext.verboseRebase = true;
1914 }
1915 else if ( strcmp(key, "DYLD_PRINT_APIS") == 0 ) {
1916 gLogAPIs = true;
1917 }
1918 #if SUPPORT_ACCELERATE_TABLES
1919 else if ( strcmp(key, "DYLD_PRINT_APIS_APP") == 0 ) {
1920 gLogAppAPIs = true;
1921 }
1922 #endif
1923 else if ( strcmp(key, "DYLD_PRINT_WARNINGS") == 0 ) {
1924 gLinkContext.verboseWarnings = true;
1925 }
1926 else if ( strcmp(key, "DYLD_PRINT_RPATHS") == 0 ) {
1927 gLinkContext.verboseRPaths = true;
1928 }
1929 else if ( strcmp(key, "DYLD_PRINT_CS_NOTIFICATIONS") == 0 ) {
1930 sEnv.DYLD_PRINT_CS_NOTIFICATIONS = true;
1931 }
1932 else if ( strcmp(key, "DYLD_PRINT_INTERPOSING") == 0 ) {
1933 gLinkContext.verboseInterposing = true;
1934 }
1935 else if ( strcmp(key, "DYLD_PRINT_CODE_SIGNATURES") == 0 ) {
1936 gLinkContext.verboseCodeSignatures = true;
1937 }
1938 else if ( (strcmp(key, "DYLD_SHARED_REGION") == 0) && !sSafeMode ) {
1939 if ( strcmp(value, "private") == 0 ) {
1940 gLinkContext.sharedRegionMode = ImageLoader::kUsePrivateSharedRegion;
1941 }
1942 else if ( strcmp(value, "avoid") == 0 ) {
1943 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
1944 }
1945 else if ( strcmp(value, "use") == 0 ) {
1946 gLinkContext.sharedRegionMode = ImageLoader::kUseSharedRegion;
1947 }
1948 else if ( value[0] == '\0' ) {
1949 gLinkContext.sharedRegionMode = ImageLoader::kUseSharedRegion;
1950 }
1951 else {
1952 dyld::warn("unknown option to DYLD_SHARED_REGION. Valid options are: use, private, avoid\n");
1953 }
1954 }
1955 #if DYLD_SHARED_CACHE_SUPPORT
1956 else if ( (strcmp(key, "DYLD_SHARED_CACHE_DIR") == 0) && !sSafeMode ) {
1957 sSharedCacheDir = value;
1958 }
1959 else if ( (strcmp(key, "DYLD_SHARED_CACHE_DONT_VALIDATE") == 0) && !sSafeMode ) {
1960 sSharedCacheIgnoreInodeAndTimeStamp = true;
1961 }
1962 #endif
1963 else if ( strcmp(key, "DYLD_IGNORE_PREBINDING") == 0 ) {
1964 if ( strcmp(value, "all") == 0 ) {
1965 gLinkContext.prebindUsage = ImageLoader::kUseNoPrebinding;
1966 }
1967 else if ( strcmp(value, "app") == 0 ) {
1968 gLinkContext.prebindUsage = ImageLoader::kUseAllButAppPredbinding;
1969 }
1970 else if ( strcmp(value, "nonsplit") == 0 ) {
1971 gLinkContext.prebindUsage = ImageLoader::kUseSplitSegPrebinding;
1972 }
1973 else if ( value[0] == '\0' ) {
1974 gLinkContext.prebindUsage = ImageLoader::kUseSplitSegPrebinding;
1975 }
1976 else {
1977 dyld::warn("unknown option to DYLD_IGNORE_PREBINDING. Valid options are: all, app, nonsplit\n");
1978 }
1979 }
1980 #if SUPPORT_VERSIONED_PATHS
1981 else if ( strcmp(key, "DYLD_VERSIONED_LIBRARY_PATH") == 0 ) {
1982 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_VERSIONED_LIBRARY_PATH);
1983 #if SUPPORT_ACCELERATE_TABLES
1984 sDisableAcceleratorTables = true;
1985 #endif
1986 }
1987 else if ( strcmp(key, "DYLD_VERSIONED_FRAMEWORK_PATH") == 0 ) {
1988 appendParsedColonList(value, mainExecutableDir, &sEnv.DYLD_VERSIONED_FRAMEWORK_PATH);
1989 #if SUPPORT_ACCELERATE_TABLES
1990 sDisableAcceleratorTables = true;
1991 #endif
1992 }
1993 #endif
1994 #if !TARGET_IPHONE_SIMULATOR
1995 else if ( (strcmp(key, "DYLD_PRINT_TO_FILE") == 0) && (mainExecutableDir == NULL) && !sSafeMode ) {
1996 int fd = open(value, O_WRONLY | O_CREAT | O_APPEND, 0644);
1997 if ( fd != -1 ) {
1998 sLogfile = fd;
1999 sLogToFile = true;
2000 }
2001 else {
2002 dyld::log("dyld: could not open DYLD_PRINT_TO_FILE='%s', errno=%d\n", value, errno);
2003 }
2004 }
2005 #endif
2006 else {
2007 dyld::warn("unknown environment variable: %s\n", key);
2008 }
2009 }
2010
2011
2012 #if SUPPORT_LC_DYLD_ENVIRONMENT
2013 static void checkLoadCommandEnvironmentVariables()
2014 {
2015 // <rdar://problem/8440934> Support augmenting dyld environment variables in load commands
2016 const uint32_t cmd_count = sMainExecutableMachHeader->ncmds;
2017 const struct load_command* const cmds = (struct load_command*)(((char*)sMainExecutableMachHeader)+sizeof(macho_header));
2018 const struct load_command* cmd = cmds;
2019 for (uint32_t i = 0; i < cmd_count; ++i) {
2020 switch (cmd->cmd) {
2021 case LC_DYLD_ENVIRONMENT:
2022 {
2023 const struct dylinker_command* envcmd = (struct dylinker_command*)cmd;
2024 const char* keyEqualsValue = (char*)envcmd + envcmd->name.offset;
2025 char mainExecutableDir[strlen(sExecPath)+2];
2026 strcpy(mainExecutableDir, sExecPath);
2027 char* lastSlash = strrchr(mainExecutableDir, '/');
2028 if ( lastSlash != NULL)
2029 lastSlash[1] = '\0';
2030 // only process variables that start with DYLD_ and end in _PATH
2031 if ( (strncmp(keyEqualsValue, "DYLD_", 5) == 0) ) {
2032 const char* equals = strchr(keyEqualsValue, '=');
2033 if ( equals != NULL ) {
2034 if ( strncmp(&equals[-5], "_PATH", 5) == 0 ) {
2035 const char* value = &equals[1];
2036 const size_t keyLen = equals-keyEqualsValue;
2037 // <rdar://problem/22799635> don't let malformed load command overflow stack
2038 if ( keyLen < 40 ) {
2039 char key[keyLen+1];
2040 strncpy(key, keyEqualsValue, keyLen);
2041 key[keyLen] = '\0';
2042 //dyld::log("processing: %s\n", keyEqualsValue);
2043 //dyld::log("mainExecutableDir: %s\n", mainExecutableDir);
2044 processDyldEnvironmentVariable(key, value, mainExecutableDir);
2045 }
2046 }
2047 }
2048 }
2049 }
2050 break;
2051 }
2052 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
2053 }
2054 }
2055 #endif // SUPPORT_LC_DYLD_ENVIRONMENT
2056
2057
2058 static bool hasCodeSignatureLoadCommand(const macho_header* mh)
2059 {
2060 const uint32_t cmd_count = mh->ncmds;
2061 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
2062 const struct load_command* cmd = cmds;
2063 for (uint32_t i = 0; i < cmd_count; ++i) {
2064 if (cmd->cmd == LC_CODE_SIGNATURE)
2065 return true;
2066 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
2067 }
2068 return false;
2069 }
2070
2071
2072 #if SUPPORT_VERSIONED_PATHS
2073 static void checkVersionedPaths()
2074 {
2075 // search DYLD_VERSIONED_LIBRARY_PATH directories for dylibs and check if they are newer
2076 if ( sEnv.DYLD_VERSIONED_LIBRARY_PATH != NULL ) {
2077 for(const char* const* lp = sEnv.DYLD_VERSIONED_LIBRARY_PATH; *lp != NULL; ++lp) {
2078 checkDylibOverridesInDir(*lp);
2079 }
2080 }
2081
2082 // search DYLD_VERSIONED_FRAMEWORK_PATH directories for dylibs and check if they are newer
2083 if ( sEnv.DYLD_VERSIONED_FRAMEWORK_PATH != NULL ) {
2084 for(const char* const* fp = sEnv.DYLD_VERSIONED_FRAMEWORK_PATH; *fp != NULL; ++fp) {
2085 checkFrameworkOverridesInDir(*fp);
2086 }
2087 }
2088 }
2089 #endif
2090
2091
2092 #if __MAC_OS_X_VERSION_MIN_REQUIRED
2093 //
2094 // For security, setuid programs ignore DYLD_* environment variables.
2095 // Additionally, the DYLD_* environment variables are removed
2096 // from the environment, so that any child processes don't see them.
2097 //
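// Illustrative example (hypothetical environment, not from the original source):
// { "HOME=/Users/me", "DYLD_INSERT_LIBRARIES=/tmp/x.dylib", "LD_LIBRARY_PATH=/tmp", "TERM=xterm", NULL }
// becomes { "HOME=/Users/me", "TERM=xterm", NULL }; the "apple" parameter block that follows
// the environment strings is slid down and *applep is updated to point at its new start.
//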
2098 static void pruneEnvironmentVariables(const char* envp[], const char*** applep)
2099 {
2100 #if SUPPORT_LC_DYLD_ENVIRONMENT
2101 checkLoadCommandEnvironmentVariables();
2102 #endif
2103
2104 // delete all DYLD_* and LD_LIBRARY_PATH environment variables
2105 int removedCount = 0;
2106 const char** d = envp;
2107 for(const char** s = envp; *s != NULL; s++) {
2108 if ( (strncmp(*s, "DYLD_", 5) != 0) && (strncmp(*s, "LD_LIBRARY_PATH=", 16) != 0) ) {
2109 *d++ = *s;
2110 }
2111 else {
2112 ++removedCount;
2113 }
2114 }
2115 *d++ = NULL;
2116 // slide apple parameters
2117 if ( removedCount > 0 ) {
2118 *applep = d;
2119 do {
2120 *d = d[removedCount];
2121 } while ( *d++ != NULL );
2122 for(int i=0; i < removedCount; ++i)
2123 *d++ = NULL;
2124 }
2125
2126 // disable framework and library fallback paths for setuid binaries rdar://problem/4589305
2127 sEnv.DYLD_FALLBACK_FRAMEWORK_PATH = NULL;
2128 sEnv.DYLD_FALLBACK_LIBRARY_PATH = NULL;
2129
2130 if ( removedCount > 0 )
2131 strlcat(sLoadingCrashMessage, ", ignoring DYLD_* env vars", sizeof(sLoadingCrashMessage));
2132 }
2133 #endif
2134
2135 static void defaultUninitializedFallbackPaths(const char* envp[])
2136 {
2137 #if __MAC_OS_X_VERSION_MIN_REQUIRED
2138 if ( gLinkContext.processIsRestricted ) {
2139 sEnv.DYLD_FALLBACK_FRAMEWORK_PATH = sRestrictedFrameworkFallbackPaths;
2140 sEnv.DYLD_FALLBACK_LIBRARY_PATH = sRestrictedLibraryFallbackPaths;
2141 return;
2142 }
2143
2144 // default value for DYLD_FALLBACK_FRAMEWORK_PATH, if not set in environment
2145 const char* home = _simple_getenv(envp, "HOME");
2146 if ( sEnv.DYLD_FALLBACK_FRAMEWORK_PATH == NULL ) {
2147 const char** fpaths = sFrameworkFallbackPaths;
2148 if ( home == NULL )
2149 removePathWithPrefix(fpaths, "$HOME");
2150 else
2151 paths_expand_roots(fpaths, "$HOME", home);
2152 sEnv.DYLD_FALLBACK_FRAMEWORK_PATH = fpaths;
2153 }
2154
2155 // default value for DYLD_FALLBACK_LIBRARY_PATH, if not set in environment
2156 if ( sEnv.DYLD_FALLBACK_LIBRARY_PATH == NULL ) {
2157 const char** lpaths = sLibraryFallbackPaths;
2158 if ( home == NULL )
2159 removePathWithPrefix(lpaths, "$HOME");
2160 else
2161 paths_expand_roots(lpaths, "$HOME", home);
2162 sEnv.DYLD_FALLBACK_LIBRARY_PATH = lpaths;
2163 }
2164 #else
2165 if ( sEnv.DYLD_FALLBACK_FRAMEWORK_PATH == NULL )
2166 sEnv.DYLD_FALLBACK_FRAMEWORK_PATH = sFrameworkFallbackPaths;
2167
2168 if ( sEnv.DYLD_FALLBACK_LIBRARY_PATH == NULL )
2169 sEnv.DYLD_FALLBACK_LIBRARY_PATH = sLibraryFallbackPaths;
2170 #endif
2171 }
2172
2173
2174 static void checkEnvironmentVariables(const char* envp[])
2175 {
2176 if ( sEnvMode == envNone )
2177 return;
2178 const char** p;
2179 for(p = envp; *p != NULL; p++) {
2180 const char* keyEqualsValue = *p;
2181 if ( strncmp(keyEqualsValue, "DYLD_", 5) == 0 ) {
2182 const char* equals = strchr(keyEqualsValue, '=');
2183 if ( equals != NULL ) {
2184 strlcat(sLoadingCrashMessage, "\n", sizeof(sLoadingCrashMessage));
2185 strlcat(sLoadingCrashMessage, keyEqualsValue, sizeof(sLoadingCrashMessage));
2186 const char* value = &equals[1];
2187 const size_t keyLen = equals-keyEqualsValue;
2188 char key[keyLen+1];
2189 strncpy(key, keyEqualsValue, keyLen);
2190 key[keyLen] = '\0';
2191 if ( (sEnvMode == envPrintOnly) && (strncmp(key, "DYLD_PRINT_", 11) != 0) )
2192 continue;
2193 processDyldEnvironmentVariable(key, value, NULL);
2194 }
2195 }
2196 else if ( strncmp(keyEqualsValue, "LD_LIBRARY_PATH=", 16) == 0 ) {
2197 const char* path = &keyEqualsValue[16];
2198 sEnv.LD_LIBRARY_PATH = parseColonList(path, NULL);
2199 }
2200 }
2201
2202 #if SUPPORT_LC_DYLD_ENVIRONMENT
2203 checkLoadCommandEnvironmentVariables();
2204 #endif // SUPPORT_LC_DYLD_ENVIRONMENT
2205
2206 #if SUPPORT_ROOT_PATH
2207 // <rdar://problem/11281064> DYLD_IMAGE_SUFFIX and DYLD_ROOT_PATH cannot be used together
2208 if ( (gLinkContext.imageSuffix != NULL) && (gLinkContext.rootPaths != NULL) ) {
2209 dyld::warn("Ignoring DYLD_IMAGE_SUFFIX because DYLD_ROOT_PATH is used.\n");
2210 gLinkContext.imageSuffix = NULL;
2211 }
2212 #endif
2213 }
2214
2215 #if __x86_64__ && DYLD_SHARED_CACHE_SUPPORT
2216 static bool isGCProgram(const macho_header* mh, uintptr_t slide)
2217 {
2218 const uint32_t cmd_count = mh->ncmds;
2219 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
2220 const struct load_command* cmd = cmds;
2221 for (uint32_t i = 0; i < cmd_count; ++i) {
2222 switch (cmd->cmd) {
2223 case LC_SEGMENT_COMMAND:
2224 {
2225 const struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
2226 if (strcmp(seg->segname, "__DATA") == 0) {
2227 const struct macho_section* const sectionsStart = (struct macho_section*)((char*)seg + sizeof(struct macho_segment_command));
2228 const struct macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
2229 for (const struct macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
2230 if (strncmp(sect->sectname, "__objc_imageinfo", 16) == 0) {
2231 const uint32_t* objcInfo = (uint32_t*)(sect->addr + slide);
2232 return (objcInfo[1] & 6); // 6 = (OBJC_IMAGE_SUPPORTS_GC | OBJC_IMAGE_REQUIRES_GC)
2233 }
2234 }
2235 }
2236 }
2237 break;
2238 }
2239 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
2240 }
2241 return false;
2242 }
2243 #endif
2244
2245 static void getHostInfo(const macho_header* mainExecutableMH, uintptr_t mainExecutableSlide)
2246 {
2247 #if CPU_SUBTYPES_SUPPORTED
2248 #if __ARM_ARCH_7K__
2249 sHostCPU = CPU_TYPE_ARM;
2250 sHostCPUsubtype = CPU_SUBTYPE_ARM_V7K;
2251 #elif __ARM_ARCH_7A__
2252 sHostCPU = CPU_TYPE_ARM;
2253 sHostCPUsubtype = CPU_SUBTYPE_ARM_V7;
2254 #elif __ARM_ARCH_6K__
2255 sHostCPU = CPU_TYPE_ARM;
2256 sHostCPUsubtype = CPU_SUBTYPE_ARM_V6;
2257 #elif __ARM_ARCH_7F__
2258 sHostCPU = CPU_TYPE_ARM;
2259 sHostCPUsubtype = CPU_SUBTYPE_ARM_V7F;
2260 #elif __ARM_ARCH_7S__
2261 sHostCPU = CPU_TYPE_ARM;
2262 sHostCPUsubtype = CPU_SUBTYPE_ARM_V7S;
2263 #else
2264 struct host_basic_info info;
2265 mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
2266 mach_port_t hostPort = mach_host_self();
2267 kern_return_t result = host_info(hostPort, HOST_BASIC_INFO, (host_info_t)&info, &count);
2268 if ( result != KERN_SUCCESS )
2269 throw "host_info() failed";
2270 sHostCPU = info.cpu_type;
2271 sHostCPUsubtype = info.cpu_subtype;
2272 mach_port_deallocate(mach_task_self(), hostPort);
2273 #if __x86_64__
2274 #if DYLD_SHARED_CACHE_SUPPORT
2275 sHaswell = (sHostCPUsubtype == CPU_SUBTYPE_X86_64_H);
2276 // <rdar://problem/18528074> x86_64h: Fall back to the x86_64 slice if an app requires GC.
2277 if ( sHaswell ) {
2278 if ( isGCProgram(mainExecutableMH, mainExecutableSlide) ) {
2279 // When running a GC program on a Haswell machine, don't use any x86_64h slices
2280 sHostCPUsubtype = CPU_SUBTYPE_X86_64_ALL;
2281 sHaswell = false;
2282 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
2283 }
2284 }
2285 #endif
2286 #endif
2287 #endif
2288 #endif
2289 }
2290
2291 static void checkSharedRegionDisable()
2292 {
2293 #if __MAC_OS_X_VERSION_MIN_REQUIRED
2294 // if main executable has segments that overlap the shared region,
2295 // then disable using the shared region
2296 if ( sMainExecutable->overlapsWithAddressRange((void*)(uintptr_t)SHARED_REGION_BASE, (void*)(uintptr_t)(SHARED_REGION_BASE + SHARED_REGION_SIZE)) ) {
2297 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
2298 if ( gLinkContext.verboseMapping )
2299 dyld::warn("disabling shared region because main executable overlaps\n");
2300 }
2301 #if __i386__
2302 if ( gLinkContext.processIsRestricted ) {
2303 // <rdar://problem/15280847> use private or no shared region for suid processes
2304 gLinkContext.sharedRegionMode = ImageLoader::kUsePrivateSharedRegion;
2305 }
2306 #endif
2307 #endif
2308 // iPhoneOS cannot run without shared region
2309 }
2310
2311 bool validImage(const ImageLoader* possibleImage)
2312 {
2313 const size_t imageCount = sAllImages.size();
2314 for(size_t i=0; i < imageCount; ++i) {
2315 if ( possibleImage == sAllImages[i] ) {
2316 return true;
2317 }
2318 }
2319 return false;
2320 }
2321
2322 uint32_t getImageCount()
2323 {
2324 return (uint32_t)sAllImages.size();
2325 }
2326
2327 ImageLoader* getIndexedImage(unsigned int index)
2328 {
2329 if ( index < sAllImages.size() )
2330 return sAllImages[index];
2331 return NULL;
2332 }
2333
2334 ImageLoader* findImageByMachHeader(const struct mach_header* target)
2335 {
2336 return findMappedRange((uintptr_t)target);
2337 }
2338
2339
2340 ImageLoader* findImageContainingAddress(const void* addr)
2341 {
2342 #if SUPPORT_ACCELERATE_TABLES
2343 if ( sAllCacheImagesProxy != NULL ) {
2344 const mach_header* mh;
2345 const char* path;
2346 unsigned index;
2347 if ( sAllCacheImagesProxy->addressInCache(addr, &mh, &path, &index) )
2348 return sAllCacheImagesProxy;
2349 }
2350 #endif
2351 return findMappedRange((uintptr_t)addr);
2352 }
2353
2354
2355 ImageLoader* findImageContainingSymbol(const void* symbol)
2356 {
2357 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
2358 ImageLoader* anImage = *it;
2359 if ( anImage->containsSymbol(symbol) )
2360 return anImage;
2361 }
2362 return NULL;
2363 }
2364
2365
2366
2367 void forEachImageDo( void (*callback)(ImageLoader*, void* userData), void* userData)
2368 {
2369 const size_t imageCount = sAllImages.size();
2370 for(size_t i=0; i < imageCount; ++i) {
2371 ImageLoader* anImage = sAllImages[i];
2372 (*callback)(anImage, userData);
2373 }
2374 }
2375
2376 ImageLoader* findLoadedImage(const struct stat& stat_buf)
2377 {
2378 const size_t imageCount = sAllImages.size();
2379 for(size_t i=0; i < imageCount; ++i){
2380 ImageLoader* anImage = sAllImages[i];
2381 if ( anImage->statMatch(stat_buf) )
2382 return anImage;
2383 }
2384 return NULL;
2385 }
2386
2387 // based on ANSI-C strstr()
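// returns a pointer to the last occurrence of sub within str, or NULL if none
// (a match at the very start of str is not reported, which does not matter for the
// framework-path lookup below)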
2388 static const char* strrstr(const char* str, const char* sub)
2389 {
2390 const size_t sublen = strlen(sub);
2391 for(const char* p = &str[strlen(str)]; p != str; --p) {
2392 if ( strncmp(p, sub, sublen) == 0 )
2393 return p;
2394 }
2395 return NULL;
2396 }
2397
2398
2399 //
2400 // Find framework path
2401 //
2402 // /path/foo.framework/foo => foo.framework/foo
2403 // /path/foo.framework/Versions/A/foo => foo.framework/Versions/A/foo
2404 // /path/foo.framework/Frameworks/bar.framework/bar => bar.framework/bar
2405 // /path/foo.framework/Libraries/bar.dylib => NULL
2406 // /path/foo.framework/bar => NULL
2407 //
2408 // Returns NULL if not a framework path
2409 //
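// With DYLD_IMAGE_SUFFIX set (illustrative, using the "_debug" suffix mentioned below):
// /path/foo.framework/foo_debug => foo.framework/foo_debug
//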
2410 static const char* getFrameworkPartialPath(const char* path)
2411 {
2412 const char* dirDot = strrstr(path, ".framework/");
2413 if ( dirDot != NULL ) {
2414 const char* dirStart = dirDot;
2415 for ( ; dirStart >= path; --dirStart) {
2416 if ( (*dirStart == '/') || (dirStart == path) ) {
2417 const char* frameworkStart = &dirStart[1];
2418 if ( dirStart == path )
2419 --frameworkStart;
2420 size_t len = dirDot - frameworkStart;
2421 char framework[len+1];
2422 strncpy(framework, frameworkStart, len);
2423 framework[len] = '\0';
2424 const char* leaf = strrchr(path, '/');
2425 if ( leaf != NULL ) {
2426 if ( strcmp(framework, &leaf[1]) == 0 ) {
2427 return frameworkStart;
2428 }
2429 if ( gLinkContext.imageSuffix != NULL ) {
2430 // some debug frameworks have install names that end in _debug
2431 if ( strncmp(framework, &leaf[1], len) == 0 ) {
2432 if ( strcmp( gLinkContext.imageSuffix, &leaf[len+1]) == 0 )
2433 return frameworkStart;
2434 }
2435 }
2436 }
2437 }
2438 }
2439 }
2440 return NULL;
2441 }
2442
2443
2444 static const char* getLibraryLeafName(const char* path)
2445 {
2446 const char* start = strrchr(path, '/');
2447 if ( start != NULL )
2448 return &start[1];
2449 else
2450 return path;
2451 }
2452
2453
2454 // only for architectures that use cpu-sub-types
2455 #if CPU_SUBTYPES_SUPPORTED
2456
2457 const cpu_subtype_t CPU_SUBTYPE_END_OF_LIST = -1;
2458
2459
2460 //
2461 // A fat file may contain multiple sub-images for the same CPU type.
2462 // In that case, dyld picks which sub-image to use by scanning a table
2463 // of preferred cpu-sub-types for the running cpu.
2464 //
2465 // There is one row in the table for each cpu-sub-type on which dyld might run.
2466 // The first entry in a row is that cpu-sub-type. It is followed by all
2467 // cpu-sub-types that can run on that cpu, in preferred order. Each row ends with
2468 // a "SUBTYPE_ALL" (to denote that images written to run on any cpu-sub-type are usable),
2469 // followed by one or more CPU_SUBTYPE_END_OF_LIST to pad out this row.
2470 //
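// Illustrative example (assumes an armv7 device, so sHostCPUsubtype == CPU_SUBTYPE_ARM_V7):
// the row scanned is { V7, V6, V5TEJ, V4T, ARM_ALL, END_OF_LIST }, so a fat file containing
// both armv6 and armv7 slices resolves to the armv7 slice, while an armv7s-only fat file
// yields no usable slice.
//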
2471
2472
2473 #if __arm__
2474 //
2475 // ARM sub-type lists
2476 //
2477 const int kARM_RowCount = 8;
2478 static const cpu_subtype_t kARM[kARM_RowCount][9] = {
2479
2480 // armv7f can run: v7f, v7, v6, v5, and v4
2481 { CPU_SUBTYPE_ARM_V7F, CPU_SUBTYPE_ARM_V7, CPU_SUBTYPE_ARM_V6, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST },
2482
2483 // armv7k can run: v7k
2484 { CPU_SUBTYPE_ARM_V7K, CPU_SUBTYPE_END_OF_LIST },
2485
2486 // armv7s can run: v7s, v7, v7f, v7k, v6, v5, and v4
2487 { CPU_SUBTYPE_ARM_V7S, CPU_SUBTYPE_ARM_V7, CPU_SUBTYPE_ARM_V7F, CPU_SUBTYPE_ARM_V6, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST },
2488
2489 // armv7 can run: v7, v6, v5, and v4
2490 { CPU_SUBTYPE_ARM_V7, CPU_SUBTYPE_ARM_V6, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST },
2491
2492 // armv6 can run: v6, v5, and v4
2493 { CPU_SUBTYPE_ARM_V6, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST },
2494
2495 // xscale can run: xscale, v5, and v4
2496 { CPU_SUBTYPE_ARM_XSCALE, CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST },
2497
2498 // armv5 can run: v5 and v4
2499 { CPU_SUBTYPE_ARM_V5TEJ, CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST },
2500
2501 // armv4 can run: v4
2502 { CPU_SUBTYPE_ARM_V4T, CPU_SUBTYPE_ARM_ALL, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST, CPU_SUBTYPE_END_OF_LIST },
2503 };
2504 #endif
2505
2506 #if __x86_64__
2507 //
2508 // x86_64 sub-type lists
2509 //
2510 const int kX86_64_RowCount = 2;
2511 static const cpu_subtype_t kX86_64[kX86_64_RowCount][5] = {
2512
2513 // x86_64h can run: x86_64h, x86_64h(lib), x86_64(lib), and x86_64
2514 { CPU_SUBTYPE_X86_64_H, CPU_SUBTYPE_LIB64|CPU_SUBTYPE_X86_64_H, CPU_SUBTYPE_LIB64|CPU_SUBTYPE_X86_64_ALL, CPU_SUBTYPE_X86_64_ALL, CPU_SUBTYPE_END_OF_LIST },
2515
2516 // x86_64 can run: x86_64(lib) and x86_64
2517 { CPU_SUBTYPE_X86_64_ALL, CPU_SUBTYPE_LIB64|CPU_SUBTYPE_X86_64_ALL, CPU_SUBTYPE_END_OF_LIST },
2518
2519 };
2520 #endif
2521
2522
2523 // scan the tables above to find the cpu-sub-type-list for this machine
2524 static const cpu_subtype_t* findCPUSubtypeList(cpu_type_t cpu, cpu_subtype_t subtype)
2525 {
2526 switch (cpu) {
2527 #if __arm__
2528 case CPU_TYPE_ARM:
2529 for (int i=0; i < kARM_RowCount ; ++i) {
2530 if ( kARM[i][0] == subtype )
2531 return kARM[i];
2532 }
2533 break;
2534 #endif
2535 #if __x86_64__
2536 case CPU_TYPE_X86_64:
2537 for (int i=0; i < kX86_64_RowCount ; ++i) {
2538 if ( kX86_64[i][0] == subtype )
2539 return kX86_64[i];
2540 }
2541 break;
2542 #endif
2543 }
2544 return NULL;
2545 }
2546
2547
2548
2549
2550 // scan fat table-of-contents for the most preferred usable subtype
2551 static bool fatFindBestFromOrderedList(cpu_type_t cpu, const cpu_subtype_t list[], const fat_header* fh, uint64_t* offset, uint64_t* len)
2552 {
2553 const fat_arch* const archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2554 for (uint32_t subTypeIndex=0; list[subTypeIndex] != CPU_SUBTYPE_END_OF_LIST; ++subTypeIndex) {
2555 for(uint32_t fatIndex=0; fatIndex < OSSwapBigToHostInt32(fh->nfat_arch); ++fatIndex) {
2556 if ( ((cpu_type_t)OSSwapBigToHostInt32(archs[fatIndex].cputype) == cpu)
2557 && (list[subTypeIndex] == (cpu_subtype_t)OSSwapBigToHostInt32(archs[fatIndex].cpusubtype)) ) {
2558 *offset = OSSwapBigToHostInt32(archs[fatIndex].offset);
2559 *len = OSSwapBigToHostInt32(archs[fatIndex].size);
2560 return true;
2561 }
2562 }
2563 }
2564 return false;
2565 }
2566
2567 // scan fat table-of-contents for exact match of cpu and cpu-sub-type
2568 static bool fatFindExactMatch(cpu_type_t cpu, cpu_subtype_t subtype, const fat_header* fh, uint64_t* offset, uint64_t* len)
2569 {
2570 const fat_arch* archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2571 for(uint32_t i=0; i < OSSwapBigToHostInt32(fh->nfat_arch); ++i) {
2572 if ( ((cpu_type_t)OSSwapBigToHostInt32(archs[i].cputype) == cpu)
2573 && ((cpu_subtype_t)OSSwapBigToHostInt32(archs[i].cpusubtype) == subtype) ) {
2574 *offset = OSSwapBigToHostInt32(archs[i].offset);
2575 *len = OSSwapBigToHostInt32(archs[i].size);
2576 return true;
2577 }
2578 }
2579 return false;
2580 }
2581
2582 // scan fat table-of-contents for image with matching cpu-type and runs-on-all-sub-types
2583 static bool fatFindRunsOnAllCPUs(cpu_type_t cpu, const fat_header* fh, uint64_t* offset, uint64_t* len)
2584 {
2585 const fat_arch* archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2586 for(uint32_t i=0; i < OSSwapBigToHostInt32(fh->nfat_arch); ++i) {
2587 if ( (cpu_type_t)OSSwapBigToHostInt32(archs[i].cputype) == cpu) {
2588 switch (cpu) {
2589 #if __arm__
2590 case CPU_TYPE_ARM:
2591 if ( (cpu_subtype_t)OSSwapBigToHostInt32(archs[i].cpusubtype) == CPU_SUBTYPE_ARM_ALL ) {
2592 *offset = OSSwapBigToHostInt32(archs[i].offset);
2593 *len = OSSwapBigToHostInt32(archs[i].size);
2594 return true;
2595 }
2596 break;
2597 #endif
2598 #if __x86_64__
2599 case CPU_TYPE_X86_64:
2600 if ( (cpu_subtype_t)OSSwapBigToHostInt32(archs[i].cpusubtype) == CPU_SUBTYPE_X86_64_ALL ) {
2601 *offset = OSSwapBigToHostInt32(archs[i].offset);
2602 *len = OSSwapBigToHostInt32(archs[i].size);
2603 return true;
2604 }
2605 break;
2606 #endif
2607 }
2608 }
2609 }
2610 return false;
2611 }
2612
2613 #endif // CPU_SUBTYPES_SUPPORTED
2614
2615
2616 //
2617 // Validate the fat_header and fat_arch array:
2618 //
2619 // 1) arch count would not cause array to extend past 4096 byte read buffer
2620 // 2) no slice overlaps the fat_header and arch array
2621 // 3) arch list does not contain duplicate cputype/cpusubtype tuples
2622 // 4) arch list does not have two overlapping slices.
2623 //
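// Illustrative rejection (hypothetical offsets): a slice at offset 0x1000 with size 0x3000
// ends at 0x4000, so a second slice starting at offset 0x3000 overlaps it and the whole fat
// file is refused; likewise any slice whose offset is below 0x1000 would overlap the
// header/arch array and is refused.
//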
2624 static bool fatValidate(const fat_header* fh)
2625 {
2626 if ( fh->magic != OSSwapBigToHostInt32(FAT_MAGIC) )
2627 return false;
2628
2629 // since only the first 4096 bytes of the file are read, we can only handle up to 204 slices.
2630 const uint32_t sliceCount = OSSwapBigToHostInt32(fh->nfat_arch);
2631 if ( sliceCount > 204 )
2632 return false;
2633
2634 // compare all slices looking for conflicts
2635 const fat_arch* archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2636 for (uint32_t i=0; i < sliceCount; ++i) {
2637 uint32_t i_offset = OSSwapBigToHostInt32(archs[i].offset);
2638 uint32_t i_size = OSSwapBigToHostInt32(archs[i].size);
2639 uint32_t i_cputype = OSSwapBigToHostInt32(archs[i].cputype);
2640 uint32_t i_cpusubtype = OSSwapBigToHostInt32(archs[i].cpusubtype);
2641 uint32_t i_end = i_offset + i_size;
2642 // slice cannot overlap with header
2643 if ( i_offset < 4096 )
2644 return false;
2645 // slice size cannot overflow
2646 if ( i_end < i_offset )
2647 return false;
2648 for (uint32_t j=i+1; j < sliceCount; ++j) {
2649 uint32_t j_offset = OSSwapBigToHostInt32(archs[j].offset);
2650 uint32_t j_size = OSSwapBigToHostInt32(archs[j].size);
2651 uint32_t j_cputype = OSSwapBigToHostInt32(archs[j].cputype);
2652 uint32_t j_cpusubtype = OSSwapBigToHostInt32(archs[j].cpusubtype);
2653 uint32_t j_end = j_offset + j_size;
2654 // duplicate slices types not allowed
2655 if ( (i_cputype == j_cputype) && (i_cpusubtype == j_cpusubtype) )
2656 return false;
2657 // slice size cannot overflow
2658 if ( j_end < j_offset )
2659 return false;
2660 // check for overlap of slices
2661 if ( i_offset <= j_offset ) {
2662 if ( j_offset < i_end )
2663 return false; // j overlaps end of i
2664 }
2665 else {
2666 // j starts before i
2667 if ( i_offset < j_end )
2668 return false; // i overlaps end of j
2669 }
2670 }
2671 }
2672 return true;
2673 }
2674
2675 //
2676 // A fat file may contain multiple sub-images for the same cpu-type,
2677 // each optimized for a different cpu-sub-type (e.g., G3 or G5).
2678 // This routine picks the optimal sub-image.
2679 //
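// Selection order (a summary of the code below, not additional behavior): (1) walk the
// preference row for the host cpu-sub-type via fatFindBestFromOrderedList(), (2) fall back
// to an exact cpu/cpu-sub-type match, (3) fall back to a slice marked as runnable on all
// sub-types; if none of these hit, fatFindBest() returns false and the caller reports that
// no matching architecture was found.
//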
2680 static bool fatFindBest(const fat_header* fh, uint64_t* offset, uint64_t* len)
2681 {
2682 if ( !fatValidate(fh) )
2683 return false;
2684
2685 #if CPU_SUBTYPES_SUPPORTED
2686 // assume all dylibs loaded must have same cpu type as main executable
2687 const cpu_type_t cpu = sMainExecutableMachHeader->cputype;
2688
2689 // We only know the subtype to use if the main executable cpu type matches the host
2690 if ( (cpu & CPU_TYPE_MASK) == sHostCPU ) {
2691 // get preference ordered list of subtypes
2692 const cpu_subtype_t* subTypePreferenceList = findCPUSubtypeList(cpu, sHostCPUsubtype);
2693
2694 // use ordered list to find best sub-image in fat file
2695 if ( subTypePreferenceList != NULL ) {
2696 if ( fatFindBestFromOrderedList(cpu, subTypePreferenceList, fh, offset, len) )
2697 return true;
2698 }
2699
2700 // if running cpu is not in list, try for an exact match
2701 if ( fatFindExactMatch(cpu, sHostCPUsubtype, fh, offset, len) )
2702 return true;
2703 }
2704
2705 // running on an unknown cpu, can only load generic code
2706 return fatFindRunsOnAllCPUs(cpu, fh, offset, len);
2707 #else
2708 // just find first slice with matching architecture
2709 const fat_arch* archs = (fat_arch*)(((char*)fh)+sizeof(fat_header));
2710 for(uint32_t i=0; i < OSSwapBigToHostInt32(fh->nfat_arch); ++i) {
2711 if ( (cpu_type_t)OSSwapBigToHostInt32(archs[i].cputype) == sMainExecutableMachHeader->cputype) {
2712 *offset = OSSwapBigToHostInt32(archs[i].offset);
2713 *len = OSSwapBigToHostInt32(archs[i].size);
2714 return true;
2715 }
2716 }
2717 return false;
2718 #endif
2719 }
2720
2721
2722
2723 //
2724 // This is used to validate if a non-fat (aka thin or raw) mach-o file can be used
2725 // on the current processor.
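//
// Illustrative example (assumes an x86_64h host whose main executable is also x86_64h;
// not part of the original comment): a dylib slice with cpusubtype CPU_SUBTYPE_X86_64_ALL
// is accepted because x86_64_ALL appears in the x86_64h row of the preference table above,
// while a dylib whose cputype differs from the main executable's is not compatible.
//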
2726 bool isCompatibleMachO(const uint8_t* firstPage, const char* path)
2727 {
2728 #if CPU_SUBTYPES_SUPPORTED
2729 // It is deemed compatible if any of the following are true:
2730 // 1) mach_header subtype is in list of compatible subtypes for running processor
2731 // 2) mach_header subtype is same as running processor subtype
2732 // 3) mach_header subtype runs on all processor variants
2733 const mach_header* mh = (mach_header*)firstPage;
2734 if ( mh->magic == sMainExecutableMachHeader->magic ) {
2735 if ( mh->cputype == sMainExecutableMachHeader->cputype ) {
2736 if ( (mh->cputype & CPU_TYPE_MASK) == sHostCPU ) {
2737 // get preference ordered list of subtypes that this machine can use
2738 const cpu_subtype_t* subTypePreferenceList = findCPUSubtypeList(mh->cputype, sHostCPUsubtype);
2739 if ( subTypePreferenceList != NULL ) {
2740 // if image's subtype is in the list, it is compatible
2741 for (const cpu_subtype_t* p = subTypePreferenceList; *p != CPU_SUBTYPE_END_OF_LIST; ++p) {
2742 if ( *p == mh->cpusubtype )
2743 return true;
2744 }
2745 // have list and not in list, so not compatible
2746 throwf("incompatible cpu-subtype: 0x%08X in %s", mh->cpusubtype, path);
2747 }
2748 // unknown cpu sub-type, but if exact match for current subtype then ok to use
2749 if ( mh->cpusubtype == sHostCPUsubtype )
2750 return true;
2751 }
2752
2753 // cpu type has no ordered list of subtypes
2754 switch (mh->cputype) {
2755 case CPU_TYPE_I386:
2756 case CPU_TYPE_X86_64:
2757 // subtypes are not used for these architectures
2758 return true;
2759 }
2760 }
2761 }
2762 #else
2763 // For architectures that don't support cpu-sub-types
2764 // this just checks the cpu type.
2765 const mach_header* mh = (mach_header*)firstPage;
2766 if ( mh->magic == sMainExecutableMachHeader->magic ) {
2767 if ( mh->cputype == sMainExecutableMachHeader->cputype ) {
2768 return true;
2769 }
2770 }
2771 #endif
2772 return false;
2773 }
2774
2775
2776
2777
2778 // The kernel maps in the main executable before dyld gets control. We need to
2779 // make an ImageLoader* for the already-mapped main executable.
2780 static ImageLoaderMachO* instantiateFromLoadedImage(const macho_header* mh, uintptr_t slide, const char* path)
2781 {
2782 // try mach-o loader
2783 if ( isCompatibleMachO((const uint8_t*)mh, path) ) {
2784 ImageLoader* image = ImageLoaderMachO::instantiateMainExecutable(mh, slide, path, gLinkContext);
2785 addImage(image);
2786 return (ImageLoaderMachO*)image;
2787 }
2788
2789 throw "main executable not a known format";
2790 }
2791
2792 #if DYLD_SHARED_CACHE_SUPPORT
2793
2794 #if __IPHONE_OS_VERSION_MIN_REQUIRED
2795 static bool dylibsCanOverrideCache()
2796 {
2797 uint32_t devFlags = *((uint32_t*)_COMM_PAGE_DEV_FIRM);
2798 if ( (devFlags & 1) == 0 )
2799 return false;
2800 return ( (sSharedCache != NULL) && (sSharedCache->cacheType == kDyldSharedCacheTypeDevelopment) );
2801 }
2802 #endif
2803
2804 static bool findInSharedCacheImage(const char* path, bool searchByPath, const struct stat* stat_buf, const macho_header** mh, const char** pathInCache, long* slide)
2805 {
2806 if ( sSharedCache != NULL ) {
2807 #if __MAC_OS_X_VERSION_MIN_REQUIRED
2808 // Mac OS X always requires inode/mtime to validate the cache
2809 // if stat() not done yet, do it now
2810 struct stat statb;
2811 if ( stat_buf == NULL ) {
2812 if ( my_stat(path, &statb) == -1 )
2813 return false;
2814 stat_buf = &statb;
2815 }
2816 #endif
2817 #if __IPHONE_OS_VERSION_MIN_REQUIRED
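// the iOS cache apparently stores a hash of each image's install path in the inode field
// when mod-times are not recorded (first entry's modTime == 0); compute the same rolling
// hash (hash = hash*5 + byte) over the requested path so non-matching entries can be
// skipped below without a strcmp()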
2818 uint64_t hash = 0;
2819 for (const char* s=path; *s != '\0'; ++s)
2820 hash += hash*4 + *s;
2821 #endif
2822
2823 // walk shared cache to see if there is a cached image that matches the inode/mtime/path desired
2824 const dyld_cache_image_info* const start = (dyld_cache_image_info*)((uint8_t*)sSharedCache + sSharedCache->imagesOffset);
2825 const dyld_cache_image_info* const end = &start[sSharedCache->imagesCount];
2826 #if __IPHONE_OS_VERSION_MIN_REQUIRED
2827 const bool cacheHasHashInfo = (start->modTime == 0);
2828 #endif
2829 for( const dyld_cache_image_info* p = start; p != end; ++p) {
2830 #if __IPHONE_OS_VERSION_MIN_REQUIRED
2831 // just check path
2832 const char* aPath = (char*)sSharedCache + p->pathFileOffset;
2833 if ( cacheHasHashInfo && (p->inode != hash) )
2834 continue;
2835 if ( strcmp(path, aPath) == 0 ) {
2836 // found image in cache
2837 *mh = (macho_header*)(p->address+sSharedCacheSlide);
2838 *pathInCache = aPath;
2839 *slide = sSharedCacheSlide;
2840 if ( aPath < (char*)(*mh) ) {
2841 // <rdar://problem/22056997> found alias, rescan list to get canonical name
2842 for (const dyld_cache_image_info* p2 = start; p2 != end; ++p2) {
2843 if ( p2->address == p->address ) {
2844 *pathInCache = (char*)sSharedCache + p2->pathFileOffset;
2845 break;
2846 }
2847 }
2848 }
2849 return true;
2850 }
2851 #elif __MAC_OS_X_VERSION_MIN_REQUIRED
2852 // check mtime and inode first because it is fast
2853 bool inodeMatch = ( ((time_t)p->modTime == stat_buf->st_mtime) && ((ino_t)p->inode == stat_buf->st_ino) );
2854 if ( searchByPath || sSharedCacheIgnoreInodeAndTimeStamp || inodeMatch ) {
2855 // mod-time and inode match an image in the shared cache, now check path
2856 const char* aPath = (char*)sSharedCache + p->pathFileOffset;
2857 bool cacheHit = (strcmp(path, aPath) == 0);
2858 if ( inodeMatch && !cacheHit ) {
2859 // path does not match install name of dylib in cache, but inode and mtime do match
2860 // perhaps path is a symlink to the cached dylib
2861 struct stat pathInCacheStatBuf;
2862 if ( my_stat(aPath, &pathInCacheStatBuf) != -1 )
2863 cacheHit = ( (pathInCacheStatBuf.st_dev == stat_buf->st_dev) && (pathInCacheStatBuf.st_ino == stat_buf->st_ino) );
2864 }
2865 if ( cacheHit ) {
2866 // found image in cache, return info
2867 *mh = (macho_header*)(p->address+sSharedCacheSlide);
2868 //dyld::log("findInSharedCacheImage(), mh=%p, p->address=0x%0llX, slid=0x%0lX, path=%s\n",
2869 // *mh, p->address, sSharedCacheSlide, aPath);
2870 *pathInCache = aPath;
2871 *slide = sSharedCacheSlide;
2872 return true;
2873 }
2874 }
2875 #endif
2876 }
2877 }
2878 return false;
2879 }
2880
2881 bool inSharedCache(const char* path)
2882 {
2883 const macho_header* mhInCache;
2884 const char* pathInCache;
2885 long slide;
2886 return findInSharedCacheImage(path, true, NULL, &mhInCache, &pathInCache, &slide);
2887 }
2888
2889 #endif
2890
2891 static ImageLoader* checkandAddImage(ImageLoader* image, const LoadContext& context)
2892 {
2893 // now sanity check that this loaded image does not have the same install path as any existing image
2894 const char* loadedImageInstallPath = image->getInstallPath();
2895 if ( image->isDylib() && (loadedImageInstallPath != NULL) && (loadedImageInstallPath[0] == '/') ) {
2896 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
2897 ImageLoader* anImage = *it;
2898 const char* installPath = anImage->getInstallPath();
2899 if ( installPath != NULL) {
2900 if ( strcmp(loadedImageInstallPath, installPath) == 0 ) {
2901 //dyld::log("duplicate(%s) => %p\n", installPath, anImage);
2902 removeImage(image);
2903 ImageLoader::deleteImage(image);
2904 return anImage;
2905 }
2906 }
2907 }
2908 }
2909
2910 // some APIs restrict what they can load
2911 if ( context.mustBeBundle && !image->isBundle() )
2912 throw "not a bundle";
2913 if ( context.mustBeDylib && !image->isDylib() )
2914 throw "not a dylib";
2915
2916 // regular main executables cannot be loaded
2917 if ( image->isExecutable() ) {
2918 if ( !context.canBePIE || !image->isPositionIndependentExecutable() )
2919 throw "can't load a main executable";
2920 }
2921
2922 // don't add bundles to the global list; they can be loaded but not linked. When a bundle is linked, it is added to the list
2923 if ( ! image->isBundle() )
2924 addImage(image);
2925
2926 return image;
2927 }
2928
2929 #if TARGET_IPHONE_SIMULATOR
2930 static bool isSimulatorBinary(const uint8_t* firstPages, const char* path)
2931 {
2932 const macho_header* mh = (macho_header*)firstPages;
2933 const uint32_t cmd_count = mh->ncmds;
2934 const load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
2935 const load_command* const cmdsEnd = (load_command*)((char*)cmds + mh->sizeofcmds);
2936 const struct load_command* cmd = cmds;
2937 for (uint32_t i = 0; i < cmd_count; ++i) {
2938 switch (cmd->cmd) {
2939 #if TARGET_OS_WATCH
2940 case LC_VERSION_MIN_WATCHOS:
2941 return true;
2942 #elif TARGET_OS_TV
2943 case LC_VERSION_MIN_TVOS:
2944 return true;
2945 #elif TARGET_OS_IOS
2946 case LC_VERSION_MIN_IPHONEOS:
2947 return true;
2948 #endif
2949 case LC_VERSION_MIN_MACOSX:
2950 // grandfather in a few libSystem dylibs
2951 if ((strcmp(path, "/usr/lib/system/libsystem_kernel.dylib") == 0) ||
2952 (strcmp(path, "/usr/lib/system/libsystem_platform.dylib") == 0) ||
2953 (strcmp(path, "/usr/lib/system/libsystem_pthread.dylib") == 0))
2954 return true;
2955 return false;
2956 }
2957 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
2958 if ( cmd > cmdsEnd )
2959 return false;
2960 }
2961 return false;
2962 }
2963 #endif
2964
2965 // map in file and instantiate an ImageLoader
2966 static ImageLoader* loadPhase6(int fd, const struct stat& stat_buf, const char* path, const LoadContext& context)
2967 {
2968 //dyld::log("%s(%s)\n", __func__ , path);
2969 uint64_t fileOffset = 0;
2970 uint64_t fileLength = stat_buf.st_size;
2971
2972 // validate it is a file (not directory)
2973 if ( (stat_buf.st_mode & S_IFMT) != S_IFREG )
2974 throw "not a file";
2975
2976 uint8_t firstPages[MAX_MACH_O_HEADER_AND_LOAD_COMMANDS_SIZE];
2977 bool shortPage = false;
2978
2979 // min mach-o file is 4K
2980 if ( fileLength < 4096 ) {
2981 if ( pread(fd, firstPages, fileLength, 0) != (ssize_t)fileLength )
2982 throwf("pread of short file failed: %d", errno);
2983 shortPage = true;
2984 }
2985 else {
2986 // optimistically read only first 4KB
2987 if ( pread(fd, firstPages, 4096, 0) != 4096 )
2988 throwf("pread of first 4K failed: %d", errno);
2989 }
2990
2991 // if fat wrapper, find usable sub-file
2992 const fat_header* fileStartAsFat = (fat_header*)firstPages;
2993 if ( fileStartAsFat->magic == OSSwapBigToHostInt32(FAT_MAGIC) ) {
2994 if ( OSSwapBigToHostInt32(fileStartAsFat->nfat_arch) > ((4096 - sizeof(fat_header)) / sizeof(fat_arch)) )
2995 throwf("fat header too large: %u entries", OSSwapBigToHostInt32(fileStartAsFat->nfat_arch));
2996 if ( fatFindBest(fileStartAsFat, &fileOffset, &fileLength) ) {
2997 if ( (fileOffset+fileLength) > (uint64_t)(stat_buf.st_size) )
2998 throwf("truncated fat file. file length=%llu, but needed slice goes to %llu", stat_buf.st_size, fileOffset+fileLength);
2999 if (pread(fd, firstPages, 4096, fileOffset) != 4096)
3000 throwf("pread of fat file failed: %d", errno);
3001 }
3002 else {
3003 throw "no matching architecture in universal wrapper";
3004 }
3005 }
3006
3007 // try mach-o loader
3008 if ( shortPage )
3009 throw "file too short";
3010 if ( isCompatibleMachO(firstPages, path) ) {
3011
3012 // only MH_BUNDLE, MH_DYLIB, and some MH_EXECUTE can be dynamically loaded
3013 const mach_header* mh = (mach_header*)firstPages;
3014 switch ( mh->filetype ) {
3015 case MH_EXECUTE:
3016 case MH_DYLIB:
3017 case MH_BUNDLE:
3018 break;
3019 default:
3020 throw "mach-o, but wrong filetype";
3021 }
3022
3023 uint32_t headerAndLoadCommandsSize = sizeof(macho_header) + mh->sizeofcmds;
3024 if ( headerAndLoadCommandsSize > MAX_MACH_O_HEADER_AND_LOAD_COMMANDS_SIZE )
3025 throwf("malformed mach-o: load commands size (%u) > %u", headerAndLoadCommandsSize, MAX_MACH_O_HEADER_AND_LOAD_COMMANDS_SIZE);
3026
3027 if ( headerAndLoadCommandsSize > fileLength )
3028 dyld::throwf("malformed mach-o: load commands size (%u) > mach-o file size (%llu)", headerAndLoadCommandsSize, fileLength);
3029
3030 if ( headerAndLoadCommandsSize > 4096 ) {
3031 // read more pages
3032 unsigned readAmount = headerAndLoadCommandsSize - 4096;
3033 if ( pread(fd, &firstPages[4096], readAmount, fileOffset+4096) != readAmount )
3034 throwf("pread of extra load commands past 4KB failed: %d", errno);
3035 }
3036
3037 #if TARGET_IPHONE_SIMULATOR
3038 // <rdar://problem/14168872> dyld_sim should restrict loading osx binaries
3039 if ( !isSimulatorBinary(firstPages, path) ) {
3040 #if TARGET_OS_WATCH
3041 throw "mach-o, but not built for watchOS simulator";
3042 #elif TARGET_OS_TV
3043 throw "mach-o, but not built for tvOS simulator";
3044 #else
3045 throw "mach-o, but not built for iOS simulator";
3046 #endif
3047 }
3048 #endif
3049
3050 // instantiate an image
3051 ImageLoader* image = ImageLoaderMachO::instantiateFromFile(path, fd, firstPages, headerAndLoadCommandsSize, fileOffset, fileLength, stat_buf, gLinkContext);
3052
3053 // validate
3054 return checkandAddImage(image, context);
3055 }
3056
3057 // try other file formats here...
3058
3059
3060 // throw error about what was found
3061 switch (*(uint32_t*)firstPages) {
3062 case MH_MAGIC:
3063 case MH_CIGAM:
3064 case MH_MAGIC_64:
3065 case MH_CIGAM_64:
3066 throw "mach-o, but wrong architecture";
3067 default:
3068 throwf("unknown file type, first eight bytes: 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X",
3069 firstPages[0], firstPages[1], firstPages[2], firstPages[3], firstPages[4], firstPages[5], firstPages[6],firstPages[7]);
3070 }
3071 }
3072
3073
3074 static ImageLoader* loadPhase5open(const char* path, const LoadContext& context, const struct stat& stat_buf, std::vector<const char*>* exceptions)
3075 {
3076 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3077
3078 // open file (automagically closed when this function exits)
3079 FileOpener file(path);
3080
3081 // just return NULL if file not found, but record any other errors
3082 if ( file.getFileDescriptor() == -1 ) {
3083 int err = errno;
3084 if ( err != ENOENT ) {
3085 const char* newMsg;
3086 if ( (err == EPERM) && sandboxBlockedOpen(path) )
3087 newMsg = dyld::mkstringf("file system sandbox blocked open() of '%s'", path);
3088 else
3089 newMsg = dyld::mkstringf("%s: open() failed with errno=%d", path, err);
3090 exceptions->push_back(newMsg);
3091 }
3092 return NULL;
3093 }
3094
3095 try {
3096 return loadPhase6(file.getFileDescriptor(), stat_buf, path, context);
3097 }
3098 catch (const char* msg) {
3099 const char* newMsg = dyld::mkstringf("%s: %s", path, msg);
3100 exceptions->push_back(newMsg);
3101 free((void*)msg);
3102 return NULL;
3103 }
3104 }
3105
3106
3107 #if __MAC_OS_X_VERSION_MIN_REQUIRED
3108 static ImageLoader* loadPhase5load(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3109 {
3110 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3111 ImageLoader* image = NULL;
3112
3113 #if SUPPORT_ACCELERATE_TABLES
3114 if ( sAllCacheImagesProxy != NULL ) {
3115 unsigned index;
3116 if ( sAllCacheImagesProxy->hasDylib(path, &index) )
3117 return sAllCacheImagesProxy;
3118 }
3119 #endif
3120
3121 // just return NULL if file not found, but record any other errors
3122 struct stat stat_buf;
3123 if ( my_stat(path, &stat_buf) == -1 ) {
3124 int err = errno;
3125 if ( err != ENOENT ) {
3126 if ( (err == EPERM) && sandboxBlockedStat(path) )
3127 exceptions->push_back(dyld::mkstringf("%s: file system sandbox blocked stat()", path));
3128 else
3129 exceptions->push_back(dyld::mkstringf("%s: stat() failed with errno=%d", path, err));
3130 }
3131 return NULL;
3132 }
3133
3134 // in case image was renamed or found via symlinks, check for inode match
3135 image = findLoadedImage(stat_buf);
3136 if ( image != NULL )
3137 return image;
3138
3139 // do nothing if not already loaded and if RTLD_NOLOAD or NSADDIMAGE_OPTION_RETURN_ONLY_IF_LOADED
3140 if ( context.dontLoad )
3141 return NULL;
3142
3143 #if DYLD_SHARED_CACHE_SUPPORT
3144 // see if this image is in shared cache
3145 const macho_header* mhInCache;
3146 const char* pathInCache;
3147 long slideInCache;
3148 if ( findInSharedCacheImage(path, false, &stat_buf, &mhInCache, &pathInCache, &slideInCache) ) {
3149 image = ImageLoaderMachO::instantiateFromCache(mhInCache, pathInCache, slideInCache, stat_buf, gLinkContext);
3150 return checkandAddImage(image, context);
3151 }
3152 #endif
3153 // file exists and is not in dyld shared cache, so open it
3154 return loadPhase5open(path, context, stat_buf, exceptions);
3155 }
3156 #endif // __MAC_OS_X_VERSION_MIN_REQUIRED
3157
3158
3159
3160 #if __IPHONE_OS_VERSION_MIN_REQUIRED
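// stat() 'path' and, if it exists, return the already-loaded image with a matching inode
// or open the file; *imageFound reports whether the path resolved to anything and
// *statErrNo records why stat() failed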
3161 static ImageLoader* loadPhase5stat(const char* path, const LoadContext& context, struct stat* stat_buf,
3162 int* statErrNo, bool* imageFound, std::vector<const char*>* exceptions)
3163 {
3164 ImageLoader* image = NULL;
3165 *imageFound = false;
3166 *statErrNo = 0;
3167 if ( my_stat(path, stat_buf) == 0 ) {
3168 // in case image was renamed or found via symlinks, check for inode match
3169 image = findLoadedImage(*stat_buf);
3170 if ( image != NULL ) {
3171 *imageFound = true;
3172 return image;
3173 }
3174 // do nothing if not already loaded and if RTLD_NOLOAD
3175 if ( context.dontLoad ) {
3176 *imageFound = true;
3177 return NULL;
3178 }
3179 image = loadPhase5open(path, context, *stat_buf, exceptions);
3180 if ( image != NULL ) {
3181 *imageFound = true;
3182 return image;
3183 }
3184 }
3185 else {
3186 *statErrNo = errno;
3187 }
3188 return NULL;
3189 }
3190
3191 // try to open file
3192 static ImageLoader* loadPhase5load(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3193 {
3194 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3195 struct stat stat_buf;
3196 bool imageFound;
3197 int statErrNo;
3198 ImageLoader* image;
3199 #if DYLD_SHARED_CACHE_SUPPORT
3200 #if SUPPORT_ACCELERATE_TABLES
3201 if ( sAllCacheImagesProxy != NULL ) {
3202 if ( sAllCacheImagesProxy->hasDylib(path, &cacheIndex) )
3203 return sAllCacheImagesProxy;
3204 }
3205 #endif
3206 if ( dylibsCanOverrideCache() ) {
3207 // flag is set that allows installed framework roots to override dyld shared cache
3208 image = loadPhase5stat(path, context, &stat_buf, &statErrNo, &imageFound, exceptions);
3209 if ( imageFound )
3210 return image;
3211 }
3212 // see if this image is in shared cache
3213 const macho_header* mhInCache;
3214 const char* pathInCache;
3215 long slideInCache;
3216 if ( findInSharedCacheImage(path, true, NULL, &mhInCache, &pathInCache, &slideInCache) ) {
3217 // see if this image in the cache was already loaded via a different path
3218 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); ++it) {
3219 ImageLoader* anImage = *it;
3220 if ( (const macho_header*)anImage->machHeader() == mhInCache )
3221 return anImage;
3222 }
3223 // do nothing if not already loaded and if RTLD_NOLOAD
3224 if ( context.dontLoad )
3225 return NULL;
3226 // nope, so instantiate a new image from dyld shared cache
3227 // <rdar://problem/7014995> zero out stat buffer so mtime, etc are zero for items from the shared cache
3228 bzero(&stat_buf, sizeof(stat_buf));
3229 image = ImageLoaderMachO::instantiateFromCache(mhInCache, pathInCache, slideInCache, stat_buf, gLinkContext);
3230 return checkandAddImage(image, context);
3231 }
3232
3233 if ( !dylibsCanOverrideCache() ) {
3234 // flag is not set and image is not in cache, so try opening it
3235 image = loadPhase5stat(path, context, &stat_buf, &statErrNo, &imageFound, exceptions);
3236 if ( imageFound )
3237 return image;
3238 }
3239 #else
3240 image = loadPhase5stat(path, context, &stat_buf, &statErrNo, &imageFound, exceptions);
3241 if ( imageFound )
3242 return image;
3243 #endif
3244 // just return NULL if file not found, but record any other errors
3245 if ( (statErrNo != ENOENT) && (statErrNo != 0) ) {
3246 if ( (statErrNo == EPERM) && sandboxBlockedStat(path) )
3247 exceptions->push_back(dyld::mkstringf("%s: file system sandbox blocked stat()", path));
3248 else
3249 exceptions->push_back(dyld::mkstringf("%s: stat() failed with errno=%d", path, statErrNo));
3250 }
3251 return NULL;
3252 }
3253 #endif // __IPHONE_OS_VERSION_MIN_REQUIRED
3254
3255
3256 // look for path match with existing loaded images
3257 static ImageLoader* loadPhase5check(const char* path, const char* orgPath, const LoadContext& context)
3258 {
3259 //dyld::log("%s(%s, %s)\n", __func__ , path, orgPath);
3260 // search path against load-path and install-path of all already loaded images
3261 uint32_t hash = ImageLoader::hash(path);
3262 //dyld::log("check() hash=%d, path=%s\n", hash, path);
3263 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
3264 ImageLoader* anImage = *it;
3265 // check hash first to cut down on strcmp calls
3266 //dyld::log(" check() hash=%d, path=%s\n", anImage->getPathHash(), anImage->getPath());
3267 if ( anImage->getPathHash() == hash ) {
3268 if ( strcmp(path, anImage->getPath()) == 0 ) {
3269 // if we are looking for a dylib don't return something else
3270 if ( !context.mustBeDylib || anImage->isDylib() )
3271 return anImage;
3272 }
3273 }
3274 if ( context.matchByInstallName || anImage->matchInstallPath() ) {
3275 const char* installPath = anImage->getInstallPath();
3276 if ( installPath != NULL) {
3277 if ( strcmp(path, installPath) == 0 ) {
3278 // if we are looking for a dylib don't return something else
3279 if ( !context.mustBeDylib || anImage->isDylib() )
3280 return anImage;
3281 }
3282 }
3283 }
3284 // an install name starting with @rpath should match by install name, not just real path
3285 if ( (orgPath[0] == '@') && (strncmp(orgPath, "@rpath/", 7) == 0) ) {
3286 const char* installPath = anImage->getInstallPath();
3287 if ( installPath != NULL) {
3288 if ( !context.mustBeDylib || anImage->isDylib() ) {
3289 if ( strcmp(orgPath, installPath) == 0 )
3290 return anImage;
3291 }
3292 }
3293 }
3294 }
3295
3296 //dyld::log("%s(%s) => NULL\n", __func__, path);
3297 return NULL;
3298 }
3299
3300
3301 // open or check existing
3302 static ImageLoader* loadPhase5(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3303 {
3304 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3305
3306 // check for specific dylib overrides
3307 for (std::vector<DylibOverride>::iterator it = sDylibOverrides.begin(); it != sDylibOverrides.end(); ++it) {
3308 if ( strcmp(it->installName, path) == 0 ) {
3309 path = it->override;
3310 break;
3311 }
3312 }
3313
3314 if ( exceptions != NULL )
3315 return loadPhase5load(path, orgPath, context, cacheIndex, exceptions);
3316 else
3317 return loadPhase5check(path, orgPath, context);
3318 }
3319
3320 // try with and without image suffix
3321 static ImageLoader* loadPhase4(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3322 {
3323 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3324 ImageLoader* image = NULL;
3325 if ( gLinkContext.imageSuffix != NULL ) {
3326 char pathWithSuffix[strlen(path)+strlen(gLinkContext.imageSuffix)+2];
3327 ImageLoader::addSuffix(path, gLinkContext.imageSuffix, pathWithSuffix);
3328 image = loadPhase5(pathWithSuffix, orgPath, context, cacheIndex, exceptions);
3329 }
3330 if ( image == NULL )
3331 image = loadPhase5(path, orgPath, context, cacheIndex, exceptions);
3332 return image;
3333 }
3334
3335 static ImageLoader* loadPhase2(const char* path, const char* orgPath, const LoadContext& context,
3336 const char* const frameworkPaths[], const char* const libraryPaths[],
3337 unsigned& cacheIndex, std::vector<const char*>* exceptions); // forward reference
3338
3339
3340 // expand @ variables
3341 static ImageLoader* loadPhase3(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3342 {
3343 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3344 ImageLoader* image = NULL;
3345 if ( strncmp(path, "@executable_path/", 17) == 0 ) {
3346 #if __MAC_OS_X_VERSION_MIN_REQUIRED
3347 // @executable_path cannot be used in any binary in a setuid process rdar://problem/4589305
3348 if ( gLinkContext.processIsRestricted )
3349 throwf("unsafe use of @executable_path in %s with restricted binary", context.origin);
3350 #endif
3351 // handle @executable_path path prefix
3352 const char* executablePath = sExecPath;
3353 char newPath[strlen(executablePath) + strlen(path)];
3354 strcpy(newPath, executablePath);
3355 char* addPoint = strrchr(newPath,'/');
3356 if ( addPoint != NULL )
3357 strcpy(&addPoint[1], &path[17]);
3358 else
3359 strcpy(newPath, &path[17]);
3360 image = loadPhase4(newPath, orgPath, context, cacheIndex, exceptions);
3361 if ( image != NULL )
3362 return image;
3363
3364 // perhaps main executable path is a sym link, find realpath and retry
3365 char resolvedPath[PATH_MAX];
3366 if ( realpath(sExecPath, resolvedPath) != NULL ) {
3367 char newRealPath[strlen(resolvedPath) + strlen(path)];
3368 strcpy(newRealPath, resolvedPath);
3369 addPoint = strrchr(newRealPath,'/');
3370 if ( addPoint != NULL )
3371 strcpy(&addPoint[1], &path[17]);
3372 else
3373 strcpy(newRealPath, &path[17]);
3374 image = loadPhase4(newRealPath, orgPath, context, cacheIndex, exceptions);
3375 if ( image != NULL )
3376 return image;
3377 }
3378 }
3379 else if ( (strncmp(path, "@loader_path/", 13) == 0) && (context.origin != NULL) ) {
3380 #if __MAC_OS_X_VERSION_MIN_REQUIRED
3381 // @loader_path cannot be used from the main executable of a setuid process rdar://problem/4589305
3382 if ( gLinkContext.processIsRestricted && (strcmp(context.origin, sExecPath) == 0) )
3383 throwf("unsafe use of @loader_path in %s with restricted binary", context.origin);
3384 #endif
3385 // handle @loader_path path prefix
3386 char newPath[strlen(context.origin) + strlen(path)];
3387 strcpy(newPath, context.origin);
3388 char* addPoint = strrchr(newPath,'/');
3389 if ( addPoint != NULL )
3390 strcpy(&addPoint[1], &path[13]);
3391 else
3392 strcpy(newPath, &path[13]);
3393 image = loadPhase4(newPath, orgPath, context, cacheIndex, exceptions);
3394 if ( image != NULL )
3395 return image;
3396
3397 // perhaps loader path is a sym link, find realpath and retry
3398 char resolvedPath[PATH_MAX];
3399 if ( realpath(context.origin, resolvedPath) != NULL ) {
3400 char newRealPath[strlen(resolvedPath) + strlen(path)];
3401 strcpy(newRealPath, resolvedPath);
3402 addPoint = strrchr(newRealPath,'/');
3403 if ( addPoint != NULL )
3404 strcpy(&addPoint[1], &path[13]);
3405 else
3406 strcpy(newRealPath, &path[13]);
3407 image = loadPhase4(newRealPath, orgPath, context, cacheIndex, exceptions);
3408 if ( image != NULL )
3409 return image;
3410 }
3411 }
3412 else if ( context.implicitRPath || (strncmp(path, "@rpath/", 7) == 0) ) {
3413 const char* trailingPath = (strncmp(path, "@rpath/", 7) == 0) ? &path[7] : path;
3414 // substitute @rpath with all -rpath paths up the load chain
3415 for(const ImageLoader::RPathChain* rp=context.rpath; rp != NULL; rp=rp->next) {
3416 if (rp->paths != NULL ) {
3417 for(std::vector<const char*>::iterator it=rp->paths->begin(); it != rp->paths->end(); ++it) {
3418 const char* anRPath = *it;
3419 char newPath[strlen(anRPath) + strlen(trailingPath)+2];
3420 strcpy(newPath, anRPath);
3421 if ( newPath[strlen(newPath)-1] != '/' )
3422 strcat(newPath, "/");
3423 strcat(newPath, trailingPath);
3424 image = loadPhase4(newPath, orgPath, context, cacheIndex, exceptions);
3425 if ( gLinkContext.verboseRPaths && (exceptions != NULL) ) {
3426 if ( image != NULL )
3427 dyld::log("RPATH successful expansion of %s to: %s\n", orgPath, newPath);
3428 else
3429 dyld::log("RPATH failed to expanding %s to: %s\n", orgPath, newPath);
3430 }
3431 if ( image != NULL )
3432 return image;
3433 }
3434 }
3435 }
3436
3437 // substitute @rpath with LD_LIBRARY_PATH
3438 if ( sEnv.LD_LIBRARY_PATH != NULL ) {
3439 image = loadPhase2(trailingPath, orgPath, context, NULL, sEnv.LD_LIBRARY_PATH, cacheIndex, exceptions);
3440 if ( image != NULL )
3441 return image;
3442 }
3443
3444 // if this is the "open" pass, don't try to open @rpath/... as a relative path
3445 if ( (exceptions != NULL) && (trailingPath != path) )
3446 return NULL;
3447 }
3448 #if __MAC_OS_X_VERSION_MIN_REQUIRED
3449 else if ( gLinkContext.processIsRestricted && (path[0] != '/' ) ) {
3450 throwf("unsafe use of relative rpath %s in %s with restricted binary", path, context.origin);
3451 }
3452 #endif
3453
3454 return loadPhase4(path, orgPath, context, cacheIndex, exceptions);
3455 }
3456
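// a path on disk may be a symlink to a dylib that lives in the dyld shared cache; resolve
// it with realpath() and retry loadPhase4() with the resolved path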
3457 static ImageLoader* loadPhase2cache(const char* path, const char *orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions) {
3458 ImageLoader* image = NULL;
3459 #if !TARGET_IPHONE_SIMULATOR
3460 if ( exceptions != NULL) {
3461 char resolvedPath[PATH_MAX];
3462 realpath(path, resolvedPath);
3463 int myerr = errno;
3464 // If realpath() resolves to a path which does not exist on disk, errno is set to ENOENT
3465 if ( (myerr == ENOENT) || (myerr == 0) )
3466 {
3467 image = loadPhase4(resolvedPath, orgPath, context, cacheIndex, exceptions);
3468 }
3469 }
3470 #endif
3471 return image;
3472 }
3473
3474
3475 // try search paths
3476 static ImageLoader* loadPhase2(const char* path, const char* orgPath, const LoadContext& context,
3477 const char* const frameworkPaths[], const char* const libraryPaths[],
3478 unsigned& cacheIndex, std::vector<const char*>* exceptions)
3479 {
3480 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3481 ImageLoader* image = NULL;
3482 const char* frameworkPartialPath = getFrameworkPartialPath(path);
3483 if ( frameworkPaths != NULL ) {
3484 if ( frameworkPartialPath != NULL ) {
3485 const size_t frameworkPartialPathLen = strlen(frameworkPartialPath);
3486 for(const char* const* fp = frameworkPaths; *fp != NULL; ++fp) {
3487 char npath[strlen(*fp)+frameworkPartialPathLen+8];
3488 strcpy(npath, *fp);
3489 strcat(npath, "/");
3490 strcat(npath, frameworkPartialPath);
3491 //dyld::log("dyld: fallback framework path used: %s() -> loadPhase4(\"%s\", ...)\n", __func__, npath);
3492 image = loadPhase4(npath, orgPath, context, cacheIndex, exceptions);
3493 // Look in the cache if appropriate
3494 if ( image == NULL)
3495 image = loadPhase2cache(npath, orgPath, context, cacheIndex, exceptions);
3496 if ( image != NULL )
3497 return image;
3498 }
3499 }
3500 }
3501 // <rdar://problem/12649639> An executable with the same name as a framework & DYLD_LIBRARY_PATH pointing to it gets loaded twice
3502 // <rdar://problem/14160846> Some apps depend on frameworks being found via library paths
3503 if ( (libraryPaths != NULL) && ((frameworkPartialPath == NULL) || sFrameworksFoundAsDylibs) ) {
3504 const char* libraryLeafName = getLibraryLeafName(path);
3505 const size_t libraryLeafNameLen = strlen(libraryLeafName);
3506 for(const char* const* lp = libraryPaths; *lp != NULL; ++lp) {
3507 char libpath[strlen(*lp)+libraryLeafNameLen+8];
3508 strcpy(libpath, *lp);
3509 strcat(libpath, "/");
3510 strcat(libpath, libraryLeafName);
3511 //dyld::log("dyld: fallback library path used: %s() -> loadPhase4(\"%s\", ...)\n", __func__, libpath);
3512 image = loadPhase4(libpath, orgPath, context, cacheIndex, exceptions);
3513 // Look in the cache if appropriate
3514 if ( image == NULL)
3515 image = loadPhase2cache(libpath, orgPath, context, cacheIndex, exceptions);
3516 if ( image != NULL )
3517 return image;
3518 }
3519 }
3520 return NULL;
3521 }
3522
3523 // try search overrides and fallbacks
3524 static ImageLoader* loadPhase1(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3525 {
3526 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3527 ImageLoader* image = NULL;
3528
3529 // handle LD_LIBRARY_PATH environment variables that force searching
3530 if ( context.useLdLibraryPath && (sEnv.LD_LIBRARY_PATH != NULL) ) {
3531 image = loadPhase2(path, orgPath, context, NULL, sEnv.LD_LIBRARY_PATH, cacheIndex,exceptions);
3532 if ( image != NULL )
3533 return image;
3534 }
3535
3536 // handle DYLD_ environment variables that force searching
3537 if ( context.useSearchPaths && ((sEnv.DYLD_FRAMEWORK_PATH != NULL) || (sEnv.DYLD_LIBRARY_PATH != NULL)) ) {
3538 image = loadPhase2(path, orgPath, context, sEnv.DYLD_FRAMEWORK_PATH, sEnv.DYLD_LIBRARY_PATH, cacheIndex, exceptions);
3539 if ( image != NULL )
3540 return image;
3541 }
3542
3543 // try raw path
3544 image = loadPhase3(path, orgPath, context, cacheIndex, exceptions);
3545 if ( image != NULL )
3546 return image;
3547
3548 // try fallback paths on the second pass (which will open files)
3549 const char* const* fallbackLibraryPaths = sEnv.DYLD_FALLBACK_LIBRARY_PATH;
3550 if ( (fallbackLibraryPaths != NULL) && !context.useFallbackPaths )
3551 fallbackLibraryPaths = NULL;
3552 if ( !context.dontLoad && (exceptions != NULL) && ((sEnv.DYLD_FALLBACK_FRAMEWORK_PATH != NULL) || (fallbackLibraryPaths != NULL)) ) {
3553 image = loadPhase2(path, orgPath, context, sEnv.DYLD_FALLBACK_FRAMEWORK_PATH, fallbackLibraryPaths, cacheIndex, exceptions);
3554 if ( image != NULL )
3555 return image;
3556 }
3557
3558 return NULL;
3559 }
3560
3561 // try root substitutions
3562 static ImageLoader* loadPhase0(const char* path, const char* orgPath, const LoadContext& context, unsigned& cacheIndex, std::vector<const char*>* exceptions)
3563 {
3564 //dyld::log("%s(%s, %p)\n", __func__ , path, exceptions);
3565
3566 #if SUPPORT_ROOT_PATH
3567 // handle DYLD_ROOT_PATH which forces absolute paths to use a new root
3568 if ( (gLinkContext.rootPaths != NULL) && (path[0] == '/') ) {
3569 for(const char* const* rootPath = gLinkContext.rootPaths ; *rootPath != NULL; ++rootPath) {
3570 char newPath[strlen(*rootPath) + strlen(path)+2];
3571 strcpy(newPath, *rootPath);
3572 strcat(newPath, path);
3573 ImageLoader* image = loadPhase1(newPath, orgPath, context, cacheIndex, exceptions);
3574 if ( image != NULL )
3575 return image;
3576 }
3577 }
3578 #endif
3579
3580 // try raw path
3581 return loadPhase1(path, orgPath, context, cacheIndex, exceptions);
3582 }
3583
3584 #if DYLD_SHARED_CACHE_SUPPORT
3585 static bool cacheablePath(const char* path) {
3586 if (strncmp(path, "/usr/lib/", 9) == 0)
3587 return true;
3588 if (strncmp(path, "/System/Library/", 16) == 0)
3589 return true;
3590 return false;
3591 }
3592 #endif
3593
3594 //
3595 // Given all the DYLD_ environment variables, the general case for loading libraries
3596 // is that any given path expands into a list of possible locations to load. We
3597 // also must take care to ensure two copies of the "same" library are never loaded.
3598 //
3599 // The algorithm used here is that there is a separate function for each "phase" of the
3600 // path expansion. Each phase function calls the next phase with each possible expansion
3601 // of that phase. The result is that the last phase is called with all possible paths.
3602 //
3603 // To catch duplicates the algorithm is run twice. The first time, the last phase checks
3604 // the path against all loaded images. The second time, the last phase calls open() on
3605 // the path. Either time, if an image is found, the phases all unwind without checking
3606 // for other paths.
3607 //
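// For example (hypothetical dylib name): absent any DYLD_* search paths, a request for
// "@rpath/libfoo.dylib" passes through loadPhase0-2 unchanged; loadPhase3 substitutes each
// LC_RPATH entry from the load chain, loadPhase4 also tries any DYLD_IMAGE_SUFFIX variant,
// and loadPhase5 either checks the candidate against already-loaded images (first pass,
// exceptions==NULL) or open()s it (second pass).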
3608 ImageLoader* load(const char* path, const LoadContext& context, unsigned& cacheIndex)
3609 {
3610 CRSetCrashLogMessage2(path);
3611 const char* orgPath = path;
3612 cacheIndex = UINT32_MAX;
3613
3614 //dyld::log("%s(%s)\n", __func__ , path);
3615 char realPath[PATH_MAX];
3616 // when DYLD_IMAGE_SUFFIX is in use, do a realpath(), otherwise a load of "Foo.framework/Foo" will not match
3617 if ( context.useSearchPaths && ( gLinkContext.imageSuffix != NULL) ) {
3618 if ( realpath(path, realPath) != NULL )
3619 path = realPath;
3620 }
3621
3622 // try all path permutations and check against existing loaded images
3623
3624 ImageLoader* image = loadPhase0(path, orgPath, context, cacheIndex, NULL);
3625 if ( image != NULL ) {
3626 CRSetCrashLogMessage2(NULL);
3627 return image;
3628 }
3629
3630 // try all path permutations and try open() until first success
3631 std::vector<const char*> exceptions;
3632 image = loadPhase0(path, orgPath, context, cacheIndex, &exceptions);
3633 #if !TARGET_IPHONE_SIMULATOR
3634 // <rdar://problem/16704628> support symlinks on disk to a path in dyld shared cache
3635 if ( image == NULL)
3636 image = loadPhase2cache(path, orgPath, context, cacheIndex, &exceptions);
3637 #endif
3638 CRSetCrashLogMessage2(NULL);
3639 if ( image != NULL ) {
3640 // <rdar://problem/6916014> leak in dyld during dlopen when using DYLD_ variables
3641 for (std::vector<const char*>::iterator it = exceptions.begin(); it != exceptions.end(); ++it) {
3642 free((void*)(*it));
3643 }
3644 #if DYLD_SHARED_CACHE_SUPPORT
3645 // if loaded image is not from cache, but original path is in cache
3646 // set gSharedCacheOverridden flag to disable some ObjC optimizations
3647 if ( !gSharedCacheOverridden && !image->inSharedCache() && image->isDylib() && cacheablePath(path) && inSharedCache(path) ) {
3648 gSharedCacheOverridden = true;
3649 }
3650 #endif
3651 return image;
3652 }
3653 else if ( exceptions.size() == 0 ) {
3654 if ( context.dontLoad ) {
3655 return NULL;
3656 }
3657 else
3658 throw "image not found";
3659 }
3660 else {
3661 const char* msgStart = "no suitable image found. Did find:";
3662 const char* delim = "\n\t";
3663 size_t allsizes = strlen(msgStart)+8;
3664 for (size_t i=0; i < exceptions.size(); ++i)
3665 allsizes += (strlen(exceptions[i]) + strlen(delim));
3666 char* fullMsg = new char[allsizes];
3667 strcpy(fullMsg, msgStart);
3668 for (size_t i=0; i < exceptions.size(); ++i) {
3669 strcat(fullMsg, delim);
3670 strcat(fullMsg, exceptions[i]);
3671 free((void*)exceptions[i]);
3672 }
3673 throw (const char*)fullMsg;
3674 }
3675 }
3676
3677
3678
3679 #if DYLD_SHARED_CACHE_SUPPORT
3680
3681
3682
3683 #if __i386__
3684 #define ARCH_NAME "i386"
3685 #define ARCH_CACHE_MAGIC "dyld_v1 i386"
3686 #elif __x86_64__
3687 #define ARCH_NAME "x86_64"
3688 #define ARCH_CACHE_MAGIC "dyld_v1 x86_64"
3689 #define ARCH_NAME_H "x86_64h"
3690 #define ARCH_CACHE_MAGIC_H "dyld_v1 x86_64h"
3691 #elif __ARM_ARCH_5TEJ__
3692 #define ARCH_NAME "armv5"
3693 #define ARCH_CACHE_MAGIC "dyld_v1 armv5"
3694 #elif __ARM_ARCH_6K__
3695 #define ARCH_NAME "armv6"
3696 #define ARCH_CACHE_MAGIC "dyld_v1 armv6"
3697 #elif __ARM_ARCH_7F__
3698 #define ARCH_NAME "armv7f"
3699 #define ARCH_CACHE_MAGIC "dyld_v1 armv7f"
3700 #elif __ARM_ARCH_7K__
3701 #define ARCH_NAME "armv7k"
3702 #define ARCH_CACHE_MAGIC "dyld_v1 armv7k"
3703 #elif __ARM_ARCH_7A__
3704 #define ARCH_NAME "armv7"
3705 #define ARCH_CACHE_MAGIC "dyld_v1 armv7"
3706 #elif __ARM_ARCH_7S__
3707 #define ARCH_NAME "armv7s"
3708 #define ARCH_CACHE_MAGIC "dyld_v1 armv7s"
3709 #elif __arm64__
3710 #define ARCH_NAME "arm64"
3711 #define ARCH_CACHE_MAGIC "dyld_v1 arm64"
3712 #endif
3713
3714
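// ask the kernel whether a shared cache is already mapped into this process's shared
// region (syscall 294, shared_region_check_np); on success returns 0 and writes the
// cache base address through start_address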
3715 static int __attribute__((noinline)) _shared_region_check_np(uint64_t* start_address)
3716 {
3717 if ( gLinkContext.sharedRegionMode == ImageLoader::kUseSharedRegion )
3718 return syscall(294, start_address);
3719 return -1;
3720 }
3721
3722
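// walk one chain of rebase locations within a page of the shared cache __DATA region
// (dyld_cache_slide_info2 format): at each location the bits under delta_mask encode the
// distance (in 4-byte units) to the next location in the chain, the remaining bits hold
// the unslid target value; a delta of 0 terminates the chain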
3723 static void rebaseChain(uint8_t* pageContent, uint16_t startOffset, uintptr_t slideAmount, const dyld_cache_slide_info2* slideInfo)
3724 {
3725 const uintptr_t deltaMask = (uintptr_t)(slideInfo->delta_mask);
3726 const uintptr_t valueMask = ~deltaMask;
3727 const uintptr_t valueAdd = (uintptr_t)(slideInfo->value_add);
3728 const unsigned deltaShift = __builtin_ctzll(deltaMask) - 2;
3729
3730 uint32_t pageOffset = startOffset;
3731 uint32_t delta = 1;
3732 while ( delta != 0 ) {
3733 uint8_t* loc = pageContent + pageOffset;
3734 uintptr_t rawValue = *((uintptr_t*)loc);
3735 delta = (uint32_t)((rawValue & deltaMask) >> deltaShift);
3736 uintptr_t value = (rawValue & valueMask);
3737 if ( value != 0 ) {
3738 value += valueAdd;
3739 value += slideAmount;
3740 }
3741 *((uintptr_t*)loc) = value;
3742 //dyld::log(" pageOffset=0x%03X, loc=%p, org value=0x%08llX, new value=0x%08llX, delta=0x%X\n", pageOffset, loc, (uint64_t)rawValue, (uint64_t)value, delta);
3743 pageOffset += delta;
3744 }
3745 }
3746
3747
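// register the cache file's code signature with the kernel (F_ADDFILESIGS_RETURN),
// verify every mapping is covered by the signed range, and mmap() the first pages to
// confirm they match what was read earlier with read()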
3748 static void loadAndCheckCodeSignature(int fd, uint32_t count, const shared_file_mapping_np mappings[],
3749 off_t codeSignatureOffset, size_t codeSignatureSize,
3750 const void *firstPages, size_t firstPagesSize)
3751 {
3752 // register code signature blob for whole dyld cache
3753 fsignatures_t siginfo;
3754 siginfo.fs_file_start = 0; // cache always starts at beginning of file
3755 siginfo.fs_blob_start = (void*)codeSignatureOffset;
3756 siginfo.fs_blob_size = codeSignatureSize;
3757
3758 int result = fcntl(fd, F_ADDFILESIGS_RETURN, &siginfo);
3759 // <rdar://problem/12891874> don't warn in chrooted case because mapping syscall is about to fail too
3760 if ( result == -1 ) {
3761 #if __IPHONE_OS_VERSION_MIN_REQUIRED
3762 throwf("code signature registration for shared cache failed with errno=%d\n", errno);
3763 #else
3764 if ( gLinkContext.verboseMapping )
3765 dyld::log("dyld: code signature registration for shared cache failed with errno=%d\n", errno);
3766 #endif
3767 }
3768 uint64_t codeSignedLength = siginfo.fs_file_start;
3769 for (uint32_t i = 0; i < count; ++i) {
3770 if ( (mappings[i].sfm_size > codeSignedLength) || (mappings[i].sfm_file_offset > (codeSignedLength - mappings[i].sfm_size)) )
3771 throw "dyld shared cache mapping not covered by code signature";
3772 }
3773
3774 void *fdata = xmmap(NULL, firstPagesSize, PROT_READ|PROT_EXEC, MAP_PRIVATE, fd, 0);
3775 if ( fdata == MAP_FAILED )
3776 throwf("mmap() errno=%d validating first page of shared cache", errno);
3777 if ( memcmp(fdata, firstPages, firstPagesSize) != 0 )
3778 throwf("mmap() page compare failed for shared cache");
3779 munmap(fdata, firstPagesSize);
3780 }
3781
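// hand the cache mappings and slide info to the kernel (syscall 438,
// shared_region_map_and_slide_np) when this process uses the shared region; otherwise map
// the cache privately with mmap() and apply the slide info to the __DATA pages here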
3782 static int __attribute__((noinline)) _shared_region_map_and_slide_np(int fd, uint32_t count, const shared_file_mapping_np mappings[],
3783 long slide, void* slideInfo, unsigned long slideInfoSize)
3784 {
3785 if ( gLinkContext.sharedRegionMode == ImageLoader::kUseSharedRegion ) {
3786 return syscall(438, fd, count, mappings, slide, slideInfo, slideInfoSize);
3787 }
3788
3789 // remove the shared region sub-map
3790 vm_deallocate(mach_task_self(), (vm_address_t)SHARED_REGION_BASE, SHARED_REGION_SIZE);
3791
3792 // notify gdb or other lurkers that this process is no longer using the shared region
3793 dyld::gProcessInfo->processDetachedFromSharedRegion = true;
3794
3795 // map cache just for this process with mmap()
3796 const shared_file_mapping_np* const start = mappings;
3797 const shared_file_mapping_np* const end = &mappings[count];
3798 for (const shared_file_mapping_np* p = start; p < end; ++p ) {
3799 void* mmapAddress = (void*)(uintptr_t)(p->sfm_address);
3800 size_t size = p->sfm_size;
3801 //dyld::log("dyld: mapping address %p with size 0x%08lX\n", mmapAddress, size);
3802 int protection = 0;
3803 if ( p->sfm_init_prot & VM_PROT_EXECUTE )
3804 protection |= PROT_EXEC;
3805 if ( p->sfm_init_prot & VM_PROT_READ )
3806 protection |= PROT_READ;
3807 if ( p->sfm_init_prot & VM_PROT_WRITE )
3808 protection |= PROT_WRITE;
3809 off_t offset = p->sfm_file_offset;
3810 if ( mmap(mmapAddress, size, protection, MAP_FIXED | MAP_PRIVATE, fd, offset) != mmapAddress ) {
3811 // failed to map some chunk of this shared cache file
3812 // clear shared region
3813 vm_deallocate(mach_task_self(), (vm_address_t)SHARED_REGION_BASE, SHARED_REGION_SIZE);
3814 // go back to not using shared region at all
3815 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
3816 if ( gLinkContext.verboseMapping ) {
3817 dyld::log("dyld: shared cached region cannot be mapped at address %p with size 0x%08lX\n",
3818 mmapAddress, size);
3819 }
3820 // return failure
3821 return -1;
3822 }
3823 }
3824
3825 // update all __DATA pages with slide info
3826 const dyld_cache_slide_info* slideInfoHeader = (dyld_cache_slide_info*)slideInfo;
3827 if ( slideInfoHeader->version == 2 ) {
3828 const dyld_cache_slide_info2* slideHeader = (dyld_cache_slide_info2*)slideInfo;
3829 const uint32_t page_size = slideHeader->page_size;
3830 const uint16_t* page_starts = (uint16_t*)((long)(slideInfo) + slideHeader->page_starts_offset);
3831 const uint16_t* page_extras = (uint16_t*)((long)(slideInfo) + slideHeader->page_extras_offset);
3832 const uintptr_t dataPagesStart = mappings[1].sfm_address;
3833 for (int i=0; i < slideHeader->page_starts_count; ++i) {
3834 uint8_t* page = (uint8_t*)(long)(dataPagesStart + (page_size*i));
3835 uint16_t pageEntry = page_starts[i];
3836 //dyld::log("page[%d]: page_starts[i]=0x%04X\n", i, pageEntry);
3837 if ( pageEntry == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE )
3838 continue;
3839 if ( pageEntry & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA ) {
3840 uint16_t chainIndex = (pageEntry & 0x3FFF);
3841 bool done = false;
3842 while ( !done ) {
3843 uint16_t info = page_extras[chainIndex];
3844 uint16_t pageStartOffset = (info & 0x3FFF)*4;
3845 //dyld::log(" chain[%d] pageOffset=0x%03X\n", chainIndex, pageStartOffset);
3846 rebaseChain(page, pageStartOffset, slide, slideHeader);
3847 done = (info & DYLD_CACHE_SLIDE_PAGE_ATTR_END);
3848 ++chainIndex;
3849 }
3850 }
3851 else {
3852 uint32_t pageOffset = pageEntry * 4;
3853 //dyld::log(" start pageOffset=0x%03X\n", pageOffset);
3854 rebaseChain(page, pageOffset, slide, slideHeader);
3855 }
3856 }
3857 }
3858 else if ( slide != 0 ) {
3859 const uintptr_t dataPagesStart = mappings[1].sfm_address;
3860 const uint16_t* toc = (uint16_t*)((long)(slideInfoHeader) + slideInfoHeader->toc_offset);
3861 const uint8_t* entries = (uint8_t*)((long)(slideInfoHeader) + slideInfoHeader->entries_offset);
3862 for(uint32_t i=0; i < slideInfoHeader->toc_count; ++i) {
3863 const uint8_t* entry = &entries[toc[i]*slideInfoHeader->entries_size];
3864 const uint8_t* page = (uint8_t*)(long)(dataPagesStart + (4096*i));
3865 //dyld::log("page=%p toc[%d]=%d entries=%p\n", page, i, toc[i], entry);
3866 for(int j=0; j < 128; ++j) {
3867 uint8_t b = entry[j];
3868 //dyld::log(" entry[%d] = 0x%02X\n", j, b);
3869 if ( b != 0 ) {
3870 for(int k=0; k < 8; ++k) {
3871 if ( b & (1<<k) ) {
3872 uintptr_t* p = (uintptr_t*)(page + j*8*4 + k*4);
3873 uintptr_t value = *p;
3874 //dyld::log(" *%p was 0x%lX will be 0x%lX\n", p, value, value+sSharedCacheSlide);
3875 *p = value + slide;
3876 }
3877 }
3878 }
3879 }
3880 }
3881 }
3882
3883 // successfully mapped shared cache for just this process
3884 gLinkContext.sharedRegionMode = ImageLoader::kUsePrivateSharedRegion;
3885
3886 return 0;
3887 }
3888
3889
3890 const void* imMemorySharedCacheHeader()
3891 {
3892 return sSharedCache;
3893 }
3894
3895 const char* getStandardSharedCacheFilePath()
3896 {
3897 #if __IPHONE_OS_VERSION_MIN_REQUIRED
3898 return IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME;
3899 #else
3900 #if __x86_64__
3901 if ( sHaswell ) {
3902 const char* path2 = MACOSX_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H;
3903 struct stat statBuf;
3904 if ( my_stat(path2, &statBuf) == 0 )
3905 return path2;
3906 }
3907 #endif
3908 return MACOSX_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME;
3909 #endif
3910 }
3911
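// open the shared cache file for this architecture in sSharedCacheDir; on Haswell try the
// x86_64h cache first, and on embedded platforms append the .development extension when
// the enable-dylibs-to-override-cache marker calls for it or no production cache exists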
3912 int openSharedCacheFile()
3913 {
3914 char path[MAXPATHLEN];
3915 strlcpy(path, sSharedCacheDir, MAXPATHLEN);
3916 strlcat(path, "/", MAXPATHLEN);
3917 #if __x86_64__
3918 if ( sHaswell ) {
3919 strlcat(path, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME_H, MAXPATHLEN);
3920 int fd = my_open(path, O_RDONLY, 0);
3921 if ( fd != -1 ) {
3922 if ( gLinkContext.verboseMapping )
3923 dyld::log("dyld: Mapping%s shared cache from %s\n", (gLinkContext.sharedRegionMode == ImageLoader::kUsePrivateSharedRegion) ? " private": "", path);
3924 return fd;
3925 }
3926 strlcpy(path, sSharedCacheDir, MAXPATHLEN);
3927 }
3928 #endif
3929 strlcat(path, DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, MAXPATHLEN);
3930 #if __IPHONE_OS_VERSION_MIN_REQUIRED
3931 struct stat enableStatBuf;
3932 struct stat devCacheStatBuf;
3933 struct stat prodCacheStatBuf;
3934 if ( ((my_stat(IPHONE_DYLD_SHARED_CACHE_DIR "enable-dylibs-to-override-cache", &enableStatBuf) == 0)
3935 && (enableStatBuf.st_size < ENABLE_DYLIBS_TO_OVERRIDE_CACHE_SIZE)
3936 && (my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME DYLD_SHARED_CACHE_DEVELOPMENT_EXT, &devCacheStatBuf) == 0))
3937 || (my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &prodCacheStatBuf) != 0))
3938 strlcat(path, DYLD_SHARED_CACHE_DEVELOPMENT_EXT, MAXPATHLEN);
3939 #endif
3940 if ( gLinkContext.verboseMapping )
3941 dyld::log("dyld: Mapping%s shared cache from %s\n", (gLinkContext.sharedRegionMode == ImageLoader::kUsePrivateSharedRegion) ? " private": "", path);
3942 return my_open(path, O_RDONLY, 0);
3943 }
3944
3945
3946 static void getCacheBounds(uint32_t mappingsCount, const shared_file_mapping_np mappings[], uint64_t& lowAddress, uint64_t& highAddress)
3947 {
3948 lowAddress = 0;
3949 highAddress = 0;
3950 for(uint32_t i=0; i < mappingsCount; ++i) {
3951 if ( lowAddress == 0 ) {
3952 lowAddress = mappings[i].sfm_address;
3953 highAddress = mappings[i].sfm_address + mappings[i].sfm_size;
3954 }
3955 else {
3956 if ( mappings[i].sfm_address < lowAddress )
3957 lowAddress = mappings[i].sfm_address;
3958 if ( (mappings[i].sfm_address + mappings[i].sfm_size) > highAddress )
3959 highAddress = mappings[i].sfm_address + mappings[i].sfm_size;
3960 }
3961 }
3962 }
3963
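// pick a random ASLR slide that keeps all cache mappings inside the shared region, and
// add it to each mapping's address; returns the chosen slide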
3964 static long pickCacheSlide(uint32_t mappingsCount, shared_file_mapping_np mappings[])
3965 {
3966 #if __x86_64__
3967 // x86_64 has two memory regions:
3968 // 256MB at 0x00007FFF70000000
3969 // 1024MB at 0x00007FFF80000000
3970 // Some old shared caches have r/w region after rx region, so all regions slide within 1GB range
3971 // Newer shared caches have r/w region based at 0x7FFF70000000 and r/o regions at 0x7FFF80000000, so each part has max slide
3972 if ( (mappingsCount >= 3) && (mappings[1].sfm_init_prot == (VM_PROT_READ|VM_PROT_WRITE)) && (mappings[1].sfm_address == 0x00007FFF70000000) ) {
3973 const uint64_t rwSize = mappings[1].sfm_size;
3974 const uint64_t rwSlop = 0x10000000ULL - rwSize;
3975 const uint64_t roSize = (mappings[2].sfm_address + mappings[2].sfm_size) - mappings[0].sfm_address;
3976 const uint64_t roSlop = 0x40000000ULL - roSize;
3977 const uint64_t space = (rwSlop < roSlop) ? rwSlop : roSlop;
3978
3979 // choose new random slide
3980 long slide = (arc4random() % space) & (-4096);
3981 //dyld::log("rwSlop=0x%0llX, roSlop=0x%0llX\n", rwSlop, roSlop);
3982 //dyld::log("space=0x%0llX, slide=0x%0lX\n", space, slide);
3983
3984 // update mappings
3985 for(uint32_t i=0; i < mappingsCount; ++i) {
3986 mappings[i].sfm_address += slide;
3987 }
3988
3989 return slide;
3990 }
3991 // else fall through to handle old style cache
3992 #endif
3993 // get bounds of cache
3994 uint64_t lowAddress;
3995 uint64_t highAddress;
3996 getCacheBounds(mappingsCount, mappings, lowAddress, highAddress);
3997
3998 // find slop space
3999 const uint64_t space = (SHARED_REGION_BASE + SHARED_REGION_SIZE) - highAddress;
4000
4001 // choose new random slide
4002 #if __arm__
4003 // <rdar://problem/20848977> change shared cache slide for 32-bit arm to always be 16k aligned
4004 long slide = ((arc4random() % space) & (-16384));
4005 #else
4006 long slide = dyld_page_trunc(arc4random() % space);
4007 #endif
4008 //dyld::log("slideSpace=0x%0llX\n", space);
4009 //dyld::log("slide=0x%0lX\n", slide);
4010
4011 // update mappings
4012 for(uint32_t i=0; i < mappingsCount; ++i) {
4013 mappings[i].sfm_address += slide;
4014 }
4015
4016 return slide;
4017 }
4018
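// map the dyld shared cache into this process: reuse a compatible cache already present in
// the shared region, otherwise open the cache file, validate its mappings and code signature,
// pick a random slide (if the cache supports sliding), and map it with
// _shared_region_map_and_slide_np()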
4019 static void mapSharedCache()
4020 {
4021 uint64_t cacheBaseAddress = 0;
4022 // quick check if a cache is already mapped into shared region
4023 if ( _shared_region_check_np(&cacheBaseAddress) == 0 ) {
4024 sSharedCache = (dyld_cache_header*)cacheBaseAddress;
4025 // if we don't understand the currently mapped shared cache, then ignore
4026 #if __x86_64__
4027 const char* magic = (sHaswell ? ARCH_CACHE_MAGIC_H : ARCH_CACHE_MAGIC);
4028 #else
4029 const char* magic = ARCH_CACHE_MAGIC;
4030 #endif
4031 if ( strcmp(sSharedCache->magic, magic) != 0 ) {
4032 sSharedCache = NULL;
4033 if ( gLinkContext.verboseMapping )
4034 dyld::log("dyld: existing shared cache in memory is not compatible\n");
4035 // return unconditionally so the NULL cache header is never dereferenced below
4036 return;
4037 }
4038 dyld::gProcessInfo->sharedCacheBaseAddress = cacheBaseAddress;
4039 // check if cache file is slidable
4040 const dyld_cache_header* header = sSharedCache;
4041 if ( (header->mappingOffset >= 0x48) && (header->slideInfoSize != 0) ) {
4042 // solve for slide by comparing loaded address to address of first region
4043 const uint8_t* loadedAddress = (uint8_t*)sSharedCache;
4044 const dyld_cache_mapping_info* const mappings = (dyld_cache_mapping_info*)(loadedAddress+header->mappingOffset);
4045 const uint8_t* preferredLoadAddress = (uint8_t*)(long)(mappings[0].address);
4046 sSharedCacheSlide = loadedAddress - preferredLoadAddress;
4047 dyld::gProcessInfo->sharedCacheSlide = sSharedCacheSlide;
4048 //dyld::log("sSharedCacheSlide=0x%08lX, loadedAddress=%p, preferredLoadAddress=%p\n", sSharedCacheSlide, loadedAddress, preferredLoadAddress);
4049 }
4050 // if cache has a uuid, copy it
4051 if ( header->mappingOffset >= 0x68 ) {
4052 memcpy(dyld::gProcessInfo->sharedCacheUUID, header->uuid, 16);
4053 }
4054 // verbose logging
4055 if ( gLinkContext.verboseMapping ) {
4056 dyld::log("dyld: re-using existing %s shared cache mapping\n", (header->cacheType == kDyldSharedCacheTypeDevelopment ? "development" : "production"));
4057 }
4058 if (header->mappingOffset >= 0x68) {
4059 dyld_kernel_image_info_t kernelCacheInfo;
4060 memcpy(&kernelCacheInfo.uuid[0], &sSharedCache->uuid[0], sizeof(uuid_t));
4061 kernelCacheInfo.load_addr = (uint64_t)sSharedCache;
4062 kernelCacheInfo.fsobjid.fid_objno = 0;
4063 kernelCacheInfo.fsobjid.fid_generation = 0;
4064 kernelCacheInfo.fsid.val[0] = 0;
4065 kernelCacheInfo.fsid.val[1] = 0;
4066 task_register_dyld_shared_cache_image_info(mach_task_self(), kernelCacheInfo, false, false);
4067 }
4068 }
4069 else {
4070 #if __i386__ || __x86_64__
4071 // <rdar://problem/5925940> Safe Boot should disable dyld shared cache
4072 // if we are in safe-boot mode and the cache was not made during this boot cycle,
4073 // delete the cache file
4074 uint32_t safeBootValue = 0;
4075 size_t safeBootValueSize = sizeof(safeBootValue);
4076 if ( (sysctlbyname("kern.safeboot", &safeBootValue, &safeBootValueSize, NULL, 0) == 0) && (safeBootValue != 0) ) {
4077 // user booted machine in safe-boot mode
4078 struct stat dyldCacheStatInfo;
4079 // Don't use custom DYLD_SHARED_CACHE_DIR if provided, use standard path
4080 if ( my_stat(MACOSX_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &dyldCacheStatInfo) == 0 ) {
4081 struct timeval bootTimeValue;
4082 size_t bootTimeValueSize = sizeof(bootTimeValue);
4083 if ( (sysctlbyname("kern.boottime", &bootTimeValue, &bootTimeValueSize, NULL, 0) == 0) && (bootTimeValue.tv_sec != 0) ) {
4084 // if the cache file was created before this boot, then throw it away and let it rebuild itself
4085 if ( dyldCacheStatInfo.st_mtime < bootTimeValue.tv_sec ) {
4086 ::unlink(MACOSX_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME);
4087 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
4088 return;
4089 }
4090 }
4091 }
4092 }
4093 #endif
4094 // map in shared cache to shared region
4095 int fd = openSharedCacheFile();
4096 if ( fd != -1 ) {
4097 uint8_t firstPages[8192];
4098 if ( ::read(fd, firstPages, 8192) == 8192 ) {
4099 dyld_cache_header* header = (dyld_cache_header*)firstPages;
4100 #if __x86_64__
4101 const char* magic = (sHaswell ? ARCH_CACHE_MAGIC_H : ARCH_CACHE_MAGIC);
4102 #else
4103 const char* magic = ARCH_CACHE_MAGIC;
4104 #endif
4105 if ( strcmp(header->magic, magic) == 0 ) {
4106 const dyld_cache_mapping_info* const fileMappingsStart = (dyld_cache_mapping_info*)&firstPages[header->mappingOffset];
4107 const dyld_cache_mapping_info* const fileMappingsEnd = &fileMappingsStart[header->mappingCount];
4108 #if __IPHONE_OS_VERSION_MIN_REQUIRED
4109 if ( (header->mappingCount != 3)
4110 || (header->mappingOffset > 256)
4111 || (fileMappingsStart[0].fileOffset != 0)
4112 || (fileMappingsStart[0].address != SHARED_REGION_BASE)
4113 || ((fileMappingsStart[0].address + fileMappingsStart[0].size) > fileMappingsStart[1].address)
4114 || ((fileMappingsStart[1].address + fileMappingsStart[1].size) > fileMappingsStart[2].address)
4115 || ((fileMappingsStart[0].fileOffset + fileMappingsStart[0].size) != fileMappingsStart[1].fileOffset)
4116 || ((fileMappingsStart[1].fileOffset + fileMappingsStart[1].size) != fileMappingsStart[2].fileOffset) )
4117 throw "dyld shared cache file is invalid";
4118 #endif
4119 shared_file_mapping_np mappings[header->mappingCount];
4120 unsigned int mappingCount = header->mappingCount;
4121 int readWriteMappingIndex = -1;
4122 int readOnlyMappingIndex = -1;
4123 // validate that the cache file has not been truncated
4124 bool goodCache = false;
4125 struct stat stat_buf;
4126 if ( fstat(fd, &stat_buf) == 0 ) {
4127 goodCache = true;
4128 int i=0;
4129 for (const dyld_cache_mapping_info* p = fileMappingsStart; p < fileMappingsEnd; ++p, ++i) {
4130 mappings[i].sfm_address = p->address;
4131 mappings[i].sfm_size = p->size;
4132 mappings[i].sfm_file_offset = p->fileOffset;
4133 mappings[i].sfm_max_prot = p->maxProt;
4134 mappings[i].sfm_init_prot = p->initProt;
4135 // rdar://problem/5694507 old update_dyld_shared_cache tool could make a cache file
4136 // that is not page aligned, but otherwise ok.
4137 if ( p->fileOffset+p->size > (uint64_t)(stat_buf.st_size+4095 & (-4096)) ) {
4138 dyld::log("dyld: shared cached file is corrupt: %s" DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME "\n", sSharedCacheDir);
4139 goodCache = false;
4140 }
4141 if ( (mappings[i].sfm_init_prot & (VM_PROT_READ|VM_PROT_WRITE)) == (VM_PROT_READ|VM_PROT_WRITE) ) {
4142 readWriteMappingIndex = i;
4143 }
4144 if ( mappings[i].sfm_init_prot == VM_PROT_READ ) {
4145 readOnlyMappingIndex = i;
4146 }
4147 }
4148 // if shared cache is code signed, add a mapping for the code signature
4149 uint64_t signatureSize = header->codeSignatureSize;
4150 // zero size in header means signature runs to end-of-file
4151 if ( signatureSize == 0 )
4152 signatureSize = stat_buf.st_size - header->codeSignatureOffset;
4153 if ( signatureSize != 0 ) {
4154 #if __arm__ || __arm64__
4155 size_t alignedSignatureSize = (signatureSize+16383) & (-16384);
4156 #else
4157 size_t alignedSignatureSize = (signatureSize+4095) & (-4096);
4158 #endif
4159 // <rdar://problem/23188073> validate code signature covers entire shared cache
4160 loadAndCheckCodeSignature(fd, mappingCount, mappings, header->codeSignatureOffset, alignedSignatureSize, firstPages, sizeof(firstPages));
4161 }
4162 #if __IPHONE_OS_VERSION_MIN_REQUIRED
4163 else {
4164 throw "dyld shared cache file not code signed";
4165 }
4166 #endif
4167 }
4168 #if __MAC_OS_X_VERSION_MIN_REQUIRED
4169 // sanity check that /usr/lib/libSystem.B.dylib stat() info matches cache
4170 if ( header->imagesCount * sizeof(dyld_cache_image_info) + header->imagesOffset < 8192 ) {
4171 bool foundLibSystem = false;
4172 if ( my_stat("/usr/lib/libSystem.B.dylib", &stat_buf) == 0 ) {
4173 const dyld_cache_image_info* images = (dyld_cache_image_info*)&firstPages[header->imagesOffset];
4174 const dyld_cache_image_info* const imagesEnd = &images[header->imagesCount];
4175 for (const dyld_cache_image_info* p = images; p < imagesEnd; ++p) {
4176 if ( ((time_t)p->modTime == stat_buf.st_mtime) && ((ino_t)p->inode == stat_buf.st_ino) ) {
4177 foundLibSystem = true;
4178 break;
4179 }
4180 }
4181 }
4182 if ( !sSharedCacheIgnoreInodeAndTimeStamp && !foundLibSystem ) {
4183 dyld::log("dyld: shared cached file was built against a different libSystem.dylib, ignoring cache.\n"
4184 "to update dyld shared cache run: 'sudo update_dyld_shared_cache' then reboot.\n");
4185 goodCache = false;
4186 }
4187 }
4188 #endif
4189 #if __IPHONE_OS_VERSION_MIN_REQUIRED
4190 {
4191 uint64_t lowAddress;
4192 uint64_t highAddress;
4193 getCacheBounds(mappingCount, mappings, lowAddress, highAddress);
4194 if ( (highAddress-lowAddress) > SHARED_REGION_SIZE )
4195 throw "dyld shared cache is too big to fit in shared region";
4196 }
4197 #endif
4198
4199 if ( goodCache && (readWriteMappingIndex == -1) ) {
4200 dyld::log("dyld: shared cached file is missing read/write mapping: %s" DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME "\n", sSharedCacheDir);
4201 goodCache = false;
4202 }
4203 if ( goodCache && (readOnlyMappingIndex == -1) ) {
4204 dyld::log("dyld: shared cached file is missing read-only mapping: %s" DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME "\n", sSharedCacheDir);
4205 goodCache = false;
4206 }
4207 if ( goodCache ) {
4208 long cacheSlide = 0;
4209 void* slideInfo = (void*)(long)(mappings[readOnlyMappingIndex].sfm_address + (header->slideInfoOffset - mappings[readOnlyMappingIndex].sfm_file_offset));
4210 uint64_t slideInfoSize = header->slideInfoSize;
4211 // check if shared cache contains slide info
4212 if ( slideInfoSize != 0 ) {
4213 // <rdar://problem/8611968> don't slide shared cache if ASLR disabled (main executable didn't slide)
4214 if ( sMainExecutable->isPositionIndependentExecutable() && (sMainExecutable->getSlide() == 0) ) {
4215 cacheSlide = 0;
4216 }
4217 else {
4218 // generate random slide amount
4219 cacheSlide = pickCacheSlide(mappingCount, mappings);
4220 }
4221
4222 slideInfo = (void*)((uint8_t*)slideInfo + cacheSlide);
4223 // add VM_PROT_SLIDE bit to __DATA area of cache
4224 mappings[readWriteMappingIndex].sfm_max_prot |= VM_PROT_SLIDE;
4225 mappings[readWriteMappingIndex].sfm_init_prot |= VM_PROT_SLIDE;
4226 }
4227 if ( gLinkContext.verboseMapping ) {
4228 dyld::log("dyld: calling _shared_region_map_and_slide_np() with regions:\n");
4229 for (int i=0; i < mappingCount; ++i) {
4230 dyld::log(" address=0x%08llX, size=0x%08llX, fileOffset=0x%08llX\n", mappings[i].sfm_address, mappings[i].sfm_size, mappings[i].sfm_file_offset);
4231 }
4232 }
4233
4234 if (_shared_region_map_and_slide_np(fd, mappingCount, mappings, cacheSlide, slideInfo, slideInfoSize) == 0) {
4235 // successfully mapped cache into shared region
4236 sSharedCache = (dyld_cache_header*)mappings[0].sfm_address;
4237 sSharedCacheSlide = cacheSlide;
4238 dyld::gProcessInfo->sharedCacheSlide = cacheSlide;
4239 dyld::gProcessInfo->sharedCacheBaseAddress = mappings[0].sfm_address;
4240 //dyld::log("sSharedCache=%p sSharedCacheSlide=0x%08lX\n", sSharedCache, sSharedCacheSlide);
4241 // if cache has a uuid, copy it
4242 if ( header->mappingOffset >= 0x68 ) {
4243 const bool privateSharedCache = gLinkContext.sharedRegionMode == ImageLoader::kUsePrivateSharedRegion;
4244 memcpy(dyld::gProcessInfo->sharedCacheUUID, header->uuid, 16);
4245 dyld_kernel_image_info_t kernelCacheInfo;
4246 memcpy(&kernelCacheInfo.uuid[0], &sSharedCache->uuid[0], sizeof(uuid_t));
4247 kernelCacheInfo.load_addr = (uint64_t)sSharedCache;
4248 kernelCacheInfo.fsobjid.fid_objno = 0;
4249 kernelCacheInfo.fsobjid.fid_generation = 0;
4250 kernelCacheInfo.fsid.val[0] = 0;
4251 kernelCacheInfo.fsid.val[1] = 0;
4252 if (privateSharedCache) {
4253 kernelCacheInfo.fsobjid = *(fsobj_id_t*)(&stat_buf.st_ino);
4254 struct statfs statfs_buf;
4255 if ( fstatfs(fd, &statfs_buf) == 0 ) {
4256 kernelCacheInfo.fsid = statfs_buf.f_fsid;
4257 }
4258 }
4259 task_register_dyld_shared_cache_image_info(mach_task_self(), kernelCacheInfo, false, privateSharedCache);
4260 }
4261 }
4262 else {
4263 #if __IPHONE_OS_VERSION_MIN_REQUIRED
4264 throwf("dyld shared cache could not be mapped. errno=%d, slide=0x%08lX, slideInfo=%p, slideInfoSize=0x%08llX, mappingCount=%u, "
4265 "address/size/off/init/max [0]=0x%0llX/0x%0llX/0x%0llX/0x%02X/0x%02X, [1]=0x%0llX/0x%0llX/0x%0llX/0x%02X/0x%02X, [2]=0x%0llX/0x%0llX/0x%0llX/0x%02X/0x%02X",
4266 errno, cacheSlide, slideInfo, slideInfoSize, mappingCount,
4267 mappings[0].sfm_address, mappings[0].sfm_size, mappings[0].sfm_file_offset, mappings[0].sfm_init_prot, mappings[0].sfm_max_prot,
4268 mappings[1].sfm_address, mappings[1].sfm_size, mappings[1].sfm_file_offset, mappings[1].sfm_init_prot, mappings[1].sfm_max_prot,
4269 mappings[2].sfm_address, mappings[2].sfm_size, mappings[2].sfm_file_offset, mappings[2].sfm_init_prot, mappings[2].sfm_max_prot);
4270 #endif
4271 if ( gLinkContext.verboseMapping )
4272 dyld::log("dyld: shared cached file could not be mapped\n");
4273 }
4274 }
4275 }
4276 else {
4277 if ( gLinkContext.verboseMapping )
4278 dyld::log("dyld: shared cached file is invalid\n");
4279 }
4280 }
4281 else {
4282 if ( gLinkContext.verboseMapping )
4283 dyld::log("dyld: shared cached file cannot be read\n");
4284 }
4285 close(fd);
4286 }
4287 else {
4288 if ( gLinkContext.verboseMapping )
4289 dyld::log("dyld: shared cached file cannot be opened\n");
4290 }
4291 }
4292
4293 // remember if dyld loaded at same address as when cache built
4294 if ( sSharedCache != NULL ) {
4295 gLinkContext.dyldLoadedAtSameAddressNeededBySharedCache = ((uintptr_t)(sSharedCache->dyldBaseAddress) == (uintptr_t)&_mh_dylinker_header);
4296 }
4297
4298 // tell gdb where the shared cache is
4299 if ( sSharedCache != NULL ) {
4300 const dyld_cache_mapping_info* const start = (dyld_cache_mapping_info*)((uint8_t*)sSharedCache + sSharedCache->mappingOffset);
4301 dyld_shared_cache_ranges.sharedRegionsCount = sSharedCache->mappingCount;
4302 // only room to tell gdb about first four regions
4303 if ( dyld_shared_cache_ranges.sharedRegionsCount > 4 )
4304 dyld_shared_cache_ranges.sharedRegionsCount = 4;
4305 const dyld_cache_mapping_info* const end = &start[dyld_shared_cache_ranges.sharedRegionsCount];
4306 int index = 0;
4307 for (const dyld_cache_mapping_info* p = start; p < end; ++p, ++index ) {
4308 dyld_shared_cache_ranges.ranges[index].start = p->address+sSharedCacheSlide;
4309 dyld_shared_cache_ranges.ranges[index].length = p->size;
4310 if ( gLinkContext.verboseMapping ) {
4311 dyld::log(" 0x%08llX->0x%08llX %s%s%s init=%x, max=%x\n",
4312 p->address+sSharedCacheSlide, p->address+sSharedCacheSlide+p->size-1,
4313 ((p->initProt & VM_PROT_READ) ? "read " : ""),
4314 ((p->initProt & VM_PROT_WRITE) ? "write " : ""),
4315 ((p->initProt & VM_PROT_EXECUTE) ? "execute " : ""), p->initProt, p->maxProt);
4316 }
4317 #if __i386__
4318 // If a non-writable, executable region is found in the R/W shared region, it holds __IMPORT segments
4319 // from an old cache. Make it writable, since dyld no longer supports turning W on and off as it binds
4320 if ( (p->initProt == (VM_PROT_READ|VM_PROT_EXECUTE)) && ((p->address & 0xF0000000) == 0xA0000000) ) {
4321 if ( p->size != 0 ) {
4322 vm_prot_t prot = VM_PROT_EXECUTE | VM_PROT_READ | VM_PROT_WRITE;
4323 vm_protect(mach_task_self(), p->address, p->size, false, prot);
4324 if ( gLinkContext.verboseMapping ) {
4325 dyld::log("%18s at 0x%08llX->0x%08llX altered permissions to %c%c%c\n", "", p->address,
4326 p->address+p->size-1,
4327 (prot & PROT_READ) ? 'r' : '.', (prot & PROT_WRITE) ? 'w' : '.', (prot & PROT_EXEC) ? 'x' : '.' );
4328 }
4329 }
4330 }
4331 #endif
4332 }
4333 if ( gLinkContext.verboseMapping ) {
4334 // list the code blob
4335 dyld_cache_header* header = (dyld_cache_header*)sSharedCache;
4336 uint64_t signatureSize = header->codeSignatureSize;
4337 // zero size in header means signature runs to end-of-file
4338 if ( signatureSize == 0 ) {
4339 struct stat stat_buf;
4340 // FIXME: need size of cache file actually used
4341 if ( my_stat(IPHONE_DYLD_SHARED_CACHE_DIR DYLD_SHARED_CACHE_BASE_NAME ARCH_NAME, &stat_buf) == 0 )
4342 signatureSize = stat_buf.st_size - header->codeSignatureOffset;
4343 }
4344 if ( signatureSize != 0 ) {
4345 const dyld_cache_mapping_info* const last = &start[dyld_shared_cache_ranges.sharedRegionsCount-1];
4346 uint64_t codeBlobStart = last->address + last->size;
4347 dyld::log(" 0x%08llX->0x%08llX (code signature)\n", codeBlobStart, codeBlobStart+signatureSize);
4348 }
4349 }
4350 #if SUPPORT_ACCELERATE_TABLES
4351 if ( !dylibsCanOverrideCache() && !sDisableAcceleratorTables && (sSharedCache->mappingOffset > 0x80) && (sSharedCache->accelerateInfoAddr != 0) ) {
4352 sAllCacheImagesProxy = ImageLoaderMegaDylib::makeImageLoaderMegaDylib(sSharedCache, sSharedCacheSlide, gLinkContext);
4353 }
4354 #endif
4355 }
4356 }
4357 #endif // #if DYLD_SHARED_CACHE_SUPPORT
4358
4359
4360
4361 // creates a clone of an image when NSLinkModule is called a second time on a bundle
4362 ImageLoader* cloneImage(ImageLoader* image)
4363 {
4364 // open file (automagically closed when this function exits)
4365 FileOpener file(image->getPath());
4366
4367 struct stat stat_buf;
4368 if ( fstat(file.getFileDescriptor(), &stat_buf) == -1)
4369 throw "stat error";
4370
4371 dyld::LoadContext context;
4372 context.useSearchPaths = false;
4373 context.useFallbackPaths = false;
4374 context.useLdLibraryPath = false;
4375 context.implicitRPath = false;
4376 context.matchByInstallName = false;
4377 context.dontLoad = false;
4378 context.mustBeBundle = true;
4379 context.mustBeDylib = false;
4380 context.canBePIE = false;
4381 context.origin = NULL;
4382 context.rpath = NULL;
4383 return loadPhase6(file.getFileDescriptor(), stat_buf, image->getPath(), context);
4384 }
4385
4386
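// instantiate an image from a mach-o file already in memory: if it starts with a fat
// header, pick the best-matching slice first; bundles are not added to the global image
// list until they are linked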
4387 ImageLoader* loadFromMemory(const uint8_t* mem, uint64_t len, const char* moduleName)
4388 {
4389 // if fat wrapper, find usable sub-file
4390 const fat_header* memStartAsFat = (fat_header*)mem;
4391 uint64_t fileOffset = 0;
4392 uint64_t fileLength = len;
4393 if ( memStartAsFat->magic == OSSwapBigToHostInt32(FAT_MAGIC) ) {
4394 if ( fatFindBest(memStartAsFat, &fileOffset, &fileLength) ) {
4395 mem = &mem[fileOffset];
4396 len = fileLength;
4397 }
4398 else {
4399 throw "no matching architecture in universal wrapper";
4400 }
4401 }
4402
4403 // try each loader
4404 if ( isCompatibleMachO(mem, moduleName) ) {
4405 ImageLoader* image = ImageLoaderMachO::instantiateFromMemory(moduleName, (macho_header*)mem, len, gLinkContext);
4406 // don't add bundles to global list; they can be loaded but not yet linked. When linked, the bundle will be added to the list
4407 if ( ! image->isBundle() )
4408 addImage(image);
4409 return image;
4410 }
4411
4412 // try other file formats here...
4413
4414 // throw error about what was found
4415 switch (*(uint32_t*)mem) {
4416 case MH_MAGIC:
4417 case MH_CIGAM:
4418 case MH_MAGIC_64:
4419 case MH_CIGAM_64:
4420 throw "mach-o, but wrong architecture";
4421 default:
4422 throwf("unknown file type, first eight bytes: 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X 0x%02X",
4423 mem[0], mem[1], mem[2], mem[3], mem[4], mem[5], mem[6],mem[7]);
4424 }
4425 }
4426
4427
4428 void registerAddCallback(ImageCallback func)
4429 {
4430 // now add to list to get notified when any more images are added
4431 sAddImageCallbacks.push_back(func);
4432
4433 // call callback with all existing images
4434 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
4435 ImageLoader* image = *it;
4436 if ( image->getState() >= dyld_image_state_bound && image->getState() < dyld_image_state_terminated )
4437 (*func)(image->machHeader(), image->getSlide());
4438 }
4439 #if SUPPORT_ACCELERATE_TABLES
4440 if ( sAllCacheImagesProxy != NULL ) {
4441 dyld_image_info infos[allImagesCount()+1];
4442 unsigned cacheCount = sAllCacheImagesProxy->appendImagesToNotify(dyld_image_state_bound, true, infos);
4443 for (unsigned i=0; i < cacheCount; ++i) {
4444 (*func)(infos[i].imageLoadAddress, sSharedCacheSlide);
4445 }
4446 }
4447 #endif
4448 }
4449
4450 void registerRemoveCallback(ImageCallback func)
4451 {
4452 // <rdar://problem/15025198> ignore calls to register a notification during a notification
4453 if ( sRemoveImageCallbacksInUse )
4454 return;
4455 sRemoveImageCallbacks.push_back(func);
4456 }
4457
4458 void clearErrorMessage()
4459 {
4460 error_string[0] = '\0';
4461 }
4462
4463 void setErrorMessage(const char* message)
4464 {
4465 // save off error message in global buffer for CrashReporter to find
4466 strlcpy(error_string, message, sizeof(error_string));
4467 }
4468
4469 const char* getErrorMessage()
4470 {
4471 return error_string;
4472 }
4473
4474 void halt(const char* message)
4475 {
4476 dyld::log("dyld: %s\n", message);
4477 setErrorMessage(message);
4478 dyld::gProcessInfo->errorMessage = error_string;
4479 if ( !gLinkContext.startedInitializingMainExecutable )
4480 dyld::gProcessInfo->terminationFlags = 1;
4481 else
4482 dyld::gProcessInfo->terminationFlags = 0;
4483
4484 char payloadBuffer[EXIT_REASON_PAYLOAD_MAX_LEN];
4485 dyld_abort_payload* payload = (dyld_abort_payload*)payloadBuffer;
4486 payload->version = 1;
4487 payload->flags = gLinkContext.startedInitializingMainExecutable ? 0 : 1;
4488 payload->targetDylibPathOffset = 0;
4489 payload->clientPathOffset = 0;
4490 payload->symbolOffset = 0;
4491 int payloadSize = sizeof(dyld_abort_payload);
4492
4493 if ( dyld::gProcessInfo->errorTargetDylibPath != NULL ) {
4494 payload->targetDylibPathOffset = payloadSize;
4495 payloadSize += strlcpy(&payloadBuffer[payloadSize], dyld::gProcessInfo->errorTargetDylibPath, sizeof(payloadBuffer)-payloadSize) + 1;
4496 }
4497 if ( dyld::gProcessInfo->errorClientOfDylibPath != NULL ) {
4498 payload->clientPathOffset = payloadSize;
4499 payloadSize += strlcpy(&payloadBuffer[payloadSize], dyld::gProcessInfo->errorClientOfDylibPath, sizeof(payloadBuffer)-payloadSize) + 1;
4500 }
4501 if ( dyld::gProcessInfo->errorSymbol != NULL ) {
4502 payload->symbolOffset = payloadSize;
4503 payloadSize += strlcpy(&payloadBuffer[payloadSize], dyld::gProcessInfo->errorSymbol, sizeof(payloadBuffer)-payloadSize) + 1;
4504 }
4505 char truncMessage[EXIT_REASON_USER_DESC_MAX_LEN];
4506 strlcpy(truncMessage, message, EXIT_REASON_USER_DESC_MAX_LEN);
4507 abort_with_payload(OS_REASON_DYLD, dyld::gProcessInfo->errorKind ? dyld::gProcessInfo->errorKind : DYLD_EXIT_REASON_OTHER, payloadBuffer, payloadSize, truncMessage, 0);
4508 }
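#if 0
// Illustrative sketch only (not part of dyld): how a crash-report consumer might walk
// the OS_REASON_DYLD payload built in halt() above. String offsets are relative to the
// start of the payload buffer; a zero offset means the string was not recorded.
static void dumpAbortPayload(const dyld_abort_payload* payload)
{
	const char* base = (const char*)payload;
	if ( payload->targetDylibPathOffset != 0 )
		dyld::log("target dylib: %s\n", &base[payload->targetDylibPathOffset]);
	if ( payload->clientPathOffset != 0 )
		dyld::log("client binary: %s\n", &base[payload->clientPathOffset]);
	if ( payload->symbolOffset != 0 )
		dyld::log("symbol: %s\n", &base[payload->symbolOffset]);
}
#endif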
4509
4510 static void setErrorStrings(unsigned errorCode, const char* errorClientOfDylibPath,
4511 const char* errorTargetDylibPath, const char* errorSymbol)
4512 {
4513 dyld::gProcessInfo->errorKind = errorCode;
4514 dyld::gProcessInfo->errorClientOfDylibPath = errorClientOfDylibPath;
4515 dyld::gProcessInfo->errorTargetDylibPath = errorTargetDylibPath;
4516 dyld::gProcessInfo->errorSymbol = errorSymbol;
4517 }
4518
4519
4520 uintptr_t bindLazySymbol(const mach_header* mh, uintptr_t* lazyPointer)
4521 {
4522 uintptr_t result = 0;
4523 // acquire read-lock on dyld's data structures
4524 #if 0 // rdar://problem/3811777 turn off locking until deadlock is resolved
4525 if ( gLibSystemHelpers != NULL )
4526 (*gLibSystemHelpers->lockForReading)();
4527 #endif
4528 // lookup and bind lazy pointer and get target address
4529 try {
4530 ImageLoader* target;
4531 #if __i386__
4532 // fast stubs pass NULL for mh and image is instead found via the location of stub (aka lazyPointer)
4533 if ( mh == NULL )
4534 target = dyld::findImageContainingAddress(lazyPointer);
4535 else
4536 target = dyld::findImageByMachHeader(mh);
4537 #else
4538 // note: target should always be mach-o, because only the mach-o lazy handler is wired up to this
4539 target = dyld::findImageByMachHeader(mh);
4540 #endif
4541 if ( target == NULL )
4542 throwf("image not found for lazy pointer at %p", lazyPointer);
4543 result = target->doBindLazySymbol(lazyPointer, gLinkContext);
4544 }
4545 catch (const char* message) {
4546 dyld::log("dyld: lazy symbol binding failed: %s\n", message);
4547 halt(message);
4548 }
4549 // release read-lock on dyld's data structures
4550 #if 0
4551 if ( gLibSystemHelpers != NULL )
4552 (*gLibSystemHelpers->unlockForReading)();
4553 #endif
4554 // return target address to glue which jumps to it with real parameters restored
4555 return result;
4556 }
4557
4558
4559 uintptr_t fastBindLazySymbol(ImageLoader** imageLoaderCache, uintptr_t lazyBindingInfoOffset)
4560 {
4561 uintptr_t result = 0;
4562 // get image
4563 if ( *imageLoaderCache == NULL ) {
4564 // save in cache
4565 *imageLoaderCache = dyld::findMappedRange((uintptr_t)imageLoaderCache);
4566 if ( *imageLoaderCache == NULL ) {
4567 #if SUPPORT_ACCELERATE_TABLES
4568 if ( sAllCacheImagesProxy != NULL ) {
4569 const mach_header* mh;
4570 const char* path;
4571 unsigned index;
4572 if ( sAllCacheImagesProxy->addressInCache(imageLoaderCache, &mh, &path, &index) ) {
4573 result = sAllCacheImagesProxy->bindLazy(lazyBindingInfoOffset, gLinkContext, mh, index);
4574 if ( result == 0 ) {
4575 halt("lazy symbol binding failed for image in dyld shared cache");
4576 }
4577 return result;
4578 }
4579 }
4580 #endif
4581 const char* message = "fast lazy binding from unknown image";
4582 dyld::log("dyld: %s\n", message);
4583 halt(message);
4584 }
4585 }
4586
4587 // bind lazy pointer and return it
4588 try {
4589 result = (*imageLoaderCache)->doBindFastLazySymbol((uint32_t)lazyBindingInfoOffset, gLinkContext,
4590 (dyld::gLibSystemHelpers != NULL) ? dyld::gLibSystemHelpers->acquireGlobalDyldLock : NULL,
4591 (dyld::gLibSystemHelpers != NULL) ? dyld::gLibSystemHelpers->releaseGlobalDyldLock : NULL);
4592 }
4593 catch (const char* message) {
4594 dyld::log("dyld: lazy symbol binding failed: %s\n", message);
4595 halt(message);
4596 }
4597
4598 // return target address to glue which jumps to it with real parameters restored
4599 return result;
4600 }
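// Note on the fast path above (conceptual; details vary by architecture): the linker
// emits a pointer-sized cache slot in the image's data segment plus an offset into the
// image's lazy binding info. The assembly glue (dyld_stub_binder in libdyld) passes both
// here on the first call through a lazy stub, then jumps to the returned address with
// the original argument registers restored.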
4601
4602
4603
4604 void registerUndefinedHandler(UndefinedHandler handler)
4605 {
4606 sUndefinedHandler = handler;
4607 }
4608
4609 static void undefinedHandler(const char* symbolName)
4610 {
4611 if ( sUndefinedHandler != NULL ) {
4612 (*sUndefinedHandler)(symbolName);
4613 }
4614 }
4615
4616 static bool findExportedSymbol(const char* name, bool onlyInCoalesced, const ImageLoader::Symbol** sym, const ImageLoader** image)
4617 {
4618 // search all images in order
4619 const ImageLoader* firstWeakImage = NULL;
4620 const ImageLoader::Symbol* firstWeakSym = NULL;
4621 const size_t imageCount = sAllImages.size();
4622 for(size_t i=0; i < imageCount; ++i) {
4623 ImageLoader* anImage = sAllImages[i];
4624 // the use of inserted libraries alters search order
4625 // so that inserted libraries are found before the main executable
4626 if ( sInsertedDylibCount > 0 ) {
4627 if ( i < sInsertedDylibCount )
4628 anImage = sAllImages[i+1];
4629 else if ( i == sInsertedDylibCount )
4630 anImage = sAllImages[0];
4631 }
4632 if ( ! anImage->hasHiddenExports() && (!onlyInCoalesced || anImage->hasCoalescedExports()) ) {
4633 *sym = anImage->findExportedSymbol(name, false, image);
4634 if ( *sym != NULL ) {
4635 // if weak definition found, record first one found
4636 if ( ((*image)->getExportedSymbolInfo(*sym) & ImageLoader::kWeakDefinition) != 0 ) {
4637 if ( firstWeakImage == NULL ) {
4638 firstWeakImage = *image;
4639 firstWeakSym = *sym;
4640 }
4641 }
4642 else {
4643 // found non-weak, so immediately return with it
4644 return true;
4645 }
4646 }
4647 }
4648 }
4649 if ( firstWeakSym != NULL ) {
4650 // found a weak definition, but no non-weak, so return first weak found
4651 *sym = firstWeakSym;
4652 *image = firstWeakImage;
4653 return true;
4654 }
4655 #if SUPPORT_ACCELERATE_TABLES
4656 if ( sAllCacheImagesProxy != NULL ) {
4657 if ( sAllCacheImagesProxy->flatFindSymbol(name, onlyInCoalesced, sym, image) )
4658 return true;
4659 }
4660 #endif
4661
4662 return false;
4663 }
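// Worked example of the index remapping above: with two inserted dylibs, sAllImages is
// ordered [ main, inserted1, inserted2, ... ], but the loop probes
// i=0 -> sAllImages[1], i=1 -> sAllImages[2], i=2 -> sAllImages[0], so the effective
// flat search order is inserted1, inserted2, main, then everything else.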
4664
4665 bool flatFindExportedSymbol(const char* name, const ImageLoader::Symbol** sym, const ImageLoader** image)
4666 {
4667 return findExportedSymbol(name, false, sym, image);
4668 }
4669
4670 bool findCoalescedExportedSymbol(const char* name, const ImageLoader::Symbol** sym, const ImageLoader** image)
4671 {
4672 return findExportedSymbol(name, true, sym, image);
4673 }
4674
4675
4676 bool flatFindExportedSymbolWithHint(const char* name, const char* librarySubstring, const ImageLoader::Symbol** sym, const ImageLoader** image)
4677 {
4678 // search all images in order
4679 const size_t imageCount = sAllImages.size();
4680 for(size_t i=0; i < imageCount; ++i){
4681 ImageLoader* anImage = sAllImages[i];
4682 // only look at images whose paths contain the hint string (NULL hint string is wildcard)
4683 if ( ! anImage->isBundle() && ((librarySubstring==NULL) || (strstr(anImage->getPath(), librarySubstring) != NULL)) ) {
4684 *sym = anImage->findExportedSymbol(name, false, image);
4685 if ( *sym != NULL ) {
4686 return true;
4687 }
4688 }
4689 }
4690 return false;
4691 }
4692
4693
4694 unsigned int getCoalescedImages(ImageLoader* images[], unsigned imageIndex[])
4695 {
4696 unsigned int count = 0;
4697 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
4698 ImageLoader* image = *it;
4699 if ( image->participatesInCoalescing() ) {
4700 images[count] = *it;
4701 imageIndex[count] = 0;
4702 ++count;
4703 }
4704 }
4705 #if SUPPORT_ACCELERATE_TABLES
4706 if ( sAllCacheImagesProxy != NULL ) {
4707 sAllCacheImagesProxy->appendImagesNeedingCoalescing(images, imageIndex, count);
4708 }
4709 #endif
4710 return count;
4711 }
4712
4713
4714 static ImageLoader::MappedRegion* getMappedRegions(ImageLoader::MappedRegion* regions)
4715 {
4716 ImageLoader::MappedRegion* end = regions;
4717 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
4718 (*it)->getMappedRegions(end);
4719 }
4720 return end;
4721 }
4722
4723 void registerImageStateSingleChangeHandler(dyld_image_states state, dyld_image_state_change_handler handler)
4724 {
4725 // mark the image that the handler is in as never-unload because dyld has a reference into it
4726 ImageLoader* handlerImage = findImageContainingAddress((void*)handler);
4727 if ( handlerImage != NULL )
4728 handlerImage->setNeverUnload();
4729
4730 // add to list of handlers
4731 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sSingleHandlers);
4732 if ( handlers != NULL ) {
4733 // <rdar://problem/10332417> need updateAllImages() to be last in dyld_image_state_mapped list
4734 // so that if ObjC adds a handler that prevents a load, it happens before the gdb list is updated
4735 if ( state == dyld_image_state_mapped )
4736 handlers->insert(handlers->begin(), handler);
4737 else
4738 handlers->push_back(handler);
4739
4740 // call callback with all existing images
4741 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
4742 ImageLoader* image = *it;
4743 dyld_image_info info;
4744 info.imageLoadAddress = image->machHeader();
4745 info.imageFilePath = image->getRealPath();
4746 info.imageFileModDate = image->lastModified();
4747 // should only call handler if state == image->state
4748 if ( image->getState() == state )
4749 (*handler)(state, 1, &info);
4750 // ignore returned string, too late to do anything
4751 }
4752 }
4753 }
4754
4755 void registerImageStateBatchChangeHandler(dyld_image_states state, dyld_image_state_change_handler handler)
4756 {
4757 // mark the image that the handler is in as never-unload because dyld has a reference into it
4758 ImageLoader* handlerImage = findImageContainingAddress((void*)handler);
4759 if ( handlerImage != NULL )
4760 handlerImage->setNeverUnload();
4761
4762 // add to list of handlers
4763 std::vector<dyld_image_state_change_handler>* handlers = stateToHandlers(state, sBatchHandlers);
4764 if ( handlers != NULL ) {
4765 // insert at front, so that gdb handler is always last
4766 handlers->insert(handlers->begin(), handler);
4767
4768 // call callback with all existing images
4769 try {
4770 notifyBatchPartial(state, true, handler, false, false);
4771 }
4772 catch (const char* msg) {
4773 // ignore request to abort during registration
4774 }
4775 }
4776 }
4777
4778
4779 void registerObjCNotifiers(_dyld_objc_notify_mapped mapped, _dyld_objc_notify_init init, _dyld_objc_notify_unmapped unmapped)
4780 {
4781 // record functions to call
4782 sNotifyObjCMapped = mapped;
4783 sNotifyObjCInit = init;
4784 sNotifyObjCUnmapped = unmapped;
4785
4786 // call 'mapped' function with all images mapped so far
4787 try {
4788 notifyBatchPartial(dyld_image_state_bound, true, NULL, false, true);
4789 }
4790 catch (const char* msg) {
4791 // ignore request to abort during registration
4792 }
4793 }
4794
4795 bool sharedCacheUUID(uuid_t uuid)
4796 {
4797 #if DYLD_SHARED_CACHE_SUPPORT
4798 if ( sSharedCache == NULL )
4799 return false;
4800
4801 memcpy(uuid, sSharedCache->uuid, 16);
4802 return true;
4803 #else
4804 return false;
4805 #endif
4806 }
4807
4808 #if SUPPORT_ACCELERATE_TABLES
4809
4810 bool dlopenFromCache(const char* path, int mode, void** handle)
4811 {
4812 if ( sAllCacheImagesProxy == NULL )
4813 return false;
4814 char fallbackPath[PATH_MAX];
4815 bool result = sAllCacheImagesProxy->dlopenFromCache(gLinkContext, path, mode, handle);
4816 if ( !result && (strchr(path, '/') == NULL) ) {
4817 // POSIX says you can call dlopen() with a leaf name (e.g. dlopen("libz.dylib"))
4818 strcpy(fallbackPath, "/usr/lib/");
4819 strlcat(fallbackPath, path, PATH_MAX);
4820 result = sAllCacheImagesProxy->dlopenFromCache(gLinkContext, fallbackPath, mode, handle);
4821 if ( !result )
4822 path = fallbackPath;
4823 }
4824 if ( !result ) {
4825 // leaf name could be a symlink
4826 char resolvedPath[PATH_MAX];
4827 realpath(path, resolvedPath);
4828 int realpathErrno = errno;
4829 // If realpath() resolves to a path which does not exist on disk, errno is set to ENOENT
4830 if ( (realpathErrno == ENOENT) || (realpathErrno == 0) ) {
4831 result = sAllCacheImagesProxy->dlopenFromCache(gLinkContext, resolvedPath, mode, handle);
4832 }
4833 }
4834
4835 return result;
4836 }
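// Worked example of the fallbacks above (paths are illustrative): dlopen("libz.dylib")
// first probes the cache for "libz.dylib", then for "/usr/lib/libz.dylib"; if that entry
// is only present in the cache under its resolved name (say "/usr/lib/libz.1.dylib"),
// the realpath() step finds it.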
4837
4838 bool makeCacheHandle(ImageLoader* image, unsigned cacheIndex, int mode, void** result)
4839 {
4840 if ( sAllCacheImagesProxy == NULL )
4841 return false;
4842 return sAllCacheImagesProxy->makeCacheHandle(gLinkContext, cacheIndex, mode, result);
4843 }
4844
4845 bool isCacheHandle(void* handle)
4846 {
4847 if ( sAllCacheImagesProxy == NULL )
4848 return false;
4849 return sAllCacheImagesProxy->isCacheHandle(handle, NULL, NULL);
4850 }
4851
4852 bool isPathInCache(const char* path)
4853 {
4854 if ( sAllCacheImagesProxy == NULL )
4855 return false;
4856 unsigned index;
4857 return sAllCacheImagesProxy->hasDylib(path, &index);
4858 }
4859
4860 const char* getPathFromIndex(unsigned cacheIndex)
4861 {
4862 if ( sAllCacheImagesProxy == NULL )
4863 return NULL;
4864 return sAllCacheImagesProxy->getIndexedPath(cacheIndex);
4865 }
4866
4867 void* dlsymFromCache(void* handle, const char* symName, unsigned index)
4868 {
4869 if ( sAllCacheImagesProxy == NULL )
4870 return NULL;
4871 return sAllCacheImagesProxy->dlsymFromCache(gLinkContext, handle, symName, index);
4872 }
4873
4874 bool addressInCache(const void* address, const mach_header** mh, const char** path, unsigned* index)
4875 {
4876 if ( sAllCacheImagesProxy == NULL )
4877 return false;
4878 unsigned ignore;
4879 return sAllCacheImagesProxy->addressInCache(address, mh, path, index ? index : &ignore);
4880 }
4881
4882 bool findUnwindSections(const void* addr, dyld_unwind_sections* info)
4883 {
4884 if ( sAllCacheImagesProxy == NULL )
4885 return false;
4886 return sAllCacheImagesProxy->findUnwindSections(addr, info);
4887 }
4888
4889 bool dladdrFromCache(const void* address, Dl_info* info)
4890 {
4891 if ( sAllCacheImagesProxy == NULL )
4892 return false;
4893 return sAllCacheImagesProxy->dladdrFromCache(address, info);
4894 }
4895 #endif
4896
4897 static ImageLoader* libraryLocator(const char* libraryName, bool search, const char* origin, const ImageLoader::RPathChain* rpaths, unsigned& cacheIndex)
4898 {
4899 dyld::LoadContext context;
4900 context.useSearchPaths = search;
4901 context.useFallbackPaths = search;
4902 context.useLdLibraryPath = false;
4903 context.implicitRPath = false;
4904 context.matchByInstallName = false;
4905 context.dontLoad = false;
4906 context.mustBeBundle = false;
4907 context.mustBeDylib = true;
4908 context.canBePIE = false;
4909 context.origin = origin;
4910 context.rpath = rpaths;
4911 return load(libraryName, context, cacheIndex);
4912 }
4913
4914 static const char* basename(const char* path)
4915 {
4916 const char* last = path;
4917 for (const char* s = path; *s != '\0'; s++) {
4918 if (*s == '/')
4919 last = s+1;
4920 }
4921 return last;
4922 }
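// e.g. basename("/usr/lib/libSystem.B.dylib") returns "libSystem.B.dylib",
// and basename("a.out") returns "a.out" unchanged.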
4923
4924 static void setContext(const macho_header* mainExecutableMH, int argc, const char* argv[], const char* envp[], const char* apple[])
4925 {
4926 gLinkContext.loadLibrary = &libraryLocator;
4927 gLinkContext.terminationRecorder = &terminationRecorder;
4928 gLinkContext.flatExportFinder = &flatFindExportedSymbol;
4929 gLinkContext.coalescedExportFinder = &findCoalescedExportedSymbol;
4930 gLinkContext.getCoalescedImages = &getCoalescedImages;
4931 gLinkContext.undefinedHandler = &undefinedHandler;
4932 gLinkContext.getAllMappedRegions = &getMappedRegions;
4933 gLinkContext.bindingHandler = NULL;
4934 gLinkContext.notifySingle = &notifySingle;
4935 gLinkContext.notifyBatch = &notifyBatch;
4936 gLinkContext.removeImage = &removeImage;
4937 gLinkContext.registerDOFs = &registerDOFs;
4938 gLinkContext.clearAllDepths = &clearAllDepths;
4939 gLinkContext.printAllDepths = &printAllDepths;
4940 gLinkContext.imageCount = &imageCount;
4941 gLinkContext.setNewProgramVars = &setNewProgramVars;
4942 #if DYLD_SHARED_CACHE_SUPPORT
4943 gLinkContext.inSharedCache = &inSharedCache;
4944 #endif
4945 gLinkContext.setErrorStrings = &setErrorStrings;
4946 #if SUPPORT_OLD_CRT_INITIALIZATION
4947 gLinkContext.setRunInitialzersOldWay= &setRunInitialzersOldWay;
4948 #endif
4949 gLinkContext.findImageContainingAddress = &findImageContainingAddress;
4950 gLinkContext.addDynamicReference = &addDynamicReference;
4951 #if SUPPORT_ACCELERATE_TABLES
4952 gLinkContext.notifySingleFromCache = &notifySingleFromCache;
4953 gLinkContext.getPreInitNotifyHandler= &getPreInitNotifyHandler;
4954 gLinkContext.getBoundBatchHandler = &getBoundBatchHandler;
4955 #endif
4956 gLinkContext.bindingOptions = ImageLoader::kBindingNone;
4957 gLinkContext.argc = argc;
4958 gLinkContext.argv = argv;
4959 gLinkContext.envp = envp;
4960 gLinkContext.apple = apple;
4961 gLinkContext.progname = (argv[0] != NULL) ? basename(argv[0]) : "";
4962 gLinkContext.programVars.mh = mainExecutableMH;
4963 gLinkContext.programVars.NXArgcPtr = &gLinkContext.argc;
4964 gLinkContext.programVars.NXArgvPtr = &gLinkContext.argv;
4965 gLinkContext.programVars.environPtr = &gLinkContext.envp;
4966 gLinkContext.programVars.__prognamePtr=&gLinkContext.progname;
4967 gLinkContext.mainExecutable = NULL;
4968 gLinkContext.imageSuffix = NULL;
4969 gLinkContext.dynamicInterposeArray = NULL;
4970 gLinkContext.dynamicInterposeCount = 0;
4971 gLinkContext.prebindUsage = ImageLoader::kUseAllPrebinding;
4972 #if TARGET_IPHONE_SIMULATOR
4973 gLinkContext.sharedRegionMode = ImageLoader::kDontUseSharedRegion;
4974 #else
4975 gLinkContext.sharedRegionMode = ImageLoader::kUseSharedRegion;
4976 #endif
4977 }
4978
4979
4980
4981 //
4982 // Look for a special segment in the mach header.
4983 // Its presence means that the binary wants dyld to ignore
4984 // DYLD_ environment variables.
4985 //
4986 static bool hasRestrictedSegment(const macho_header* mh)
4987 {
4988 const uint32_t cmd_count = mh->ncmds;
4989 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
4990 const struct load_command* cmd = cmds;
4991 for (uint32_t i = 0; i < cmd_count; ++i) {
4992 switch (cmd->cmd) {
4993 case LC_SEGMENT_COMMAND:
4994 {
4995 const struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
4996
4997 //dyld::log("seg name: %s\n", seg->segname);
4998 if (strcmp(seg->segname, "__RESTRICT") == 0) {
4999 const struct macho_section* const sectionsStart = (struct macho_section*)((char*)seg + sizeof(struct macho_segment_command));
5000 const struct macho_section* const sectionsEnd = &sectionsStart[seg->nsects];
5001 for (const struct macho_section* sect=sectionsStart; sect < sectionsEnd; ++sect) {
5002 if (strcmp(sect->sectname, "__restrict") == 0)
5003 return true;
5004 }
5005 }
5006 }
5007 break;
5008 }
5009 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5010 }
5011
5012 return false;
5013 }
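// Illustrative note (not from this source): a binary typically gets such a segment by
// asking the static linker to synthesize an empty section at link time, e.g.
//   clang main.c -Wl,-sectcreate,__RESTRICT,__restrict,/dev/null
// (exact flag spelling depends on the toolchain). dyld then ignores DYLD_* variables
// for that process, as checked in configureProcessRestrictions() below.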
5014
5015 #if __IPHONE_OS_VERSION_MIN_REQUIRED
5016 static bool isFairPlayEncrypted(const macho_header* mh)
5017 {
5018 const uint32_t cmd_count = mh->ncmds;
5019 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
5020 const struct load_command* cmd = cmds;
5021 for (uint32_t i = 0; i < cmd_count; ++i) {
5022 if ( cmd->cmd == LC_ENCRYPT_COMMAND ) {
5023 const encryption_info_command* enc = (encryption_info_command*)cmd;
5024 return (enc->cryptid != 0);
5025 }
5026 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5027 }
5028
5029 return false;
5030 }
5031 #endif
5032
5033 #if SUPPORT_VERSIONED_PATHS
5034
5035 static bool readFirstPage(const char* dylibPath, uint8_t firstPage[4096])
5036 {
5037 firstPage[0] = 0;
5038 // open file (automagically closed when this function exits)
5039 FileOpener file(dylibPath);
5040
5041 if ( file.getFileDescriptor() == -1 )
5042 return false;
5043
5044 if ( pread(file.getFileDescriptor(), firstPage, 4096, 0) != 4096 )
5045 return false;
5046
5047 // if fat wrapper, find usable sub-file
5048 const fat_header* fileStartAsFat = (fat_header*)firstPage;
5049 if ( fileStartAsFat->magic == OSSwapBigToHostInt32(FAT_MAGIC) ) {
5050 uint64_t fileOffset;
5051 uint64_t fileLength;
5052 if ( fatFindBest(fileStartAsFat, &fileOffset, &fileLength) ) {
5053 if ( pread(file.getFileDescriptor(), firstPage, 4096, fileOffset) != 4096 )
5054 return false;
5055 }
5056 else {
5057 return false;
5058 }
5059 }
5060
5061 return true;
5062 }
5063
5064 //
5065 // Peeks at a dylib file and returns its current_version and install_name.
5066 // Returns false on error.
5067 //
5068 static bool getDylibVersionAndInstallname(const char* dylibPath, uint32_t* version, char* installName)
5069 {
5070 uint8_t firstPage[4096];
5071 const macho_header* mh = (macho_header*)firstPage;
5072 if ( !readFirstPage(dylibPath, firstPage) ) {
5073 #if DYLD_SHARED_CACHE_SUPPORT
5074 // If file cannot be read, check to see if path is in shared cache
5075 const macho_header* mhInCache;
5076 const char* pathInCache;
5077 long slideInCache;
5078 if ( !findInSharedCacheImage(dylibPath, true, NULL, &mhInCache, &pathInCache, &slideInCache) )
5079 return false;
5080 mh = mhInCache;
5081 #else
5082 return false;
5083 #endif
5084 }
5085
5086 // check mach-o header
5087 if ( mh->magic != sMainExecutableMachHeader->magic )
5088 return false;
5089 if ( mh->cputype != sMainExecutableMachHeader->cputype )
5090 return false;
5091
5092 // scan load commands for LC_ID_DYLIB
5093 const uint32_t cmd_count = mh->ncmds;
5094 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
5095 const struct load_command* const cmdsReadEnd = (struct load_command*)(((char*)mh)+4096);
5096 const struct load_command* cmd = cmds;
5097 for (uint32_t i = 0; i < cmd_count; ++i) {
5098 switch (cmd->cmd) {
5099 case LC_ID_DYLIB:
5100 {
5101 const struct dylib_command* id = (struct dylib_command*)cmd;
5102 *version = id->dylib.current_version;
5103 if ( installName != NULL )
5104 strlcpy(installName, (char *)id + id->dylib.name.offset, PATH_MAX);
5105 return true;
5106 }
5107 break;
5108 }
5109 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5110 if ( cmd > cmdsReadEnd )
5111 return false;
5112 }
5113
5114 return false;
5115 }
5116 #endif // SUPPORT_VERSIONED_PATHS
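#if 0
// Illustrative sketch only (not part of dyld): dylib current_version values read by
// getDylibVersionAndInstallname() are X.Y.Z packed into 16/8/8 bits, so they can be
// unpacked for logging like this:
static void logDylibVersion(const char* installName, uint32_t version)
{
	dyld::log("%s is version %u.%u.%u\n", installName,
			  version >> 16, (version >> 8) & 0xFF, version & 0xFF);
}
#endif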
5117
5118
5119 #if 0
5120 static void printAllImages()
5121 {
5122 dyld::log("printAllImages()\n");
5123 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5124 ImageLoader* image = *it;
5125 dyld_image_states imageState = image->getState();
5126 dyld::log(" state=%d, dlopen-count=%d, never-unload=%d, in-use=%d, name=%s\n",
5127 imageState, image->dlopenCount(), image->neverUnload(), image->isMarkedInUse(), image->getShortName());
5128 }
5129 }
5130 #endif
5131
5132 void link(ImageLoader* image, bool forceLazysBound, bool neverUnload, const ImageLoader::RPathChain& loaderRPaths, unsigned cacheIndex)
5133 {
5134 // add to list of known images. This did not happen at creation time for bundles
5135 if ( image->isBundle() && !image->isLinked() )
5136 addImage(image);
5137
5138 // we detect root images as those not linked in yet
5139 if ( !image->isLinked() )
5140 addRootImage(image);
5141
5142 // process images
5143 try {
5144 const char* path = image->getPath();
5145 #if SUPPORT_ACCELERATE_TABLES
5146 if ( image == sAllCacheImagesProxy )
5147 path = sAllCacheImagesProxy->getIndexedPath(cacheIndex);
5148 #endif
5149 image->link(gLinkContext, forceLazysBound, false, neverUnload, loaderRPaths, path);
5150 }
5151 catch (const char* msg) {
5152 garbageCollectImages();
5153 throw;
5154 }
5155 }
5156
5157
5158 void runInitializers(ImageLoader* image)
5159 {
5160 // do bottom up initialization
5161 ImageLoader::InitializerTimingList initializerTimes[allImagesCount()];
5162 initializerTimes[0].count = 0;
5163 image->runInitializers(gLinkContext, initializerTimes[0]);
5164 }
5165
5166 // This function is called at the end of dlclose() when the reference count goes to zero.
5167 // The dylib being unloaded may have brought in other dependent dylibs when it was loaded.
5168 // Those dependent dylibs need to be unloaded, but only if they are not referenced by
5169 // something else. We use a standard mark and sweep garbage collection.
5170 //
5171 // The tricky part is that when a dylib is unloaded it may have a termination function that
5172 // can run and itself call dlclose() on yet another dylib. The problem is that this
5173 // sort of garbage collection is not re-entrant. Instead a terminator's call to dlclose()
5174 // which calls garbageCollectImages() will just set a flag to re-do the garbage collection
5175 // when the current pass is done.
5176 //
5177 // Also note that this is done within the dyld global lock, so it is always single threaded.
5178 //
5179 void garbageCollectImages()
5180 {
5181 static bool sDoingGC = false;
5182 static bool sRedo = false;
5183
5184 if ( sDoingGC ) {
5185 // GC is currently being run, just set a flag to have it run again.
5186 sRedo = true;
5187 return;
5188 }
5189
5190 sDoingGC = true;
5191 do {
5192 sRedo = false;
5193
5194 // mark phase: mark all images not-in-use
5195 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5196 ImageLoader* image = *it;
5197 //dyld::log("gc: neverUnload=%d name=%s\n", image->neverUnload(), image->getShortName());
5198 image->markNotUsed();
5199 }
5200
5201 // sweep phase: mark as in-use, images reachable from never-unload or in-use image
5202 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5203 ImageLoader* image = *it;
5204 if ( (image->dlopenCount() != 0) || image->neverUnload() || (image == sMainExecutable) ) {
5205 OSSpinLockLock(&sDynamicReferencesLock);
5206 image->markedUsedRecursive(sDynamicReferences);
5207 OSSpinLockUnlock(&sDynamicReferencesLock);
5208 }
5209 }
5210
5211 // collect phase: build array of images not marked in-use
5212 ImageLoader* deadImages[sAllImages.size()];
5213 unsigned deadCount = 0;
5214 int maxRangeCount = 0;
5215 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5216 ImageLoader* image = *it;
5217 if ( ! image->isMarkedInUse() ) {
5218 deadImages[deadCount++] = image;
5219 if (gLogAPIs) dyld::log("dlclose(), found unused image %p %s\n", image, image->getShortName());
5220 maxRangeCount += image->segmentCount();
5221 }
5222 }
5223
5224 // collect phase: run termination routines for images not marked in-use
5225 __cxa_range_t ranges[maxRangeCount];
5226 int rangeCount = 0;
5227 for (unsigned i=0; i < deadCount; ++i) {
5228 ImageLoader* image = deadImages[i];
5229 for (unsigned int j=0; j < image->segmentCount(); ++j) {
5230 if ( !image->segExecutable(j) )
5231 continue;
5232 if ( rangeCount < maxRangeCount ) {
5233 ranges[rangeCount].addr = (const void*)image->segActualLoadAddress(j);
5234 ranges[rangeCount].length = image->segSize(j);
5235 ++rangeCount;
5236 }
5237 }
5238 try {
5239 runImageStaticTerminators(image);
5240 }
5241 catch (const char* msg) {
5242 dyld::warn("problem running terminators for image: %s\n", msg);
5243 }
5244 }
5245
5246 // <rdar://problem/14718598> dyld should call __cxa_finalize_ranges()
5247 if ( (rangeCount > 0) && (gLibSystemHelpers != NULL) && (gLibSystemHelpers->version >= 13) )
5248 (*gLibSystemHelpers->cxa_finalize_ranges)(ranges, rangeCount);
5249
5250 // collect phase: delete all images which are not marked in-use
5251 bool mightBeMore;
5252 do {
5253 mightBeMore = false;
5254 for (std::vector<ImageLoader*>::iterator it=sAllImages.begin(); it != sAllImages.end(); it++) {
5255 ImageLoader* image = *it;
5256 if ( ! image->isMarkedInUse() ) {
5257 try {
5258 if (gLogAPIs) dyld::log("dlclose(), deleting %p %s\n", image, image->getShortName());
5259 removeImage(image);
5260 ImageLoader::deleteImage(image);
5261 mightBeMore = true;
5262 break; // iterator is invalidated by this removal
5263 }
5264 catch (const char* msg) {
5265 dyld::warn("problem deleting image: %s\n", msg);
5266 }
5267 }
5268 }
5269 } while ( mightBeMore );
5270 } while (sRedo);
5271 sDoingGC = false;
5272
5273 //printAllImages();
5274
5275 }
5276
5277
5278 static void preflight_finally(ImageLoader* image)
5279 {
5280 if ( image->isBundle() ) {
5281 removeImageFromAllImages(image->machHeader());
5282 ImageLoader::deleteImage(image);
5283 }
5284 sBundleBeingLoaded = NULL;
5285 dyld::garbageCollectImages();
5286 }
5287
5288
5289 void preflight(ImageLoader* image, const ImageLoader::RPathChain& loaderRPaths, unsigned cacheIndex)
5290 {
5291 try {
5292 if ( image->isBundle() )
5293 sBundleBeingLoaded = image; // hack
5294 const char* path = image->getPath();
5295 #if SUPPORT_ACCELERATE_TABLES
5296 if ( image == sAllCacheImagesProxy )
5297 path = sAllCacheImagesProxy->getIndexedPath(cacheIndex);
5298 #endif
5299 image->link(gLinkContext, false, true, false, loaderRPaths, path);
5300 }
5301 catch (const char* msg) {
5302 preflight_finally(image);
5303 throw;
5304 }
5305 preflight_finally(image);
5306 }
5307
5308 static void loadInsertedDylib(const char* path)
5309 {
5310 ImageLoader* image = NULL;
5311 unsigned cacheIndex;
5312 try {
5313 LoadContext context;
5314 context.useSearchPaths = false;
5315 context.useFallbackPaths = false;
5316 context.useLdLibraryPath = false;
5317 context.implicitRPath = false;
5318 context.matchByInstallName = false;
5319 context.dontLoad = false;
5320 context.mustBeBundle = false;
5321 context.mustBeDylib = true;
5322 context.canBePIE = false;
5323 context.origin = NULL; // can't use @loader_path with DYLD_INSERT_LIBRARIES
5324 context.rpath = NULL;
5325 image = load(path, context, cacheIndex);
5326 }
5327 catch (const char* msg) {
5328 #if TARGET_IPHONE_SIMULATOR
5329 dyld::log("dyld: warning: could not load inserted library '%s' because %s\n", path, msg);
5330 #else
5331 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5332 if ( gLinkContext.processUsingLibraryValidation )
5333 dyld::log("dyld: warning: could not load inserted library '%s' into library validated process because %s\n", path, msg);
5334 else
5335 #endif
5336 halt(dyld::mkstringf("could not load inserted library '%s' because %s\n", path, msg));
5337 #endif
5338 }
5339 catch (...) {
5340 halt(dyld::mkstringf("could not load inserted library '%s'\n", path));
5341 }
5342 }
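// Usage note: this path is driven entirely by the environment, e.g.
//   DYLD_INSERT_LIBRARIES=/tmp/libinterpose.dylib ./a.out
// Each colon-separated entry of that variable is passed to loadInsertedDylib() in turn
// (the path above is illustrative).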
5343
5344
5345 //
5346 // Sets:
5347 // sEnvMode
5348 // gLinkContext.requireCodeSignature
5349 // gLinkContext.processIsRestricted // Mac OS X only
5350 // gLinkContext.processUsingLibraryValidation // Mac OS X only
5351 //
5352 static void configureProcessRestrictions(const macho_header* mainExecutableMH)
5353 {
5354 uint32_t flags;
5355 #if TARGET_IPHONE_SIMULATOR
5356 sEnvMode = envAll;
5357 gLinkContext.requireCodeSignature = true;
5358 #elif __IPHONE_OS_VERSION_MIN_REQUIRED
5359 sEnvMode = envNone;
5360 gLinkContext.requireCodeSignature = true;
5361 if ( csops(0, CS_OPS_STATUS, &flags, sizeof(flags)) != -1 ) {
5362 if ( flags & CS_ENFORCEMENT ) {
5363 if ( flags & CS_GET_TASK_ALLOW ) {
5364 // Xcode built app for Debug allowed to use DYLD_* variables
5365 sEnvMode = envAll;
5366 }
5367 else {
5368 // Development kernel can use DYLD_PRINT_* variables on any FairPlay encrypted app
5369 uint32_t secureValue = 0;
5370 size_t secureValueSize = sizeof(secureValue);
5371 if ( (sysctlbyname("kern.secure_kernel", &secureValue, &secureValueSize, NULL, 0) == 0) && (secureValue == 0) && isFairPlayEncrypted(mainExecutableMH) ) {
5372 sEnvMode = envPrintOnly;
5373 }
5374 }
5375 }
5376 else {
5377 // Development kernel can run unsigned code
5378 sEnvMode = envAll;
5379 gLinkContext.requireCodeSignature = false;
5380 }
5381 }
5382 if ( issetugid() ) {
5383 sEnvMode = envNone;
5384 }
5385 #elif __MAC_OS_X_VERSION_MIN_REQUIRED
5386 sEnvMode = envAll;
5387 gLinkContext.requireCodeSignature = false;
5388 gLinkContext.processIsRestricted = false;
5389 gLinkContext.processUsingLibraryValidation = false;
5390 // any processes with setuid or setgid bit set or with __RESTRICT segment is restricted
5391 if ( issetugid() || hasRestrictedSegment(mainExecutableMH) ) {
5392 gLinkContext.processIsRestricted = true;
5393 }
5394 bool usingSIP = (csr_check(CSR_ALLOW_TASK_FOR_PID) != 0);
5395 if ( csops(0, CS_OPS_STATUS, &flags, sizeof(flags)) != -1 ) {
5396 // On OS X CS_RESTRICT means the program was signed with entitlements
5397 if ( ((flags & CS_RESTRICT) == CS_RESTRICT) && usingSIP ) {
5398 gLinkContext.processIsRestricted = true;
5399 }
5400 // Library Validation loosens searching but requires everything to be code signed
5401 if ( flags & CS_REQUIRE_LV ) {
5402 gLinkContext.processIsRestricted = false;
5403 //gLinkContext.requireCodeSignature = true;
5404 gLinkContext.processUsingLibraryValidation = true;
5405 sSafeMode = usingSIP;
5406 }
5407 }
5408 #endif
5409 }
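#if 0
// Illustrative sketch only (not part of dyld): querying the same code-signing status
// bits this function inspects. csops() is declared in <System/sys/codesign.h>, and a
// pid of 0 means the calling process. The helper name is hypothetical.
static bool callingProcessUsesLibraryValidation()
{
	uint32_t flags = 0;
	if ( csops(0, CS_OPS_STATUS, &flags, sizeof(flags)) == -1 )
		return false;
	return ( (flags & CS_REQUIRE_LV) != 0 );
}
#endif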
5410
5411
5412 bool processIsRestricted()
5413 {
5414 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5415 return gLinkContext.processIsRestricted;
5416 #else
5417 return false;
5418 #endif
5419 }
5420
5421
5422 // <rdar://problem/10583252> Add dyld to uuidArray to enable symbolication of stackshots
5423 static void addDyldImageToUUIDList()
5424 {
5425 const struct macho_header* mh = (macho_header*)&__dso_handle;
5426 const uint32_t cmd_count = mh->ncmds;
5427 const struct load_command* const cmds = (struct load_command*)((char*)mh + sizeof(macho_header));
5428 const struct load_command* cmd = cmds;
5429 for (uint32_t i = 0; i < cmd_count; ++i) {
5430 switch (cmd->cmd) {
5431 case LC_UUID: {
5432 uuid_command* uc = (uuid_command*)cmd;
5433 dyld_uuid_info info;
5434 info.imageLoadAddress = (mach_header*)mh;
5435 memcpy(info.imageUUID, uc->uuid, 16);
5436 addNonSharedCacheImageUUID(info);
5437 return;
5438 }
5439 }
5440 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5441 }
5442 }
5443
5444 void notifyKernelAboutDyld()
5445 {
5446 const struct macho_header* mh = (macho_header*)&__dso_handle;
5447 const uint32_t cmd_count = mh->ncmds;
5448 const struct load_command* const cmds = (struct load_command*)((char*)mh + sizeof(macho_header));
5449 const struct load_command* cmd = cmds;
5450 for (uint32_t i = 0; i < cmd_count; ++i) {
5451 switch (cmd->cmd) {
5452 case LC_UUID: {
5453 // Add dyld to the kernel image info
5454 uuid_command* uc = (uuid_command*)cmd;
5455 dyld_kernel_image_info_t kernelInfo;
5456 memcpy(kernelInfo.uuid, uc->uuid, 16);
5457 kernelInfo.load_addr = (uint64_t)mh;
5458 kernelInfo.fsobjid.fid_objno = 0;
5459 kernelInfo.fsobjid.fid_generation = 0;
5460 kernelInfo.fsid.val[0] = 0;
5461 kernelInfo.fsid.val[1] = 0;
5462 task_register_dyld_image_infos(mach_task_self(), &kernelInfo, 1);
5463 return;
5464 }
5465 }
5466 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5467 }
5468 }
5469
5470 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5471 typedef int (*open_proc_t)(const char*, int, int);
5472 typedef int (*fcntl_proc_t)(int, int, void*);
5473 typedef int (*ioctl_proc_t)(int, unsigned long, void*);
5474 static void* getProcessInfo() { return dyld::gProcessInfo; }
5475 static SyscallHelpers sSysCalls = {
5476 7,
5477 // added in version 1
5478 (open_proc_t)&open,
5479 &close,
5480 &pread,
5481 &write,
5482 &mmap,
5483 &munmap,
5484 &madvise,
5485 &stat,
5486 (fcntl_proc_t)&fcntl,
5487 (ioctl_proc_t)&ioctl,
5488 &issetugid,
5489 &getcwd,
5490 &realpath,
5491 &vm_allocate,
5492 &vm_deallocate,
5493 &vm_protect,
5494 &vlog,
5495 &vwarn,
5496 &pthread_mutex_lock,
5497 &pthread_mutex_unlock,
5498 &mach_thread_self,
5499 &mach_port_deallocate,
5500 &task_self_trap,
5501 &mach_timebase_info,
5502 &OSAtomicCompareAndSwapPtrBarrier,
5503 &OSMemoryBarrier,
5504 &getProcessInfo,
5505 &__error,
5506 &mach_absolute_time,
5507 // added in version 2
5508 &thread_switch,
5509 // added in version 3
5510 &opendir,
5511 &readdir_r,
5512 &closedir,
5513 // added in version 4
5514 &coresymbolication_load_notifier,
5515 &coresymbolication_unload_notifier,
5516 // Added in version 5
5517 &proc_regionfilename,
5518 &getpid,
5519 &mach_port_insert_right,
5520 &mach_port_allocate,
5521 &mach_msg,
5522 // Added in version 6
5523 &abort_with_payload,
5524 // Added in version 7
5525 &task_register_dyld_image_infos,
5526 &task_unregister_dyld_image_infos,
5527 &task_get_dyld_image_infos,
5528 &task_register_dyld_shared_cache_image_info,
5529 &task_register_dyld_set_dyld_state,
5530 &task_register_dyld_get_process_state
5531 };
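// Versioning note: the leading 7 above is the vtable version. dyld_sim is expected to
// check that field before calling through any slot documented as "added in version N",
// because an older host dyld hands it a shorter table.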
5532
5533 __attribute__((noinline))
5534 static const char* useSimulatorDyld(int fd, const macho_header* mainExecutableMH, const char* dyldPath,
5535 int argc, const char* argv[], const char* envp[], const char* apple[],
5536 uintptr_t* startGlue, uintptr_t* mainAddr)
5537 {
5538 *startGlue = 0;
5539 *mainAddr = 0;
5540
5541 // <rdar://problem/25311921> simulator does not support restricted processes
5542 uint32_t flags;
5543 if ( csops(0, CS_OPS_STATUS, &flags, sizeof(flags)) == -1 )
5544 return "csops() failed";
5545 if ( (flags & CS_RESTRICT) == CS_RESTRICT )
5546 return "dyld_sim cannot be loaded in a restricted process";
5547 if ( issetugid() )
5548 return "dyld_sim cannot be loaded in a setuid process";
5549 if ( hasRestrictedSegment(mainExecutableMH) )
5550 return "dyld_sim cannot be loaded in a restricted process";
5551
5552 // get file size of dyld_sim
5553 struct stat sb;
5554 if ( fstat(fd, &sb) == -1 )
5555 return "stat(dyld_sim) failed";
5556
5557 // read first page of dyld_sim file
5558 uint8_t firstPage[4096];
5559 if ( pread(fd, firstPage, 4096, 0) != 4096 )
5560 return "pread(dyld_sim) failed";
5561
5562 // if fat file, pick matching slice
5563 uint64_t fileOffset = 0;
5564 uint64_t fileLength = sb.st_size;
5565 const fat_header* fileStartAsFat = (fat_header*)firstPage;
5566 if ( fileStartAsFat->magic == OSSwapBigToHostInt32(FAT_MAGIC) ) {
5567 if ( !fatFindBest(fileStartAsFat, &fileOffset, &fileLength) )
5568 return "no matching arch in dyld_sim";
5569 // re-read buffer from start of mach-o slice in fat file
5570 if ( pread(fd, firstPage, 4096, fileOffset) != 4096 )
5571 return "pread(dyld_sim) failed";
5572 }
5573 else if ( !isCompatibleMachO(firstPage, dyldPath) ) {
5574 return "dyld_sim not compatible mach-o";
5575 }
5576
5577 // calculate total size of dyld segments
5578 const macho_header* mh = (const macho_header*)firstPage;
5579 struct macho_segment_command* lastSeg = NULL;
5580 struct macho_segment_command* firstSeg = NULL;
5581 uintptr_t mappingSize = 0;
5582 uintptr_t preferredLoadAddress = 0;
5583 const uint32_t cmd_count = mh->ncmds;
5584 if ( mh->sizeofcmds > 4096 )
5585 return "dyld_sim load commands too large";
5586 if ( (sizeof(macho_header) + mh->sizeofcmds) > 4096 )
5587 return "dyld_sim load commands too large";
5588 const struct load_command* const cmds = (struct load_command*)(((char*)mh)+sizeof(macho_header));
5589 const struct load_command* const endCmds = (struct load_command*)(((char*)mh) + sizeof(macho_header) + mh->sizeofcmds);
5590 const struct load_command* cmd = cmds;
5591 for (uint32_t i = 0; i < cmd_count; ++i) {
5592 uint32_t cmdLength = cmd->cmdsize;
5593 if ( cmdLength < 8 )
5594 return "dyld_sim load command too small";
5595 const struct load_command* const nextCmd = (const struct load_command*)(((char*)cmd)+cmdLength);
5596 if ( (nextCmd > endCmds) || (nextCmd < cmd) )
5597 return "dyld_sim load command too large";
5598 switch (cmd->cmd) {
5599 case LC_SEGMENT_COMMAND:
5600 {
5601 struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
5602 if ( seg->vmaddr + seg->vmsize < seg->vmaddr )
5603 return "dyld_sim seg wraps address space";
5604 if ( seg->vmsize < seg->filesize )
5605 return "dyld_sim seg vmsize too small";
5606 if ( (seg->fileoff + seg->filesize) < seg->fileoff )
5607 return "dyld_sim seg size wraps address space";
5608 if ( lastSeg == NULL ) {
5609 // first segment must be __TEXT and start at beginning of file/slice
5610 firstSeg = seg;
5611 if ( strcmp(seg->segname, "__TEXT") != 0 )
5612 return "dyld_sim first segment not __TEXT";
5613 if ( seg->fileoff != 0 )
5614 return "dyld_sim first segment not at file offset zero";
5615 if ( seg->filesize < (sizeof(macho_header) + mh->sizeofcmds) )
5616 return "dyld_sim first segment smaller than load commands";
5617 preferredLoadAddress = seg->vmaddr;
5618 }
5619 else {
5620 // other segments must be contiguous with the previous segment and not executable
5621 if ( lastSeg->fileoff + lastSeg->filesize != seg->fileoff )
5622 return "dyld_sim segments not contiguous";
5623 if ( lastSeg->vmaddr + lastSeg->vmsize != seg->vmaddr )
5624 return "dyld_sim segments not address contiguous";
5625 if ( (seg->initprot & VM_PROT_EXECUTE) != 0 )
5626 return "dyld_sim non-first segment is executable";
5627 }
5628 mappingSize += seg->vmsize;
5629 lastSeg = seg;
5630 }
5631 break;
5632 case LC_SEGMENT_COMMAND_WRONG:
5633 return "dyld_sim wrong load segment load command";
5634 }
5635 cmd = nextCmd;
5636 }
5637 // last segment must be named __LINKEDIT and not writable
5638 if ( strcmp(lastSeg->segname, "__LINKEDIT") != 0 )
5639 return "dyld_sim last segment not __LINKEDIT";
5640 if ( lastSeg->initprot & VM_PROT_WRITE )
5641 return "dyld_sim __LINKEDIT segment writable";
5642
5643 // reserve space, then mmap each segment
5644 vm_address_t loadAddress = 0;
5645 if ( ::vm_allocate(mach_task_self(), &loadAddress, mappingSize, VM_FLAGS_ANYWHERE) != 0 )
5646 return "dyld_sim cannot allocate space";
5647 cmd = cmds;
5648 struct linkedit_data_command* codeSigCmd = NULL;
5649 struct source_version_command* dyldVersionCmd = NULL;
5650 for (uint32_t i = 0; i < cmd_count; ++i) {
5651 switch (cmd->cmd) {
5652 case LC_SEGMENT_COMMAND:
5653 {
5654 struct macho_segment_command* seg = (struct macho_segment_command*)cmd;
5655 uintptr_t requestedLoadAddress = seg->vmaddr - preferredLoadAddress + loadAddress;
5656 void* segAddress = ::mmap((void*)requestedLoadAddress, seg->filesize, seg->initprot, MAP_FIXED | MAP_PRIVATE, fd, fileOffset + seg->fileoff);
5657 //dyld::log("dyld_sim %s mapped at %p\n", seg->segname, segAddress);
5658 if ( segAddress == (void*)(-1) )
5659 return "dyld_sim mmap() of segment failed";
5660 if ( ((uintptr_t)segAddress < loadAddress) || ((uintptr_t)segAddress+seg->filesize > loadAddress+mappingSize) )
5661 return "dyld_sim mmap() to wrong location";
5662 }
5663 break;
5664 case LC_CODE_SIGNATURE:
5665 codeSigCmd = (struct linkedit_data_command*)cmd;
5666 break;
5667 case LC_SOURCE_VERSION:
5668 dyldVersionCmd = (struct source_version_command*)cmd;
5669 break;
5670 }
5671 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5672 }
5673
5674 // must have code signature which is contained within LINKEDIT segment
5675 if ( codeSigCmd == NULL )
5676 return "dyld_sim not code signed";
5677 if ( codeSigCmd->dataoff < lastSeg->fileoff )
5678 return "dyld_sim code signature not in __LINKEDIT";
5679 if ( (codeSigCmd->dataoff + codeSigCmd->datasize) < codeSigCmd->dataoff )
5680 return "dyld_sim code signature size wraps";
5681 if ( (codeSigCmd->dataoff + codeSigCmd->datasize) > (lastSeg->fileoff + lastSeg->filesize) )
5682 return "dyld_sim code signature extends beyond __LINKEDIT";
5683
5684 fsignatures_t siginfo;
5685 siginfo.fs_file_start=fileOffset; // start of mach-o slice in fat file
5686 siginfo.fs_blob_start=(void*)(long)(codeSigCmd->dataoff); // start of code-signature in mach-o file
5687 siginfo.fs_blob_size=codeSigCmd->datasize; // size of code-signature
5688 int result = fcntl(fd, F_ADDFILESIGS_FOR_DYLD_SIM, &siginfo);
5689 if ( result == -1 ) {
5690 return mkstringf("dyld_sim fcntl(F_ADDFILESIGS_FOR_DYLD_SIM) failed with errno=%d", errno);
5691 }
5692 close(fd);
5693 // file range covered by code signature must extend up to code signature itself
5694 if ( siginfo.fs_file_start < codeSigCmd->dataoff )
5695 return mkstringf("dyld_sim code signature does not cover all of dyld_sim. Signature covers up to 0x%08lX. Signature starts at 0x%08X", (unsigned long)siginfo.fs_file_start, codeSigCmd->dataoff);
5696
5697
5698 // walk newly mapped dyld_sim __TEXT load commands to find entry point
5699 uintptr_t entry = 0;
5700 cmd = (struct load_command*)(((char*)loadAddress)+sizeof(macho_header));
5701 const uint32_t count = ((macho_header*)(loadAddress))->ncmds;
5702 for (uint32_t i = 0; i < count; ++i) {
5703 if (cmd->cmd == LC_UNIXTHREAD) {
5704 #if __i386__
5705 const i386_thread_state_t* registers = (i386_thread_state_t*)(((char*)cmd) + 16);
5706 // entry point must be in first segment
5707 if ( registers->__eip < firstSeg->vmaddr )
5708 return "dyld_sim entry point not in __TEXT segment";
5709 if ( registers->__eip > (firstSeg->vmaddr + firstSeg->vmsize) )
5710 return "dyld_sim entry point not in __TEXT segment";
5711 entry = (registers->__eip + loadAddress - preferredLoadAddress);
5712 #elif __x86_64__
5713 const x86_thread_state64_t* registers = (x86_thread_state64_t*)(((char*)cmd) + 16);
5714 // entry point must be in first segment
5715 if ( registers->__rip < firstSeg->vmaddr )
5716 return "dyld_sim entry point not in __TEXT segment";
5717 if ( registers->__rip > (firstSeg->vmaddr + firstSeg->vmsize) )
5718 return "dyld_sim entry point not in __TEXT segment";
5719 entry = (registers->__rip + loadAddress - preferredLoadAddress);
5720 #endif
5721 }
5722 cmd = (const struct load_command*)(((char*)cmd)+cmd->cmdsize);
5723 }
5724
5725 // notify debugger that dyld_sim is loaded
5726 dyld_image_info info;
5727 info.imageLoadAddress = (mach_header*)loadAddress;
5728 info.imageFilePath = strdup(dyldPath);
5729 info.imageFileModDate = sb.st_mtime;
5730 addImagesToAllImages(1, &info);
5731 dyld::gProcessInfo->notification(dyld_image_adding, 1, &info);
5732
5733 const char** appleParams = apple;
5734 // jump into new simulator dyld
5735 typedef uintptr_t (*sim_entry_proc_t)(int argc, const char* argv[], const char* envp[], const char* apple[],
5736 const macho_header* mainExecutableMH, const macho_header* dyldMH, uintptr_t dyldSlide,
5737 const dyld::SyscallHelpers* vtable, uintptr_t* startGlue);
5738 sim_entry_proc_t newDyld = (sim_entry_proc_t)entry;
5739 *mainAddr = (*newDyld)(argc, argv, envp, appleParams, mainExecutableMH, (macho_header*)loadAddress,
5740 loadAddress - preferredLoadAddress,
5741 &sSysCalls, startGlue);
5742 return NULL;
5743 }
5744 #endif
5745
5746
5747 //
5748 // Entry point for dyld. The kernel loads dyld and jumps to __dyld_start which
5749 // sets up some registers and call this function.
5750 //
5751 // Returns address of main() in target program which __dyld_start jumps to
5752 //
5753 uintptr_t
5754 _main(const macho_header* mainExecutableMH, uintptr_t mainExecutableSlide,
5755 int argc, const char* argv[], const char* envp[], const char* apple[],
5756 uintptr_t* startGlue)
5757 {
5758 uintptr_t result = 0;
5759 sMainExecutableMachHeader = mainExecutableMH;
5760 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5761 // if this is host dyld, check to see if iOS simulator is being run
5762 const char* rootPath = _simple_getenv(envp, "DYLD_ROOT_PATH");
5763 if ( rootPath != NULL ) {
5764 // Add dyld to the kernel image info before we jump to the sim
5765 notifyKernelAboutDyld();
5766
5767 // look to see if simulator has its own dyld
5768 char simDyldPath[PATH_MAX];
5769 strlcpy(simDyldPath, rootPath, PATH_MAX);
5770 strlcat(simDyldPath, "/usr/lib/dyld_sim", PATH_MAX);
5771 int fd = my_open(simDyldPath, O_RDONLY, 0);
5772 if ( fd != -1 ) {
5773 const char* errMessage = useSimulatorDyld(fd, mainExecutableMH, simDyldPath, argc, argv, envp, apple, startGlue, &result);
5774 if ( errMessage != NULL )
5775 halt(errMessage);
5776 return result;
5777 }
5778 }
5779 #endif
5780
5781 CRSetCrashLogMessage("dyld: launch started");
5782
5783 setContext(mainExecutableMH, argc, argv, envp, apple);
5784
5785 // Pickup the pointer to the exec path.
5786 sExecPath = _simple_getenv(apple, "executable_path");
5787
5788 // <rdar://problem/13868260> Remove interim apple[0] transition code from dyld
5789 if (!sExecPath) sExecPath = apple[0];
5790
5791 if ( sExecPath[0] != '/' ) {
5792 // have relative path, use cwd to make absolute
5793 char cwdbuff[MAXPATHLEN];
5794 if ( getcwd(cwdbuff, MAXPATHLEN) != NULL ) {
5795 // maybe use static buffer to avoid calling malloc so early...
5796 char* s = new char[strlen(cwdbuff) + strlen(sExecPath) + 2];
5797 strcpy(s, cwdbuff);
5798 strcat(s, "/");
5799 strcat(s, sExecPath);
5800 sExecPath = s;
5801 }
5802 }
5803 // Remember short name of process for later logging
5804 sExecShortName = ::strrchr(sExecPath, '/');
5805 if ( sExecShortName != NULL )
5806 ++sExecShortName;
5807 else
5808 sExecShortName = sExecPath;
5809
5810 configureProcessRestrictions(mainExecutableMH);
5811
5812 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5813 if ( gLinkContext.processIsRestricted ) {
5814 pruneEnvironmentVariables(envp, &apple);
5815 // set again because envp and apple may have changed or moved
5816 setContext(mainExecutableMH, argc, argv, envp, apple);
5817 }
5818 else
5819 #endif
5820 {
5821 checkEnvironmentVariables(envp);
5822 defaultUninitializedFallbackPaths(envp);
5823 }
5824 if ( sEnv.DYLD_PRINT_OPTS )
5825 printOptions(argv);
5826 if ( sEnv.DYLD_PRINT_ENV )
5827 printEnvironmentVariables(envp);
5828 getHostInfo(mainExecutableMH, mainExecutableSlide);
5829 // install gdb notifier
5830 stateToHandlers(dyld_image_state_dependents_mapped, sBatchHandlers)->push_back(notifyGDB);
5831 stateToHandlers(dyld_image_state_mapped, sSingleHandlers)->push_back(updateAllImages);
5832 // make initial allocations large enough that they are unlikely to need reallocation
5833 sImageRoots.reserve(16);
5834 sAddImageCallbacks.reserve(4);
5835 sRemoveImageCallbacks.reserve(4);
5836 sImageFilesNeedingTermination.reserve(16);
5837 sImageFilesNeedingDOFUnregistration.reserve(8);
5838
5839 #if !TARGET_IPHONE_SIMULATOR
5840 #ifdef WAIT_FOR_SYSTEM_ORDER_HANDSHAKE
5841 // <rdar://problem/6849505> Add gating mechanism to dyld support system order file generation process
5842 WAIT_FOR_SYSTEM_ORDER_HANDSHAKE(dyld::gProcessInfo->systemOrderFlag);
5843 #endif
5844 #endif
5845
5846
5847 try {
5848 // add dyld itself to UUID list
5849 addDyldImageToUUIDList();
5850 notifyKernelAboutDyld();
5851
5852 #if SUPPORT_ACCELERATE_TABLES
5853 bool mainExcutableAlreadyRebased = false;
5854
5855 reloadAllImages:
5856 #endif
5857
5858 CRSetCrashLogMessage(sLoadingCrashMessage);
5859 // instantiate ImageLoader for main executable
5860 sMainExecutable = instantiateFromLoadedImage(mainExecutableMH, mainExecutableSlide, sExecPath);
5861 gLinkContext.mainExecutable = sMainExecutable;
5862 gLinkContext.mainExecutableCodeSigned = hasCodeSignatureLoadCommand(mainExecutableMH);
5863
5864 #if TARGET_IPHONE_SIMULATOR
5865 // check main executable is not too new for this OS
5866 {
5867 if ( ! isSimulatorBinary((uint8_t*)mainExecutableMH, sExecPath) ) {
5868 throwf("program was built for a platform that is not supported by this runtime");
5869 }
5870 uint32_t mainMinOS = sMainExecutable->minOSVersion();
5871
5872 // dyld is always built for the current OS, so we can get the current OS version
5873 // from the load command in dyld itself.
5874 uint32_t dyldMinOS = ImageLoaderMachO::minOSVersion((const mach_header*)&__dso_handle);
5875 if ( mainMinOS > dyldMinOS ) {
5876 #if TARGET_OS_WATCH
5877 throwf("app was built for watchOS %d.%d which is newer than this simulator %d.%d",
5878 mainMinOS >> 16, ((mainMinOS >> 8) & 0xFF),
5879 dyldMinOS >> 16, ((dyldMinOS >> 8) & 0xFF));
5880 #elif TARGET_OS_TV
5881 throwf("app was built for tvOS %d.%d which is newer than this simulator %d.%d",
5882 mainMinOS >> 16, ((mainMinOS >> 8) & 0xFF),
5883 dyldMinOS >> 16, ((dyldMinOS >> 8) & 0xFF));
5884 #else
5885 throwf("app was built for iOS %d.%d which is newer than this simulator %d.%d",
5886 mainMinOS >> 16, ((mainMinOS >> 8) & 0xFF),
5887 dyldMinOS >> 16, ((dyldMinOS >> 8) & 0xFF));
5888 #endif
5889 }
5890 }
5891 #endif
5892
5893
5894 #if __MAC_OS_X_VERSION_MIN_REQUIRED
5895 // <rdar://problem/22805519> be less strict about old mach-o binaries
5896 uint32_t mainSDK = sMainExecutable->sdkVersion();
5897 gLinkContext.strictMachORequired = (mainSDK >= DYLD_MACOSX_VERSION_10_12) || gLinkContext.processUsingLibraryValidation;
5898 #else
5899 // simulators, iOS, tvOS, and watchOS are always strict
5900 gLinkContext.strictMachORequired = true;
5901 #endif
5902
5903 // load shared cache
5904 checkSharedRegionDisable();
5905 #if DYLD_SHARED_CACHE_SUPPORT
5906 if ( gLinkContext.sharedRegionMode != ImageLoader::kDontUseSharedRegion ) {
5907 mapSharedCache();
5908 } else {
5909 dyld_kernel_image_info_t kernelCacheInfo;
5910 bzero(&kernelCacheInfo.uuid[0], sizeof(uuid_t));
5911 kernelCacheInfo.load_addr = 0;
5912 kernelCacheInfo.fsobjid.fid_objno = 0;
5913 kernelCacheInfo.fsobjid.fid_generation = 0;
5914 kernelCacheInfo.fsid.val[0] = 0;
5915 kernelCacheInfo.fsid.val[1] = 0;
5916 task_register_dyld_shared_cache_image_info(mach_task_self(), kernelCacheInfo, true, false);
5917 }
5918 #endif
5919
5920 #if SUPPORT_ACCELERATE_TABLES
5921 sAllImages.reserve((sAllCacheImagesProxy != NULL) ? 16 : INITIAL_IMAGE_COUNT);
5922 #else
5923 sAllImages.reserve(INITIAL_IMAGE_COUNT);
5924 #endif
5925
5926 // Now that the shared cache is loaded, set up any versioned dylib overrides
5927 #if SUPPORT_VERSIONED_PATHS
5928 checkVersionedPaths();
5929 #endif
5930
5931
5932 // dyld_all_image_infos image list does not contain dyld
5933 // add it as dyldPath field in dyld_all_image_infos
5934 // for simulator, dyld_sim is in image list, need host dyld added
5935 #if TARGET_IPHONE_SIMULATOR
5936 // get path of host dyld from table of syscall vectors in host dyld
5937 void* addressInDyld = gSyscallHelpers;
5938 #else
5939 // get path of dyld itself
5940 void* addressInDyld = (void*)&__dso_handle;
5941 #endif
5942 char dyldPathBuffer[MAXPATHLEN+1];
5943 int len = proc_regionfilename(getpid(), (uint64_t)(long)addressInDyld, dyldPathBuffer, MAXPATHLEN);
5944 if ( len > 0 ) {
5945 dyldPathBuffer[len] = '\0'; // proc_regionfilename() does not zero terminate returned string
5946 if ( strcmp(dyldPathBuffer, gProcessInfo->dyldPath) != 0 )
5947 gProcessInfo->dyldPath = strdup(dyldPathBuffer);
5948 }
5949
5950 // load any inserted libraries
5951 if ( sEnv.DYLD_INSERT_LIBRARIES != NULL ) {
5952 for (const char* const* lib = sEnv.DYLD_INSERT_LIBRARIES; *lib != NULL; ++lib)
5953 loadInsertedDylib(*lib);
5954 }
5955 // record count of inserted libraries so that a flat search will look at
5956 // inserted libraries, then main, then others.
5957 sInsertedDylibCount = sAllImages.size()-1;
5958
5959 // link main executable
5960 gLinkContext.linkingMainExecutable = true;
5961 #if SUPPORT_ACCELERATE_TABLES
5962 if ( mainExcutableAlreadyRebased ) {
5963 // previous link() on main executable has already adjusted its internal pointers for ASLR
5964 // work around that by rebasing by inverse amount
5965 sMainExecutable->rebase(gLinkContext, -mainExecutableSlide);
5966 }
5967 #endif
5968 link(sMainExecutable, sEnv.DYLD_BIND_AT_LAUNCH, true, ImageLoader::RPathChain(NULL, NULL), -1);
5969 sMainExecutable->setNeverUnloadRecursive();
5970 if ( sMainExecutable->forceFlat() ) {
5971 gLinkContext.bindFlat = true;
5972 gLinkContext.prebindUsage = ImageLoader::kUseNoPrebinding;
5973 }
5974
5975 // link any inserted libraries
5976 // do this after linking main executable so that any dylibs pulled in by inserted
5977 // dylibs (e.g. libSystem) will not be in front of dylibs the program uses
5978 if ( sInsertedDylibCount > 0 ) {
5979 for(unsigned int i=0; i < sInsertedDylibCount; ++i) {
5980 ImageLoader* image = sAllImages[i+1];
5981 link(image, sEnv.DYLD_BIND_AT_LAUNCH, true, ImageLoader::RPathChain(NULL, NULL), -1);
5982 image->setNeverUnloadRecursive();
5983 }
5984 // only INSERTED libraries can interpose
5985 // register interposing info after all inserted libraries are bound so chaining works
5986 for(unsigned int i=0; i < sInsertedDylibCount; ++i) {
5987 ImageLoader* image = sAllImages[i+1];
5988 image->registerInterposing();
5989 }
5990 }
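// sketch of what an inserted dylib publishes for registerInterposing() to find
// (hypothetical replacement of open(); tuples live in a __DATA,__interpose section):
//     static int my_open(const char* path, int flags, mode_t mode)
//         { /* ... */ return open(path, flags, mode); }
//     __attribute__((used, section("__DATA,__interpose")))
//     static struct { const void* repl; const void* orig; } sTuple
//         = { (const void*)&my_open, (const void*)&open };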
5991
5992 // <rdar://problem/19315404> dyld should support interposition even without DYLD_INSERT_LIBRARIES
5993 for (long i=sInsertedDylibCount+1; i < sAllImages.size(); ++i) {
5994 ImageLoader* image = sAllImages[i];
5995 if ( image->inSharedCache() )
5996 continue;
5997 image->registerInterposing();
5998 }
5999 #if SUPPORT_ACCELERATE_TABLES
6000 if ( (sAllCacheImagesProxy != NULL) && ImageLoader::haveInterposingTuples() ) {
6001 // Accelerator tables cannot be used with implicit interposing, so reload all images with accelerator tables disabled
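// (everything loaded so far, except the main executable, is torn down below and the
//  goto jumps back to the reloadAllImages label earlier in this function to start over)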
6002 ImageLoader::clearInterposingTuples();
6003 // unmap all loaded dylibs (but not main executable)
6004 for (long i=1; i < sAllImages.size(); ++i) {
6005 ImageLoader* image = sAllImages[i];
6006 if ( image == sMainExecutable )
6007 continue;
6008 if ( image == sAllCacheImagesProxy )
6009 continue;
6010 image->setCanUnload();
6011 ImageLoader::deleteImage(image);
6012 }
6013 // note: we don't need to worry about inserted images because if DYLD_INSERT_LIBRARIES was set we would not be using the accelerator table
6014 sAllImages.clear();
6015 sImageRoots.clear();
6016 sImageFilesNeedingTermination.clear();
6017 sImageFilesNeedingDOFUnregistration.clear();
6018 sAddImageCallbacks.clear();
6019 sRemoveImageCallbacks.clear();
6020 sDisableAcceleratorTables = true;
6021 sAllCacheImagesProxy = NULL;
6022 sMappedRangesStart = NULL;
6023 mainExcutableAlreadyRebased = true;
6024 gLinkContext.linkingMainExecutable = false;
6025 resetAllImages();
6026 goto reloadAllImages;
6027 }
6028 #endif
6029
6030 // apply interposing to initial set of images
6031 for(int i=0; i < sImageRoots.size(); ++i) {
6032 sImageRoots[i]->applyInterposing(gLinkContext);
6033 }
6034 gLinkContext.linkingMainExecutable = false;
6035
6036 // <rdar://problem/12186933> do weak binding only after all inserted images linked
6037 sMainExecutable->weakBind(gLinkContext);
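// (weak binding coalesces weak-definition symbols, e.g. C++ 'operator new', so that
//  every loaded image resolves them to the same single implementation)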
6038
6039 #if DYLD_SHARED_CACHE_SUPPORT
6040 // If cache has branch island dylibs, tell debugger about them
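// (branch pools are small dylibs of branch islands the cache builder inserts so that
//  arm64 BL instructions can reach targets beyond their limited branch range)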
6041 if ( (sSharedCache != NULL) && (sSharedCache->mappingOffset >= 0x78) && (sSharedCache->branchPoolsOffset != 0) ) {
6042 uint32_t count = sSharedCache->branchPoolsCount;
6043 dyld_image_info info[count];
6044 const uint64_t* poolAddress = (uint64_t*)((char*)sSharedCache + sSharedCache->branchPoolsOffset);
6045 // <rdar://problem/20799203> empty branch pools can be in development cache
6046 if ( ((mach_header*)poolAddress)->magic == sMainExecutableMachHeader->magic ) {
6047 for (int poolIndex=0; poolIndex < count; ++poolIndex) {
6048 uint64_t poolAddr = poolAddress[poolIndex] + sSharedCacheSlide;
6049 info[poolIndex].imageLoadAddress = (mach_header*)(long)poolAddr;
6050 info[poolIndex].imageFilePath = "dyld_shared_cache_branch_islands";
6051 info[poolIndex].imageFileModDate = 0;
6052 }
6053 // add to all_images list
6054 addImagesToAllImages(count, info);
6055 // tell gdb about new branch island images
6056 gProcessInfo->notification(dyld_image_adding, count, info);
6057 }
6058 }
6059 #endif
6060
6061 CRSetCrashLogMessage("dyld: launch, running initializers");
6062 #if SUPPORT_OLD_CRT_INITIALIZATION
6063 // Old way is to run initializers via a callback from crt1.o
6064 if ( ! gRunInitializersOldWay )
6065 initializeMainExecutable();
6066 #else
6067 // run all initializers
6068 initializeMainExecutable();
6069 #endif
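// (initializers run bottom-up: each image's dependents are initialized before the
//  image itself, with the main executable's own initializers run last)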
6070
6071 // notify any monitoring processes that this process is about to enter main()
6072 notifyMonitoringDyldMain();
6073
6074 // find entry point for main executable
6075 result = (uintptr_t)sMainExecutable->getThreadPC();
6076 if ( result != 0 ) {
6077 // main executable uses LC_MAIN, needs to return to glue in libdyld.dylib
6078 if ( (gLibSystemHelpers != NULL) && (gLibSystemHelpers->version >= 9) )
6079 *startGlue = (uintptr_t)gLibSystemHelpers->startGlueToCallExit;
6080 else
6081 halt("libdyld.dylib support not present for LC_MAIN");
6082 }
6083 else {
6084 // main executable uses LC_UNIXTHREAD, dyld needs to let the program's own "start" routine set up for main()
6085 result = (uintptr_t)sMainExecutable->getMain();
6086 *startGlue = 0;
6087 }
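// at this point 'result' and '*startGlue' tell the assembly start glue how to proceed:
// a non-zero startGlue means call the LC_MAIN entry and hand its return value to exit()
// via libdyld; startGlue==0 means jump directly to the traditional LC_UNIXTHREAD start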
6088 }
6089 catch(const char* message) {
6090 syncAllImages();
6091 halt(message);
6092 }
6093 catch(...) {
6094 dyld::log("dyld: launch failed\n");
6095 }
6096
6097 CRSetCrashLogMessage(NULL);
6098
6099 return result;
6100 }
6101
6102
6103
6104 } // namespace
6105
6106
6107