src/dyld_process_info.cpp (dyld-832.7.1)
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2016 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25 #include <stdlib.h>
26 #include <string.h>
27 #include <limits.h>
28 #include <stdio.h>
29 #include <libproc.h>
30 #include <sys/param.h>
31 #include <mach/shared_region.h>
32 #include <mach/mach_vm.h>
33 #include <mach/vm_region.h>
34 #include <libkern/OSAtomic.h>
35 #include <mach-o/dyld_process_info.h>
36 #include <mach-o/dyld_images.h>
37
38 #include "MachOFile.h"
39 #include "dyld_process_info_internal.h"
40 #include "Tracing.h"
41
42 // this was in dyld_priv.h but it is no longer exported
43 extern "C" {
44 const struct dyld_all_image_infos* _dyld_get_all_image_infos();
45 }
46
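//
// RemoteBuffer encapsulates a view of another task's memory. Non-shared requests are copied
// into a local malloc'd buffer so the contents cannot change (or disappear) underneath us;
// shared requests stay mapped via mach_vm_remap() and are vm_deallocate'd by the destructor.
// Callers should check getKernelReturn() before using getLocalAddress()/getSize().
//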
47 RemoteBuffer& RemoteBuffer::operator=(RemoteBuffer&& other) {
48 std::swap(_localAddress, other._localAddress);
49 std::swap(_size, other._size);
50 std::swap(_kr, other._kr);
51 std::swap(_shared, other._shared);
52 return *this;
53 }
54
55 RemoteBuffer::RemoteBuffer() : _localAddress(0), _size(0), _kr(KERN_SUCCESS), _shared(false) {}
56 RemoteBuffer::RemoteBuffer(std::tuple<mach_vm_address_t,vm_size_t,kern_return_t,bool> T)
57 : _localAddress(std::get<0>(T)), _size(std::get<1>(T)), _kr(std::get<2>(T)), _shared(std::get<3>(T)) {}
58
59 RemoteBuffer::RemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool shared, bool allow_truncation)
60 : RemoteBuffer(RemoteBuffer::create(task, remote_address, remote_size, shared, allow_truncation)) {};
61
62 std::pair<mach_vm_address_t, kern_return_t>
63 RemoteBuffer::map(task_t task, mach_vm_address_t remote_address, vm_size_t size, bool shared) {
64 vm_prot_t cur_protection = VM_PROT_NONE;
65 vm_prot_t max_protection = VM_PROT_NONE;
66 int flags;
67 if (size == 0) {
68 return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_INVALID_ARGUMENT);
69 }
70 if (shared) {
71 flags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR;
72 } else {
73 // <rdar://55343677>
74 // Since we are getting rid of the flag probing we have to make sure that simulator libdyld's do not use VM_FLAGS_RESILIENT_MEDIA
75 // FIXME: Remove this when simulator builds do not support back deployment to 10.14
76 #if TARGET_OS_SIMULATOR
77 flags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_RESILIENT_CODESIGN;
78 #else
79 flags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_RESILIENT_CODESIGN | VM_FLAGS_RESILIENT_MEDIA;
80 #endif
81 }
82 mach_vm_address_t localAddress = 0;
83 auto kr = mach_vm_remap(mach_task_self(),
84 &localAddress,
85 size,
86 0, // mask
87 flags,
88 task,
89 remote_address,
90 !shared,
91 &cur_protection,
92 &max_protection,
93 VM_INHERIT_NONE);
94 // The call was not successful, return
95 if (kr != KERN_SUCCESS) {
96 return std::make_pair(MACH_VM_MIN_ADDRESS, kr);
97 }
98 // If it is not a shared buffer then copy it into a local buffer so our results are coherent in the event
99 // the page goes away due to storage removal, etc. We have to do this because even after we read the page the
100 // contents might go away if the object is paged out and then the backing region is disconnected (for example, if
101 // we are copying some memory in the middle of a mach-o that is on a USB drive that is disconnected after we perform
102 // the mapping). Once we copy them into a local buffer the memory will be handled by the default pager instead of
103 // potentially being backed by the mmap pager, and thus will be guaranteed not to mutate out from under us.
104 if (!shared) {
105 void* buffer = malloc(size);
106 if (buffer == nullptr) {
107 (void)vm_deallocate(mach_task_self(), (vm_address_t)localAddress, size);
108 return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_NO_SPACE);
109 }
110 memcpy(buffer, (void *)localAddress, size);
111 (void)vm_deallocate(mach_task_self(), (vm_address_t)localAddress, size);
112 return std::make_pair((vm_address_t)buffer, KERN_SUCCESS);
113 }
114 // A shared buffer was requested; if the permissions are not correct, deallocate the region and return failure
115 if (cur_protection != (VM_PROT_READ|VM_PROT_WRITE)) {
116 if (localAddress != 0) {
117 (void)vm_deallocate(mach_task_self(), (size_t)localAddress, size);
118 }
119 return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_PROTECTION_FAILURE);
120 }
121 // We have a successfully created shared buffer with the correct permissions, return it
122 return std::make_pair(localAddress, KERN_SUCCESS);
123 }
124
125 std::tuple<mach_vm_address_t,vm_size_t,kern_return_t,bool> RemoteBuffer::create(task_t task,
126 mach_vm_address_t remote_address,
127 size_t size,
128 bool shared,
129 bool allow_truncation) {
130 mach_vm_address_t localAddress;
131 kern_return_t kr;
132 // Try the initial map
133 std::tie(localAddress, kr) = map(task, remote_address, size, shared);
134 if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr, shared);
135 // The first attempt failed, truncate if possible and try again. We only need to try once since the largest
136 // truncatable buffer we map is less than a single page. To be more general we would need to try repeatedly in a
137 // loop.
138 if (allow_truncation) {
139 size = PAGE_SIZE - remote_address%PAGE_SIZE;
140 std::tie(localAddress, kr) = map(task, remote_address, size, shared);
141 if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr, shared);
142 }
143 // If we reach this then the mapping completely failed
144 return std::make_tuple(MACH_VM_MIN_ADDRESS, 0, kr, shared);
145 }
146
147 RemoteBuffer::~RemoteBuffer() {
148 if (!_localAddress) { return; }
149
150 if (_shared) {
151 (void)vm_deallocate(mach_task_self(), (vm_address_t)_localAddress, _size);
152 } else {
153 free((void*)_localAddress);
154 }
155 }
156 void *RemoteBuffer::getLocalAddress() const { return (void *)_localAddress; }
157 size_t RemoteBuffer::getSize() const { return _size; }
158 kern_return_t RemoteBuffer::getKernelReturn() const { return _kr; }
159
160 void withRemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool shared, bool allow_truncation, kern_return_t *kr, void (^block)(void *buffer, size_t size)) {
161 kern_return_t krSink = KERN_SUCCESS;
162 if (kr == nullptr) {
163 kr = &krSink;
164 }
165 RemoteBuffer buffer(task, remote_address, remote_size, shared, allow_truncation);
166 *kr = buffer.getKernelReturn();
167 if (*kr == KERN_SUCCESS) {
168 block(buffer.getLocalAddress(), buffer.getSize());
169 }
170 }
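//
// Illustrative sketch (not part of dyld) of how the helpers in this file use withRemoteBuffer();
// "task" and "remoteAddr" are placeholders for values the caller already has:
//
//    kern_return_t kr = KERN_SUCCESS;
//    withRemoteBuffer(task, remoteAddr, sizeof(uint64_t), false, false, &kr, ^(void* buffer, size_t size) {
//        uint64_t value = *(uint64_t*)buffer;   // safe: buffer is a local copy, not the live remote page
//        (void)value;
//    });
//    // the block only ran if the mapping succeeded; kr holds the kern_return_t either way
//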
171
172
173 //
174 // Opaque object returned by _dyld_process_info_create()
175 //
176
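// dyld_process_info_base objects are allocated with malloc() and constructed with placement new,
// so the matching cleanup is free(); this custom deleter lets dyld_process_info_ptr (below) do that.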
177 struct __attribute__((visibility("hidden"))) dyld_process_info_deleter { // deleter
178 // dyld_process_info_deleter() {};
179 // dyld_process_info_deleter(const dyld_process_info_deleter&) { }
180 // dyld_process_info_deleter(dyld_process_info_deleter&) {}
181 // dyld_process_info_deleter(dyld_process_info_deleter&&) {}
182 void operator()(dyld_process_info_base* p) const {
183 if (p) {
184 free(p);
185 }
186 };
187 };
188
189 static dyld_process_info_deleter deleter;
190 typedef std::unique_ptr<dyld_process_info_base, dyld_process_info_deleter> dyld_process_info_ptr;
191
192 struct __attribute__((visibility("hidden"))) dyld_process_info_base {
193 template<typename T1, typename T2>
194 static dyld_process_info_ptr make(task_t task, const T1& allImageInfo, uint64_t timestamp, kern_return_t* kr);
195 template<typename T>
196 static dyld_process_info_ptr makeSuspended(task_t task, const T& allImageInfo, kern_return_t* kr);
197
198 std::atomic<uint32_t>& retainCount() const { return _retainCount; }
199 dyld_process_cache_info* cacheInfo() const { return (dyld_process_cache_info*)(((char*)this) + _cacheInfoOffset); }
200 dyld_process_aot_cache_info* aotCacheInfo() const { return (dyld_process_aot_cache_info*)(((char*)this) + _aotCacheInfoOffset); }
201 dyld_process_state_info* stateInfo() const { return (dyld_process_state_info*)(((char*)this) + _stateInfoOffset); }
202 dyld_platform_t platform() const { return _platform; }
203
204 void forEachImage(void (^callback)(uint64_t machHeaderAddress, const uuid_t uuid, const char* path)) const;
205 void forEachAotImage(bool (^callback)(uint64_t x86Address, uint64_t aotAddress, uint64_t aotSize, uint8_t* aotImageKey, size_t aotImageKeySize)) const;
206 void forEachSegment(uint64_t machHeaderAddress, void (^callback)(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName)) const;
207
208 bool reserveSpace(size_t space) {
209 if (_freeSpace < space) { return false; }
210 _freeSpace -= space;
211 return true;
212 }
213
214 void retain()
215 {
216 _retainCount++;
217 }
218
219 void release()
220 {
221 uint32_t newCount = --_retainCount;
222
223 if ( newCount == 0 ) {
224 free(this);
225 }
226 }
227
228 private:
229 struct ImageInfo {
230 uuid_t uuid;
231 uint64_t loadAddress;
232 const char* path;
233 uint32_t segmentStartIndex;
234 uint32_t segmentsCount;
235 };
236
237 struct SegmentInfo {
238 const char* name;
239 uint64_t addr;
240 uint64_t size;
241 };
242
243 dyld_process_info_base(dyld_platform_t platform, unsigned imageCount, unsigned aotImageCount, size_t totalSize);
244 void* operator new (size_t, void* buf) { return buf; }
245
246 static bool inCache(uint64_t addr) { return (addr > SHARED_REGION_BASE) && (addr < SHARED_REGION_BASE+SHARED_REGION_SIZE); }
247 bool addImage(task_t task, bool sameCacheAsThisProcess, uint64_t imageAddress, uint64_t imagePath, const char* imagePathLocal);
248
249 bool addAotImage(dyld_aot_image_info_64 aotImageInfo);
250
251 kern_return_t addDyldImage(task_t task, uint64_t dyldAddress, uint64_t dyldPathAddress, const char* localPath);
252
253 bool invalid() { return ((char*)_stringRevBumpPtr < (char*)_curSegment); }
254 const char* copyPath(task_t task, uint64_t pathAddr);
255 const char* addString(const char*, size_t);
256 const char* copySegmentName(const char*);
257
258 void addInfoFromLoadCommands(const mach_header* mh, uint64_t addressInTask, size_t size);
259 kern_return_t addInfoFromRemoteLoadCommands(task_t task, uint64_t remoteMH);
260
261 void inspectLocalImageLoadCommands(uint64_t imageAddress, void* func);
262 kern_return_t inspectRemoteImageLoadCommands(task_t task, uint64_t imageAddress, void* func);
263
264 mutable std::atomic<uint32_t> _retainCount;
265 const uint32_t _cacheInfoOffset;
266 const uint32_t _aotCacheInfoOffset;
267 const uint32_t _stateInfoOffset;
268 const uint32_t _imageInfosOffset;
269 const uint32_t _aotImageInfosOffset;
270 const uint32_t _segmentInfosOffset;
271 size_t _freeSpace;
272 dyld_platform_t _platform;
273 ImageInfo* const _firstImage;
274 ImageInfo* _curImage;
275 dyld_aot_image_info_64* const _firstAotImage;
276 dyld_aot_image_info_64* _curAotImage;
277 SegmentInfo* const _firstSegment;
278 SegmentInfo* _curSegment;
279 uint32_t _curSegmentIndex;
280 char* _stringRevBumpPtr;
281
282 // dyld_process_cache_info cacheInfo;
283 // dyld_process_state_info stateInfo;
284 // ImageInfo images[];
285 // SegmentInfo segments[];
286 // char stringPool[]
287 };
288
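// The constructor below lays everything out in one contiguous malloc'd block: the header, the
// cache/aot-cache/state info structs, the ImageInfo and aot image arrays, the SegmentInfo array,
// and a string pool that is bump-allocated downward from the end of the block (_stringRevBumpPtr).
// reserveSpace() tracks the remaining free space and invalid() detects if the two ends ever cross.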
289 dyld_process_info_base::dyld_process_info_base(dyld_platform_t platform, unsigned imageCount, unsigned aotImageCount, size_t totalSize)
290 : _retainCount(1), _cacheInfoOffset(sizeof(dyld_process_info_base)),
291 _aotCacheInfoOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info)),
292 _stateInfoOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_aot_cache_info)),
293 _imageInfosOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_aot_cache_info) + sizeof(dyld_process_state_info)),
294 _aotImageInfosOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_aot_cache_info) + sizeof(dyld_process_state_info) + imageCount*sizeof(ImageInfo)),
295 _segmentInfosOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_aot_cache_info) + sizeof(dyld_process_state_info) + imageCount*sizeof(ImageInfo) + aotImageCount*sizeof(dyld_aot_image_info_64)),
296 _freeSpace(totalSize), _platform(platform),
297 _firstImage((ImageInfo*)(((uint8_t*)this) + _imageInfosOffset)),
298 _curImage((ImageInfo*)(((uint8_t*)this) + _imageInfosOffset)),
299 _firstAotImage((dyld_aot_image_info_64*)(((uint8_t*)this) + _aotImageInfosOffset)),
300 _curAotImage((dyld_aot_image_info_64*)(((uint8_t*)this) + _aotImageInfosOffset)),
301 _firstSegment((SegmentInfo*)(((uint8_t*)this) + _segmentInfosOffset)),
302 _curSegment((SegmentInfo*)(((uint8_t*)this) + _segmentInfosOffset)),
303 _curSegmentIndex(0),
304 _stringRevBumpPtr((char*)(this)+totalSize)
305 {
306 }
307
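// Builds a dyld_process_info from a running task. T1/T2 select the 32- or 64-bit layouts of
// dyld_all_image_infos and dyld_image_info. Because the target may mutate its image list while
// we read it, the loop below retries up to 10 times; on failure *kr is set and nullptr is returned.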
308 template<typename T1, typename T2>
309 dyld_process_info_ptr dyld_process_info_base::make(task_t task, const T1& allImageInfo, uint64_t timestamp, kern_return_t* kr)
310 {
311 __block dyld_process_info_ptr result = nullptr;
312
313 // bail out if dyld is too old
314 if ( allImageInfo.version < 15 ) {
315 *kr = KERN_FAILURE;
316 return nullptr;
317 }
318
319 // Check if the process is suspended
320 if (allImageInfo.infoArrayChangeTimestamp == 0) {
321 result = dyld_process_info_base::makeSuspended<T1>(task, allImageInfo, kr);
322 // If we have a result return it, otherwise rescan
323 if (result) {
324 // If it returned a result, the process is suspended and there is nothing more to do
325 return std::move(result);
326 } else {
327 // If the info array change timestamp is still 0, the process has not finished initializing,
328 // so sleep briefly to give it a chance
329 if (allImageInfo.infoArrayChangeTimestamp == 0) {
330 usleep(1000 * 50); // 50ms
331 }
332 }
333 }
334
335 // Test to see if there are no changes and we can exit early
336 if (timestamp != 0 && timestamp == allImageInfo.infoArrayChangeTimestamp) {
337 *kr = KERN_SUCCESS;
338 return nullptr;
339 }
340
341 for (uint32_t j=0; j < 10; ++j) {
342 uint64_t currentTimestamp = allImageInfo.infoArrayChangeTimestamp;
343 mach_vm_address_t infoArray = allImageInfo.infoArray;
344 if (currentTimestamp == 0) continue;
345 if (infoArray == 0) {
346 // Check if the task is suspended mid dylib load and exit early
347 mach_task_basic_info ti;
348 mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
349 if ((*kr = task_info(task, MACH_TASK_BASIC_INFO, (task_info_t)&ti, &count))) {
350 continue;
351 }
352
353 // The task is suspended, exit
354 if (ti.suspend_count != 0) {
355 // Not exactly correct, but conveys that the operation may succeed in the future
356 *kr = KERN_RESOURCE_SHORTAGE;
357 return nullptr;
358 }
359 continue;
360 };
361
362 // For the moment we are going to truncate any image list longer than 8192 because some programs do
363 // terrible things that corrupt their own image lists, and we need to stop clients from crashing
364 // while reading them. We can try to do something more advanced in the future. rdar://27446361
365 uint32_t imageCount = allImageInfo.infoArrayCount;
366 imageCount = MIN(imageCount, 8192);
367 size_t imageArraySize = imageCount * sizeof(T2);
368
369 withRemoteBuffer(task, infoArray, imageArraySize, false, false, kr, ^(void *buffer, size_t size) {
370 // figure out how many path strings will need to be copied and their size
371 T2* imageArray = (T2 *)buffer;
372 const dyld_all_image_infos* myInfo = _dyld_get_all_image_infos();
373 bool sameCacheAsThisProcess = !allImageInfo.processDetachedFromSharedRegion
374 && !myInfo->processDetachedFromSharedRegion
375 && ((memcmp(myInfo->sharedCacheUUID, &allImageInfo.sharedCacheUUID[0], 16) == 0)
376 && (myInfo->sharedCacheSlide == allImageInfo.sharedCacheSlide));
377 unsigned countOfPathsNeedingCopying = 0;
378 if ( sameCacheAsThisProcess ) {
379 for (uint32_t i=0; i < imageCount; ++i) {
380 if ( !inCache(imageArray[i].imageFilePath) )
381 ++countOfPathsNeedingCopying;
382 }
383 }
384 else {
385 countOfPathsNeedingCopying = imageCount+1;
386 }
387 unsigned imageCountWithDyld = imageCount+1;
388
389 // allocate result object
390 size_t allocationSize = sizeof(dyld_process_info_base)
391 + sizeof(dyld_process_cache_info)
392 + sizeof(dyld_process_aot_cache_info)
393 + sizeof(dyld_process_state_info)
394 + sizeof(ImageInfo)*(imageCountWithDyld)
395 + sizeof(dyld_aot_image_info_64)*(allImageInfo.aotInfoCount) // add the size necessary for aot info to this buffer
396 + sizeof(SegmentInfo)*imageCountWithDyld*10
397 + countOfPathsNeedingCopying*PATH_MAX;
398 void* storage = malloc(allocationSize);
399 if (storage == nullptr) {
400 *kr = KERN_NO_SPACE;
401 result = nullptr;
402 return;
403 }
404 auto info = dyld_process_info_ptr(new (storage) dyld_process_info_base(allImageInfo.platform, imageCountWithDyld, allImageInfo.aotInfoCount, allocationSize), deleter);
405 (void)info->reserveSpace(sizeof(dyld_process_info_base)+sizeof(dyld_process_cache_info)+sizeof(dyld_process_state_info)+sizeof(dyld_process_aot_cache_info));
406 (void)info->reserveSpace(sizeof(ImageInfo)*imageCountWithDyld);
407
408 // fill in base info
409 dyld_process_cache_info* cacheInfo = info->cacheInfo();
410 memcpy(cacheInfo->cacheUUID, &allImageInfo.sharedCacheUUID[0], 16);
411 cacheInfo->cacheBaseAddress = allImageInfo.sharedCacheBaseAddress;
412 cacheInfo->privateCache = allImageInfo.processDetachedFromSharedRegion;
413 // if no cache is used, allImageInfo has all zeros for cache UUID
414 cacheInfo->noCache = true;
415 for (int i=0; i < 16; ++i) {
416 if ( cacheInfo->cacheUUID[i] != 0 ) {
417 cacheInfo->noCache = false;
418 }
419 }
420
421 // fill in aot shared cache info
422 dyld_process_aot_cache_info* aotCacheInfo = info->aotCacheInfo();
423 memcpy(aotCacheInfo->cacheUUID, &allImageInfo.aotSharedCacheUUID[0], 16);
424 aotCacheInfo->cacheBaseAddress = allImageInfo.aotSharedCacheBaseAddress;
425
426 dyld_process_state_info* stateInfo = info->stateInfo();
427 stateInfo->timestamp = currentTimestamp;
428 stateInfo->imageCount = imageCountWithDyld;
429 stateInfo->initialImageCount = (uint32_t)(allImageInfo.initialImageCount+1);
430 stateInfo->dyldState = dyld_process_state_dyld_initialized;
431
432 if ( allImageInfo.libSystemInitialized != 0 ) {
433 stateInfo->dyldState = dyld_process_state_libSystem_initialized;
434 if ( allImageInfo.initialImageCount != imageCount ) {
435 stateInfo->dyldState = dyld_process_state_program_running;
436 }
437 }
438 if ( allImageInfo.errorMessage != 0 ) {
439 stateInfo->dyldState = allImageInfo.terminationFlags ? dyld_process_state_terminated_before_inits : dyld_process_state_dyld_terminated;
440 }
441 // fill in info for dyld
442 if ( allImageInfo.dyldPath != 0 ) {
443 if ((*kr = info->addDyldImage(task, allImageInfo.dyldImageLoadAddress, allImageInfo.dyldPath, NULL))) {
444 result = nullptr;
445 return;
446 }
447 }
448 // fill in info for each image
449 for (uint32_t i=0; i < imageCount; ++i) {
450 if (!info->addImage(task, sameCacheAsThisProcess, imageArray[i].imageLoadAddress, imageArray[i].imageFilePath, NULL)) {
451 result = nullptr;
452 return;
453 }
454 }
455 // sanity check internal data did not overflow
456 if ( info->invalid() ) {
457 *kr = KERN_FAILURE;
458 result = nullptr;
459 return;
460 }
461
462 result = std::move(info);
463 });
464
465 mach_vm_address_t aotImageArray = allImageInfo.aotInfoArray;
466 // short-circuit this code path if aotImageArray == 0 (32 vs 64 bit struct difference)
467 // or if result == nullptr, since we need a process info struct to append the aot image infos to
468 if (aotImageArray != 0 && result != nullptr) {
469 uint32_t aotImageCount = allImageInfo.aotInfoCount;
470 size_t aotImageArraySize = aotImageCount * sizeof(dyld_aot_image_info_64);
471
472 withRemoteBuffer(task, aotImageArray, aotImageArraySize, false, false, kr, ^(void *buffer, size_t size) {
473 dyld_aot_image_info_64* imageArray = (dyld_aot_image_info_64*)buffer;
474 for (uint32_t i = 0; i < aotImageCount; i++) {
475 if (!result->addAotImage(imageArray[i])) {
476 result = nullptr;
477 return;
478 }
479 }
480 });
481 }
482
483 if (result) break;
484 }
485
486 return std::move(result);
487 }
488
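// Used when the target's infoArrayChangeTimestamp is 0, i.e. dyld has not yet populated the image
// list. Requires the task to actually be suspended (checked both before and after the scan); walks
// the task's VM regions looking for the MH_EXECUTE and MH_DYLINKER mach headers to build a minimal
// info object containing just the main executable and dyld.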
489 template<typename T>
490 dyld_process_info_ptr dyld_process_info_base::makeSuspended(task_t task, const T& allImageInfo, kern_return_t* kr)
491 {
492 pid_t pid;
493 if ((*kr = pid_for_task(task, &pid))) {
494 return NULL;
495 }
496
497 mach_task_basic_info ti;
498 mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
499 if ((*kr = task_info(task, MACH_TASK_BASIC_INFO, (task_info_t)&ti, &count))) {
500 return nullptr;
501 }
502
503 // The task is not suspended, exit
504 if (ti.suspend_count == 0) {
505 return nullptr;
506 }
507
508 __block unsigned imageCount = 0; // main executable and dyld
509 __block uint64_t mainExecutableAddress = 0;
510 __block uint64_t dyldAddress = 0;
511 char dyldPathBuffer[PATH_MAX+1];
512 char mainExecutablePathBuffer[PATH_MAX+1];
513 __block char * dyldPath = &dyldPathBuffer[0];
514 __block char * mainExecutablePath = &mainExecutablePathBuffer[0];
515 __block dyld3::Platform platformID = dyld3::Platform::unknown;
516 mach_vm_size_t size;
517 for (mach_vm_address_t address = 0; ; address += size) {
518 vm_region_basic_info_data_64_t info;
519 mach_port_t objectName;
520 unsigned int infoCount = VM_REGION_BASIC_INFO_COUNT_64;
521 if (kern_return_t r = mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO,
522 (vm_region_info_t)&info, &infoCount, &objectName)) {
523 break;
524 }
525 if ( info.protection != (VM_PROT_READ|VM_PROT_EXECUTE) )
526 continue;
527 // read start of vm region to verify it is a mach header
528 withRemoteObject(task, address, false, NULL, ^(mach_header_64 mhBuffer){
529 if ( (mhBuffer.magic != MH_MAGIC) && (mhBuffer.magic != MH_MAGIC_64) )
530 return;
531 // now we know the region is the start of a mach-o file
532 if ( mhBuffer.filetype == MH_EXECUTE ) {
533 mainExecutableAddress = address;
534 int len = proc_regionfilename(pid, mainExecutableAddress, mainExecutablePath, PATH_MAX);
535 if ( len != 0 ) {
536 mainExecutablePath[len] = '\0';
537 }
538 ++imageCount;
539 }
540 else if ( mhBuffer.filetype == MH_DYLINKER ) {
541 dyldAddress = address;
542 int len = proc_regionfilename(pid, dyldAddress, dyldPath, PATH_MAX);
543 if ( len != 0 ) {
544 dyldPath[len] = '\0';
545 }
546 ++imageCount;
547 }
548 });
549 //fprintf(stderr, "vm region: addr=0x%llX, size=0x%llX, prot=0x%X\n", (uint64_t)address, (uint64_t)size, info.protection);
550 }
551 //fprintf(stderr, "dyld: addr=0x%llX, path=%s\n", dyldAddress, dyldPathBuffer);
552 //fprintf(stderr, "app: addr=0x%llX, path=%s\n", mainExecutableAddress, mainExecutablePathBuffer);
553
554 // explicitly set aot image count to 0 in the suspended case
555 unsigned aotImageCount = 0;
556
557 // allocate result object
558 size_t allocationSize = sizeof(dyld_process_info_base)
559 + sizeof(dyld_process_cache_info)
560 + sizeof(dyld_process_aot_cache_info)
561 + sizeof(dyld_process_state_info)
562 + sizeof(ImageInfo)*(imageCount)
563 + sizeof(dyld_aot_image_info_64)*aotImageCount // this should always be 0, but including it here to be explicit
564 + sizeof(SegmentInfo)*imageCount*10
565 + imageCount*PATH_MAX;
566 void* storage = malloc(allocationSize);
567 if (storage == nullptr) {
568 *kr = KERN_NO_SPACE;
569 return nullptr;
570 }
571 auto obj = dyld_process_info_ptr(new (storage) dyld_process_info_base((dyld_platform_t)platformID, imageCount, aotImageCount, allocationSize), deleter);
572 (void)obj->reserveSpace(sizeof(dyld_process_info_base)+sizeof(dyld_process_cache_info)+sizeof(dyld_process_aot_cache_info)+sizeof(dyld_process_state_info));
573 // fill in base info
574 dyld_process_cache_info* cacheInfo = obj->cacheInfo();
575 bzero(cacheInfo->cacheUUID, 16);
576 cacheInfo->cacheBaseAddress = 0;
577 cacheInfo->noCache = true;
578 cacheInfo->privateCache = false;
579
580 // zero out aot cache info
581 dyld_process_aot_cache_info* aotCacheInfo = obj->aotCacheInfo();
582 bzero(aotCacheInfo->cacheUUID, 16);
583 aotCacheInfo->cacheBaseAddress = 0;
584
585 dyld_process_state_info* stateInfo = obj->stateInfo();
586 stateInfo->timestamp = 0;
587 stateInfo->imageCount = imageCount;
588 stateInfo->initialImageCount = imageCount;
589 stateInfo->dyldState = dyld_process_state_not_started;
590
591 // fill in info for dyld
592 if ( dyldAddress != 0 ) {
593 if ((*kr = obj->addDyldImage(task, dyldAddress, 0, dyldPath))) {
594 return nullptr;
595 }
596 }
597
598 // fill in info for each image
599 if ( mainExecutableAddress != 0 ) {
600 if (!obj->addImage(task, false, mainExecutableAddress, 0, mainExecutablePath)) {
601 return nullptr;
602 }
603 }
604
605 if (allImageInfo.infoArrayChangeTimestamp != 0) {
606 return nullptr;
607 }
608
609 count = MACH_TASK_BASIC_INFO_COUNT;
610 if ((*kr = task_info(task, MACH_TASK_BASIC_INFO, (task_info_t)&ti, &count))) {
611 return nullptr;
612 }
613
614 // The task is not suspended, exit
615 if (ti.suspend_count == 0) {
616 return nullptr;
617 }
618
619 return obj;
620 }
621
622
623
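// Strings are bump-allocated downward from the end of the single allocation; if reserveSpace()
// reports there is no room left, an empty string is returned instead of overflowing.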
624 const char* dyld_process_info_base::addString(const char* str, size_t maxlen)
625 {
626 size_t len = strnlen(str, maxlen) + 1;
627 // If we don't have enough space return an empty string
628 if (!reserveSpace(len)) { return ""; }
629 _stringRevBumpPtr -= len;
630 strlcpy(_stringRevBumpPtr, str, len);
631 return _stringRevBumpPtr;
632 }
633
634 const char* dyld_process_info_base::copyPath(task_t task, uint64_t stringAddressInTask)
635 {
636 __block const char* retval = "";
637 withRemoteBuffer(task, stringAddressInTask, PATH_MAX, false, true, nullptr, ^(void *buffer, size_t size) {
638 retval = addString(static_cast<const char *>(buffer), size);
639 });
640 return retval;
641 }
642
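// Records one image. The path comes from, in order of preference: a local string (imagePathLocal),
// a pointer directly into the shared cache when the target uses the same cache as this process, or
// a copy read out of the remote task. UUID and segment data are then pulled from the load commands.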
643 bool dyld_process_info_base::addImage(task_t task, bool sameCacheAsThisProcess, uint64_t imageAddress, uint64_t imagePath, const char* imagePathLocal)
644 {
645 _curImage->loadAddress = imageAddress;
646 _curImage->segmentStartIndex = _curSegmentIndex;
647 if ( imagePathLocal != NULL ) {
648 _curImage->path = addString(imagePathLocal, PATH_MAX);
649 } else if ( sameCacheAsThisProcess && inCache(imagePath) ) {
650 _curImage->path = (const char*)imagePath;
651 } else if (imagePath) {
652 _curImage->path = copyPath(task, imagePath);
653 } else {
654 _curImage->path = "";
655 }
656
657 if ( sameCacheAsThisProcess && inCache(imageAddress) ) {
658 addInfoFromLoadCommands((mach_header*)imageAddress, imageAddress, 32*1024);
659 } else if (addInfoFromRemoteLoadCommands(task, imageAddress) != KERN_SUCCESS) {
660 // The image is not here, return early
661 return false;
662 }
663 _curImage->segmentsCount = _curSegmentIndex - _curImage->segmentStartIndex;
664 _curImage++;
665 return true;
666 }
667
668 bool dyld_process_info_base::addAotImage(dyld_aot_image_info_64 aotImageInfo) {
669 if (!reserveSpace(sizeof(dyld_aot_image_info_64))) {
670 return false;
671 }
672 _curAotImage->x86LoadAddress = aotImageInfo.x86LoadAddress;
673 _curAotImage->aotLoadAddress = aotImageInfo.aotLoadAddress;
674 _curAotImage->aotImageSize = aotImageInfo.aotImageSize;
675 memcpy(_curAotImage->aotImageKey, aotImageInfo.aotImageKey, sizeof(aotImageInfo.aotImageKey));
676
677 _curAotImage++;
678 return true;
679 }
680
681 kern_return_t dyld_process_info_base::addInfoFromRemoteLoadCommands(task_t task, uint64_t remoteMH) {
682 __block kern_return_t kr = KERN_SUCCESS;
683 __block size_t headerPagesSize = 0;
684 __block bool done = false;
685
686 //Since the minimum we can reasonably map is a page, map that.
687 withRemoteBuffer(task, remoteMH, PAGE_SIZE, false, false, &kr, ^(void * buffer, size_t size) {
688 const mach_header* mh = (const mach_header*)buffer;
689 headerPagesSize = sizeof(mach_header) + mh->sizeofcmds;
690 if (headerPagesSize <= PAGE_SIZE) {
691 addInfoFromLoadCommands(mh, remoteMH, size);
692 done = true;
693 }
694 });
695
696 //The load commands did not fit in the first page, but now we know the size, so remap and try again
697 if (!done) {
698 if (kr != KERN_SUCCESS) {
699 return kr;
700 }
701 withRemoteBuffer(task, remoteMH, headerPagesSize, false, false, &kr, ^(void * buffer, size_t size) {
702 addInfoFromLoadCommands((mach_header*)buffer, remoteMH, size);
703 });
704 }
705
706 return kr;
707 }
708
709 kern_return_t dyld_process_info_base::addDyldImage(task_t task, uint64_t dyldAddress, uint64_t dyldPathAddress, const char* localPath)
710 {
711 __block kern_return_t kr = KERN_SUCCESS;
712 _curImage->loadAddress = dyldAddress;
713 _curImage->segmentStartIndex = _curSegmentIndex;
714 if ( localPath != NULL ) {
715 _curImage->path = addString(localPath, PATH_MAX);
716 }
717 else {
718 _curImage->path = copyPath(task, dyldPathAddress);
719 if ( kr != KERN_SUCCESS)
720 return kr;
721 }
722
723 kr = addInfoFromRemoteLoadCommands(task, dyldAddress);
724 if ( kr != KERN_SUCCESS)
725 return kr;
726
727 _curImage->segmentsCount = _curSegmentIndex - _curImage->segmentStartIndex;
728 _curImage++;
729 return KERN_SUCCESS;
730 }
731
732
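// Walks the load commands of a mach header that has already been mapped or copied locally,
// recording the LC_UUID and the name/address/size of each LC_SEGMENT / LC_SEGMENT_64.
// The bounds checks guard against malformed or truncated command lists.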
733 void dyld_process_info_base::addInfoFromLoadCommands(const mach_header* mh, uint64_t addressInTask, size_t size)
734 {
735 const load_command* startCmds = NULL;
736 if ( mh->magic == MH_MAGIC_64 )
737 startCmds = (load_command*)((char *)mh + sizeof(mach_header_64));
738 else if ( mh->magic == MH_MAGIC )
739 startCmds = (load_command*)((char *)mh + sizeof(mach_header));
740 else
741 return; // not a mach-o file, or wrong endianness
742
743 const load_command* const cmdsEnd = (load_command*)((char*)startCmds + mh->sizeofcmds);
744 const load_command* cmd = startCmds;
745 for(uint32_t i = 0; i < mh->ncmds; ++i) {
746 const load_command* nextCmd = (load_command*)((char *)cmd + cmd->cmdsize);
747 if ( (cmd->cmdsize < 8) || (nextCmd > cmdsEnd) || (nextCmd < startCmds) ) {
748 return; // malformed load command
749 }
750 if ( cmd->cmd == LC_UUID ) {
751 const uuid_command* uuidCmd = (uuid_command*)cmd;
752 memcpy(_curImage->uuid, uuidCmd->uuid, 16);
753 }
754 else if ( cmd->cmd == LC_SEGMENT ) {
755 if (!reserveSpace(sizeof(SegmentInfo))) { break; }
756 const segment_command* segCmd = (segment_command*)cmd;
757 _curSegment->name = copySegmentName(segCmd->segname);
758 _curSegment->addr = segCmd->vmaddr;
759 _curSegment->size = segCmd->vmsize;
760 _curSegment++;
761 _curSegmentIndex++;
762 }
763 else if ( cmd->cmd == LC_SEGMENT_64 ) {
764 if (!reserveSpace(sizeof(SegmentInfo))) { break; }
765 const segment_command_64* segCmd = (segment_command_64*)cmd;
766 _curSegment->name = copySegmentName(segCmd->segname);
767 _curSegment->addr = segCmd->vmaddr;
768 _curSegment->size = segCmd->vmsize;
769 _curSegment++;
770 _curSegmentIndex++;
771 }
772 cmd = nextCmd;
773 }
774 }
775
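// Returns a static string for well-known segment names so they do not consume string-pool space;
// anything else is copied into the pool (at most 16 bytes, the size of segname[]).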
776 const char* dyld_process_info_base::copySegmentName(const char* name)
777 {
778 // don't copy names of standard segments into string pool
779 static const char* stdSegNames[] = {
780 "__TEXT", "__DATA", "__LINKEDIT",
781 "__DATA_DIRTY", "__DATA_CONST",
782 "__OBJC", "__OBJC_CONST",
783 "__AUTH", "__AUTH_CONST",
784 NULL
785 };
786 for (const char** s=stdSegNames; *s != NULL; ++s) {
787 if ( strcmp(name, *s) == 0 )
788 return *s;
789 }
790 // copy custom segment names into string pool
791 return addString(name, 16);
792 }
793
794 void dyld_process_info_base::forEachImage(void (^callback)(uint64_t machHeaderAddress, const uuid_t uuid, const char* path)) const
795 {
796 for (const ImageInfo* p = _firstImage; p < _curImage; ++p) {
797 callback(p->loadAddress, p->uuid, p->path);
798 }
799 }
800
801
802 #if TARGET_OS_OSX
803 void dyld_process_info_base::forEachAotImage(bool (^callback)(uint64_t x86Address, uint64_t aotAddress, uint64_t aotSize, uint8_t* aotImageKey, size_t aotImageKeySize)) const
804 {
805 for (const dyld_aot_image_info_64* p = _firstAotImage; p < _curAotImage; ++p) {
806 if (!callback(p->x86LoadAddress, p->aotLoadAddress, p->aotImageSize, (uint8_t*)p->aotImageKey, sizeof(p->aotImageKey))) {
807 break;
808 }
809 }
810 }
811 #endif
812
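// Segment vmaddr values are recorded straight from the load commands, so they are unslid. The slide
// is recovered by comparing the image's mach header address with its __TEXT vmaddr, and each
// callback receives the slid (actual) segment address.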
813 void dyld_process_info_base::forEachSegment(uint64_t machHeaderAddress, void (^callback)(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName)) const
814 {
815 for (const ImageInfo* p = _firstImage; p < _curImage; ++p) {
816 if ( p->loadAddress == machHeaderAddress ) {
817 uint64_t slide = 0;
818 for (uint32_t i=0; i < p->segmentsCount; ++i) {
819 const SegmentInfo* seg = &_firstSegment[p->segmentStartIndex+i];
820 if ( strcmp(seg->name, "__TEXT") == 0 ) {
821 slide = machHeaderAddress - seg->addr;
822 break;
823 }
824 }
825 for (uint32_t i=0; i < p->segmentsCount; ++i) {
826 const SegmentInfo* seg = &_firstSegment[p->segmentStartIndex+i];
827 callback(seg->addr + slide, seg->size, seg->name);
828 }
829 break;
830 }
831 }
832 }
833
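// Entry point: snapshot the image list of another task. Reads the target's TASK_DYLD_INFO to locate
// its dyld_all_image_infos, then dispatches to the 32- or 64-bit make<>() specialization.
//
// Illustrative sketch of typical client usage (error handling elided; "taskPort" is a placeholder
// for a task port the caller already holds):
//
//    kern_return_t kr = KERN_SUCCESS;
//    dyld_process_info info = _dyld_process_info_create(taskPort, 0, &kr);
//    if ( info != NULL ) {
//        _dyld_process_info_for_each_image(info, ^(uint64_t machHeaderAddress, const uuid_t uuid, const char* path) {
//            // inspect each loaded image
//        });
//        _dyld_process_info_release(info);
//    }
//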
834 dyld_process_info _dyld_process_info_create(task_t task, uint64_t timestamp, kern_return_t* kr)
835 {
836 __block dyld_process_info result = nullptr;
837 kern_return_t krSink = KERN_SUCCESS;
838 if (kr == nullptr) {
839 kr = &krSink;
840 }
841 *kr = KERN_SUCCESS;
842
843 task_dyld_info_data_t task_dyld_info;
844 mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
845 if ( kern_return_t r = task_info(task, TASK_DYLD_INFO, (task_info_t)&task_dyld_info, &count) ) {
846 *kr = r;
847 return nullptr;
848 }
849
850 //The kernel will return MACH_VM_MIN_ADDRESS for an executable that has not had dyld loaded
851 if (task_dyld_info.all_image_info_addr == MACH_VM_MIN_ADDRESS)
852 return nullptr;
853
854 // We use a true shared memory buffer here; that way, as long as libdyld in both processes
855 // reads and writes the timestamp atomically, we get a coherent view of the
856 // remote process.
857 // That also means that we *MUST* directly read the memory, which is why we template the make() call
858 withRemoteBuffer(task, task_dyld_info.all_image_info_addr, (size_t)task_dyld_info.all_image_info_size, true, false, kr, ^(void *buffer, size_t size) {
859 dyld_process_info_ptr base;
860 if (task_dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32 ) {
861 const dyld_all_image_infos_32* info = (const dyld_all_image_infos_32*)buffer;
862 base = dyld_process_info_base::make<dyld_all_image_infos_32, dyld_image_info_32>(task, *info, timestamp, kr);
863 } else {
864 const dyld_all_image_infos_64* info = (const dyld_all_image_infos_64*)buffer;
865 base = dyld_process_info_base::make<dyld_all_image_infos_64, dyld_image_info_64>(task, *info, timestamp, kr);
866 }
867 if (base) {
868 result = base.release();
869 }
870 });
871 return result;
872 }
873
874 void _dyld_process_info_get_state(dyld_process_info info, dyld_process_state_info* stateInfo)
875 {
876 *stateInfo = *info->stateInfo();
877 }
878
879 void _dyld_process_info_get_cache(dyld_process_info info, dyld_process_cache_info* cacheInfo)
880 {
881 *cacheInfo = *info->cacheInfo();
882 }
883
884 void _dyld_process_info_get_aot_cache(dyld_process_info info, dyld_process_aot_cache_info* aotCacheInfo)
885 {
886 *aotCacheInfo = *info->aotCacheInfo();
887 }
888
889 void _dyld_process_info_retain(dyld_process_info object)
890 {
891 const_cast<dyld_process_info_base*>(object)->retain();
892 }
893
894 dyld_platform_t _dyld_process_info_get_platform(dyld_process_info object) {
895 return const_cast<dyld_process_info_base*>(object)->platform();
896 }
897
898 void _dyld_process_info_release(dyld_process_info object)
899 {
900 const_cast<dyld_process_info_base*>(object)->release();
901 }
902
903 void _dyld_process_info_for_each_image(dyld_process_info info, void (^callback)(uint64_t machHeaderAddress, const uuid_t uuid, const char* path))
904 {
905 info->forEachImage(callback);
906 }
907
908 #if TARGET_OS_OSX
909 void _dyld_process_info_for_each_aot_image(dyld_process_info info, bool (^callback)(uint64_t x86Address, uint64_t aotAddress, uint64_t aotSize, uint8_t* aotImageKey, size_t aotImageKeySize))
910 {
911 info->forEachAotImage(callback);
912 }
913 #endif
914
915 void _dyld_process_info_for_each_segment(dyld_process_info info, uint64_t machHeaderAddress, void (^callback)(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName))
916 {
917 info->forEachSegment(machHeaderAddress, callback);
918 }
919
920
921