/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <sys/param.h>
#include <mach/shared_region.h>
#include <mach/mach_vm.h>
#include <mach/vm_region.h>
#include <libkern/OSAtomic.h>
#include <mach-o/dyld_process_info.h>
#include <mach-o/dyld_images.h>

#include "MachOFile.h"
#include "dyld_process_info_internal.h"
// this was in dyld_priv.h but it is no longer exported
const struct dyld_all_image_infos* _dyld_get_all_image_infos();
RemoteBuffer& RemoteBuffer::operator=(RemoteBuffer&& other) {
    std::swap(_localAddress, other._localAddress);
    std::swap(_size, other._size);
    std::swap(_kr, other._kr);
    return *this;
}

RemoteBuffer::RemoteBuffer() : _localAddress(0), _size(0), _kr(KERN_SUCCESS) {}
RemoteBuffer::RemoteBuffer(std::tuple<mach_vm_address_t,vm_size_t,kern_return_t> T)
    : _localAddress(std::get<0>(T)), _size(std::get<1>(T)), _kr(std::get<2>(T)) {}

RemoteBuffer::RemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool allow_truncation)
    : RemoteBuffer(RemoteBuffer::create(task, remote_address, remote_size, allow_truncation)) {};
std::pair<mach_vm_address_t, kern_return_t>
RemoteBuffer::map(task_t task, mach_vm_address_t remote_address, vm_size_t size) {
    static kern_return_t (*mvrn)(vm_map_t, mach_vm_address_t*, mach_vm_size_t, mach_vm_offset_t, int, vm_map_read_t, mach_vm_address_t,
                                 boolean_t, vm_prot_t*, vm_prot_t*, vm_inherit_t) = nullptr;
    vm_prot_t cur_protection = VM_PROT_NONE;
    vm_prot_t max_protection = VM_PROT_READ;
    if ( size == 0 ) {
        return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_INVALID_ARGUMENT);
    }
    mach_vm_address_t localAddress = 0;
#if TARGET_OS_SIMULATOR
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        mvrn = (kern_return_t (*)(vm_map_t, mach_vm_address_t*, mach_vm_size_t, mach_vm_offset_t, int, vm_map_read_t, mach_vm_address_t,
                                  boolean_t, vm_prot_t*, vm_prot_t*, vm_inherit_t))dlsym(RTLD_DEFAULT, "mach_vm_remap_new");
        if (mvrn == nullptr) {
            // We are running on a system that does not support task_read ports, use the old call
            mvrn = (kern_return_t (*)(vm_map_t, mach_vm_address_t*, mach_vm_size_t, mach_vm_offset_t, int, vm_map_read_t, mach_vm_address_t,
                                      boolean_t, vm_prot_t*, vm_prot_t*, vm_inherit_t))dlsym(RTLD_DEFAULT, "mach_vm_remap");
        }
    });
#else
    mvrn = &mach_vm_remap_new;
#endif
    auto kr = mvrn(mach_task_self(),
                   &localAddress,
                   size,
                   0,
                   VM_FLAGS_ANYWHERE | VM_FLAGS_RESILIENT_CODESIGN | VM_FLAGS_RESILIENT_MEDIA,
                   task,
                   remote_address,
                   TRUE,
                   &cur_protection,
                   &max_protection,
                   VM_INHERIT_NONE);
    // The call was not successful, return
    if (kr != KERN_SUCCESS) {
        return std::make_pair(MACH_VM_MIN_ADDRESS, kr);
    }
    // If it is not a shared buffer then copy it into a local buffer so our results are coherent in the event
    // the page goes away due to storage removal, etc. We have to do this because even after we read the page the
    // contents might go away if the object is paged out and then the backing region is disconnected (for example, if
    // we are copying some memory in the middle of a mach-o that is on a USB drive that is disconnected after we perform
    // the mapping). Once we copy them into a local buffer the memory will be handled by the default pager instead of
    // potentially being backed by the mmap pager, and thus will be guaranteed not to mutate out from under us.
    void* buffer = malloc(size);
    if (buffer == nullptr) {
        (void)vm_deallocate(mach_task_self(), (vm_address_t)localAddress, size);
        return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_NO_SPACE);
    }
    memcpy(buffer, (void *)localAddress, size);
    (void)vm_deallocate(mach_task_self(), (vm_address_t)localAddress, size);
    return std::make_pair((vm_address_t)buffer, KERN_SUCCESS);
}
std::tuple<mach_vm_address_t,vm_size_t,kern_return_t> RemoteBuffer::create(task_t task,
                                                                            mach_vm_address_t remote_address,
                                                                            size_t size,
                                                                            bool allow_truncation) {
    mach_vm_address_t localAddress;
    kern_return_t kr;
    // Try the initial map
    std::tie(localAddress, kr) = map(task, remote_address, size);
    if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr);
    // The first attempt failed, truncate if possible and try again. We only need to try once since the largest
    // truncatable buffer we map is less than a single page. To be more general we would need to try repeatedly in a
    // loop.
    if (allow_truncation) {
        size = PAGE_SIZE - remote_address%PAGE_SIZE;
        std::tie(localAddress, kr) = map(task, remote_address, size);
        if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr);
    }
    // If we reach this then the mapping completely failed
    return std::make_tuple(MACH_VM_MIN_ADDRESS, 0, kr);
}
RemoteBuffer::~RemoteBuffer() {
    if (!_localAddress) { return; }
    free((void*)_localAddress);
}

void *RemoteBuffer::getLocalAddress() const { return (void *)_localAddress; }
size_t RemoteBuffer::getSize() const { return _size; }
kern_return_t RemoteBuffer::getKernelReturn() const { return _kr; }
void withRemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool allow_truncation, kern_return_t *kr, void (^block)(void *buffer, size_t size)) {
    kern_return_t krSink = KERN_SUCCESS;
    if (kr == nullptr) {
        kr = &krSink;
    }
    RemoteBuffer buffer(task, remote_address, remote_size, allow_truncation);
    *kr = buffer.getKernelReturn();
    if (*kr == KERN_SUCCESS) {
        block(buffer.getLocalAddress(), buffer.getSize());
    }
}
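
// Example usage (illustrative sketch; the task and address values are hypothetical placeholders).
// A caller wraps a remote read in withRemoteBuffer() and consumes the bytes only inside the block,
// since the backing buffer is freed when the RemoteBuffer goes out of scope:
//
//     kern_return_t kr = KERN_SUCCESS;
//     withRemoteBuffer(someTask, someRemoteAddress, sizeof(uint64_t), false, &kr, ^(void* buffer, size_t size) {
//         uint64_t value;
//         memcpy(&value, buffer, sizeof(value));   // copy out before the buffer is torn down
//     });
//     // if the block never ran, kr holds the mapping failure code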
// Opaque object returned by _dyld_process_info_create()
struct dyld_process_info_base;

struct __attribute__((visibility("hidden"))) dyld_process_info_deleter { // deleter
    // dyld_process_info_deleter() {};
    // dyld_process_info_deleter(const dyld_process_info_deleter&) { }
    // dyld_process_info_deleter(dyld_process_info_deleter&) {}
    // dyld_process_info_deleter(dyld_process_info_deleter&&) {}
    void operator()(dyld_process_info_base* p) const {
        free(p);
    }
};

static dyld_process_info_deleter deleter;
typedef std::unique_ptr<dyld_process_info_base, dyld_process_info_deleter> dyld_process_info_ptr;
struct __attribute__((visibility("hidden"))) dyld_process_info_base {
    template<typename T1, typename T2>
    static dyld_process_info_ptr make(task_t task, const T1& allImageInfo, uint64_t timestamp, kern_return_t* kr);
    template<typename T>
    static dyld_process_info_ptr makeSuspended(task_t task, const T& allImageInfo, kern_return_t* kr);

    std::atomic<uint32_t>&       retainCount() const { return _retainCount; }
    dyld_process_cache_info*     cacheInfo() const { return (dyld_process_cache_info*)(((char*)this) + _cacheInfoOffset); }
    dyld_process_aot_cache_info* aotCacheInfo() const { return (dyld_process_aot_cache_info*)(((char*)this) + _aotCacheInfoOffset); }
    dyld_process_state_info*     stateInfo() const { return (dyld_process_state_info*)(((char*)this) + _stateInfoOffset); }
    dyld_platform_t              platform() const { return _platform; }

    void forEachImage(void (^callback)(uint64_t machHeaderAddress, const uuid_t uuid, const char* path)) const;
    void forEachAotImage(bool (^callback)(uint64_t x86Address, uint64_t aotAddress, uint64_t aotSize, uint8_t* aotImageKey, size_t aotImageKeySize)) const;
    void forEachSegment(uint64_t machHeaderAddress, void (^callback)(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName)) const;

    bool reserveSpace(size_t space) {
        if (_freeSpace < space) { return false; }
        _freeSpace -= space;
        return true;
    }

    void retain() {
        _retainCount++;
    }

    void release() {
        uint32_t newCount = --_retainCount;
        if ( newCount == 0 ) {
            free(this);
        }
    }

private:
    struct ImageInfo {
        uuid_t      uuid;
        uint64_t    loadAddress;
        const char* path;
        uint32_t    segmentStartIndex;
        uint32_t    segmentsCount;
    };

    struct SegmentInfo {
        const char* name;
        uint64_t    addr;
        uint64_t    size;
    };

    dyld_process_info_base(dyld_platform_t platform, unsigned imageCount, unsigned aotImageCount, size_t totalSize);
    void* operator new (size_t, void* buf) { return buf; }

    static bool inCache(uint64_t addr) { return (addr > SHARED_REGION_BASE) && (addr < SHARED_REGION_BASE+SHARED_REGION_SIZE); }
    bool addImage(task_t task, bool sameCacheAsThisProcess, uint64_t imageAddress, uint64_t imagePath, const char* imagePathLocal);

    bool addAotImage(dyld_aot_image_info_64 aotImageInfo);

    kern_return_t addDyldImage(task_t task, uint64_t dyldAddress, uint64_t dyldPathAddress, const char* localPath);

    bool invalid() { return ((char*)_stringRevBumpPtr < (char*)_curSegment); }
    const char* copyPath(task_t task, uint64_t pathAddr);
    const char* addString(const char*, size_t);
    const char* copySegmentName(const char*);

    void addInfoFromLoadCommands(const mach_header* mh, uint64_t addressInTask, size_t size);
    kern_return_t addInfoFromRemoteLoadCommands(task_t task, uint64_t remoteMH);

    void inspectLocalImageLoadCommands(uint64_t imageAddress, void* func);
    kern_return_t inspectRemoteImageLoadCommands(task_t task, uint64_t imageAddress, void* func);

    mutable std::atomic<uint32_t>  _retainCount;
    const uint32_t                 _cacheInfoOffset;
    const uint32_t                 _aotCacheInfoOffset;
    const uint32_t                 _stateInfoOffset;
    const uint32_t                 _imageInfosOffset;
    const uint32_t                 _aotImageInfosOffset;
    const uint32_t                 _segmentInfosOffset;
    size_t                         _freeSpace;
    dyld_platform_t                _platform;
    ImageInfo* const               _firstImage;
    ImageInfo*                     _curImage;
    dyld_aot_image_info_64* const  _firstAotImage;
    dyld_aot_image_info_64*        _curAotImage;
    SegmentInfo* const             _firstSegment;
    SegmentInfo*                   _curSegment;
    uint32_t                       _curSegmentIndex;
    char*                          _stringRevBumpPtr;

    // dyld_process_cache_info  cacheInfo;
    // dyld_process_state_info  stateInfo;
    // ImageInfo                images[];
    // SegmentInfo              segments[];
    // char                     stringPool[]
};
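
// Layout note: each info object is a single malloc'd block. The constructor below places the
// fixed-size header and the cache/aot-cache/state info structs first, followed by the ImageInfo
// array, the dyld_aot_image_info_64 array, and the SegmentInfo array; path strings are
// bump-allocated downward from the end of the block via _stringRevBumpPtr, and invalid() reports
// when the two regions collide.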
dyld_process_info_base::dyld_process_info_base(dyld_platform_t platform, unsigned imageCount, unsigned aotImageCount, size_t totalSize)
    : _retainCount(1), _cacheInfoOffset(sizeof(dyld_process_info_base)),
      _aotCacheInfoOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info)),
      _stateInfoOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_aot_cache_info)),
      _imageInfosOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_aot_cache_info) + sizeof(dyld_process_state_info)),
      _aotImageInfosOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_aot_cache_info) + sizeof(dyld_process_state_info) + imageCount*sizeof(ImageInfo)),
      _segmentInfosOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_aot_cache_info) + sizeof(dyld_process_state_info) + imageCount*sizeof(ImageInfo) + aotImageCount*sizeof(dyld_aot_image_info_64)),
      _freeSpace(totalSize), _platform(platform),
      _firstImage((ImageInfo*)(((uint8_t*)this) + _imageInfosOffset)),
      _curImage((ImageInfo*)(((uint8_t*)this) + _imageInfosOffset)),
      _firstAotImage((dyld_aot_image_info_64*)(((uint8_t*)this) + _aotImageInfosOffset)),
      _curAotImage((dyld_aot_image_info_64*)(((uint8_t*)this) + _aotImageInfosOffset)),
      _firstSegment((SegmentInfo*)(((uint8_t*)this) + _segmentInfosOffset)),
      _curSegment((SegmentInfo*)(((uint8_t*)this) + _segmentInfosOffset)),
      _curSegmentIndex(0),
      _stringRevBumpPtr((char*)(this)+totalSize)
{
}
template<typename T1, typename T2>
dyld_process_info_ptr dyld_process_info_base::make(task_t task, const T1& allImageInfo, uint64_t timestamp, kern_return_t* kr)
{
    __block dyld_process_info_ptr result = nullptr;

    // bail out if dyld is too old
    if ( allImageInfo.version < 15 ) {
        *kr = KERN_FAILURE;
        return nullptr;
    }

    // Check if the process is suspended
    if (allImageInfo.infoArrayChangeTimestamp == 0) {
        result = dyld_process_info_base::makeSuspended<T1>(task, allImageInfo, kr);
        // If we have a result return it, otherwise rescan
        if (result) {
            // If it returned the process is suspended and there is nothing more to do
            return std::move(result);
        }
        usleep(1000 * 50); // 50ms
        // Not exactly correct, but conveys that operation may succeed in the future
        *kr = KERN_RESOURCE_SHORTAGE;
        return nullptr;
    }

    // Test to see if there are no changes and we can exit early
    if (timestamp != 0 && timestamp == allImageInfo.infoArrayChangeTimestamp) {
        *kr = KERN_SUCCESS;
        return nullptr;
    }

    uint64_t currentTimestamp = allImageInfo.infoArrayChangeTimestamp;
    mach_vm_address_t infoArray = allImageInfo.infoArray;
    if (infoArray == 0) {
        usleep(1000 * 50); // 50ms
        // Not exactly correct, but conveys that operation may succeed in the future
        *kr = KERN_RESOURCE_SHORTAGE;
        return nullptr;
    }

    // For the moment we are going to truncate any image list longer than 8192 because some programs do
    // terrible things that corrupt their own image lists and we need to stop clients from crashing
    // reading them. We can try to do something more advanced in the future. rdar://27446361
    uint32_t imageCount = allImageInfo.infoArrayCount;
    imageCount = MIN(imageCount, 8192);
    size_t imageArraySize = imageCount * sizeof(T2);

    withRemoteBuffer(task, infoArray, imageArraySize, false, kr, ^(void *buffer, size_t size) {
        // figure out how many path strings will need to be copied and their size
        T2* imageArray = (T2 *)buffer;
        const dyld_all_image_infos* myInfo = _dyld_get_all_image_infos();
        bool sameCacheAsThisProcess = !allImageInfo.processDetachedFromSharedRegion
                                      && !myInfo->processDetachedFromSharedRegion
                                      && ((memcmp(myInfo->sharedCacheUUID, &allImageInfo.sharedCacheUUID[0], 16) == 0)
                                      && (myInfo->sharedCacheSlide == allImageInfo.sharedCacheSlide));
        unsigned countOfPathsNeedingCopying = 0;
        if ( sameCacheAsThisProcess ) {
            for (uint32_t i=0; i < imageCount; ++i) {
                if ( !inCache(imageArray[i].imageFilePath) )
                    ++countOfPathsNeedingCopying;
            }
        }
        else {
            countOfPathsNeedingCopying = imageCount+1;
        }
        unsigned imageCountWithDyld = imageCount+1;

        // allocate result object
        size_t allocationSize =   sizeof(dyld_process_info_base)
                                + sizeof(dyld_process_cache_info)
                                + sizeof(dyld_process_aot_cache_info)
                                + sizeof(dyld_process_state_info)
                                + sizeof(ImageInfo)*(imageCountWithDyld)
                                + sizeof(dyld_aot_image_info_64)*(allImageInfo.aotInfoCount) // add the size necessary for aot info to this buffer
                                + sizeof(SegmentInfo)*imageCountWithDyld*10
                                + countOfPathsNeedingCopying*PATH_MAX;
        void* storage = malloc(allocationSize);
        if (storage == nullptr) {
            *kr = KERN_NO_SPACE;
            result = nullptr;
            return;
        }
        auto info = dyld_process_info_ptr(new (storage) dyld_process_info_base(allImageInfo.platform, imageCountWithDyld, allImageInfo.aotInfoCount, allocationSize), deleter);
        (void)info->reserveSpace(sizeof(dyld_process_info_base)+sizeof(dyld_process_cache_info)+sizeof(dyld_process_state_info)+sizeof(dyld_process_aot_cache_info));
        (void)info->reserveSpace(sizeof(ImageInfo)*imageCountWithDyld);

        dyld_process_cache_info* cacheInfo = info->cacheInfo();
        memcpy(cacheInfo->cacheUUID, &allImageInfo.sharedCacheUUID[0], 16);
        cacheInfo->cacheBaseAddress = allImageInfo.sharedCacheBaseAddress;
        cacheInfo->privateCache = allImageInfo.processDetachedFromSharedRegion;
        // if no cache is used, allImageInfo has all zeros for cache UUID
        cacheInfo->noCache = true;
        for (int i=0; i < 16; ++i) {
            if ( cacheInfo->cacheUUID[i] != 0 ) {
                cacheInfo->noCache = false;
            }
        }

        // fill in aot shared cache info
        dyld_process_aot_cache_info* aotCacheInfo = info->aotCacheInfo();
        memcpy(aotCacheInfo->cacheUUID, &allImageInfo.aotSharedCacheUUID[0], 16);
        aotCacheInfo->cacheBaseAddress = allImageInfo.aotSharedCacheBaseAddress;

        dyld_process_state_info* stateInfo = info->stateInfo();
        stateInfo->timestamp = currentTimestamp;
        stateInfo->imageCount = imageCountWithDyld;
        stateInfo->initialImageCount = (uint32_t)(allImageInfo.initialImageCount+1);
        stateInfo->dyldState = dyld_process_state_dyld_initialized;

        if ( allImageInfo.libSystemInitialized != 0 ) {
            stateInfo->dyldState = dyld_process_state_libSystem_initialized;
            if ( allImageInfo.initialImageCount != imageCount ) {
                stateInfo->dyldState = dyld_process_state_program_running;
            }
        }
        if ( allImageInfo.errorMessage != 0 ) {
            stateInfo->dyldState = allImageInfo.terminationFlags ? dyld_process_state_terminated_before_inits : dyld_process_state_dyld_terminated;
        }

        // fill in info for dyld
        if ( allImageInfo.dyldPath != 0 ) {
            if ((*kr = info->addDyldImage(task, allImageInfo.dyldImageLoadAddress, allImageInfo.dyldPath, NULL))) {
                result = nullptr;
                return;
            }
        }

        // fill in info for each image
        for (uint32_t i=0; i < imageCount; ++i) {
            if (!info->addImage(task, sameCacheAsThisProcess, imageArray[i].imageLoadAddress, imageArray[i].imageFilePath, NULL)) {
                *kr = KERN_FAILURE;
                result = nullptr;
                return;
            }
        }
        // sanity check internal data did not overflow
        if ( info->invalid() ) {
            *kr = KERN_FAILURE;
            result = nullptr;
            return;
        }

        result = std::move(info);
    });

    mach_vm_address_t aotImageArray = allImageInfo.aotInfoArray;
    // shortcircuit this code path if aotImageArray == 0 (32 vs 64 bit struct difference)
    // and if result == nullptr, since we need to append aot image infos to the process info struct
    if (aotImageArray != 0 && result != nullptr) {
        uint32_t aotImageCount = allImageInfo.aotInfoCount;
        size_t aotImageArraySize = aotImageCount * sizeof(dyld_aot_image_info_64);

        withRemoteBuffer(task, aotImageArray, aotImageArraySize, false, kr, ^(void *buffer, size_t size) {
            dyld_aot_image_info_64* imageArray = (dyld_aot_image_info_64*)buffer;
            for (uint32_t i = 0; i < aotImageCount; i++) {
                if (!result->addAotImage(imageArray[i])) {
                    *kr = KERN_FAILURE;
                    result = nullptr;
                    return;
                }
            }
        });
    }

    return std::move(result);
}
template<typename T>
dyld_process_info_ptr dyld_process_info_base::makeSuspended(task_t task, const T& allImageInfo, kern_return_t* kr)
{
    pid_t pid;
    if ((*kr = pid_for_task(task, &pid))) {
        return nullptr;
    }

    mach_task_basic_info ti;
    mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
    if ((*kr = task_info(task, MACH_TASK_BASIC_INFO, (task_info_t)&ti, &count))) {
        return nullptr;
    }

    // The task is not suspended, exit
    if (ti.suspend_count == 0) {
        return nullptr;
    }

    __block unsigned        imageCount = 0; // main executable and dyld
    __block uint64_t        mainExecutableAddress = 0;
    __block uint64_t        dyldAddress = 0;
    char                    dyldPathBuffer[PATH_MAX+1];
    char                    mainExecutablePathBuffer[PATH_MAX+1];
    __block char *          dyldPath = &dyldPathBuffer[0];
    __block char *          mainExecutablePath = &mainExecutablePathBuffer[0];
    __block dyld3::Platform platformID = dyld3::Platform::unknown;
    mach_vm_size_t          size;
    for (mach_vm_address_t address = 0; ; address += size) {
        vm_region_basic_info_data_64_t info;
        mach_port_t objectName;
        unsigned int infoCount = VM_REGION_BASIC_INFO_COUNT_64;
        if (kern_return_t r = mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO,
                                             (vm_region_info_t)&info, &infoCount, &objectName)) {
            break;
        }
        if ( info.protection != (VM_PROT_READ|VM_PROT_EXECUTE) )
            continue;
        // read start of vm region to verify it is a mach header
        withRemoteObject(task, address, NULL, ^(mach_header_64 mhBuffer){
            if ( (mhBuffer.magic != MH_MAGIC) && (mhBuffer.magic != MH_MAGIC_64) )
                return;
            // now know the region is the start of a mach-o file
            if ( mhBuffer.filetype == MH_EXECUTE ) {
                mainExecutableAddress = address;
                int len = proc_regionfilename(pid, mainExecutableAddress, mainExecutablePath, PATH_MAX);
                if ( len != 0 ) {
                    mainExecutablePath[len] = '\0';
                }
                ++imageCount;
            }
            else if ( mhBuffer.filetype == MH_DYLINKER ) {
                dyldAddress = address;
                int len = proc_regionfilename(pid, dyldAddress, dyldPath, PATH_MAX);
                if ( len != 0 ) {
                    dyldPath[len] = '\0';
                }
                ++imageCount;
            }
        });
        //fprintf(stderr, "vm region: addr=0x%llX, size=0x%llX, prot=0x%X\n", (uint64_t)address, (uint64_t)size, info.protection);
    }
    //fprintf(stderr, "dyld: addr=0x%llX, path=%s\n", dyldAddress, dyldPathBuffer);
    //fprintf(stderr, "app: addr=0x%llX, path=%s\n", mainExecutableAddress, mainExecutablePathBuffer);

    // explicitly set aot image count to 0 in the suspended case
    unsigned aotImageCount = 0;

    // allocate result object
    size_t allocationSize =   sizeof(dyld_process_info_base)
                            + sizeof(dyld_process_cache_info)
                            + sizeof(dyld_process_aot_cache_info)
                            + sizeof(dyld_process_state_info)
                            + sizeof(ImageInfo)*(imageCount)
                            + sizeof(dyld_aot_image_info_64)*aotImageCount // this should always be 0, but including it here to be explicit
                            + sizeof(SegmentInfo)*imageCount*10
                            + imageCount*PATH_MAX;
    void* storage = malloc(allocationSize);
    if (storage == nullptr) {
        *kr = KERN_NO_SPACE;
        return nullptr;
    }

    auto obj = dyld_process_info_ptr(new (storage) dyld_process_info_base((dyld_platform_t)platformID, imageCount, aotImageCount, allocationSize), deleter);
    (void)obj->reserveSpace(sizeof(dyld_process_info_base)+sizeof(dyld_process_cache_info)+sizeof(dyld_process_aot_cache_info)+sizeof(dyld_process_state_info));

    dyld_process_cache_info* cacheInfo = obj->cacheInfo();
    bzero(cacheInfo->cacheUUID, 16);
    cacheInfo->cacheBaseAddress = 0;
    cacheInfo->noCache = true;
    cacheInfo->privateCache = false;

    // zero out aot cache info
    dyld_process_aot_cache_info* aotCacheInfo = obj->aotCacheInfo();
    bzero(aotCacheInfo->cacheUUID, 16);
    aotCacheInfo->cacheBaseAddress = 0;

    dyld_process_state_info* stateInfo = obj->stateInfo();
    stateInfo->timestamp = 0;
    stateInfo->imageCount = imageCount;
    stateInfo->initialImageCount = imageCount;
    stateInfo->dyldState = dyld_process_state_not_started;

    // fill in info for dyld
    if ( dyldAddress != 0 ) {
        if ((*kr = obj->addDyldImage(task, dyldAddress, 0, dyldPath))) {
            return nullptr;
        }
    }

    // fill in info for each image
    if ( mainExecutableAddress != 0 ) {
        if (!obj->addImage(task, false, mainExecutableAddress, 0, mainExecutablePath)) {
            return nullptr;
        }
    }

    if (allImageInfo.infoArrayChangeTimestamp != 0) {
        return nullptr;
    }

    count = MACH_TASK_BASIC_INFO_COUNT;
    if ((*kr = task_info(task, MACH_TASK_BASIC_INFO, (task_info_t)&ti, &count))) {
        return nullptr;
    }

    // The task is not suspended, exit
    if (ti.suspend_count == 0) {
        return nullptr;
    }

    return obj;
}
const char* dyld_process_info_base::addString(const char* str, size_t maxlen)
{
    size_t len = strnlen(str, maxlen) + 1;
    // If we don't have enough space return an empty string
    if (!reserveSpace(len)) { return ""; }
    _stringRevBumpPtr -= len;
    strlcpy(_stringRevBumpPtr, str, len);
    return _stringRevBumpPtr;
}
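
// Worked example (illustrative; the offsets are hypothetical): addString() carves strings from the
// tail of the allocation, moving _stringRevBumpPtr down toward the segment array. If the string
// pool currently ends at offset 4096 within the block, adding "/usr/lib/libfoo.dylib" (21 chars
// plus NUL) moves _stringRevBumpPtr to offset 4074 and returns a pointer to that copy; the next
// addString() lands immediately below it.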
const char* dyld_process_info_base::copyPath(task_t task, uint64_t stringAddressInTask)
{
    __block const char* retval = "";
    withRemoteBuffer(task, stringAddressInTask, PATH_MAX, true, nullptr, ^(void *buffer, size_t size) {
        retval = addString(static_cast<const char *>(buffer), size);
    });
    return retval;
}
bool dyld_process_info_base::addImage(task_t task, bool sameCacheAsThisProcess, uint64_t imageAddress, uint64_t imagePath, const char* imagePathLocal)
{
    _curImage->loadAddress = imageAddress;
    _curImage->segmentStartIndex = _curSegmentIndex;
    if ( imagePathLocal != NULL ) {
        _curImage->path = addString(imagePathLocal, PATH_MAX);
    } else if ( sameCacheAsThisProcess && inCache(imagePath) ) {
        _curImage->path = (const char*)imagePath;
    } else if (imagePath) {
        _curImage->path = copyPath(task, imagePath);
    } else {
        _curImage->path = "";
    }

    if ( sameCacheAsThisProcess && inCache(imageAddress) ) {
        addInfoFromLoadCommands((mach_header*)imageAddress, imageAddress, 32*1024);
    } else if (addInfoFromRemoteLoadCommands(task, imageAddress) != KERN_SUCCESS) {
        // The image is not here, return early
        return false;
    }
    _curImage->segmentsCount = _curSegmentIndex - _curImage->segmentStartIndex;
    _curImage++;
    return true;
}
bool dyld_process_info_base::addAotImage(dyld_aot_image_info_64 aotImageInfo) {
    if (!reserveSpace(sizeof(dyld_aot_image_info_64))) {
        return false;
    }
    _curAotImage->x86LoadAddress = aotImageInfo.x86LoadAddress;
    _curAotImage->aotLoadAddress = aotImageInfo.aotLoadAddress;
    _curAotImage->aotImageSize = aotImageInfo.aotImageSize;
    memcpy(_curAotImage->aotImageKey, aotImageInfo.aotImageKey, sizeof(aotImageInfo.aotImageKey));

    _curAotImage++;
    return true;
}
kern_return_t dyld_process_info_base::addInfoFromRemoteLoadCommands(task_t task, uint64_t remoteMH) {
    __block kern_return_t kr = KERN_SUCCESS;
    __block size_t headerPagesSize = 0;
    __block bool done = false;

    //Since the minimum we can reasonably map is a page, map that.
    withRemoteBuffer(task, remoteMH, PAGE_SIZE, false, &kr, ^(void * buffer, size_t size) {
        const mach_header* mh = (const mach_header*)buffer;
        headerPagesSize = sizeof(mach_header) + mh->sizeofcmds;
        if (headerPagesSize <= PAGE_SIZE) {
            addInfoFromLoadCommands(mh, remoteMH, size);
            done = true;
        }
    });

    //The load commands did not fit in the first page, but now we know the size, so remap and try again
    if (!done) {
        if (kr != KERN_SUCCESS) {
            return kr;
        }
        withRemoteBuffer(task, remoteMH, headerPagesSize, false, &kr, ^(void * buffer, size_t size) {
            addInfoFromLoadCommands((mach_header*)buffer, remoteMH, size);
        });
    }

    return kr;
}
kern_return_t dyld_process_info_base::addDyldImage(task_t task, uint64_t dyldAddress, uint64_t dyldPathAddress, const char* localPath)
{
    __block kern_return_t kr = KERN_SUCCESS;
    _curImage->loadAddress = dyldAddress;
    _curImage->segmentStartIndex = _curSegmentIndex;
    if ( localPath != NULL ) {
        _curImage->path = addString(localPath, PATH_MAX);
    }
    else {
        _curImage->path = copyPath(task, dyldPathAddress);
        if ( kr != KERN_SUCCESS )
            return kr;
    }

    kr = addInfoFromRemoteLoadCommands(task, dyldAddress);
    if ( kr != KERN_SUCCESS )
        return kr;

    _curImage->segmentsCount = _curSegmentIndex - _curImage->segmentStartIndex;
    _curImage++;
    return KERN_SUCCESS;
}
void dyld_process_info_base::addInfoFromLoadCommands(const mach_header* mh, uint64_t addressInTask, size_t size)
{
    const load_command* startCmds = NULL;
    if ( mh->magic == MH_MAGIC_64 )
        startCmds = (load_command*)((char *)mh + sizeof(mach_header_64));
    else if ( mh->magic == MH_MAGIC )
        startCmds = (load_command*)((char *)mh + sizeof(mach_header));
    else
        return;  // not a mach-o file, or wrong endianness

    const load_command* const cmdsEnd = (load_command*)((char*)startCmds + mh->sizeofcmds);
    const load_command* cmd = startCmds;
    for(uint32_t i = 0; i < mh->ncmds; ++i) {
        const load_command* nextCmd = (load_command*)((char *)cmd + cmd->cmdsize);
        if ( (cmd->cmdsize < 8) || (nextCmd > cmdsEnd) || (nextCmd < startCmds) ) {
            return;  // malformed load command
        }
        if ( cmd->cmd == LC_UUID ) {
            const uuid_command* uuidCmd = (uuid_command*)cmd;
            memcpy(_curImage->uuid, uuidCmd->uuid, 16);
        }
        else if ( cmd->cmd == LC_SEGMENT ) {
            if (!reserveSpace(sizeof(SegmentInfo))) { break; }
            const segment_command* segCmd = (segment_command*)cmd;
            _curSegment->name = copySegmentName(segCmd->segname);
            _curSegment->addr = segCmd->vmaddr;
            _curSegment->size = segCmd->vmsize;
            _curSegment++;
            _curSegmentIndex++;
        }
        else if ( cmd->cmd == LC_SEGMENT_64 ) {
            if (!reserveSpace(sizeof(SegmentInfo))) { break; }
            const segment_command_64* segCmd = (segment_command_64*)cmd;
            _curSegment->name = copySegmentName(segCmd->segname);
            _curSegment->addr = segCmd->vmaddr;
            _curSegment->size = segCmd->vmsize;
            _curSegment++;
            _curSegmentIndex++;
        }
        cmd = nextCmd;
    }
}
const char* dyld_process_info_base::copySegmentName(const char* name)
{
    // don't copy names of standard segments into string pool
    static const char* stdSegNames[] = {
        "__TEXT", "__DATA", "__LINKEDIT",
        "__DATA_DIRTY", "__DATA_CONST",
        "__OBJC", "__OBJC_CONST",
        "__AUTH", "__AUTH_CONST",
        NULL
    };
    for (const char** s=stdSegNames; *s != NULL; ++s) {
        if ( strcmp(name, *s) == 0 )
            return *s;
    }
    // copy custom segment names into string pool
    return addString(name, 16);
}
void dyld_process_info_base::forEachImage(void (^callback)(uint64_t machHeaderAddress, const uuid_t uuid, const char* path)) const
{
    for (const ImageInfo* p = _firstImage; p < _curImage; ++p) {
        callback(p->loadAddress, p->uuid, p->path);
    }
}
void dyld_process_info_base::forEachAotImage(bool (^callback)(uint64_t x86Address, uint64_t aotAddress, uint64_t aotSize, uint8_t* aotImageKey, size_t aotImageKeySize)) const
{
    for (const dyld_aot_image_info_64* p = _firstAotImage; p < _curAotImage; ++p) {
        if (!callback(p->x86LoadAddress, p->aotLoadAddress, p->aotImageSize, (uint8_t*)p->aotImageKey, sizeof(p->aotImageKey))) {
            break;
        }
    }
}
void dyld_process_info_base::forEachSegment(uint64_t machHeaderAddress, void (^callback)(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName)) const
{
    for (const ImageInfo* p = _firstImage; p < _curImage; ++p) {
        if ( p->loadAddress == machHeaderAddress ) {
            uint64_t slide = 0;
            for (uint32_t i=0; i < p->segmentsCount; ++i) {
                const SegmentInfo* seg = &_firstSegment[p->segmentStartIndex+i];
                if ( strcmp(seg->name, "__TEXT") == 0 ) {
                    slide = machHeaderAddress - seg->addr;
                    break;
                }
            }
            for (uint32_t i=0; i < p->segmentsCount; ++i) {
                const SegmentInfo* seg = &_firstSegment[p->segmentStartIndex+i];
                callback(seg->addr + slide, seg->size, seg->name);
            }
            break;
        }
    }
}
dyld_process_info _dyld_process_info_create(task_t task, uint64_t timestamp, kern_return_t* kr)
{
    __block dyld_process_info result = nullptr;
    kern_return_t krSink = KERN_SUCCESS;
    if (kr == nullptr) {
        kr = &krSink;
    }
    *kr = KERN_SUCCESS;

    task_dyld_info_data_t task_dyld_info;
    mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
    if ( kern_return_t r = task_info(task, TASK_DYLD_INFO, (task_info_t)&task_dyld_info, &count) ) {
        *kr = r;
        return nullptr;
    }

    //The kernel will return MACH_VM_MIN_ADDRESS for an executable that has not had dyld loaded
    if (task_dyld_info.all_image_info_addr == MACH_VM_MIN_ADDRESS) {
        *kr = KERN_FAILURE;
        return nullptr;
    }

    for (auto i = 0; i < 10; ++i) {
        withRemoteBuffer(task, task_dyld_info.all_image_info_addr, (size_t)task_dyld_info.all_image_info_size, false, kr, ^(void *buffer, size_t size) {
            dyld_process_info_ptr base;
            if (task_dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
                const dyld_all_image_infos_32* info = (const dyld_all_image_infos_32*)buffer;
                base = dyld_process_info_base::make<dyld_all_image_infos_32, dyld_image_info_32>(task, *info, timestamp, kr);
            } else {
                const dyld_all_image_infos_64* info = (const dyld_all_image_infos_64*)buffer;
                base = dyld_process_info_base::make<dyld_all_image_infos_64, dyld_image_info_64>(task, *info, timestamp, kr);
            }
            result = base.release();
        });
        if (*kr == KERN_SUCCESS) { break; }
    }
    return result;
}
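
// Example usage of this SPI (illustrative sketch; someTaskPort is a hypothetical task port the
// caller already holds). A client creates a snapshot, walks the images, and releases it:
//
//     kern_return_t kr = KERN_SUCCESS;
//     dyld_process_info info = _dyld_process_info_create(someTaskPort, 0, &kr);
//     if ( info != NULL ) {
//         _dyld_process_info_for_each_image(info, ^(uint64_t machHeaderAddress, const uuid_t uuid, const char* path) {
//             printf("0x%llx %s\n", machHeaderAddress, path);
//         });
//         _dyld_process_info_release(info);
//     }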
void _dyld_process_info_get_state(dyld_process_info info, dyld_process_state_info* stateInfo)
{
    *stateInfo = *info->stateInfo();
}

void _dyld_process_info_get_cache(dyld_process_info info, dyld_process_cache_info* cacheInfo)
{
    *cacheInfo = *info->cacheInfo();
}

void _dyld_process_info_get_aot_cache(dyld_process_info info, dyld_process_aot_cache_info* aotCacheInfo)
{
    *aotCacheInfo = *info->aotCacheInfo();
}

void _dyld_process_info_retain(dyld_process_info object)
{
    const_cast<dyld_process_info_base*>(object)->retain();
}

dyld_platform_t _dyld_process_info_get_platform(dyld_process_info object) {
    return const_cast<dyld_process_info_base*>(object)->platform();
}

void _dyld_process_info_release(dyld_process_info object)
{
    const_cast<dyld_process_info_base*>(object)->release();
}

void _dyld_process_info_for_each_image(dyld_process_info info, void (^callback)(uint64_t machHeaderAddress, const uuid_t uuid, const char* path))
{
    info->forEachImage(callback);
}

void _dyld_process_info_for_each_aot_image(dyld_process_info info, bool (^callback)(uint64_t x86Address, uint64_t aotAddress, uint64_t aotSize, uint8_t* aotImageKey, size_t aotImageKeySize))
{
    info->forEachAotImage(callback);
}

void _dyld_process_info_for_each_segment(dyld_process_info info, uint64_t machHeaderAddress, void (^callback)(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName))
{
    info->forEachSegment(machHeaderAddress, callback);
}
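
// Example usage (illustrative sketch; machHeaderAddress would come from the for_each_image
// callback above). This reports where each segment of a known image landed in the remote process:
//
//     _dyld_process_info_for_each_segment(info, machHeaderAddress,
//                                         ^(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName) {
//         printf("  %-16s 0x%llx + 0x%llx\n", segmentName, segmentAddress, segmentSize);
//     });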