/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <libproc.h>
#include <uuid/uuid.h>
#include <TargetConditionals.h>
#include <sys/param.h>
#include <sys/types.h>
#include <mach/mach.h>
#include <mach/shared_region.h>
#include <mach/mach_vm.h>
#include <mach/vm_region.h>
#include <libkern/OSAtomic.h>
#include <mach-o/loader.h>
#include <mach-o/dyld_process_info.h>
#include <mach-o/dyld_images.h>

#include <atomic>
#include <memory>
#include <tuple>
#include <utility>

#include "MachOFile.h"
#include "dyld_process_info_internal.h"
// this was in dyld_priv.h but it is no longer exported
const struct dyld_all_image_infos* _dyld_get_all_image_infos();
RemoteBuffer& RemoteBuffer::operator=(RemoteBuffer&& other) {
    std::swap(_localAddress, other._localAddress);
    std::swap(_size, other._size);
    std::swap(_kr, other._kr);
    std::swap(_shared, other._shared);
    return *this;
}
RemoteBuffer::RemoteBuffer() : _localAddress(0), _size(0), _kr(KERN_SUCCESS), _shared(false) {}
RemoteBuffer::RemoteBuffer(std::tuple<mach_vm_address_t,vm_size_t,kern_return_t,bool> T)
    : _localAddress(std::get<0>(T)), _size(std::get<1>(T)), _kr(std::get<2>(T)), _shared(std::get<3>(T)) {}
RemoteBuffer::RemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool shared, bool allow_truncation)
    : RemoteBuffer(RemoteBuffer::create(task, remote_address, remote_size, shared, allow_truncation)) {}
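// Maps `size` bytes at `remote_address` in `task` into this process. For non-shared requests the
// mapping is copied into a malloc()ed buffer and the mapping is released, so the contents cannot
// change underneath the caller; for shared requests the remapped region itself is returned and
// must be read/write. Returns the local address and a kern_return_t describing the outcome.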
std::pair<mach_vm_address_t, kern_return_t>
RemoteBuffer::map(task_t task, mach_vm_address_t remote_address, vm_size_t size, bool shared) {
    vm_prot_t cur_protection = VM_PROT_NONE;
    vm_prot_t max_protection = VM_PROT_NONE;
    if (size == 0) {
        return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_INVALID_ARGUMENT);
    }
    int flags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR;
    // Since we are getting rid of the flag probing we have to make sure that simulator libdylds do not use VM_FLAGS_RESILIENT_MEDIA
    // FIXME: Remove this when simulator builds no longer support back deployment to 10.14
#if TARGET_OS_SIMULATOR
    flags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_RESILIENT_CODESIGN;
#else
    flags = VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR | VM_FLAGS_RESILIENT_CODESIGN | VM_FLAGS_RESILIENT_MEDIA;
#endif
    mach_vm_address_t localAddress = 0;
    auto kr = mach_vm_remap(mach_task_self(),
                            &localAddress,
                            size,
                            0,                  // alignment mask
                            flags,
                            task,
                            remote_address,
                            !shared,            // copy unless a shared mapping was requested
                            &cur_protection,
                            &max_protection,
                            VM_INHERIT_NONE);
    // The remap was not successful, return the error
    if (kr != KERN_SUCCESS) {
        return std::make_pair(MACH_VM_MIN_ADDRESS, kr);
    }
    // If it is not a shared buffer then copy it into a local buffer so our results are coherent in the event
    // the page goes away due to storage removal, etc. We have to do this because even after we read the page the
    // contents might go away if the object is paged out and then the backing region is disconnected (for example, if
    // we are copying some memory in the middle of a mach-o that is on a USB drive that is disconnected after we perform
    // the mapping). Once we copy it into a local buffer the memory will be handled by the default pager instead of
    // potentially being backed by the mmap pager, and thus is guaranteed not to mutate out from under us.
    if (!shared) {
        void* buffer = malloc(size);
        if (buffer == nullptr) {
            // Could not allocate the local copy; release the mapping and report the failure
            (void)vm_deallocate(mach_task_self(), localAddress, size);
            return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_NO_SPACE);
        }
        memcpy(buffer, (void *)localAddress, size);
        (void)vm_deallocate(mach_task_self(), localAddress, size);
        return std::make_pair((vm_address_t)buffer, KERN_SUCCESS);
    }
    // A shared buffer was requested; if the permissions are not correct, deallocate the region and return failure
    if (cur_protection != (VM_PROT_READ|VM_PROT_WRITE)) {
        if (localAddress != 0) {
            (void)vm_deallocate(mach_task_self(), (vm_address_t)localAddress, size);
        }
        return std::make_pair(MACH_VM_MIN_ADDRESS, KERN_PROTECTION_FAILURE);
    }
    // We have successfully created a shared buffer with the correct permissions, return it
    return std::make_pair(localAddress, KERN_SUCCESS);
}
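// Produces the (localAddress, size, kern_return_t, shared) tuple consumed by the RemoteBuffer
// constructor. If the full-size mapping fails and truncation is allowed, retries with just the
// remainder of the first page.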
std::tuple<mach_vm_address_t,vm_size_t,kern_return_t,bool> RemoteBuffer::create(task_t task,
                                                                                mach_vm_address_t remote_address,
                                                                                size_t size,
                                                                                bool shared,
                                                                                bool allow_truncation) {
    mach_vm_address_t localAddress;
    kern_return_t kr;
    // Try the initial map
    std::tie(localAddress, kr) = map(task, remote_address, size, shared);
    if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr, shared);
    // The first attempt failed, truncate if possible and try again. We only need to try once since the largest
    // truncatable buffer we map is less than a single page. To be more general we would need to try repeatedly in a
    // loop, shrinking the size each time.
    if (allow_truncation) {
        size = PAGE_SIZE - remote_address % PAGE_SIZE;
        std::tie(localAddress, kr) = map(task, remote_address, size, shared);
        if (kr == KERN_SUCCESS) return std::make_tuple(localAddress, size, kr, shared);
    }
    // If we reach this point the mapping completely failed
    return std::make_tuple(MACH_VM_MIN_ADDRESS, 0, kr, shared);
}
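// Shared buffers are still vm mappings in this task and are released with vm_deallocate();
// non-shared buffers were copied into malloc()ed storage by map() and are released with free().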
RemoteBuffer::~RemoteBuffer() {
    if (!_localAddress) { return; }
    if (_shared) {
        (void)vm_deallocate(mach_task_self(), (vm_address_t)_localAddress, _size);
    } else {
        free((void*)_localAddress);
    }
}
void* RemoteBuffer::getLocalAddress() const { return (void *)_localAddress; }
size_t RemoteBuffer::getSize() const { return _size; }
kern_return_t RemoteBuffer::getKernelReturn() const { return _kr; }
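// Convenience wrapper: maps the remote range, reports the result through *kr (or a local sink when
// kr is NULL), and invokes the block only if the mapping succeeded. The mapping is released when
// the RemoteBuffer goes out of scope.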
void withRemoteBuffer(task_t task, mach_vm_address_t remote_address, size_t remote_size, bool shared, bool allow_truncation, kern_return_t *kr, void (^block)(void *buffer, size_t size)) {
    kern_return_t krSink = KERN_SUCCESS;
    if (kr == nullptr) {
        kr = &krSink;
    }
    RemoteBuffer buffer(task, remote_address, remote_size, shared, allow_truncation);
    *kr = buffer.getKernelReturn();
    if (*kr == KERN_SUCCESS) {
        block(buffer.getLocalAddress(), buffer.getSize());
    }
}
// Opaque object returned by _dyld_process_info_create()
struct __attribute__((visibility("hidden"))) dyld_process_info_deleter { // deleter
    // dyld_process_info_deleter() {};
    // dyld_process_info_deleter(const dyld_process_info_deleter&) { }
    // dyld_process_info_deleter(dyld_process_info_deleter&) {}
    // dyld_process_info_deleter(dyld_process_info_deleter&&) {}
    void operator()(dyld_process_info_base* p) const {
        // The object is placement-new'ed into malloc()ed storage, so releasing it is just free()
        free(p);
    }
};

static dyld_process_info_deleter deleter;
typedef std::unique_ptr<dyld_process_info_base, dyld_process_info_deleter> dyld_process_info_ptr;
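// A dyld_process_info is a single malloc()ed allocation: this header struct, followed by the cache
// info, the state info, the ImageInfo array, the SegmentInfo array, and a string pool that is filled
// backwards from the end of the allocation. reserveSpace() tracks how much of the allocation is left.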
struct __attribute__((visibility("hidden"))) dyld_process_info_base {
    template<typename T1, typename T2>
    static dyld_process_info_ptr make(task_t task, const T1& allImageInfo, uint64_t timestamp, kern_return_t* kr);
    template<typename T>
    static dyld_process_info_ptr makeSuspended(task_t task, const T& allImageInfo, kern_return_t* kr);

    std::atomic<uint32_t>&   retainCount() const { return _retainCount; }
    dyld_process_cache_info* cacheInfo() const { return (dyld_process_cache_info*)(((char*)this) + _cacheInfoOffset); }
    dyld_process_state_info* stateInfo() const { return (dyld_process_state_info*)(((char*)this) + _stateInfoOffset); }
    dyld_platform_t          platform() const { return _platform; }

    void forEachImage(void (^callback)(uint64_t machHeaderAddress, const uuid_t uuid, const char* path)) const;
    void forEachSegment(uint64_t machHeaderAddress, void (^callback)(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName)) const;
    bool reserveSpace(size_t space) {
        if (_freeSpace < space) { return false; }
        _freeSpace -= space;
        return true;
    }

    void retain() {
        _retainCount++;
    }

    void release() {
        uint32_t newCount = --_retainCount;
        if ( newCount == 0 ) {
            // allocated with malloc + placement new in make()/makeSuspended()
            free(this);
        }
    }

    struct ImageInfo {
        uuid_t      uuid;
        uint64_t    loadAddress;
        const char* path;
        uint32_t    segmentStartIndex;
        uint32_t    segmentsCount;
    };

    struct SegmentInfo {
        const char* name;
        uint64_t    addr;
        uint64_t    size;
    };
    dyld_process_info_base(dyld_platform_t platform, unsigned imageCount, size_t totalSize);
    void* operator new (size_t, void* buf) { return buf; }

    static bool inCache(uint64_t addr) { return (addr > SHARED_REGION_BASE) && (addr < SHARED_REGION_BASE+SHARED_REGION_SIZE); }

    bool addImage(task_t task, bool sameCacheAsThisProcess, uint64_t imageAddress, uint64_t imagePath, const char* imagePathLocal);

    kern_return_t addDyldImage(task_t task, uint64_t dyldAddress, uint64_t dyldPathAddress, const char* localPath);

    bool invalid() { return ((char*)_stringRevBumpPtr < (char*)_curSegment); }
    const char* copyPath(task_t task, uint64_t pathAddr);
    const char* addString(const char*, size_t);
    const char* copySegmentName(const char*);

    void addInfoFromLoadCommands(const mach_header* mh, uint64_t addressInTask, size_t size);
    kern_return_t addInfoFromRemoteLoadCommands(task_t task, uint64_t remoteMH);

    void inspectLocalImageLoadCommands(uint64_t imageAddress, void* func);
    kern_return_t inspectRemoteImageLoadCommands(task_t task, uint64_t imageAddress, void* func);

    mutable std::atomic<uint32_t> _retainCount;
    const uint32_t                _cacheInfoOffset;
    const uint32_t                _stateInfoOffset;
    const uint32_t                _imageInfosOffset;
    const uint32_t                _segmentInfosOffset;
    size_t                        _freeSpace;
    dyld_platform_t               _platform;
    ImageInfo* const              _firstImage;
    ImageInfo*                    _curImage;
    SegmentInfo* const            _firstSegment;
    SegmentInfo*                  _curSegment;
    uint32_t                      _curSegmentIndex;
    char*                         _stringRevBumpPtr;

    // The rest of the allocation, in order:
    // dyld_process_cache_info cacheInfo;
    // dyld_process_state_info stateInfo;
    // ImageInfo images[];
    // SegmentInfo segments[];
    // string pool, filled backwards from the end of the allocation
};
dyld_process_info_base::dyld_process_info_base(dyld_platform_t platform, unsigned imageCount, size_t totalSize)
    : _retainCount(1), _cacheInfoOffset(sizeof(dyld_process_info_base)),
      _stateInfoOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info)),
      _imageInfosOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_state_info)),
      _segmentInfosOffset(sizeof(dyld_process_info_base) + sizeof(dyld_process_cache_info) + sizeof(dyld_process_state_info) + imageCount*sizeof(ImageInfo)),
      _freeSpace(totalSize), _platform(platform),
      _firstImage((ImageInfo*)(((uint8_t*)this) + _imageInfosOffset)),
      _curImage((ImageInfo*)(((uint8_t*)this) + _imageInfosOffset)),
      _firstSegment((SegmentInfo*)(((uint8_t*)this) + _segmentInfosOffset)),
      _curSegment((SegmentInfo*)(((uint8_t*)this) + _segmentInfosOffset)),
      _curSegmentIndex(0),
      _stringRevBumpPtr((char*)(this)+totalSize)
{
}
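// Builds an info object for a running process. T1/T2 are the 32- or 64-bit layouts of
// dyld_all_image_infos / dyld_image_info, selected by the caller from the task's
// all_image_info_format. Retries a bounded number of times if the remote image list is
// still changing while we scan it.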
template<typename T1, typename T2>
dyld_process_info_ptr dyld_process_info_base::make(task_t task, const T1& allImageInfo, uint64_t timestamp, kern_return_t* kr)
{
    __block dyld_process_info_ptr result = nullptr;

    // bail out if dyld is too old
    if ( allImageInfo.version < 15 ) {
        *kr = KERN_FAILURE;
        return nullptr;
    }

    // Check if the process is suspended
    if (allImageInfo.infoArrayChangeTimestamp == 0) {
        result = dyld_process_info_base::makeSuspended<T1>(task, allImageInfo, kr);
        // If we have a result return it, otherwise rescan
        if (result) {
            // If it returned the process is suspended and there is nothing more to do
            return std::move(result);
        }
    }

    // Check to see if the process change timestamp is greater than 0, if not then sleep to let the process
    // finish initializing
    if (allImageInfo.infoArrayChangeTimestamp == 0) {
        usleep(1000 * 50); // 50ms
    }

    // Test to see if there are no changes and we can exit early
    if (timestamp != 0 && timestamp == allImageInfo.infoArrayChangeTimestamp) {
        *kr = KERN_SUCCESS;
        return nullptr;
    }

    for (uint32_t j=0; j < 10; ++j) {
        uint64_t currentTimestamp = allImageInfo.infoArrayChangeTimestamp;
        mach_vm_address_t infoArray = allImageInfo.infoArray;
        if (currentTimestamp == 0) continue;
        if (infoArray == 0) {
            // Check if the task is suspended mid dylib load and exit early
            mach_task_basic_info ti;
            mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
            if ((*kr = task_info(task, MACH_TASK_BASIC_INFO, (task_info_t)&ti, &count))) {
                continue;
            }
            // The task is suspended, exit
            if (ti.suspend_count != 0) {
                // Not exactly correct, but conveys that the operation may succeed in the future
                *kr = KERN_RESOURCE_SHORTAGE;
                return nullptr;
            }
            continue;
        }

        // For the moment we are going to truncate any image list longer than 8192 because some programs do
        // terrible things that corrupt their own image lists and we need to stop clients from crashing
        // reading them. We can try to do something more advanced in the future. rdar://27446361
        uint32_t imageCount = allImageInfo.infoArrayCount;
        imageCount = MIN(imageCount, 8192);
        size_t imageArraySize = imageCount * sizeof(T2);

        withRemoteBuffer(task, infoArray, imageArraySize, false, false, kr, ^(void *buffer, size_t size) {
            // figure out how many path strings will need to be copied and their size
            T2* imageArray = (T2 *)buffer;
            const dyld_all_image_infos* myInfo = _dyld_get_all_image_infos();
            bool sameCacheAsThisProcess = !allImageInfo.processDetachedFromSharedRegion
                && !myInfo->processDetachedFromSharedRegion
                && ((memcmp(myInfo->sharedCacheUUID, &allImageInfo.sharedCacheUUID[0], 16) == 0)
                && (myInfo->sharedCacheSlide == allImageInfo.sharedCacheSlide));
            unsigned countOfPathsNeedingCopying = 0;
            if ( sameCacheAsThisProcess ) {
                for (uint32_t i=0; i < imageCount; ++i) {
                    if ( !inCache(imageArray[i].imageFilePath) )
                        ++countOfPathsNeedingCopying;
                }
            }
            else {
                countOfPathsNeedingCopying = imageCount+1;
            }
            unsigned imageCountWithDyld = imageCount+1;

            // allocate result object
            size_t allocationSize = sizeof(dyld_process_info_base)
                                  + sizeof(dyld_process_cache_info)
                                  + sizeof(dyld_process_state_info)
                                  + sizeof(ImageInfo)*(imageCountWithDyld)
                                  + sizeof(SegmentInfo)*imageCountWithDyld*5
                                  + countOfPathsNeedingCopying*PATH_MAX;
            void* storage = malloc(allocationSize);
            if (storage == nullptr) {
                *kr = KERN_NO_SPACE;
                result = nullptr;
                return;
            }
            auto info = dyld_process_info_ptr(new (storage) dyld_process_info_base(allImageInfo.platform, imageCountWithDyld, allocationSize), deleter);
            (void)info->reserveSpace(sizeof(dyld_process_info_base)+sizeof(dyld_process_cache_info)+sizeof(dyld_process_state_info));

            // fill in base info
            dyld_process_cache_info* cacheInfo = info->cacheInfo();
            memcpy(cacheInfo->cacheUUID, &allImageInfo.sharedCacheUUID[0], 16);
            cacheInfo->cacheBaseAddress = allImageInfo.sharedCacheBaseAddress;
            cacheInfo->privateCache     = allImageInfo.processDetachedFromSharedRegion;
            // if no cache is used, allImageInfo has all zeros for cache UUID
            cacheInfo->noCache = true;
            for (int i=0; i < 16; ++i) {
                if ( cacheInfo->cacheUUID[i] != 0 ) {
                    cacheInfo->noCache = false;
                }
            }

            dyld_process_state_info* stateInfo = info->stateInfo();
            stateInfo->timestamp         = currentTimestamp;
            stateInfo->imageCount        = imageCountWithDyld;
            stateInfo->initialImageCount = (uint32_t)(allImageInfo.initialImageCount+1);
            stateInfo->dyldState         = dyld_process_state_dyld_initialized;

            if ( allImageInfo.libSystemInitialized != 0 ) {
                stateInfo->dyldState = dyld_process_state_libSystem_initialized;
                if ( allImageInfo.initialImageCount != imageCount ) {
                    stateInfo->dyldState = dyld_process_state_program_running;
                }
            }
            if ( allImageInfo.errorMessage != 0 ) {
                stateInfo->dyldState = allImageInfo.terminationFlags ? dyld_process_state_terminated_before_inits : dyld_process_state_dyld_terminated;
            }

            // fill in info for dyld
            if ( allImageInfo.dyldPath != 0 ) {
                if ((*kr = info->addDyldImage(task, allImageInfo.dyldImageLoadAddress, allImageInfo.dyldPath, NULL))) {
                    result = nullptr;
                    return;
                }
            }

            // fill in info for each image
            for (uint32_t i=0; i < imageCount; ++i) {
                if (!info->addImage(task, sameCacheAsThisProcess, imageArray[i].imageLoadAddress, imageArray[i].imageFilePath, NULL)) {
                    result = nullptr;
                    return;
                }
            }
            // sanity check internal data did not overflow
            if ( info->invalid() ) {
                *kr = KERN_FAILURE;
                result = nullptr;
                return;
            }

            result = std::move(info);
        });

        if (result) break;
    }

    return std::move(result);
}
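// Builds an info object for a task that is suspended before dyld has published its image list:
// walks the task's VM regions looking for the main executable and dyld mach headers and records
// just those images. Returns nullptr (so the caller rescans) if the task turns out not to be
// suspended or its image list changed during the scan.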
template<typename T>
dyld_process_info_ptr dyld_process_info_base::makeSuspended(task_t task, const T& allImageInfo, kern_return_t* kr)
{
    pid_t pid;
    if ((*kr = pid_for_task(task, &pid))) {
        return nullptr;
    }

    mach_task_basic_info ti;
    mach_msg_type_number_t count = MACH_TASK_BASIC_INFO_COUNT;
    if ((*kr = task_info(task, MACH_TASK_BASIC_INFO, (task_info_t)&ti, &count))) {
        return nullptr;
    }

    // The task is not suspended, exit
    if (ti.suspend_count == 0) {
        return nullptr;
    }

    __block unsigned imageCount = 0;    // main executable and dyld
    __block uint64_t mainExecutableAddress = 0;
    __block uint64_t dyldAddress = 0;
    char dyldPathBuffer[PATH_MAX+1];
    char mainExecutablePathBuffer[PATH_MAX+1];
    __block char * dyldPath = &dyldPathBuffer[0];
    __block char * mainExecutablePath = &mainExecutablePathBuffer[0];
    __block dyld3::Platform platformID = dyld3::Platform::unknown;
    mach_vm_size_t size = 0;
    for (mach_vm_address_t address = 0; ; address += size) {
        vm_region_basic_info_data_64_t info;
        mach_port_t objectName;
        unsigned int infoCount = VM_REGION_BASIC_INFO_COUNT_64;
        if (kern_return_t r = mach_vm_region(task, &address, &size, VM_REGION_BASIC_INFO,
                                             (vm_region_info_t)&info, &infoCount, &objectName)) {
            break;
        }
        if ( info.protection != (VM_PROT_READ|VM_PROT_EXECUTE) )
            continue;
        // read start of vm region to verify it is a mach header
        withRemoteObject(task, address, false, NULL, ^(mach_header_64 mhBuffer){
            if ( (mhBuffer.magic != MH_MAGIC) && (mhBuffer.magic != MH_MAGIC_64) )
                return;
            // now know the region is the start of a mach-o file
            if ( mhBuffer.filetype == MH_EXECUTE ) {
                mainExecutableAddress = address;
                int len = proc_regionfilename(pid, mainExecutableAddress, mainExecutablePath, PATH_MAX);
                if ( len != 0 ) {
                    mainExecutablePath[len] = '\0';
                }
                ++imageCount;
            }
            else if ( mhBuffer.filetype == MH_DYLINKER ) {
                dyldAddress = address;
                int len = proc_regionfilename(pid, dyldAddress, dyldPath, PATH_MAX);
                if ( len != 0 ) {
                    dyldPath[len] = '\0';
                }
                ++imageCount;
            }
        });
        //fprintf(stderr, "vm region: addr=0x%llX, size=0x%llX, prot=0x%X\n", (uint64_t)address, (uint64_t)size, info.protection);
    }
    //fprintf(stderr, "dyld: addr=0x%llX, path=%s\n", dyldAddress, dyldPathBuffer);
    //fprintf(stderr, "app: addr=0x%llX, path=%s\n", mainExecutableAddress, mainExecutablePathBuffer);

    // allocate result object
    size_t allocationSize = sizeof(dyld_process_info_base)
                          + sizeof(dyld_process_cache_info)
                          + sizeof(dyld_process_state_info)
                          + sizeof(ImageInfo)*(imageCount)
                          + sizeof(SegmentInfo)*imageCount*5
                          + imageCount*PATH_MAX;
    void* storage = malloc(allocationSize);
    if (storage == nullptr) {
        *kr = KERN_NO_SPACE;
        return nullptr;
    }
    auto obj = dyld_process_info_ptr(new (storage) dyld_process_info_base((dyld_platform_t)platformID, imageCount, allocationSize), deleter);
    (void)obj->reserveSpace(sizeof(dyld_process_info_base)+sizeof(dyld_process_cache_info)+sizeof(dyld_process_state_info));

    // fill in base info
    dyld_process_cache_info* cacheInfo = obj->cacheInfo();
    bzero(cacheInfo->cacheUUID, 16);
    cacheInfo->cacheBaseAddress = 0;
    cacheInfo->noCache          = true;
    cacheInfo->privateCache     = false;

    dyld_process_state_info* stateInfo = obj->stateInfo();
    stateInfo->timestamp         = 0;
    stateInfo->imageCount        = imageCount;
    stateInfo->initialImageCount = imageCount;
    stateInfo->dyldState         = dyld_process_state_not_started;

    // fill in info for dyld
    if ( dyldAddress != 0 ) {
        if ((*kr = obj->addDyldImage(task, dyldAddress, 0, dyldPath))) {
            return nullptr;
        }
    }

    // fill in info for each image
    if ( mainExecutableAddress != 0 ) {
        if (!obj->addImage(task, false, mainExecutableAddress, 0, mainExecutablePath)) {
            return nullptr;
        }
    }

    // If the image list changed while we were scanning, the process is running; bail out and let the caller rescan
    if (allImageInfo.infoArrayChangeTimestamp != 0) {
        return nullptr;
    }

    // Re-check that the task is still suspended so the data gathered above is coherent
    count = MACH_TASK_BASIC_INFO_COUNT;
    if ((*kr = task_info(task, MACH_TASK_BASIC_INFO, (task_info_t)&ti, &count))) {
        return nullptr;
    }

    // The task is not suspended, exit
    if (ti.suspend_count == 0) {
        return nullptr;
    }

    return std::move(obj);
}
const char* dyld_process_info_base::addString(const char* str, size_t maxlen)
{
    size_t len = strnlen(str, maxlen) + 1;
    // If we don't have enough space return an empty string
    if (!reserveSpace(len)) { return ""; }
    _stringRevBumpPtr -= len;
    strlcpy(_stringRevBumpPtr, str, len);
    return _stringRevBumpPtr;
}
const char* dyld_process_info_base::copyPath(task_t task, uint64_t stringAddressInTask)
{
    __block const char* retval = "";
    withRemoteBuffer(task, stringAddressInTask, PATH_MAX, false, true, nullptr, ^(void *buffer, size_t size) {
        retval = addString(static_cast<const char *>(buffer), size);
    });
    return retval;
}
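// Appends one ImageInfo for a remote image. The path is taken from the local string, shared with
// this process's cache, or copied out of the remote process, and the image's segments are then
// recorded from its load commands.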
bool dyld_process_info_base::addImage(task_t task, bool sameCacheAsThisProcess, uint64_t imageAddress, uint64_t imagePath, const char* imagePathLocal)
{
    if (!reserveSpace(sizeof(ImageInfo))) { return false; }
    _curImage->loadAddress = imageAddress;
    _curImage->segmentStartIndex = _curSegmentIndex;
    if ( imagePathLocal != NULL ) {
        _curImage->path = addString(imagePathLocal, PATH_MAX);
    } else if ( sameCacheAsThisProcess && inCache(imagePath) ) {
        _curImage->path = (const char*)imagePath;
    } else if (imagePath) {
        _curImage->path = copyPath(task, imagePath);
    } else {
        _curImage->path = "";
    }

    if ( sameCacheAsThisProcess && inCache(imageAddress) ) {
        addInfoFromLoadCommands((mach_header*)imageAddress, imageAddress, 32*1024);
    } else if (addInfoFromRemoteLoadCommands(task, imageAddress) != KERN_SUCCESS) {
        // The image is not here, return early
        return false;
    }
    _curImage->segmentsCount = _curSegmentIndex - _curImage->segmentStartIndex;
    _curImage++;
    return true;
}
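// Maps the first page of the remote mach header; if the load commands do not fit in that page,
// remaps with the exact header size and parses again.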
kern_return_t dyld_process_info_base::addInfoFromRemoteLoadCommands(task_t task, uint64_t remoteMH) {
    __block kern_return_t kr = KERN_SUCCESS;
    __block size_t headerPagesSize = 0;
    __block bool done = false;

    // Since the minimum we can reasonably map is a page, map that.
    withRemoteBuffer(task, remoteMH, PAGE_SIZE, false, false, &kr, ^(void * buffer, size_t size) {
        const mach_header* mh = (const mach_header*)buffer;
        headerPagesSize = sizeof(mach_header) + mh->sizeofcmds;
        if (headerPagesSize <= PAGE_SIZE) {
            addInfoFromLoadCommands(mh, remoteMH, size);
            done = true;
        }
    });

    // The load commands did not fit in the first page, but now we know the size, so remap and try again
    if (!done) {
        if (kr != KERN_SUCCESS) {
            return kr;
        }
        withRemoteBuffer(task, remoteMH, headerPagesSize, false, false, &kr, ^(void * buffer, size_t size) {
            addInfoFromLoadCommands((mach_header*)buffer, remoteMH, size);
        });
    }

    return kr;
}
kern_return_t dyld_process_info_base::addDyldImage(task_t task, uint64_t dyldAddress, uint64_t dyldPathAddress, const char* localPath)
{
    if (!reserveSpace(sizeof(ImageInfo))) {
        // If we don't have enough space the data will be truncated, but well formed. Return success so
        // symbolicators can try to use it
        return KERN_SUCCESS;
    }
    __block kern_return_t kr = KERN_SUCCESS;
    _curImage->loadAddress = dyldAddress;
    _curImage->segmentStartIndex = _curSegmentIndex;
    if ( localPath != NULL ) {
        _curImage->path = addString(localPath, PATH_MAX);
    }
    else {
        _curImage->path = copyPath(task, dyldPathAddress);
        if ( kr != KERN_SUCCESS )
            return kr;
    }

    kr = addInfoFromRemoteLoadCommands(task, dyldAddress);
    if ( kr != KERN_SUCCESS )
        return kr;

    _curImage->segmentsCount = _curSegmentIndex - _curImage->segmentStartIndex;
    _curImage++;
    return KERN_SUCCESS;
}
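// Walks the load commands of a locally mapped mach header, recording the image UUID (LC_UUID) and
// one SegmentInfo per LC_SEGMENT/LC_SEGMENT_64, with bounds checks against malformed command lists.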
void dyld_process_info_base::addInfoFromLoadCommands(const mach_header* mh, uint64_t addressInTask, size_t size)
{
    const load_command* startCmds = NULL;
    if ( mh->magic == MH_MAGIC_64 )
        startCmds = (load_command*)((char *)mh + sizeof(mach_header_64));
    else if ( mh->magic == MH_MAGIC )
        startCmds = (load_command*)((char *)mh + sizeof(mach_header));
    else
        return;  // not a mach-o file, or wrong endianness

    const load_command* const cmdsEnd = (load_command*)((char*)startCmds + mh->sizeofcmds);
    const load_command* cmd = startCmds;
    for (uint32_t i = 0; i < mh->ncmds; ++i) {
        const load_command* nextCmd = (load_command*)((char *)cmd + cmd->cmdsize);
        if ( (cmd->cmdsize < 8) || (nextCmd > cmdsEnd) || (nextCmd < startCmds) ) {
            return;  // malformed load command
        }
        if ( cmd->cmd == LC_UUID ) {
            const uuid_command* uuidCmd = (uuid_command*)cmd;
            memcpy(_curImage->uuid, uuidCmd->uuid, 16);
        }
        else if ( cmd->cmd == LC_SEGMENT ) {
            if (!reserveSpace(sizeof(SegmentInfo))) { break; }
            const segment_command* segCmd = (segment_command*)cmd;
            _curSegment->name = copySegmentName(segCmd->segname);
            _curSegment->addr = segCmd->vmaddr;
            _curSegment->size = segCmd->vmsize;
            _curSegment++;
            _curSegmentIndex++;
        }
        else if ( cmd->cmd == LC_SEGMENT_64 ) {
            if (!reserveSpace(sizeof(SegmentInfo))) { break; }
            const segment_command_64* segCmd = (segment_command_64*)cmd;
            _curSegment->name = copySegmentName(segCmd->segname);
            _curSegment->addr = segCmd->vmaddr;
            _curSegment->size = segCmd->vmsize;
            _curSegment++;
            _curSegmentIndex++;
        }
        cmd = nextCmd;
    }
}
const char* dyld_process_info_base::copySegmentName(const char* name)
{
    // don't copy names of standard segments into string pool
    static const char* stdSegNames[] = {"__TEXT", "__DATA", "__LINKEDIT", "__DATA_DIRTY", "__DATA_CONST", "__OBJC", NULL};
    for (const char** s=stdSegNames; *s != NULL; ++s) {
        if ( strcmp(name, *s) == 0 )
            return *s;
    }
    // copy custom segment names into string pool
    return addString(name, 16);
}
void dyld_process_info_base::forEachImage(void (^callback)(uint64_t machHeaderAddress, const uuid_t uuid, const char* path)) const
{
    for (const ImageInfo* p = _firstImage; p < _curImage; ++p) {
        callback(p->loadAddress, p->uuid, p->path);
    }
}
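// Segments are recorded at their unslid vmaddr; the slide is recovered from the __TEXT segment of
// the requested image and applied before invoking the callback.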
void dyld_process_info_base::forEachSegment(uint64_t machHeaderAddress, void (^callback)(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName)) const
{
    for (const ImageInfo* p = _firstImage; p < _curImage; ++p) {
        if ( p->loadAddress == machHeaderAddress ) {
            uint64_t slide = 0;
            for (uint32_t i=0; i < p->segmentsCount; ++i) {
                const SegmentInfo* seg = &_firstSegment[p->segmentStartIndex+i];
                if ( strcmp(seg->name, "__TEXT") == 0 ) {
                    slide = machHeaderAddress - seg->addr;
                    break;
                }
            }
            for (uint32_t i=0; i < p->segmentsCount; ++i) {
                const SegmentInfo* seg = &_firstSegment[p->segmentStartIndex+i];
                callback(seg->addr + slide, seg->size, seg->name);
            }
            break;
        }
    }
}
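//
// Typical client usage (illustrative sketch only, not part of this file): snapshot a remote task's
// image list and walk it. Obtaining the task port and error handling are the caller's responsibility.
//
//     kern_return_t kr = KERN_SUCCESS;
//     dyld_process_info info = _dyld_process_info_create(task, 0, &kr);
//     if ( info != NULL ) {
//         _dyld_process_info_for_each_image(info, ^(uint64_t machHeaderAddress, const uuid_t uuid, const char* path) {
//             printf("0x%llX %s\n", machHeaderAddress, path);
//         });
//         _dyld_process_info_release(info);
//     }
//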
dyld_process_info _dyld_process_info_create(task_t task, uint64_t timestamp, kern_return_t* kr)
{
    __block dyld_process_info result = nullptr;
    kern_return_t krSink = KERN_SUCCESS;
    if (kr == nullptr) {
        kr = &krSink;
    }

    task_dyld_info_data_t task_dyld_info;
    mach_msg_type_number_t count = TASK_DYLD_INFO_COUNT;
    if ( kern_return_t r = task_info(task, TASK_DYLD_INFO, (task_info_t)&task_dyld_info, &count) ) {
        *kr = r;
        return nullptr;
    }

    // The kernel will return MACH_VM_MIN_ADDRESS for an executable that has not had dyld loaded
    if (task_dyld_info.all_image_info_addr == MACH_VM_MIN_ADDRESS)
        return nullptr;

    // We use a true shared memory buffer here so that, as long as libdyld in both processes reads and writes
    // the timestamp atomically, we get a coherent view of the remote image list.
    // That also means that we *MUST* directly read the memory, which is why we template the make() call
    // on the 32- or 64-bit layout of the remote dyld_all_image_infos.
    withRemoteBuffer(task, task_dyld_info.all_image_info_addr, (size_t)task_dyld_info.all_image_info_size, true, false, kr, ^(void *buffer, size_t size) {
        dyld_process_info_ptr base;
        if (task_dyld_info.all_image_info_format == TASK_DYLD_ALL_IMAGE_INFO_32) {
            const dyld_all_image_infos_32* info = (const dyld_all_image_infos_32*)buffer;
            base = dyld_process_info_base::make<dyld_all_image_infos_32, dyld_image_info_32>(task, *info, timestamp, kr);
        } else {
            const dyld_all_image_infos_64* info = (const dyld_all_image_infos_64*)buffer;
            base = dyld_process_info_base::make<dyld_all_image_infos_64, dyld_image_info_64>(task, *info, timestamp, kr);
        }
        result = base.release();
    });

    return result;
}
void _dyld_process_info_get_state(dyld_process_info info, dyld_process_state_info* stateInfo)
{
    *stateInfo = *info->stateInfo();
}

void _dyld_process_info_get_cache(dyld_process_info info, dyld_process_cache_info* cacheInfo)
{
    *cacheInfo = *info->cacheInfo();
}

void _dyld_process_info_retain(dyld_process_info object)
{
    const_cast<dyld_process_info_base*>(object)->retain();
}

dyld_platform_t _dyld_process_info_get_platform(dyld_process_info object) {
    return const_cast<dyld_process_info_base*>(object)->platform();
}

void _dyld_process_info_release(dyld_process_info object)
{
    const_cast<dyld_process_info_base*>(object)->release();
}

void _dyld_process_info_for_each_image(dyld_process_info info, void (^callback)(uint64_t machHeaderAddress, const uuid_t uuid, const char* path))
{
    info->forEachImage(callback);
}

void _dyld_process_info_for_each_segment(dyld_process_info info, uint64_t machHeaderAddress, void (^callback)(uint64_t segmentAddress, uint64_t segmentSize, const char* segmentName))
{
    info->forEachSegment(machHeaderAddress, callback);
}