/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <string.h>

#include <Availability.h>

#include "dsc_iterator.h"
#include "dyld_cache_format.h"

#include "Architectures.hpp"
#include "MachOFileAbstraction.hpp"
#include "CacheFileAbstraction.hpp"
42 // convert an address in the shared region where the cache would normally be mapped, into an address where the cache is currently mapped
44 const uint8_t* mappedAddress(const uint8_t* cache
, const uint8_t* cacheEnd
, uint64_t addr
)
46 const dyldCacheHeader
<E
>* header
= (dyldCacheHeader
<E
>*)cache
;
47 const dyldCacheFileMapping
<E
>* mappings
= (dyldCacheFileMapping
<E
>*)&cache
[header
->mappingOffset()];
48 for (uint32_t i
=0; i
< header
->mappingCount(); ++i
) {
49 if ( (mappings
[i
].address() <= addr
) && (addr
< (mappings
[i
].address() + mappings
[i
].size())) ) {
50 uint32_t cacheOffset
= mappings
[i
].file_offset() + addr
- mappings
[i
].address();
51 const uint8_t* result
= &cache
[cacheOffset
];
52 if ( result
< cacheEnd
)
61 // call the callback block on each segment in this image
63 int walkSegments(const uint8_t* cache
, const uint8_t* cacheEnd
, const uint8_t* firstSeg
, const char* dylibPath
, const uint8_t* machHeader
,
64 void (^callback
)(const dyld_shared_cache_dylib_info
* dylibInfo
, const dyld_shared_cache_segment_info
* segInfo
))
66 typedef typename
A::P P
;
67 typedef typename
A::P::E E
;
68 dyld_shared_cache_dylib_info dylibInfo
;
69 dyld_shared_cache_segment_info segInfo
;
70 dylibInfo
.version
= 1;
71 dylibInfo
.isAlias
= (dylibPath
< (char*)firstSeg
); // paths for aliases are store between cache header and first segment
72 dylibInfo
.machHeader
= machHeader
;
73 dylibInfo
.path
= dylibPath
;
74 const macho_header
<P
>* mh
= (const macho_header
<P
>*)machHeader
;
75 const macho_load_command
<P
>* const cmds
= (macho_load_command
<P
>*)(machHeader
+ sizeof(macho_header
<P
>));
76 if ( (machHeader
+ mh
->sizeofcmds()) > cacheEnd
)
78 const uint32_t cmd_count
= mh
->ncmds();
79 const macho_load_command
<P
>* cmd
= cmds
;
81 dylibInfo
.uuid
= NULL
;
82 for (uint32_t i
= 0; i
< cmd_count
; ++i
) {
83 if ( cmd
->cmd() == LC_UUID
) {
84 const uuid_command
* uc
= (const uuid_command
*)cmd
;
85 dylibInfo
.uuid
= &uc
->uuid
;
88 cmd
= (const macho_load_command
<P
>*)(((uint8_t*)cmd
)+cmd
->cmdsize());
90 // callback for each LC_SEGMENT
92 for (uint32_t i
= 0; i
< cmd_count
; ++i
) {
93 if ( cmd
->cmd() == macho_segment_command
<P
>::CMD
) {
94 macho_segment_command
<P
>* segCmd
= (macho_segment_command
<P
>*)cmd
;
95 uint64_t fileOffset
= segCmd
->fileoff();
96 // work around until <rdar://problem/7022345> is fixed
97 if ( fileOffset
== 0 ) {
98 fileOffset
= (machHeader
- cache
);
100 uint64_t sizem
= segCmd
->vmsize();
101 if ( strcmp(segCmd
->segname(), "__LINKEDIT") == 0 ) {
102 // clip LINKEDIT size if bigger than cache file
103 if ( (fileOffset
+sizem
) > (uint64_t)(cacheEnd
-cache
) )
104 sizem
= (cacheEnd
-cache
)-fileOffset
;
107 segInfo
.name
= segCmd
->segname();
108 segInfo
.fileOffset
= fileOffset
;
109 segInfo
.fileSize
= sizem
;
110 segInfo
.address
= segCmd
->vmaddr();
111 callback(&dylibInfo
, &segInfo
);
113 cmd
= (const macho_load_command
<P
>*)(((uint8_t*)cmd
)+cmd
->cmdsize());
119 // call walkSegments on each image in the cache
120 template <typename A
>
121 int walkImages(const uint8_t* cache
, uint32_t size
, void (^callback
)(const dyld_shared_cache_dylib_info
* dylibInfo
, const dyld_shared_cache_segment_info
* segInfo
))
123 // Sanity check there is at least a header
124 if ( (size
> 0) && (size
< 0x7000) )
126 typedef typename
A::P::E E
;
127 typedef typename
A::P P
;
128 const dyldCacheHeader
<E
>* header
= (dyldCacheHeader
<E
>*)cache
;
129 const dyldCacheImageInfo
<E
>* dylibs
= (dyldCacheImageInfo
<E
>*)&cache
[header
->imagesOffset()];
130 const dyldCacheFileMapping
<E
>* mappings
= (dyldCacheFileMapping
<E
>*)&cache
[header
->mappingOffset()];
131 uint64_t greatestMappingOffset
= 0;
132 for (uint32_t i
=0; i
< header
->mappingCount(); ++i
) {
133 if ( (size
!= 0) && (mappings
[i
].file_offset() > size
) )
135 uint64_t endOffset
= mappings
[i
].file_offset()+mappings
[i
].size();
136 if ( (size
!= 0) && (endOffset
> size
) )
138 if ( endOffset
> greatestMappingOffset
)
139 greatestMappingOffset
= endOffset
;
141 const uint8_t* cacheEnd
= &cache
[size
];
143 // Zero size means old API is being used, assume all mapped
144 cacheEnd
= &cache
[greatestMappingOffset
];
147 // verifiy mappings are not bigger than size
148 if ( size
< greatestMappingOffset
)
151 // verify all image infos are mapped
152 if ( (const uint8_t*)&dylibs
[header
->imagesCount()] > cacheEnd
)
154 const uint8_t* firstSeg
= NULL
;
155 for (uint32_t i
=0; i
< header
->imagesCount(); ++i
) {
156 const char* dylibPath
= (char*)cache
+ dylibs
[i
].pathFileOffset();
157 if ( (const uint8_t*)dylibPath
> cacheEnd
)
159 const uint8_t* machHeader
= mappedAddress
<E
>(cache
, cacheEnd
, dylibs
[i
].address());
160 if ( machHeader
== NULL
)
162 if ( machHeader
> cacheEnd
)
164 if ( firstSeg
== NULL
)
165 firstSeg
= machHeader
;
166 int result
= walkSegments
<A
>(cache
, cacheEnd
, firstSeg
, dylibPath
, machHeader
, callback
);
176 // Given a pointer to an in-memory copy of a dyld shared cache file,
177 // this routine will call the callback block once for each segment
178 // in each dylib in the shared cache file.
179 // Returns -1 if there was an error, otherwise 0.
180 extern int dyld_shared_cache_iterate(const void* shared_cache_file
, uint32_t shared_cache_size
,
181 void (^callback
)(const dyld_shared_cache_dylib_info
* dylibInfo
, const dyld_shared_cache_segment_info
* segInfo
)) {
182 const uint8_t* cache
= (uint8_t*)shared_cache_file
;
183 if ( strcmp((char*)cache
, "dyld_v1 i386") == 0 )
184 return dyld::walkImages
<x86
>(cache
, shared_cache_size
, callback
);
185 else if ( strcmp((char*)cache
, "dyld_v1 x86_64") == 0 )
186 return dyld::walkImages
<x86_64
>(cache
, shared_cache_size
, callback
);
187 else if ( strcmp((char*)cache
, "dyld_v1 armv5") == 0 )
188 return dyld::walkImages
<arm
>(cache
, shared_cache_size
, callback
);
189 else if ( strcmp((char*)cache
, "dyld_v1 armv6") == 0 )
190 return dyld::walkImages
<arm
>(cache
, shared_cache_size
, callback
);
191 else if ( strcmp((char*)cache
, "dyld_v1 armv7") == 0 )
192 return dyld::walkImages
<arm
>(cache
, shared_cache_size
, callback
);
193 else if ( strncmp((char*)cache
, "dyld_v1 armv7", 14) == 0 )
194 return dyld::walkImages
<arm
>(cache
, shared_cache_size
, callback
);
200 // implement old version by calling new version
201 int dyld_shared_cache_iterate_segments_with_slide(const void* shared_cache_file
, dyld_shared_cache_iterator_slide_t callback
)
203 return dyld_shared_cache_iterate(shared_cache_file
, 0, ^(const dyld_shared_cache_dylib_info
* dylibInfo
, const dyld_shared_cache_segment_info
* segInfo
) {
204 callback(dylibInfo
->path
, segInfo
->name
, segInfo
->fileOffset
, segInfo
->fileSize
, segInfo
->address
, 0);
208 // implement non-block version by calling block version
209 int dyld_shared_cache_iterate_segments_with_slide_nb(const void* shared_cache_file
, dyld_shared_cache_iterator_slide_nb_t func
, void* userData
)
211 return dyld_shared_cache_iterate_segments_with_slide(shared_cache_file
, ^(const char* dylibName
, const char* segName
,
212 uint64_t offset
, uint64_t size
, uint64_t mappedddress
, uint64_t slide
) {
213 (*func
)(dylibName
, segName
, offset
, size
, mappedddress
, slide
, userData
);
218 // implement non-slide version by wrapping slide version in block
219 int dyld_shared_cache_iterate_segments(const void* shared_cache_file
, dyld_shared_cache_iterator_t callback
)
221 dyld_shared_cache_iterator_slide_t wrapper_cb
= ^(const char* dylibName
, const char* segName
, uint64_t offset
,
222 uint64_t size
, uint64_t mappedddress
, uint64_t slide
) {
223 callback(dylibName
, segName
, offset
, size
, mappedddress
);
225 return dyld_shared_cache_iterate_segments_with_slide(shared_cache_file
, wrapper_cb
);
228 // implement non-slide,non-block version by wrapping slide version in block
229 int dyld_shared_cache_iterate_segments_nb(const void* shared_cache_file
, dyld_shared_cache_iterator_nb_t func
, void* userData
)
231 return dyld_shared_cache_iterate_segments_with_slide(shared_cache_file
, ^(const char* dylibName
, const char* segName
,
232 uint64_t offset
, uint64_t size
, uint64_t mappedddress
, uint64_t slide
) {
233 (*func
)(dylibName
, segName
, offset
, size
, mappedddress
, userData
);