/* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
 *
 * Copyright (c) 2009-2010 Apple Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <stdint.h>
#include <string.h>

#include <Availability.h>

#include "dsc_iterator.h"
#include "dyld_cache_format.h"
#include "Architectures.hpp"
#include "MachOFileAbstraction.hpp"
#include "CacheFileAbstraction.hpp"
42 // convert an address in the shared region where the cache would normally be mapped, into an address where the cache is currently mapped
44 const uint8_t* mappedAddress(const uint8_t* cache
, uint64_t addr
)
46 const dyldCacheHeader
<E
>* header
= (dyldCacheHeader
<E
>*)cache
;
47 const dyldCacheFileMapping
<E
>* mappings
= (dyldCacheFileMapping
<E
>*)&cache
[header
->mappingOffset()];
48 for (uint32_t i
=0; i
< header
->mappingCount(); ++i
) {
49 if ( (mappings
[i
].address() <= addr
) && (addr
< (mappings
[i
].address() + mappings
[i
].size())) ) {
50 return &cache
[mappings
[i
].file_offset() + addr
- mappings
[i
].address()];
56 // call the callback block on each segment in this image
58 void walkSegments(const uint8_t* cache
, const char* dylibPath
, const uint8_t* machHeader
, uint64_t slide
, dyld_shared_cache_iterator_slide_t callback
)
60 typedef typename
A::P P
;
61 typedef typename
A::P::E E
;
62 const macho_header
<P
>* mh
= (const macho_header
<P
>*)machHeader
;
63 const macho_load_command
<P
>* const cmds
= (macho_load_command
<P
>*)(machHeader
+ sizeof(macho_header
<P
>));
64 const uint32_t cmd_count
= mh
->ncmds();
65 const macho_load_command
<P
>* cmd
= cmds
;
66 for (uint32_t i
= 0; i
< cmd_count
; ++i
) {
67 if ( cmd
->cmd() == macho_segment_command
<P
>::CMD
) {
68 macho_segment_command
<P
>* segCmd
= (macho_segment_command
<P
>*)cmd
;
69 uint64_t fileOffset
= segCmd
->fileoff();
70 // work around until <rdar://problem/7022345> is fixed
71 if ( fileOffset
== 0 ) {
72 fileOffset
= (machHeader
- cache
);
74 uint64_t sizem
= segCmd
->vmsize();
75 if ( strcmp(segCmd
->segname(), "__LINKEDIT") == 0 ) {
76 // clip LINKEDIT size if bigger than cache file
77 const dyldCacheHeader
<E
>* header
= (dyldCacheHeader
<E
>*)cache
;
78 const dyldCacheFileMapping
<E
>* mappings
= (dyldCacheFileMapping
<E
>*)&cache
[header
->mappingOffset()];
79 if ( mappings
[2].file_offset() <= fileOffset
) {
80 if ( sizem
> mappings
[2].size() )
81 sizem
= mappings
[2].file_offset() + mappings
[2].size() - fileOffset
;
84 callback(dylibPath
, segCmd
->segname(), fileOffset
, sizem
, segCmd
->vmaddr()+slide
, slide
);
86 cmd
= (const macho_load_command
<P
>*)(((uint8_t*)cmd
)+cmd
->cmdsize());
91 // call walkSegments on each image in the cache
93 int walkImages(const uint8_t* cache
, dyld_shared_cache_iterator_slide_t callback
)
95 typedef typename
A::P::E E
;
96 typedef typename
A::P P
;
97 typedef typename
A::P::uint_t pint_t
;
98 const dyldCacheHeader
<E
>* header
= (dyldCacheHeader
<E
>*)cache
;
100 if ( header
->mappingOffset() >= 0x48 ) {
101 const dyldCacheFileMapping
<E
>* mappings
= (dyldCacheFileMapping
<E
>*)&cache
[header
->mappingOffset()];
102 uint64_t storedPointerToHeader
= P::getP(*((pint_t
*)&cache
[mappings
[1].file_offset()]));
103 slide
= storedPointerToHeader
- mappings
[0].address();
105 const dyldCacheImageInfo
<E
>* dylibs
= (dyldCacheImageInfo
<E
>*)&cache
[header
->imagesOffset()];
106 for (uint32_t i
=0; i
< header
->imagesCount(); ++i
) {
107 const char* dylibPath
= (char*)cache
+ dylibs
[i
].pathFileOffset();
108 const uint8_t* machHeader
= mappedAddress
<E
>(cache
, dylibs
[i
].address());
109 walkSegments
<A
>(cache
, dylibPath
, machHeader
, slide
, callback
);
117 // Given a pointer to an in-memory copy of a dyld shared cache file,
118 // this routine will call the callback block once for each segment
119 // in each dylib in the shared cache file.
120 // Returns -1 if there was an error, otherwise 0.
121 int dyld_shared_cache_iterate_segments_with_slide(const void* shared_cache_file
, dyld_shared_cache_iterator_slide_t callback
)
123 const uint8_t* cache
= (uint8_t*)shared_cache_file
;
124 if ( strcmp((char*)cache
, "dyld_v1 i386") == 0 )
125 return dyld::walkImages
<x86
>(cache
, callback
);
126 else if ( strcmp((char*)cache
, "dyld_v1 x86_64") == 0 )
127 return dyld::walkImages
<x86_64
>(cache
, callback
);
128 else if ( strcmp((char*)cache
, "dyld_v1 armv5") == 0 )
129 return dyld::walkImages
<arm
>(cache
, callback
);
130 else if ( strcmp((char*)cache
, "dyld_v1 armv6") == 0 )
131 return dyld::walkImages
<arm
>(cache
, callback
);
132 else if ( strcmp((char*)cache
, "dyld_v1 armv7") == 0 )
133 return dyld::walkImages
<arm
>(cache
, callback
);
134 else if ( strncmp((char*)cache
, "dyld_v1 armv7", 14) == 0 )
135 return dyld::walkImages
<arm
>(cache
, callback
);
140 // implement non-block version by calling block version
141 int dyld_shared_cache_iterate_segments_with_slide_nb(const void* shared_cache_file
, dyld_shared_cache_iterator_slide_nb_t func
, void* userData
)
143 return dyld_shared_cache_iterate_segments_with_slide(shared_cache_file
, ^(const char* dylibName
, const char* segName
,
144 uint64_t offset
, uint64_t size
, uint64_t mappedddress
, uint64_t slide
) {
145 (*func
)(dylibName
, segName
, offset
, size
, mappedddress
, slide
, userData
);
150 // implement non-slide version by wrapping slide version in block
151 int dyld_shared_cache_iterate_segments(const void* shared_cache_file
, dyld_shared_cache_iterator_t callback
)
153 dyld_shared_cache_iterator_slide_t wrapper_cb
= ^(const char* dylibName
, const char* segName
, uint64_t offset
,
154 uint64_t size
, uint64_t mappedddress
, uint64_t slide
) {
155 callback(dylibName
, segName
, offset
, size
, mappedddress
);
157 return dyld_shared_cache_iterate_segments_with_slide(shared_cache_file
, wrapper_cb
);
160 // implement non-slide,non-block version by wrapping slide version in block
161 int dyld_shared_cache_iterate_segments_nb(const void* shared_cache_file
, dyld_shared_cache_iterator_nb_t func
, void* userData
)
163 return dyld_shared_cache_iterate_segments_with_slide(shared_cache_file
, ^(const char* dylibName
, const char* segName
,
164 uint64_t offset
, uint64_t size
, uint64_t mappedddress
, uint64_t slide
) {
165 (*func
)(dylibName
, segName
, offset
, size
, mappedddress
, userData
);