// apple/dyld (dyld-210.2.3) — launch-cache/dsc_iterator.cpp
1 /* -*- mode: C++; c-basic-offset: 4; tab-width: 4 -*-
2 *
3 * Copyright (c) 2009-2010 Apple Inc. All rights reserved.
4 *
5 * @APPLE_LICENSE_HEADER_START@
6 *
7 * This file contains Original Code and/or Modifications of Original Code
8 * as defined in and that are subject to the Apple Public Source License
9 * Version 2.0 (the 'License'). You may not use this file except in
10 * compliance with the License. Please obtain a copy of the License at
11 * http://www.opensource.apple.com/apsl/ and read it before using this
12 * file.
13 *
14 * The Original Code and all software distributed under the License are
15 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
16 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
17 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
19 * Please see the License for the specific language governing rights and
20 * limitations under the License.
21 *
22 * @APPLE_LICENSE_HEADER_END@
23 */
24
25 #include <stdlib.h>
26 #include <stdio.h>
27 #include <Availability.h>
28
29
30 #include "dsc_iterator.h"
31 #include "dyld_cache_format.h"
32 #define NO_ULEB
33 #include "Architectures.hpp"
34 #include "MachOFileAbstraction.hpp"
35 #include "CacheFileAbstraction.hpp"
36
37
38
39 namespace dyld {
40
41
42 // convert an address in the shared region where the cache would normally be mapped, into an address where the cache is currently mapped
43 template <typename E>
44 const uint8_t* mappedAddress(const uint8_t* cache, uint64_t addr)
45 {
46 const dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)cache;
47 const dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)&cache[header->mappingOffset()];
48 for (uint32_t i=0; i < header->mappingCount(); ++i) {
49 if ( (mappings[i].address() <= addr) && (addr < (mappings[i].address() + mappings[i].size())) ) {
50 return &cache[mappings[i].file_offset() + addr - mappings[i].address()];
51 }
52 }
53 return NULL;
54 }
55
56 // call the callback block on each segment in this image
57 template <typename A>
58 void walkSegments(const uint8_t* cache, const char* dylibPath, const uint8_t* machHeader, uint64_t slide, dyld_shared_cache_iterator_slide_t callback)
59 {
60 typedef typename A::P P;
61 typedef typename A::P::E E;
62 const macho_header<P>* mh = (const macho_header<P>*)machHeader;
63 const macho_load_command<P>* const cmds = (macho_load_command<P>*)(machHeader + sizeof(macho_header<P>));
64 const uint32_t cmd_count = mh->ncmds();
65 const macho_load_command<P>* cmd = cmds;
66 for (uint32_t i = 0; i < cmd_count; ++i) {
67 if ( cmd->cmd() == macho_segment_command<P>::CMD ) {
68 macho_segment_command<P>* segCmd = (macho_segment_command<P>*)cmd;
69 uint64_t fileOffset = segCmd->fileoff();
70 // work around until <rdar://problem/7022345> is fixed
71 if ( fileOffset == 0 ) {
72 fileOffset = (machHeader - cache);
73 }
74 uint64_t sizem = segCmd->vmsize();
75 if ( strcmp(segCmd->segname(), "__LINKEDIT") == 0 ) {
76 // clip LINKEDIT size if bigger than cache file
77 const dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)cache;
78 const dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)&cache[header->mappingOffset()];
79 if ( mappings[2].file_offset() <= fileOffset ) {
80 if ( sizem > mappings[2].size() )
81 sizem = mappings[2].file_offset() + mappings[2].size() - fileOffset;
82 }
83 }
84 callback(dylibPath, segCmd->segname(), fileOffset, sizem, segCmd->vmaddr()+slide, slide);
85 }
86 cmd = (const macho_load_command<P>*)(((uint8_t*)cmd)+cmd->cmdsize());
87 }
88 }
89
90
91 // call walkSegments on each image in the cache
92 template <typename A>
93 int walkImages(const uint8_t* cache, dyld_shared_cache_iterator_slide_t callback)
94 {
95 typedef typename A::P::E E;
96 typedef typename A::P P;
97 typedef typename A::P::uint_t pint_t;
98 const dyldCacheHeader<E>* header = (dyldCacheHeader<E>*)cache;
99 uint64_t slide = 0;
100 if ( header->mappingOffset() >= 0x48 ) {
101 const dyldCacheFileMapping<E>* mappings = (dyldCacheFileMapping<E>*)&cache[header->mappingOffset()];
102 uint64_t storedPointerToHeader = P::getP(*((pint_t*)&cache[mappings[1].file_offset()]));
103 slide = storedPointerToHeader - mappings[0].address();
104 }
105 const dyldCacheImageInfo<E>* dylibs = (dyldCacheImageInfo<E>*)&cache[header->imagesOffset()];
106 for (uint32_t i=0; i < header->imagesCount(); ++i) {
107 const char* dylibPath = (char*)cache + dylibs[i].pathFileOffset();
108 const uint8_t* machHeader = mappedAddress<E>(cache, dylibs[i].address());
109 walkSegments<A>(cache, dylibPath, machHeader, slide, callback);
110 }
111 return 0;
112 }
113
114 }
115
116
117 // Given a pointer to an in-memory copy of a dyld shared cache file,
118 // this routine will call the callback block once for each segment
119 // in each dylib in the shared cache file.
120 // Returns -1 if there was an error, otherwise 0.
121 int dyld_shared_cache_iterate_segments_with_slide(const void* shared_cache_file, dyld_shared_cache_iterator_slide_t callback)
122 {
123 const uint8_t* cache = (uint8_t*)shared_cache_file;
124 if ( strcmp((char*)cache, "dyld_v1 i386") == 0 )
125 return dyld::walkImages<x86>(cache, callback);
126 else if ( strcmp((char*)cache, "dyld_v1 x86_64") == 0 )
127 return dyld::walkImages<x86_64>(cache, callback);
128 else if ( strcmp((char*)cache, "dyld_v1 armv5") == 0 )
129 return dyld::walkImages<arm>(cache, callback);
130 else if ( strcmp((char*)cache, "dyld_v1 armv6") == 0 )
131 return dyld::walkImages<arm>(cache, callback);
132 else if ( strcmp((char*)cache, "dyld_v1 armv7") == 0 )
133 return dyld::walkImages<arm>(cache, callback);
134 else if ( strncmp((char*)cache, "dyld_v1 armv7", 14) == 0 )
135 return dyld::walkImages<arm>(cache, callback);
136 else
137 return -1;
138 }
139
140 // implement non-block version by calling block version
141 int dyld_shared_cache_iterate_segments_with_slide_nb(const void* shared_cache_file, dyld_shared_cache_iterator_slide_nb_t func, void* userData)
142 {
143 return dyld_shared_cache_iterate_segments_with_slide(shared_cache_file, ^(const char* dylibName, const char* segName,
144 uint64_t offset, uint64_t size, uint64_t mappedddress, uint64_t slide) {
145 (*func)(dylibName, segName, offset, size, mappedddress, slide, userData);
146 });
147 }
148
149
150 // implement non-slide version by wrapping slide version in block
151 int dyld_shared_cache_iterate_segments(const void* shared_cache_file, dyld_shared_cache_iterator_t callback)
152 {
153 dyld_shared_cache_iterator_slide_t wrapper_cb = ^(const char* dylibName, const char* segName, uint64_t offset,
154 uint64_t size, uint64_t mappedddress, uint64_t slide) {
155 callback(dylibName, segName, offset, size, mappedddress);
156 };
157 return dyld_shared_cache_iterate_segments_with_slide(shared_cache_file, wrapper_cb);
158 }
159
160 // implement non-slide,non-block version by wrapping slide version in block
161 int dyld_shared_cache_iterate_segments_nb(const void* shared_cache_file, dyld_shared_cache_iterator_nb_t func, void* userData)
162 {
163 return dyld_shared_cache_iterate_segments_with_slide(shared_cache_file, ^(const char* dylibName, const char* segName,
164 uint64_t offset, uint64_t size, uint64_t mappedddress, uint64_t slide) {
165 (*func)(dylibName, segName, offset, size, mappedddress, userData);
166 });
167 }
168