dyld-832.7.1.tar.gz
[apple/dyld.git] / testing / kernel-cache-tests / auxkc-pageablekc-vtable-patching / test.py
1 #!/usr/bin/python2.7
2
3 import os
4 import KernelCollection
5
6
7 # The kernel has class OSObject and subclass KernelClass
8 # foo.kext subclasses KernelClass to get Foo1, and subclasses that to get Foo2
9 # bar.kext subclasses Foo1 to get Bar1, and subclasses that to get Bar2
10
11 # In KernelClass the vtable layout is:
12 # [ ..., foo() kernelClassUsed0() ]
13
14 # In Foo1, the layout is:
15 # [ ..., foo() kernelClass_RESERVED0(), foo1Used0(), foo1Used1() ]
16
17 # In Foo2, the layout is:
18 # [ ..., foo() kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1(), foo1_RESERVED2(), foo1_RESERVED3() ]
19
20 # In Bar1, the layout is:
21 # [ ..., foo() kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1(), foo1_RESERVED2(), foo1_RESERVED3() ]
22
23 # In Bar2, the layout is:
24 # [ ..., foo() kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1(), foo1_RESERVED2(), foo1_RESERVED3() ]
25
26 # All kext's will end up getting the vtable entry after foo() patched to kernelClassUsed0()
27 # Foo2, Bar1, Bar2, will also get the vtable entry after foo1Used0() patched to foo1Used1()
28
def findSymbolVMAddr(kernel_cache, dylib_index, symbol_name):
    """Return the vmAddr string of symbol_name in the given dylib's global-symbols, or None."""
    symbols = kernel_cache.dictionary()["dylibs"][dylib_index]["global-symbols"]
    # First entry whose name matches wins; None when the symbol is absent.
    return next((entry["vmAddr"] for entry in symbols if entry["name"] == symbol_name), None)
34
def findFixupVMAddr(kernel_cache, fixup_name):
    """Return the vmAddr (dict key) of the top-level fixup whose target equals fixup_name, or None.

    Uses items() rather than the Python 2-only iteritems(): identical iteration
    behavior on Python 2, and the helper keeps working if the suite moves to Python 3.
    """
    for fixup_vmaddr, fixup_target in kernel_cache.dictionary()["fixups"].items():
        if fixup_target == fixup_name:
            return fixup_vmaddr
    return None
40
def findPagableFixupVMAddr(kernel_cache, dylib_index, fixup_name):
    """Return the vmAddr (dict key) of a per-dylib fixup in the pageable KC matching fixup_name, or None.

    Uses items() rather than the Python 2-only iteritems(): identical iteration
    behavior on Python 2, and the helper keeps working if the suite moves to Python 3.
    """
    for fixup_vmaddr, fixup_target in kernel_cache.dictionary()["dylibs"][dylib_index]["fixups"].items():
        if fixup_target == fixup_name:
            return fixup_vmaddr
    return None
46
def findAuxFixupVMAddr(kernel_cache, dylib_index, fixup_name):
    """Return the vmAddr (dict key) of a per-dylib fixup in the aux KC matching fixup_name, or None.

    Uses items() rather than the Python 2-only iteritems(): identical iteration
    behavior on Python 2, and the helper keeps working if the suite moves to Python 3.
    """
    for fixup_vmaddr, fixup_target in kernel_cache.dictionary()["dylibs"][dylib_index]["fixups"].items():
        if fixup_target == fixup_name:
            return fixup_vmaddr
    return None
52
def offsetVMAddr(vmAddr, offset):
    """Shift a '0x...'-style hex address string by offset bytes.

    Returns a hex string with a lowercase '0x' prefix and uppercase digits,
    matching the formatting the analysis output uses for addresses.
    """
    adjusted = int(vmAddr, 16) + offset
    return '0x' + hex(adjusted).upper()[2:]
57
58 def check(kernel_cache):
59 enableLogging = False
60 kernel_cache.buildKernelCollection("x86_64", "/auxkc-pageablekc-vtable-patching/main.kc", "/auxkc-pageablekc-vtable-patching/main.kernel", "", [], [])
61 kernel_cache.analyze("/auxkc-pageablekc-vtable-patching/main.kc", ["-layout", "-arch", "x86_64"])
62
63 assert len(kernel_cache.dictionary()["dylibs"]) == 1
64 assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.kernel"
65
66 # Get the addresses for the symbols we are looking at. This will make it easier to work out the fixup slots
67 kernel_cache.analyze("/auxkc-pageablekc-vtable-patching/main.kc", ["-symbols", "-arch", "x86_64"])
68
69 # From kernel, we want to know where the vtable is, and the foo() and kernelClassUsed0() slots in that vtable
70 # KernelClass::foo()
71 kernelClassFooVMAddr = findSymbolVMAddr(kernel_cache, 0, "__ZN11KernelClass3fooEv")
72 if enableLogging:
73 print "kernelClassFooVMAddr: " + kernelClassFooVMAddr
74
75 # KernelClass::kernelClassUsed0()
76 kernelClassUsed0VMAddr = findSymbolVMAddr(kernel_cache, 0, "__ZN11KernelClass16kernelClassUsed0Ev")
77 if enableLogging:
78 print "kernelClassUsed0VMAddr: " + kernelClassUsed0VMAddr
79
80
81 # Check the fixups
82 kernel_cache.analyze("/auxkc-pageablekc-vtable-patching/main.kc", ["-fixups", "-arch", "x86_64"])
83
84 # In vtable for Foo, we match the entry for Foo::foo() by looking for its value on the RHS of the fixup
85 kernelFooFixupAddr = findFixupVMAddr(kernel_cache, "kc(0) + " + kernelClassFooVMAddr + " : pointer64")
86 if enableLogging:
87 print "kernelFooFixupAddr: " + kernelFooFixupAddr
88 # Then the following fixup should be to KernelClass::kernelClassUsed0()
89 kernelFooNextFixupAddr = offsetVMAddr(kernelFooFixupAddr, 8)
90 if enableLogging:
91 print "kernelFooNextFixupAddr: " + kernelFooNextFixupAddr
92 assert kernel_cache.dictionary()["fixups"][kernelFooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMAddr + " : pointer64"
93
94 # From this point on, the vmAddr for __ZN11KernelClass16kernelClassUsed0Ev is an offset in to kc(0)
95 # so we want to turn it from a vmAddr to vmOffset by subtracting the base address of 0x4000 which is on __HIB
96 kernelClassUsed0VMOffset = offsetVMAddr(kernelClassUsed0VMAddr, -0x4000)
97 if enableLogging:
98 print "kernelClassUsed0VMOffset: " + kernelClassUsed0VMOffset
99
100 # -----------------------------------------------------------
101 # Now build an pageable cache using the baseline kernel collection
102 kernel_cache.buildPageableKernelCollection("x86_64", "/auxkc-pageablekc-vtable-patching/pageable.kc", "/auxkc-pageablekc-vtable-patching/main.kc", "/auxkc-pageablekc-vtable-patching/extensions", ["com.apple.foo1", "com.apple.foo2"], [])
103 kernel_cache.analyze("/auxkc-pageablekc-vtable-patching/pageable.kc", ["-layout", "-arch", "x86_64"])
104
105 assert len(kernel_cache.dictionary()["dylibs"]) == 2
106 assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.foo1"
107 assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.foo2"
108
109
110 # Get the addresses for the symbols we are looking at. This will make it easier to work out the fixup slots
111 kernel_cache.analyze("/auxkc-pageablekc-vtable-patching/pageable.kc", ["-symbols", "-arch", "x86_64"])
112
113 # From foo1, find the vtable and its override of foo()
114 # Foo1::foo()
115 pageableFoo1FooVMAddr = findSymbolVMAddr(kernel_cache, 0, "__ZN4Foo13fooEv")
116 if enableLogging:
117 print "pageableFoo1FooVMAddr: " + pageableFoo1FooVMAddr
118
119 pageableFoo1FooUsed0VMAddr = findSymbolVMAddr(kernel_cache, 0, "__ZN4Foo19foo1Used0Ev")
120 if enableLogging:
121 print "pageableFoo1FooUsed0VMAddr: " + pageableFoo1FooUsed0VMAddr
122
123 pageableFoo1FooUsed1VMAddr = findSymbolVMAddr(kernel_cache, 0, "__ZN4Foo19foo1Used1Ev")
124 if enableLogging:
125 print "pageableFoo1FooUsed1VMAddr: " + pageableFoo1FooUsed1VMAddr
126
127 # From foo2, find the vtable and its override of foo()
128 # Foo2::foo()
129 pageableFoo2FooVMAddr = findSymbolVMAddr(kernel_cache, 1, "__ZN4Foo23fooEv")
130 if enableLogging:
131 print "pageableFoo2FooVMAddr: " + pageableFoo2FooVMAddr
132 # Also find Foo2::foo1Used0() as it overrides foo1Used0 from the superclass
133 pageableFoo2FooUsed0VMAddr = findSymbolVMAddr(kernel_cache, 1, "__ZN4Foo29foo1Used0Ev")
134 if enableLogging:
135 print "pageableFoo2FooUsed0VMAddr: " + pageableFoo2FooUsed0VMAddr
136
137
138 # Check the fixups
139 kernel_cache.analyze("/auxkc-pageablekc-vtable-patching/pageable.kc", ["-fixups", "-arch", "x86_64"])
140 kernel_cache.dictionary()["fixups"] == "none"
141
142 # --- foo1.kext ---
143 # The vtable we have is [ ..., foo(), kernelClass_RESERVED0(), foo1Used0(), foo1Used1() ]
144 # and we want [ ..., foo(), kernelClassUsed0(), foo1Used0(), foo1Used1() ]
145
146 # In vtable for Foo1, we match the entry for Foo1::foo() by looking for its value on the RHS of the fixup
147 pageableFoo1FooFixupAddr = findPagableFixupVMAddr(kernel_cache, 0, "kc(1) + " + pageableFoo1FooVMAddr)
148 if enableLogging:
149 print "pageableFoo1FooFixupAddr: " + pageableFoo1FooFixupAddr
150
151 # Then the following fixup should be to KernelClass::kernelClassUsed0()
152 pageableFoo1FooNextFixupAddr = offsetVMAddr(pageableFoo1FooFixupAddr, 8)
153 if enableLogging:
154 print "pageableFoo1FooNextFixupAddr: " + pageableFoo1FooNextFixupAddr
155 assert kernel_cache.dictionary()["dylibs"][0]["fixups"][pageableFoo1FooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMOffset
156
157 # Then we should have foo1Used0()
158 pageableFoo1FooNextFixupAddr = offsetVMAddr(pageableFoo1FooFixupAddr, 16)
159 if enableLogging:
160 print "pageableFoo1FooNextFixupAddr: " + pageableFoo1FooNextFixupAddr
161 assert kernel_cache.dictionary()["dylibs"][0]["fixups"][pageableFoo1FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed0VMAddr
162
163 # And then foo1Used1()
164 pageableFoo1FooNextFixupAddr = offsetVMAddr(pageableFoo1FooFixupAddr, 24)
165 if enableLogging:
166 print "pageableFoo1FooNextFixupAddr: " + pageableFoo1FooNextFixupAddr
167 assert kernel_cache.dictionary()["dylibs"][0]["fixups"][pageableFoo1FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed1VMAddr
168
169 # --- foo2.kext ---
170 # The vtable we have is [ ..., foo(), kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1() ]
171 # and we want [ ..., foo(), kernelClassUsed0(), foo1Used0(), foo1Used1() ]
172
173 # In vtable for Foo2, we match the entry for Foo2::foo() by looking for its value on the RHS of the fixup
174 pageableFoo2FooFixupAddr = findPagableFixupVMAddr(kernel_cache, 1, "kc(1) + " + pageableFoo2FooVMAddr)
175 if enableLogging:
176 print "pageableFoo2FooFixupAddr: " + pageableFoo2FooFixupAddr
177
178 # Then the following fixup should be to KernelClass::kernelClassUsed0()
179 pageableFoo2FooNextFixupAddr = offsetVMAddr(pageableFoo2FooFixupAddr, 8)
180 if enableLogging:
181 print "pageableFoo2FooNextFixupAddr: " + pageableFoo2FooNextFixupAddr
182 assert kernel_cache.dictionary()["dylibs"][1]["fixups"][pageableFoo2FooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMOffset
183
184 # Then we should have foo1Used0(), but Foo2 overrides that, so it should be the Foo2 implementation, not the Foo1 implementation
185 pageableFoo2FooNextFixupAddr = offsetVMAddr(pageableFoo2FooFixupAddr, 16)
186 if enableLogging:
187 print "pageableFoo2FooNextFixupAddr: " + pageableFoo2FooNextFixupAddr
188 assert kernel_cache.dictionary()["dylibs"][1]["fixups"][pageableFoo2FooNextFixupAddr] == "kc(1) + " + pageableFoo2FooUsed0VMAddr
189
190 # And then foo1Used1()
191 pageableFoo2FooNextFixupAddr = offsetVMAddr(pageableFoo2FooFixupAddr, 24)
192 if enableLogging:
193 print "pageableFoo2FooNextFixupAddr: " + pageableFoo2FooNextFixupAddr
194 assert kernel_cache.dictionary()["dylibs"][1]["fixups"][pageableFoo2FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed1VMAddr
195
196
197 # -----------------------------------------------------------
198 # Now build an aux cache using the baseline kernel collection
199 kernel_cache.buildAuxKernelCollection("x86_64", "/auxkc-pageablekc-vtable-patching/aux.kc", "/auxkc-pageablekc-vtable-patching/main.kc", "/auxkc-pageablekc-vtable-patching/pageable.kc", "/auxkc-pageablekc-vtable-patching/extensions", ["com.apple.bar1", "com.apple.bar2"], [])
200 kernel_cache.analyze("/auxkc-pageablekc-vtable-patching/aux.kc", ["-layout", "-arch", "x86_64"])
201
202 assert len(kernel_cache.dictionary()["dylibs"]) == 2
203 assert kernel_cache.dictionary()["dylibs"][0]["name"] == "com.apple.bar1"
204 assert kernel_cache.dictionary()["dylibs"][1]["name"] == "com.apple.bar2"
205
206
207 # Get the addresses for the symbols we are looking at. This will make it easier to work out the fixup slots
208 kernel_cache.analyze("/auxkc-pageablekc-vtable-patching/aux.kc", ["-symbols", "-arch", "x86_64"])
209
210 # From bar1, find the vtable and its override of foo()
211 # Bar1::foo()
212 auxBar1FooVMAddr = findSymbolVMAddr(kernel_cache, 0, "__ZN4Bar13fooEv")
213 if enableLogging:
214 print "auxBar1FooVMAddr: " + auxBar1FooVMAddr
215
216 # From bar2, find the vtable and its override of foo()
217 # Bar1::foo()
218 auxBar2FooVMAddr = findSymbolVMAddr(kernel_cache, 1, "__ZN4Bar23fooEv")
219 if enableLogging:
220 print "auxBar2FooVMAddr: " + auxBar2FooVMAddr
221
222
223 # Check the fixups
224 kernel_cache.analyze("/auxkc-pageablekc-vtable-patching/aux.kc", ["-fixups", "-arch", "x86_64"])
225
226 # --- foo1.kext ---
227 # The vtable we have is [ ..., foo(), kernelClass_RESERVED0(), foo1Used0(), foo1Used1() ]
228 # and we want [ ..., foo(), kernelClassUsed0(), foo1Used0(), foo1Used1() ]
229
230 # In vtable for Bar1, we match the entry for Bar1::foo() by looking for its value on the RHS of the fixup
231 auxBar1FooFixupAddr = findAuxFixupVMAddr(kernel_cache, 0, "kc(3) + " + auxBar1FooVMAddr)
232 if enableLogging:
233 print "auxBar1FooFixupAddr: " + auxBar1FooFixupAddr
234
235 # Then the following fixup should be to KernelClass::kernelClassUsed0()
236 auxBar1FooNextFixupAddr = offsetVMAddr(auxBar1FooFixupAddr, 8)
237 if enableLogging:
238 print "auxBar1FooNextFixupAddr: " + auxBar1FooNextFixupAddr
239 assert kernel_cache.dictionary()["dylibs"][0]["fixups"][auxBar1FooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMOffset
240
241 # Then we should have foo1Used0() from Foo2 as it overrides it from Foo1
242 auxBar1FooNextFixupAddr = offsetVMAddr(auxBar1FooFixupAddr, 16)
243 if enableLogging:
244 print "auxBar1FooNextFixupAddr: " + auxBar1FooNextFixupAddr
245 assert kernel_cache.dictionary()["dylibs"][0]["fixups"][auxBar1FooNextFixupAddr] == "kc(1) + " + pageableFoo2FooUsed0VMAddr
246
247 # And then foo1Used1()
248 auxBar1FooNextFixupAddr = offsetVMAddr(auxBar1FooFixupAddr, 24)
249 if enableLogging:
250 print "auxBar1FooNextFixupAddr: " + auxBar1FooNextFixupAddr
251 assert kernel_cache.dictionary()["dylibs"][0]["fixups"][auxBar1FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed1VMAddr
252
253 # --- bar2.kext ---
254 # The vtable we have is [ ..., foo(), kernelClass_RESERVED0(), foo1Used0(), foo1_RESERVED1() ]
255 # and we want [ ..., foo(), kernelClassUsed0(), foo1Used0(), foo1Used1() ]
256
257 # In vtable for Foo2, we match the entry for Foo2::foo() by looking for its value on the RHS of the fixup
258 auxBar2FooFixupAddr = findAuxFixupVMAddr(kernel_cache, 1, "kc(3) + " + auxBar2FooVMAddr)
259 if enableLogging:
260 print "auxBar2FooFixupAddr: " + auxBar2FooFixupAddr
261
262 # Then the following fixup should be to KernelClass::kernelClassUsed0()
263 auxBar2FooNextFixupAddr = offsetVMAddr(auxBar2FooFixupAddr, 8)
264 if enableLogging:
265 print "auxBar2FooNextFixupAddr: " + auxBar2FooNextFixupAddr
266 assert kernel_cache.dictionary()["dylibs"][1]["fixups"][auxBar2FooNextFixupAddr] == "kc(0) + " + kernelClassUsed0VMOffset
267
268 # Then we should have foo1Used0() from Foo2 as it overrides it from Foo1
269 auxBar2FooNextFixupAddr = offsetVMAddr(auxBar2FooFixupAddr, 16)
270 if enableLogging:
271 print "auxBar2FooNextFixupAddr: " + auxBar2FooNextFixupAddr
272 assert kernel_cache.dictionary()["dylibs"][1]["fixups"][auxBar2FooNextFixupAddr] == "kc(1) + " + pageableFoo2FooUsed0VMAddr
273
274 # And then foo1Used1()
275 auxBar2FooNextFixupAddr = offsetVMAddr(auxBar2FooFixupAddr, 24)
276 if enableLogging:
277 print "auxBar2FooNextFixupAddr: " + auxBar2FooNextFixupAddr
278 assert kernel_cache.dictionary()["dylibs"][1]["fixups"][auxBar2FooNextFixupAddr] == "kc(1) + " + pageableFoo1FooUsed1VMAddr
279
280 # [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-static -mkernel -nostdlib -Wl,-e,__start -Wl,-pie main.cpp kernel.cpp -Wl,-pagezero_size,0x0 -o main.kernel -Wl,-image_base,0x10000 -Wl,-segaddr,__HIB,0x4000 -Wl,-add_split_seg_info -Wl,-install_name,/usr/lib/swift/split.seg.v2.hack -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -Wl,-sectcreate,__LINKINFO,__symbolsets,SymbolSets.plist -Wl,-segprot,__LINKINFO,r--,r-- -std=c++11 -DKERNEL_USED=1
281 # [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const foo1.cpp -o extensions/foo1.kext/foo1 -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -std=c++11 -DFOO1_USED0=1 -DFOO1_USED1=1
282 # [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-no_data_const foo2.cpp -o extensions/foo2.kext/foo2 -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -std=c++11 -DFOO1_USED0=1
283 # [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-data_const bar1.cpp -o extensions/bar1.kext/bar1 -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -std=c++11 -DFOO1_USED0=1
284 # [~]> xcrun -sdk macosx.internal cc -arch x86_64 -Wl,-kext -mkernel -nostdlib -Wl,-add_split_seg_info -Wl,-data_const bar2.cpp -o extensions/bar2.kext/bar2 -iwithsysroot /System/Library/Frameworks/Kernel.framework/Headers -std=c++11 -DFOO1_USED0=1
285 # [~]> rm -r extensions/*.kext/*.ld
286