/*
 * Copyright (c) 2019-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <kern/kern_types.h>
#include <mach/notify.h>
#include <mach/resource_monitors.h>

#include <mach/host_special_ports.h>
#include <mach/mach_host_server.h>
#include <mach/host_priv_server.h>
#include <mach/fairplayd_notification.h>
#include <mach/arcade_upcall.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/host.h>
#include <kern/ast.h>
#include <kern/task.h>

#include <kern/arcade.h>
#include <mach/arcade_register_server.h>

#include <IOKit/IOBSD.h>

#if !defined(MAXPATHLEN)
#define MAXPATHLEN 4096
#endif

extern struct proc *current_proc(void);
extern int proc_pidpathinfo_internal(struct proc *p, uint64_t arg,
    char *buffer, uint32_t buffersize,
    int32_t *retval);
extern off_t proc_getexecutableoffset(struct proc *p);

/*
 * Simple structure to represent a handle for the Arcade registration.
 *
 * This registration is done with an independent kobject callback, rather
 * than a reply, so that we execute it in the context of the user-space
 * server replying (in order to do an entitlement check on the reply).
 *
 * We cache the resulting upcall port until it fails, and then we go
 * get another one.
 */
struct arcade_register {
	ipc_port_t ar_port;
};
typedef struct arcade_register *arcade_register_t;

static struct arcade_register arcade_register_global;

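/*
 * Illustrative sketch of the registration round trip. The kernel side is
 * implemented below; the fairplayd behavior described here is an assumption
 * based on the MIG interfaces this file uses, not something visible in this
 * file:
 *
 *   arcade_upcall_refresh()
 *       -> fairplayd_arcade_request(fairplayd_port, registration port)
 *          fairplayd presumably allocates a fresh upcall port and invokes
 *          the arcade_register MIG routine on the registration port
 *       <- arcade_register_new_upcall(registration port, upcall port)
 *          the kernel stashes the send right in arcade_upcall_port and
 *          wakes any thread sleeping on &arcade_upcall_port
 */
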
void
arcade_prepare(task_t task, thread_t thread)
{
	/* Platform binaries are exempt */
	if (task->t_flags & TF_PLATFORM) {
		return;
	}

	/* Check to see if the task has the arcade entitlement */
	if (!IOTaskHasEntitlement(task, "com.apple.developer.arcade-operations")) {
		return;
	}

	/* Others will stop in the AST to make an upcall */
	thread_ast_set(thread, AST_ARCADE);
}

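/*
 * Note: arcade_prepare() only marks the thread; the validation itself
 * happens in arcade_ast() below when the pending AST_ARCADE is taken on the
 * way back to user space (the dispatch out of the generic AST path is
 * assumed here, it is not part of this file). The intended effect is that
 * an entitled process is validated before it executes any user code.
 */
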
static LCK_GRP_DECLARE(arcade_upcall_lck_grp, "arcade_upcall");
static LCK_MTX_DECLARE(arcade_upcall_mutex, &arcade_upcall_lck_grp);

static ipc_port_t arcade_upcall_port = IP_NULL;
static boolean_t arcade_upcall_refresh_in_progress = FALSE;
static boolean_t arcade_upcall_refresh_waiters = FALSE;

void
arcade_init(void)
{
	ipc_port_t port;

	/* Initialize the global arcade_register kobject and associated port */
	port = ipc_kobject_alloc_port((ipc_kobject_t)&arcade_register_global,
	    IKOT_ARCADE_REG, IPC_KOBJECT_ALLOC_MAKE_SEND);
	os_atomic_store(&arcade_register_global.ar_port, port, release);
}

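/*
 * arcade_init() note: IPC_KOBJECT_ALLOC_MAKE_SEND creates the kobject port
 * with a send right already made, and the release-ordered os_atomic_store
 * publishes the port only after it is fully set up, so concurrent readers
 * of ar_port see either IP_NULL or a usable port.
 */
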
arcade_register_t
convert_port_to_arcade_register(
	ipc_port_t port)
{
	arcade_register_t arcade_reg = ARCADE_REG_NULL;

	if (IP_VALID(port)) {
		/* No need to lock port because of how refs managed */
		if (ip_kotype(port) == IKOT_ARCADE_REG) {
			assert(ip_active(port));
			arcade_reg = (arcade_register_t)ip_get_kobject(port);
			assert(arcade_reg == &arcade_register_global);
			assert(arcade_reg->ar_port == port);
		}
	}
	return arcade_reg;
}

ipc_port_t
convert_arcade_register_to_port(
	arcade_register_t arcade_reg)
{
	ipc_port_t port = IP_NULL;

	if (arcade_reg == &arcade_register_global) {
		port = arcade_reg->ar_port;
	}
	return port;
}

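/*
 * The two converters above are the conversion hooks expected by the MIG
 * glue declared in <mach/arcade_register_server.h>: incoming messages carry
 * a send right to the registration port, and MIG translates it to an
 * arcade_register_t (and back) through these routines. Because there is
 * exactly one statically allocated registration object, no lookup table or
 * extra reference counting is needed; the converters only sanity-check that
 * the port really is the global kobject port.
 */
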
kern_return_t
arcade_register_new_upcall(
	arcade_register_t arcade_reg,
	mach_port_t port)
{
	if (arcade_reg == ARCADE_REG_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	assert(arcade_reg == &arcade_register_global);

	/* Check to see if this is the real arcade subscription service */
	if (!IOTaskHasEntitlement(current_task(), "com.apple.arcade.fpsd")) {
		return KERN_INVALID_VALUE;
	}

	lck_mtx_lock(&arcade_upcall_mutex);

	if (arcade_upcall_refresh_in_progress) {
		/* If we have an old arcade upcall port, discard it */
		if (IP_VALID(arcade_upcall_port)) {
			ipc_port_release_send(arcade_upcall_port);
			arcade_upcall_port = IP_NULL;
		}
		arcade_upcall_port = port; /* owns send right */

		/* Wake up anyone waiting for the update */
		lck_mtx_unlock(&arcade_upcall_mutex);
		thread_wakeup(&arcade_upcall_port);
		return KERN_SUCCESS;
	}

	lck_mtx_unlock(&arcade_upcall_mutex);
	return KERN_FAILURE;
}

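/*
 * A registration is only accepted while arcade_upcall_refresh() has a
 * refresh outstanding: the "com.apple.arcade.fpsd" entitlement check keeps
 * arbitrary tasks from installing an upcall port, and the
 * arcade_upcall_refresh_in_progress gate rejects unsolicited registrations
 * the kernel never asked for.
 *
 * For illustration only (not part of this file): a registrant holding that
 * entitlement would be expected to complete the round trip roughly as
 * sketched below, using the user-side MIG stub for the arcade_register
 * subsystem (the stub name and the port setup are assumptions here):
 *
 *	mach_port_t upcall_port;
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
 *	    &upcall_port);
 *	mach_port_insert_right(mach_task_self(), upcall_port, upcall_port,
 *	    MACH_MSG_TYPE_MAKE_SEND);
 *	arcade_register_new_upcall(registration_port, upcall_port);
 */
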
static kern_return_t
arcade_upcall_refresh(uint64_t deadline)
{
	ipc_port_t fairplayd_port = IP_NULL;
	wait_result_t wr = THREAD_NOT_WAITING;
	kern_return_t kr;

	LCK_MTX_ASSERT(&arcade_upcall_mutex, LCK_MTX_ASSERT_OWNED);

	/* If someone else is doing the update, wait for them */
	if (arcade_upcall_refresh_in_progress) {
		arcade_upcall_refresh_waiters = TRUE;
		wr = lck_mtx_sleep(&arcade_upcall_mutex, LCK_SLEEP_DEFAULT,
		    &arcade_upcall_refresh_in_progress, THREAD_INTERRUPTIBLE);
		goto out;
	}

	arcade_upcall_refresh_in_progress = TRUE;

	/* If we have an old arcade upcall port, discard it */
	if (IP_VALID(arcade_upcall_port)) {
		ipc_port_release_send(arcade_upcall_port);
		arcade_upcall_port = IP_NULL;
	}

	if (host_get_fairplayd_port(host_priv_self(), &fairplayd_port) != KERN_SUCCESS) {
		panic("arcade_upcall_refresh(get fairplayd)");
	}

	/* If no valid fairplayd port registered, we're done */
	if (!IP_VALID(fairplayd_port)) {
		goto finish_in_progress;
	}

	/*
	 * Send a fairplayd notification to request a new arcade upcall port.
	 * Pass along a send right to the arcade_register kobject to complete
	 * the registration.
	 */
	ipc_port_t port = convert_arcade_register_to_port(&arcade_register_global);
	kr = fairplayd_arcade_request(fairplayd_port, port);

	ipc_port_release_send(fairplayd_port);

	switch (kr) {
	case MACH_MSG_SUCCESS:
		break;
	default:
		goto finish_in_progress;
	}

	/*
	 * Wait (with the caller-supplied deadline) for the new arcade upcall
	 * port to be registered through the arcade_register kobject.
	 */
	wr = lck_mtx_sleep_deadline(&arcade_upcall_mutex, LCK_SLEEP_DEFAULT,
	    &arcade_upcall_port, THREAD_INTERRUPTIBLE, deadline);

finish_in_progress:
	arcade_upcall_refresh_in_progress = FALSE;

	/* Wakeup any waiters */
	if (arcade_upcall_refresh_waiters) {
		arcade_upcall_refresh_waiters = FALSE;
		thread_wakeup_with_result(&arcade_upcall_refresh_in_progress, wr);
	}

out:
	switch (wr) {
	case THREAD_AWAKENED:
		return KERN_SUCCESS;
	default:
		return KERN_FAILURE;
	}
}

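/*
 * Synchronization notes for arcade_upcall_refresh() (descriptive only):
 *
 *  - The first thread to need a refresh sets
 *    arcade_upcall_refresh_in_progress and sleeps on &arcade_upcall_port
 *    until arcade_register_new_upcall() wakes it (or the deadline passes).
 *  - Any other thread arriving meanwhile sleeps on
 *    &arcade_upcall_refresh_in_progress instead, and is woken with the
 *    refresher's wait result via thread_wakeup_with_result(), so all
 *    callers agree on whether the refresh succeeded.
 *  - Success is reported only for THREAD_AWAKENED, i.e. a registration
 *    actually arrived before the deadline or an interruption.
 */
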
static kern_return_t
__MAKING_UPCALL_TO_ARCADE_VALIDATION_SERVICE__(mach_port_t port,
    vm_map_copy_t path,
    vm_size_t pathlen,
    off_t offset,
    boolean_t *should_killp)
{
	mach_msg_type_number_t len = (mach_msg_type_number_t)pathlen;
	return arcade_upcall(port, (vm_offset_t)path, len, offset, should_killp);
}

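/*
 * The wrapper above gives the blocking point an unmistakable name in stack
 * traces while a thread waits on the user-space validation service (a
 * common XNU convention; the exact motivation is an assumption). It also
 * isolates the cast: the executable path travels as out-of-line data, so
 * the vm_map_copy_t built in arcade_ast() is handed to the MIG stub as a
 * vm_offset_t.
 */
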
void
arcade_ast(__unused thread_t thread)
{
	ipc_port_t port;
	uint64_t deadline;
	kern_return_t kr;
	int retval;

	/* Determine the deadline */
	clock_interval_to_deadline(10, NSEC_PER_SEC, &deadline);

restart:
	lck_mtx_lock(&arcade_upcall_mutex);
	port = ipc_port_copy_send(arcade_upcall_port);
	/*
	 * If the arcade_upcall_port was inactive, "port" will be IP_DEAD.
	 * Otherwise, it holds a send right to the arcade_upcall_port.
	 */

	while (!IP_VALID(port)) {
		/*
		 * Refresh the arcade upcall port. If that gives up,
		 * give up ourselves.
		 */
		kr = arcade_upcall_refresh(deadline);
		if (kr != KERN_SUCCESS) {
			lck_mtx_unlock(&arcade_upcall_mutex);
			goto fail;
		}
		port = ipc_port_copy_send(arcade_upcall_port);
	}
	lck_mtx_unlock(&arcade_upcall_mutex);

	/* We have an upcall port send right */

	/* Gather the data we need to send in the upcall */
	off_t offset;
	struct proc *p = current_proc();
	char *path;
	vm_map_copy_t copy;

	kr = kmem_alloc(ipc_kernel_map, (vm_offset_t *)&path, MAXPATHLEN, VM_KERN_MEMORY_IPC);
	if (kr != KERN_SUCCESS) {
		ipc_port_release_send(port);
		return;
	}
	bzero(path, MAXPATHLEN);
	retval = proc_pidpathinfo_internal(p, 0, path, MAXPATHLEN, NULL);
	assert(!retval);
	kr = vm_map_unwire(ipc_kernel_map,
	    vm_map_trunc_page((vm_offset_t)path, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    vm_map_round_page((vm_offset_t)path + MAXPATHLEN, VM_MAP_PAGE_MASK(ipc_kernel_map)),
	    FALSE);
	assert(kr == KERN_SUCCESS);
	kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)path, MAXPATHLEN, TRUE, &copy);
	assert(kr == KERN_SUCCESS);

	offset = proc_getexecutableoffset(p);

	/* MAKE THE UPCALL */
	boolean_t should_kill = TRUE;
	kr = __MAKING_UPCALL_TO_ARCADE_VALIDATION_SERVICE__(port, copy, MAXPATHLEN, offset, &should_kill);
	ipc_port_release_send(port);

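	/*
	 * Result handling (see the switch below): MACH_SEND_INVALID_DEST
	 * means the cached upcall port died before the message was sent, so
	 * the out-of-line copy was not consumed and must be discarded before
	 * retrying with a refreshed port. MIG_SERVER_DIED also retries, but
	 * without discarding, presumably because the send already consumed
	 * the copy. On KERN_SUCCESS the service decides via should_kill
	 * whether the task may continue; any other error is treated as a
	 * failure of the validation mechanism and the task is terminated.
	 */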
	switch (kr) {
	case MACH_SEND_INVALID_DEST:
		vm_map_copy_discard(copy);
		OS_FALLTHROUGH;
	case MIG_SERVER_DIED:
		goto restart;
	case KERN_SUCCESS:
		if (should_kill == TRUE) {
			/*
			 * Invalid subscription. UI already presented as to why it did not
			 * launch.
			 */
			task_terminate_internal(current_task());
		}
		break;
	default:
fail:
		/*
		 * Failure of the subscription validation mechanism, not a rejection
		 * for a missing subscription. There will be no indication WHY this
		 * process didn't launch. We might want this to be an exit_with_reason()
		 * in the future.
		 */
		task_terminate_internal(current_task());
		break;
	}
}