/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 | ||
29 | #include <mach/mach_types.h> | |
30 | #include <kern/kern_types.h> | |
31 | #include <mach/notify.h> | |
32 | #include <mach/resource_monitors.h> | |
33 | ||
34 | #include <mach/host_special_ports.h> | |
35 | #include <mach/mach_host_server.h> | |
36 | #include <mach/host_priv_server.h> | |
37 | #include <mach/fairplayd_notification.h> | |
38 | #include <mach/arcade_upcall.h> | |
39 | ||
40 | #include <kern/kern_types.h> | |
41 | #include <kern/assert.h> | |
42 | #include <kern/kalloc.h> | |
43 | #include <kern/host.h> | |
44 | #include <kern/ast.h> | |
45 | #include <kern/task.h> | |
46 | ||
47 | #include <kern/arcade.h> | |
48 | #include <mach/arcade_register_server.h> | |
49 | ||
50 | #include <IOKit/IOBSD.h> | |
51 | ||
52 | #if !defined(MAXPATHLEN) | |
53 | #define MAXPATHLEN 4096 | |
54 | #endif | |
55 | ||
56 | extern struct proc *current_proc(void); | |
57 | extern int proc_pidpathinfo_internal(struct proc *p, uint64_t arg, | |
58 | char *buffer, uint32_t buffersize, | |
59 | int32_t *retval); | |
60 | extern off_t proc_getexecutableoffset(struct proc *p); | |
61 | ||
/*
 * Simple structure to represent a handle for the Arcade registration.
 *
 * This registration is done with an independent kobject callback, rather
 * than a reply, so that we execute it in the context of the user-space
 * server replying (in order to do an entitlement check on the reply).
 *
 * We cache the resulting upcall port until it fails, and then we go
 * get another one.
 */
struct arcade_register {
    ipc_port_t ar_port;
};
typedef struct arcade_register *arcade_register_t;

static struct arcade_register arcade_register_global;

void
arcade_prepare(task_t task, thread_t thread)
{
    /* Platform binaries are exempt */
    if (task->t_flags & TF_PLATFORM) {
        return;
    }

    /* Check to see if the task has the arcade entitlement */
    if (!IOTaskHasEntitlement(task, "com.apple.developer.arcade-operations")) {
        return;
    }

    /* Others will stop in the AST to make an upcall */
    thread_ast_set(thread, AST_ARCADE);
}
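/*
 * Note (added commentary, not in the upstream file): arcade_prepare() only
 * marks the thread; the actual validation happens later in arcade_ast(),
 * below, when the pending AST is consumed on the way back to user space.
 * A minimal sketch of the expected dispatch site, assuming the usual XNU
 * AST-handling pattern (the real dispatch lives in the AST machinery, not
 * in this file):
 *
 *     if (reasons & AST_ARCADE) {
 *         thread_ast_clear(thread, AST_ARCADE);
 *         arcade_ast(thread);
 *     }
 */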
95 | ||
96 | static lck_grp_attr_t *arcade_upcall_lck_grp_attr; | |
97 | static lck_grp_t *arcade_upcall_lck_grp; | |
98 | static lck_mtx_t arcade_upcall_mutex; | |
99 | ||
100 | static ipc_port_t arcade_upcall_port = IP_NULL; | |
101 | static boolean_t arcade_upcall_refresh_in_progress = FALSE; | |
102 | static boolean_t arcade_upcall_refresh_waiters = FALSE; | |
103 | ||
104 | void | |
105 | arcade_init(void) | |
106 | { | |
107 | ipc_port_t port; | |
108 | ||
109 | arcade_upcall_lck_grp_attr = lck_grp_attr_alloc_init(); | |
110 | arcade_upcall_lck_grp = lck_grp_alloc_init("arcade_upcall", arcade_upcall_lck_grp_attr); | |
111 | lck_mtx_init(&arcade_upcall_mutex, arcade_upcall_lck_grp, NULL); | |
112 | ||
113 | /* Initialize the global arcade_register kobject and associated port */ | |
114 | port = ipc_kobject_alloc_port((ipc_kobject_t)&arcade_register_global, | |
115 | IKOT_ARCADE_REG, IPC_KOBJECT_ALLOC_MAKE_SEND); | |
116 | arcade_register_global.ar_port = port; | |
117 | } | |
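/*
 * Note (added commentary, not in the upstream file): the send right made
 * here by IPC_KOBJECT_ALLOC_MAKE_SEND is the one cached in ar_port and
 * later handed to fairplayd via convert_arcade_register_to_port() from
 * arcade_upcall_refresh(). arcade_init() is presumably called once during
 * kernel bootstrap, before any thread can take the AST_ARCADE AST.
 */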
118 | ||
119 | arcade_register_t | |
120 | convert_port_to_arcade_register( | |
121 | ipc_port_t port) | |
122 | { | |
123 | arcade_register_t arcade_reg = ARCADE_REG_NULL; | |
124 | ||
125 | if (IP_VALID(port)) { | |
126 | /* No need to lock port because of how refs managed */ | |
127 | if (ip_kotype(port) == IKOT_ARCADE_REG) { | |
128 | assert(ip_active(port)); | |
ea3f0419 | 129 | arcade_reg = (arcade_register_t)ip_get_kobject(port); |
cb323159 A |
130 | assert(arcade_reg == &arcade_register_global); |
131 | assert(arcade_reg->ar_port == port); | |
132 | } | |
133 | } | |
134 | return arcade_reg; | |
135 | } | |
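/*
 * Note (added commentary, not in the upstream file): the lock-free check
 * above is safe because the only IKOT_ARCADE_REG kobject in the system is
 * the statically allocated arcade_register_global, which is never freed,
 * so the kobject pointer read from the port can never go stale.
 */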
136 | ||
137 | ipc_port_t | |
138 | convert_arcade_register_to_port( | |
139 | arcade_register_t arcade_reg) | |
140 | { | |
141 | ipc_port_t port = IP_NULL; | |
142 | ||
143 | if (arcade_reg == &arcade_register_global) { | |
144 | port = arcade_reg->ar_port; | |
145 | } | |
146 | return port; | |
147 | } | |
148 | ||
149 | kern_return_t | |
150 | arcade_register_new_upcall( | |
151 | arcade_register_t arcade_reg, | |
152 | mach_port_t port) | |
153 | { | |
154 | if (arcade_reg == ARCADE_REG_NULL) { | |
155 | return KERN_INVALID_ARGUMENT; | |
156 | } | |
157 | assert(arcade_reg == &arcade_register_global); | |
158 | ||
159 | /* Check to see if this is the real arcade subscription service */ | |
160 | if (!IOTaskHasEntitlement(current_task(), "com.apple.arcade.fpsd")) { | |
161 | return KERN_INVALID_VALUE; | |
162 | } | |
163 | ||
164 | lck_mtx_lock(&arcade_upcall_mutex); | |
165 | ||
166 | if (arcade_upcall_refresh_in_progress) { | |
167 | /* If we have an old arcade upcall port, discard it */ | |
168 | if (IP_VALID(arcade_upcall_port)) { | |
169 | ipc_port_release_send(arcade_upcall_port); | |
170 | arcade_upcall_port = IP_NULL; | |
171 | } | |
172 | arcade_upcall_port = port; /* owns send right */ | |
173 | ||
174 | /* Wake up anyone waiting for the update */ | |
175 | lck_mtx_unlock(&arcade_upcall_mutex); | |
176 | thread_wakeup(&arcade_upcall_port); | |
177 | return KERN_SUCCESS; | |
178 | } | |
179 | ||
180 | lck_mtx_unlock(&arcade_upcall_mutex); | |
181 | return KERN_FAILURE; | |
182 | } | |
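/*
 * Note (added commentary, not in the upstream file): this is the MIG server
 * routine for the arcade_register subsystem; the user-space service
 * (fairplayd, per the entitlement check above) invokes it with a send right
 * to a fresh upcall port after receiving the request sent from
 * arcade_upcall_refresh() below. A rough sketch of the handshake:
 *
 *     kernel                                fairplayd
 *     ------                                ---------
 *     arcade_upcall_refresh()
 *       fairplayd_arcade_request(fairplayd_port, arcade_register port)
 *       lck_mtx_sleep_deadline(&arcade_upcall_port, ...)
 *                                           arcade_register_new_upcall(reg,
 *                                               new upcall port)
 *       ... wakes on &arcade_upcall_port with the new send right cached.
 */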
183 | ||
184 | ||
185 | static kern_return_t | |
186 | arcade_upcall_refresh(uint64_t deadline) | |
187 | { | |
188 | ipc_port_t fairplayd_port = IP_NULL; | |
189 | wait_result_t wr = THREAD_NOT_WAITING; | |
190 | kern_return_t kr; | |
191 | ||
192 | LCK_MTX_ASSERT(&arcade_upcall_mutex, LCK_MTX_ASSERT_OWNED); | |
193 | ||
194 | /* If someone else is doing the update, wait for them */ | |
195 | if (arcade_upcall_refresh_in_progress) { | |
196 | arcade_upcall_refresh_waiters = TRUE; | |
197 | wr = lck_mtx_sleep(&arcade_upcall_mutex, LCK_SLEEP_DEFAULT, | |
198 | &arcade_upcall_refresh_in_progress, THREAD_INTERRUPTIBLE); | |
199 | goto out; | |
200 | } | |
201 | ||
202 | arcade_upcall_refresh_in_progress = TRUE; | |
203 | ||
204 | /* If we have an old arcade upcall port, discard it */ | |
205 | if (IP_VALID(arcade_upcall_port)) { | |
206 | ipc_port_release_send(arcade_upcall_port); | |
207 | arcade_upcall_port = IP_NULL; | |
208 | } | |
209 | ||
210 | #if 0 | |
211 | if (host_get_fairplayd_port(host_priv_self(), &fairplayd_port) != KERN_SUCCESS) { | |
212 | panic("arcade_upcall_refresh(get fairplayd)"); | |
213 | } | |
214 | #else | |
215 | /* Temporary hack because launchd is rejecting the other special port number */ | |
216 | if (host_get_unfreed_port(host_priv_self(), &fairplayd_port) != KERN_SUCCESS) { | |
217 | panic("arcade_upcall_refresh(get fairplayd)"); | |
218 | } | |
219 | #endif | |
220 | ||
221 | /* If no valid fairplayd port registered, we're done */ | |
222 | if (!IP_VALID(fairplayd_port)) { | |
223 | goto finish_in_progress; | |
224 | } | |
225 | ||
226 | /* | |
227 | * Send a fairplayd notification to request a new arcade upcall port. | |
228 | * Pass along a send right to the arcade_register kobject to complete | |
229 | * the registration. | |
230 | */ | |
231 | ipc_port_t port = convert_arcade_register_to_port(&arcade_register_global); | |
232 | kr = fairplayd_arcade_request(fairplayd_port, port); | |
233 | ||
234 | ipc_port_release_send(fairplayd_port); | |
235 | ||
236 | switch (kr) { | |
237 | case MACH_MSG_SUCCESS: | |
238 | break; | |
239 | default: | |
240 | goto finish_in_progress; | |
241 | } | |
242 | ||
243 | /* | |
244 | * Wait on the arcade upcall port to get registered through the | |
245 | * registration kobject waiting with a deadline here. | |
246 | */ | |
247 | wr = lck_mtx_sleep_deadline(&arcade_upcall_mutex, LCK_SLEEP_DEFAULT, | |
248 | &arcade_upcall_port, THREAD_INTERRUPTIBLE, deadline); | |
249 | ||
250 | finish_in_progress: | |
251 | arcade_upcall_refresh_in_progress = FALSE; | |
252 | ||
253 | /* Wakeup any waiters */ | |
254 | if (arcade_upcall_refresh_waiters) { | |
255 | arcade_upcall_refresh_waiters = FALSE; | |
256 | thread_wakeup_with_result(&arcade_upcall_refresh_in_progress, wr); | |
257 | } | |
258 | ||
259 | out: | |
260 | switch (wr) { | |
261 | case THREAD_AWAKENED: | |
262 | return KERN_SUCCESS; | |
263 | default: | |
264 | return KERN_FAILURE; | |
265 | } | |
266 | } | |
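/*
 * Note (added commentary, not in the upstream file): only one thread runs
 * the refresh at a time. Latecomers sleep on
 * &arcade_upcall_refresh_in_progress and inherit the refresher's wait
 * result through thread_wakeup_with_result(), so a waiter returns
 * KERN_SUCCESS only when the refresher itself saw THREAD_AWAKENED.
 */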
267 | ||
268 | static kern_return_t | |
269 | __MAKING_UPCALL_TO_ARCADE_VALIDATION_SERVICE__(mach_port_t port, | |
270 | vm_map_copy_t path, | |
271 | vm_size_t pathlen, | |
272 | off_t offset, | |
273 | boolean_t *should_killp) | |
274 | { | |
275 | mach_msg_type_number_t len = (mach_msg_type_number_t)pathlen; | |
276 | return arcade_upcall(port, (vm_offset_t)path, len, offset, should_killp); | |
277 | } | |
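/*
 * Note (added commentary, not in the upstream file): the shouting wrapper
 * name appears to follow the XNU convention of giving user-space upcall
 * sites unmistakable frame names, so a backtrace of a thread blocked here
 * makes it obvious it is waiting on the arcade validation service.
 */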
278 | ||
279 | void | |
280 | arcade_ast(__unused thread_t thread) | |
281 | { | |
282 | ipc_port_t port; | |
283 | uint64_t deadline; | |
284 | kern_return_t kr; | |
285 | int retval; | |
286 | ||
287 | /* Determine the deadline */ | |
288 | clock_interval_to_deadline(10, NSEC_PER_SEC, &deadline); | |
289 | ||
290 | restart: | |
291 | lck_mtx_lock(&arcade_upcall_mutex); | |
292 | port = ipc_port_copy_send(arcade_upcall_port); | |
293 | /* | |
294 | * if the arcade_upcall_port was inactive, "port" will be IP_DEAD. | |
295 | * Otherwise, it holds a send right to the arcade_upcall_port. | |
296 | */ | |
297 | ||
298 | while (!IP_VALID(port)) { | |
299 | /* | |
300 | * Refresh the arcade upcall port. If that gives up, | |
301 | * give up ourselves. | |
302 | */ | |
303 | kr = arcade_upcall_refresh(deadline); | |
304 | if (kr != KERN_SUCCESS) { | |
305 | lck_mtx_unlock(&arcade_upcall_mutex); | |
306 | goto fail; | |
307 | } | |
308 | port = ipc_port_copy_send(arcade_upcall_port); | |
309 | } | |
310 | lck_mtx_unlock(&arcade_upcall_mutex); | |
311 | ||
312 | /* We have an upcall port send right */ | |
313 | ||
314 | /* Gather the data we need to send in the upcall */ | |
315 | off_t offset; | |
316 | struct proc *p = current_proc(); | |
317 | char *path; | |
318 | vm_map_copy_t copy; | |
319 | ||
320 | kr = kmem_alloc(ipc_kernel_map, (vm_offset_t *)&path, MAXPATHLEN, VM_KERN_MEMORY_IPC); | |
321 | if (kr != KERN_SUCCESS) { | |
322 | ipc_port_release_send(port); | |
323 | return; | |
324 | } | |
325 | bzero(path, MAXPATHLEN); | |
326 | retval = proc_pidpathinfo_internal(p, 0, path, MAXPATHLEN, NULL); | |
327 | assert(!retval); | |
328 | kr = vm_map_unwire(ipc_kernel_map, | |
329 | vm_map_trunc_page((vm_offset_t)path, VM_MAP_PAGE_MASK(ipc_kernel_map)), | |
330 | vm_map_round_page((vm_offset_t)path + MAXPATHLEN, VM_MAP_PAGE_MASK(ipc_kernel_map)), | |
331 | FALSE); | |
332 | assert(kr == KERN_SUCCESS); | |
333 | kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)path, MAXPATHLEN, TRUE, ©); | |
334 | assert(kr == KERN_SUCCESS); | |
335 | ||
336 | offset = proc_getexecutableoffset(p); | |
337 | ||
338 | /* MAKE THE UPCALL */ | |
339 | boolean_t should_kill = TRUE; | |
340 | kr = __MAKING_UPCALL_TO_ARCADE_VALIDATION_SERVICE__(port, copy, MAXPATHLEN, offset, &should_kill); | |
341 | ipc_port_release_send(port); | |
342 | ||
343 | switch (kr) { | |
344 | case MACH_SEND_INVALID_DEST: | |
345 | vm_map_copy_discard(copy); | |
346 | /* fall thru */ | |
347 | case MIG_SERVER_DIED: | |
348 | goto restart; | |
349 | case KERN_SUCCESS: | |
350 | if (should_kill == TRUE) { | |
351 | /* | |
352 | * Invalid subscription. UI already presented as to why it did not | |
353 | * launch. | |
354 | */ | |
355 | task_terminate_internal(current_task()); | |
356 | } | |
357 | break; | |
358 | default: | |
359 | fail: | |
360 | /* | |
361 | * Failure of the subscription validation mechanism, not a rejection. | |
362 | * for a missing subscription. There will be no indication WHY this | |
363 | * process didn't launch. We might want this to be an exit_with_reason() | |
364 | * in the future. | |
365 | */ | |
366 | task_terminate_internal(current_task()); | |
367 | break; | |
368 | } | |
369 | } |