]>
Commit | Line | Data |
---|---|---|
5ba3f43e A |
1 | /* |
2 | * Copyright (c) 2017 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | ||
30 | #include <sys/work_interval.h> | |
31 | ||
32 | #include <kern/work_interval.h> | |
33 | ||
34 | #include <kern/thread.h> | |
35 | #include <kern/sched_prim.h> | |
36 | #include <kern/machine.h> | |
37 | #include <kern/thread_group.h> | |
38 | #include <kern/ipc_kobject.h> | |
39 | #include <kern/task.h> | |
40 | ||
41 | #include <mach/kern_return.h> | |
42 | #include <mach/notify.h> | |
43 | ||
44 | #include <stdatomic.h> | |
45 | ||
/*
 * Work Interval structs
 *
 * This struct represents a thread group and/or work interval context
 * in a mechanism that is represented with a kobject.
 *
 * Every thread that has joined a WI has a +1 ref, and the port
 * has a +1 ref as well.
 *
 * TODO: groups need to have a 'is for WI' flag
 * and they need a flag to create that says 'for WI'
 * This would allow CLPC to avoid allocating WI support
 * data unless it is needed
 *
 * TODO: Enforce not having more than one non-group joinable work
 * interval per thread group.
 * CLPC only wants to see one WI-notify callout per group.
 */

struct work_interval {
	uint64_t wi_id;                 /* unique work interval ID; 0 is never valid */
	_Atomic uint32_t wi_ref_count;  /* managed exclusively via wi_retain()/wi_release() */
	uint32_t wi_create_flags;       /* WORK_INTERVAL_FLAG_* captured at creation */

	/* for debugging purposes only, does not hold a ref on port */
	ipc_port_t wi_port;

	/*
	 * holds uniqueid and version of creating process,
	 * used to permission-gate notify
	 * TODO: you'd think there would be a better way to do this
	 */
	uint64_t wi_creator_uniqueid;
	uint32_t wi_creator_pid;
	int wi_creator_pidversion;

};
83 | ||
84 | static inline void | |
85 | wi_retain(struct work_interval *work_interval) | |
86 | { | |
87 | uint32_t old_count; | |
88 | old_count = atomic_fetch_add_explicit(&work_interval->wi_ref_count, | |
89 | 1, memory_order_relaxed); | |
90 | assert(old_count > 0); | |
91 | } | |
92 | ||
93 | static inline void | |
94 | wi_release(struct work_interval *work_interval) | |
95 | { | |
96 | uint32_t old_count; | |
97 | old_count = atomic_fetch_sub_explicit(&work_interval->wi_ref_count, | |
98 | 1, memory_order_relaxed); | |
99 | assert(old_count > 0); | |
100 | ||
101 | if (old_count == 1) { | |
102 | ||
103 | ||
104 | kfree(work_interval, sizeof(struct work_interval)); | |
105 | } | |
106 | } | |
107 | ||
/*
 * work_interval_port_alloc
 *
 * Description: Obtain a send right for the given work interval struct.
 *
 * Parameters: work_interval - A work_interval struct
 *             Consumes a +1 ref count on work_interval, now owned by the port.
 *
 * Returns: Port of type IKOT_WORK_INTERVAL with work_interval set as its kobject.
 *          Returned with a +1 send right and no-senders notification armed.
 *          Work interval struct reference is held by the port.
 */
static ipc_port_t
work_interval_port_alloc(struct work_interval *work_interval)
{
	ipc_port_t work_interval_port = ipc_port_alloc_kernel();

	if (work_interval_port == IP_NULL)
		panic("failed to allocate work interval port");

	/* A work interval may only ever be backed by one port. */
	assert(work_interval->wi_port == IP_NULL);

	ip_lock(work_interval_port);
	ipc_kobject_set_atomically(work_interval_port, (ipc_kobject_t)work_interval,
	                           IKOT_WORK_INTERVAL);

	/*
	 * Arm the no-senders notification; when the last send right dies,
	 * work_interval_port_notify() destroys the port and drops the
	 * work interval reference the port owns.
	 */
	ipc_port_t notify_port = ipc_port_make_sonce_locked(work_interval_port);
	ipc_port_t old_notify_port = IP_NULL;
	ipc_port_nsrequest(work_interval_port, 1, notify_port, &old_notify_port);
	/* port unlocked */

	/* Freshly allocated port: no previous notification request can exist. */
	assert(old_notify_port == IP_NULL);

	/* This is the only make-send that will happen on this port */
	ipc_port_t send_port = ipc_port_make_send(work_interval_port);
	assert(IP_VALID(send_port));

	/* Back-pointer is for debugging only; it does not hold a port ref. */
	work_interval->wi_port = work_interval_port;

	return send_port;
}
149 | ||
150 | /* | |
151 | * work_interval_port_convert | |
152 | * | |
153 | * Called with port locked, returns reference to work interval | |
154 | * if indeed the port is a work interval kobject port | |
155 | */ | |
156 | static struct work_interval * | |
157 | work_interval_port_convert_locked(ipc_port_t port) | |
158 | { | |
159 | struct work_interval *work_interval = NULL; | |
160 | ||
161 | if (!IP_VALID(port)) | |
162 | return NULL; | |
163 | ||
164 | if (!ip_active(port)) | |
165 | return NULL; | |
166 | ||
167 | if (IKOT_WORK_INTERVAL != ip_kotype(port)) | |
168 | return NULL; | |
169 | ||
170 | work_interval = (struct work_interval *)port->ip_kobject; | |
171 | ||
172 | wi_retain(work_interval); | |
173 | ||
174 | return work_interval; | |
175 | } | |
176 | ||
/*
 * port_name_to_work_interval
 *
 * Description: Obtain a reference to the work_interval associated with a given port.
 *
 * Parameters: name A Mach port name to translate.
 *
 * Returns: NULL The given Mach port did not reference a work_interval.
 *          !NULL The work_interval that is associated with the Mach port.
 *
 * On KERN_SUCCESS, *work_interval holds a +1 reference the caller must release.
 */
static kern_return_t
port_name_to_work_interval(mach_port_name_t name,
                           struct work_interval **work_interval)
{
	if (!MACH_PORT_VALID(name))
		return KERN_INVALID_NAME;

	ipc_port_t port = IPC_PORT_NULL;
	kern_return_t kr = KERN_SUCCESS;

	kr = ipc_port_translate_send(current_space(), name, &port);
	if (kr != KERN_SUCCESS)
		return kr;
	/* port is locked */

	assert(IP_VALID(port));

	/* Takes a +1 ref on the work interval on success. */
	struct work_interval *converted_work_interval;

	converted_work_interval = work_interval_port_convert_locked(port);

	/* the port is valid, but doesn't denote a work_interval */
	if (converted_work_interval == NULL)
		kr = KERN_INVALID_CAPABILITY;

	ip_unlock(port);

	/* Only publish the pointer on success; otherwise leave *work_interval untouched. */
	if (kr == KERN_SUCCESS)
		*work_interval = converted_work_interval;

	return kr;

}
220 | ||
221 | ||
/*
 * work_interval_port_notify
 *
 * Description: Handle a no-senders notification for a work interval port.
 *              Destroys the port and releases its reference on the work interval.
 *
 * Parameters: msg A Mach no-senders notification message.
 *
 * Note: This assumes that there is only one create-right-from-work-interval point,
 *       if the ability to extract another send right after creation is added,
 *       this will have to change to handle make-send counts correctly.
 */
void
work_interval_port_notify(mach_msg_header_t *msg)
{
	mach_no_senders_notification_t *notification = (void *)msg;
	ipc_port_t port = notification->not_header.msgh_remote_port;
	struct work_interval *work_interval = NULL;

	/*
	 * All of the following conditions are invariants for a kernel-owned
	 * work interval port receiving a no-senders notification; any
	 * violation indicates kernel state corruption, hence panic.
	 */
	if (!IP_VALID(port))
		panic("work_interval_port_notify(): invalid port");

	ip_lock(port);

	if (!ip_active(port))
		panic("work_interval_port_notify(): inactive port %p", port);

	if (ip_kotype(port) != IKOT_WORK_INTERVAL)
		panic("work_interval_port_notify(): not the right kobject: %p, %d\n",
		      port, ip_kotype(port));

	/* Stale notification check: make-send count must match the notification. */
	if (port->ip_mscount != notification->not_count)
		panic("work_interval_port_notify(): unexpected make-send count: %p, %d, %d",
		      port, port->ip_mscount, notification->not_count);

	if (port->ip_srights != 0)
		panic("work_interval_port_notify(): unexpected send right count: %p, %d",
		      port, port->ip_srights);

	work_interval = (struct work_interval *)port->ip_kobject;

	if (work_interval == NULL)
		panic("work_interval_port_notify(): missing kobject: %p", port);

	/* Detach the kobject before destroying the port. */
	ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE);

	work_interval->wi_port = MACH_PORT_NULL;

	ip_unlock(port);

	ipc_port_dealloc_kernel(port);
	/* Drop the +1 reference the port held on the work interval. */
	wi_release(work_interval);
}
275 | ||
/*
 * Change thread's bound work interval to the passed-in work interval
 * Consumes +1 ref on work_interval
 *
 * May also pass NULL to un-set work_interval on the thread
 *
 * Will deallocate any old work interval on the thread
 */
static void
thread_set_work_interval(thread_t thread,
                         struct work_interval *work_interval)
{
	/* Only the thread itself may change its own binding. */
	assert(thread == current_thread());

	struct work_interval *old_th_wi = thread->th_work_interval;

	/* transfer +1 ref to thread */
	thread->th_work_interval = work_interval;

	/* Drop the reference the thread held on its previous interval, if any. */
	if (old_th_wi != NULL)
		wi_release(old_th_wi);
}
299 | ||
300 | void | |
301 | work_interval_thread_terminate(thread_t thread) | |
302 | { | |
303 | if (thread->th_work_interval != NULL) | |
304 | thread_set_work_interval(thread, NULL); | |
305 | } | |
306 | ||
307 | ||
308 | ||
309 | kern_return_t | |
310 | kern_work_interval_notify(thread_t thread, struct kern_work_interval_args* kwi_args) | |
311 | { | |
312 | assert(thread == current_thread()); | |
313 | assert(kwi_args->work_interval_id != 0); | |
314 | ||
315 | struct work_interval *work_interval = thread->th_work_interval; | |
316 | ||
317 | if (work_interval == NULL || | |
318 | work_interval->wi_id != kwi_args->work_interval_id) { | |
319 | /* This thread must have adopted the work interval to be able to notify */ | |
320 | return (KERN_INVALID_ARGUMENT); | |
321 | } | |
322 | ||
323 | task_t notifying_task = current_task(); | |
324 | ||
325 | if (work_interval->wi_creator_uniqueid != get_task_uniqueid(notifying_task) || | |
326 | work_interval->wi_creator_pidversion != get_task_version(notifying_task)) { | |
327 | /* Only the creating task can do a notify */ | |
328 | return (KERN_INVALID_ARGUMENT); | |
329 | } | |
330 | ||
331 | spl_t s = splsched(); | |
332 | ||
333 | ||
334 | uint64_t urgency_param1, urgency_param2; | |
335 | kwi_args->urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2); | |
336 | ||
337 | splx(s); | |
338 | ||
339 | /* called without interrupts disabled */ | |
340 | machine_work_interval_notify(thread, kwi_args); | |
341 | ||
342 | return (KERN_SUCCESS); | |
343 | } | |
344 | ||
/* Start at 1, 0 is not a valid work interval ID */
static _Atomic uint64_t unique_work_interval_id = 1;
347 | ||
/*
 * kern_work_interval_create
 *
 * Create a new work interval for the current thread.
 *
 * If WORK_INTERVAL_FLAG_JOINABLE is set, the interval's +1 reference is
 * handed to a newly allocated kobject port whose send right is copied out
 * to the caller (wica_port). Otherwise the +1 reference is bound directly
 * to the creating thread and wica_port is MACH_PORT_NULL.
 *
 * Returns: KERN_FAILURE if the thread already has a work interval,
 *          KERN_RESOURCE_SHORTAGE if the port copyout fails,
 *          KERN_SUCCESS otherwise (wica_id holds the new interval's ID).
 */
kern_return_t
kern_work_interval_create(thread_t thread,
                          struct kern_work_interval_create_args *create_params)
{
	assert(thread == current_thread());

	if (thread->th_work_interval != NULL) {
		/* already assigned a work interval */
		return KERN_FAILURE;
	}

	struct work_interval *work_interval = kalloc(sizeof(*work_interval));

	if (work_interval == NULL)
		panic("failed to allocate work_interval");

	bzero(work_interval, sizeof(*work_interval));

	/* fetch_add returns the previous value; the new unique ID is prev + 1 */
	uint64_t old_value = atomic_fetch_add_explicit(&unique_work_interval_id, 1,
	                                               memory_order_relaxed);

	uint64_t work_interval_id = old_value + 1;

	uint32_t create_flags = create_params->wica_create_flags;

	task_t creating_task = current_task();

	/* Record creator identity; used to permission-gate notify. */
	*work_interval = (struct work_interval) {
		.wi_id                 = work_interval_id,
		.wi_ref_count          = 1,
		.wi_create_flags       = create_flags,
		.wi_creator_pid        = pid_from_task(creating_task),
		.wi_creator_uniqueid   = get_task_uniqueid(creating_task),
		.wi_creator_pidversion = get_task_version(creating_task),
	};

	if (create_flags & WORK_INTERVAL_FLAG_JOINABLE) {
		/* work_interval has a +1 ref, moves to the port */
		ipc_port_t port = work_interval_port_alloc(work_interval);
		mach_port_name_t name = MACH_PORT_NULL;

		name = ipc_port_copyout_send(port, current_space());

		if (!MACH_PORT_VALID(name)) {
			/*
			 * copyout failed (port is already deallocated)
			 * Because of the port-destroyed magic,
			 * the work interval is already deallocated too.
			 */
			return KERN_RESOURCE_SHORTAGE;
		}

		create_params->wica_port = name;
	} else {
		/* work_interval has a +1 ref, moves to the thread */
		thread_set_work_interval(thread, work_interval);
		create_params->wica_port = MACH_PORT_NULL;
	}

	create_params->wica_id = work_interval_id;

	return KERN_SUCCESS;
}
412 | ||
413 | kern_return_t | |
414 | kern_work_interval_destroy(thread_t thread, | |
415 | uint64_t work_interval_id) | |
416 | { | |
417 | if (work_interval_id == 0) | |
418 | return KERN_INVALID_ARGUMENT; | |
419 | ||
420 | if (thread->th_work_interval == NULL || | |
421 | thread->th_work_interval->wi_id != work_interval_id) { | |
422 | /* work ID isn't valid or doesn't match joined work interval ID */ | |
423 | return (KERN_INVALID_ARGUMENT); | |
424 | } | |
425 | ||
426 | thread_set_work_interval(thread, NULL); | |
427 | ||
428 | return KERN_SUCCESS; | |
429 | } | |
430 | ||
431 | kern_return_t | |
432 | kern_work_interval_join(thread_t thread, | |
433 | mach_port_name_t port_name) | |
434 | { | |
435 | struct work_interval *work_interval = NULL; | |
436 | kern_return_t kr; | |
437 | ||
438 | if (port_name == MACH_PORT_NULL) { | |
439 | /* 'Un-join' the current work interval */ | |
440 | thread_set_work_interval(thread, NULL); | |
441 | return KERN_SUCCESS; | |
442 | } | |
443 | ||
444 | kr = port_name_to_work_interval(port_name, &work_interval); | |
445 | if (kr != KERN_SUCCESS) | |
446 | return kr; | |
447 | /* work_interval has a +1 ref */ | |
448 | ||
449 | assert(work_interval != NULL); | |
450 | ||
451 | thread_set_work_interval(thread, work_interval); | |
452 | ||
453 | /* ref was consumed by passing it to the thread */ | |
454 | ||
455 | return KERN_SUCCESS; | |
456 | } | |
457 | ||
458 | ||
459 |