]> git.saurik.com Git - apple/xnu.git/blame - osfmk/kern/work_interval.c
xnu-6153.61.1.tar.gz
[apple/xnu.git] / osfmk / kern / work_interval.c
CommitLineData
5ba3f43e
A
1/*
2 * Copyright (c) 2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29
30#include <sys/work_interval.h>
31
32#include <kern/work_interval.h>
33
34#include <kern/thread.h>
35#include <kern/sched_prim.h>
36#include <kern/machine.h>
37#include <kern/thread_group.h>
38#include <kern/ipc_kobject.h>
39#include <kern/task.h>
a39ff7e2
A
40#include <kern/coalition.h>
41#include <kern/policy_internal.h>
5ba3f43e
A
42
43#include <mach/kern_return.h>
44#include <mach/notify.h>
45
46#include <stdatomic.h>
47
48/*
49 * Work Interval structs
50 *
51 * This struct represents a thread group and/or work interval context
52 * in a mechanism that is represented with a kobject.
53 *
54 * Every thread that has joined a WI has a +1 ref, and the port
55 * has a +1 ref as well.
56 *
57 * TODO: groups need to have a 'is for WI' flag
58 * and they need a flag to create that says 'for WI'
59 * This would allow CLPC to avoid allocating WI support
60 * data unless it is needed
61 *
62 * TODO: Enforce not having more than one non-group joinable work
63 * interval per thread group.
64 * CLPC only wants to see one WI-notify callout per group.
65 */
66
struct work_interval {
	/* Unique, non-zero interval ID assigned at creation. */
	uint64_t wi_id;
	/* Refcount: every joined thread holds +1, and the kobject port holds +1. */
	_Atomic uint32_t wi_ref_count;
	/* WORK_INTERVAL_TYPE_* / WORK_INTERVAL_FLAG_* bits captured at create time. */
	uint32_t wi_create_flags;

	/* for debugging purposes only, does not hold a ref on port */
	ipc_port_t wi_port;

	/*
	 * holds uniqueid and version of creating process,
	 * used to permission-gate notify
	 * TODO: you'd think there would be a better way to do this
	 */
	uint64_t wi_creator_uniqueid;
	uint32_t wi_creator_pid;
	int wi_creator_pidversion;

};
85
86static inline void
87wi_retain(struct work_interval *work_interval)
88{
89 uint32_t old_count;
90 old_count = atomic_fetch_add_explicit(&work_interval->wi_ref_count,
0a7de745 91 1, memory_order_relaxed);
5ba3f43e
A
92 assert(old_count > 0);
93}
94
95static inline void
96wi_release(struct work_interval *work_interval)
97{
98 uint32_t old_count;
99 old_count = atomic_fetch_sub_explicit(&work_interval->wi_ref_count,
0a7de745 100 1, memory_order_relaxed);
5ba3f43e
A
101 assert(old_count > 0);
102
103 if (old_count == 1) {
104
5ba3f43e
A
105 kfree(work_interval, sizeof(struct work_interval));
106 }
107}
108
5ba3f43e
A
109/*
110 * work_interval_port_convert
111 *
112 * Called with port locked, returns reference to work interval
113 * if indeed the port is a work interval kobject port
114 */
115static struct work_interval *
116work_interval_port_convert_locked(ipc_port_t port)
117{
118 struct work_interval *work_interval = NULL;
119
0a7de745 120 if (!IP_VALID(port)) {
5ba3f43e 121 return NULL;
0a7de745 122 }
5ba3f43e 123
0a7de745 124 if (!ip_active(port)) {
5ba3f43e 125 return NULL;
0a7de745 126 }
5ba3f43e 127
0a7de745 128 if (IKOT_WORK_INTERVAL != ip_kotype(port)) {
5ba3f43e 129 return NULL;
0a7de745 130 }
5ba3f43e
A
131
132 work_interval = (struct work_interval *)port->ip_kobject;
133
134 wi_retain(work_interval);
135
136 return work_interval;
137}
138
139/*
140 * port_name_to_work_interval
141 *
142 * Description: Obtain a reference to the work_interval associated with a given port.
143 *
144 * Parameters: name A Mach port name to translate.
145 *
146 * Returns: NULL The given Mach port did not reference a work_interval.
147 * !NULL The work_interval that is associated with the Mach port.
148 */
149static kern_return_t
150port_name_to_work_interval(mach_port_name_t name,
0a7de745 151 struct work_interval **work_interval)
5ba3f43e 152{
0a7de745 153 if (!MACH_PORT_VALID(name)) {
5ba3f43e 154 return KERN_INVALID_NAME;
0a7de745 155 }
5ba3f43e
A
156
157 ipc_port_t port = IPC_PORT_NULL;
158 kern_return_t kr = KERN_SUCCESS;
159
160 kr = ipc_port_translate_send(current_space(), name, &port);
0a7de745 161 if (kr != KERN_SUCCESS) {
5ba3f43e 162 return kr;
0a7de745 163 }
5ba3f43e
A
164 /* port is locked */
165
166 assert(IP_VALID(port));
167
168 struct work_interval *converted_work_interval;
169
170 converted_work_interval = work_interval_port_convert_locked(port);
171
172 /* the port is valid, but doesn't denote a work_interval */
0a7de745 173 if (converted_work_interval == NULL) {
5ba3f43e 174 kr = KERN_INVALID_CAPABILITY;
0a7de745 175 }
5ba3f43e
A
176
177 ip_unlock(port);
178
0a7de745 179 if (kr == KERN_SUCCESS) {
5ba3f43e 180 *work_interval = converted_work_interval;
0a7de745 181 }
5ba3f43e
A
182
183 return kr;
5ba3f43e
A
184}
185
186
187/*
188 * work_interval_port_notify
189 *
190 * Description: Handle a no-senders notification for a work interval port.
191 * Destroys the port and releases its reference on the work interval.
192 *
193 * Parameters: msg A Mach no-senders notification message.
194 *
195 * Note: This assumes that there is only one create-right-from-work-interval point,
196 * if the ability to extract another send right after creation is added,
197 * this will have to change to handle make-send counts correctly.
198 */
199void
200work_interval_port_notify(mach_msg_header_t *msg)
201{
202 mach_no_senders_notification_t *notification = (void *)msg;
203 ipc_port_t port = notification->not_header.msgh_remote_port;
204 struct work_interval *work_interval = NULL;
205
0a7de745 206 if (!IP_VALID(port)) {
5ba3f43e 207 panic("work_interval_port_notify(): invalid port");
0a7de745 208 }
5ba3f43e
A
209
210 ip_lock(port);
211
0a7de745 212 if (!ip_active(port)) {
5ba3f43e 213 panic("work_interval_port_notify(): inactive port %p", port);
0a7de745 214 }
5ba3f43e 215
0a7de745 216 if (ip_kotype(port) != IKOT_WORK_INTERVAL) {
5ba3f43e 217 panic("work_interval_port_notify(): not the right kobject: %p, %d\n",
0a7de745
A
218 port, ip_kotype(port));
219 }
5ba3f43e 220
0a7de745 221 if (port->ip_mscount != notification->not_count) {
5ba3f43e 222 panic("work_interval_port_notify(): unexpected make-send count: %p, %d, %d",
0a7de745
A
223 port, port->ip_mscount, notification->not_count);
224 }
5ba3f43e 225
0a7de745 226 if (port->ip_srights != 0) {
5ba3f43e 227 panic("work_interval_port_notify(): unexpected send right count: %p, %d",
0a7de745
A
228 port, port->ip_srights);
229 }
5ba3f43e
A
230
231 work_interval = (struct work_interval *)port->ip_kobject;
232
0a7de745 233 if (work_interval == NULL) {
5ba3f43e 234 panic("work_interval_port_notify(): missing kobject: %p", port);
0a7de745 235 }
5ba3f43e
A
236
237 ipc_kobject_set_atomically(port, IKO_NULL, IKOT_NONE);
238
239 work_interval->wi_port = MACH_PORT_NULL;
240
241 ip_unlock(port);
242
243 ipc_port_dealloc_kernel(port);
244 wi_release(work_interval);
245}
246
247/*
248 * Change thread's bound work interval to the passed-in work interval
249 * Consumes +1 ref on work_interval
250 *
251 * May also pass NULL to un-set work_interval on the thread
252 *
253 * Will deallocate any old work interval on the thread
254 */
255static void
256thread_set_work_interval(thread_t thread,
0a7de745 257 struct work_interval *work_interval)
5ba3f43e
A
258{
259 assert(thread == current_thread());
260
261 struct work_interval *old_th_wi = thread->th_work_interval;
262
263 /* transfer +1 ref to thread */
264 thread->th_work_interval = work_interval;
265
266
0a7de745 267 if (old_th_wi != NULL) {
5ba3f43e 268 wi_release(old_th_wi);
0a7de745 269 }
5ba3f43e
A
270}
271
272void
273work_interval_thread_terminate(thread_t thread)
274{
0a7de745 275 if (thread->th_work_interval != NULL) {
5ba3f43e 276 thread_set_work_interval(thread, NULL);
0a7de745 277 }
5ba3f43e
A
278}
279
280
281
282kern_return_t
283kern_work_interval_notify(thread_t thread, struct kern_work_interval_args* kwi_args)
284{
285 assert(thread == current_thread());
286 assert(kwi_args->work_interval_id != 0);
287
288 struct work_interval *work_interval = thread->th_work_interval;
289
290 if (work_interval == NULL ||
291 work_interval->wi_id != kwi_args->work_interval_id) {
292 /* This thread must have adopted the work interval to be able to notify */
0a7de745 293 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
294 }
295
296 task_t notifying_task = current_task();
297
0a7de745 298 if (work_interval->wi_creator_uniqueid != get_task_uniqueid(notifying_task) ||
5ba3f43e
A
299 work_interval->wi_creator_pidversion != get_task_version(notifying_task)) {
300 /* Only the creating task can do a notify */
0a7de745 301 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
302 }
303
304 spl_t s = splsched();
305
306
307 uint64_t urgency_param1, urgency_param2;
308 kwi_args->urgency = thread_get_urgency(thread, &urgency_param1, &urgency_param2);
309
310 splx(s);
311
312 /* called without interrupts disabled */
313 machine_work_interval_notify(thread, kwi_args);
314
0a7de745 315 return KERN_SUCCESS;
5ba3f43e
A
316}
317
/* Start at 1, 0 is not a valid work interval ID */
/* Advanced with a relaxed atomic fetch-add in kern_work_interval_create. */
static _Atomic uint64_t unique_work_interval_id = 1;
320
321kern_return_t
322kern_work_interval_create(thread_t thread,
0a7de745 323 struct kern_work_interval_create_args *create_params)
5ba3f43e
A
324{
325 assert(thread == current_thread());
326
327 if (thread->th_work_interval != NULL) {
328 /* already assigned a work interval */
0a7de745 329 return KERN_FAILURE;
5ba3f43e
A
330 }
331
332 struct work_interval *work_interval = kalloc(sizeof(*work_interval));
333
0a7de745 334 if (work_interval == NULL) {
5ba3f43e 335 panic("failed to allocate work_interval");
0a7de745 336 }
5ba3f43e
A
337
338 bzero(work_interval, sizeof(*work_interval));
339
340 uint64_t old_value = atomic_fetch_add_explicit(&unique_work_interval_id, 1,
0a7de745 341 memory_order_relaxed);
5ba3f43e
A
342
343 uint64_t work_interval_id = old_value + 1;
344
345 uint32_t create_flags = create_params->wica_create_flags;
346
347 task_t creating_task = current_task();
a39ff7e2
A
348 if ((create_flags & WORK_INTERVAL_TYPE_MASK) == WORK_INTERVAL_TYPE_CA_CLIENT) {
349 /*
cb323159
A
350 * CA_CLIENT work intervals do not create new thread groups.
351 * There can only be one CA_CLIENT work interval (created by UIKit or AppKit)
a39ff7e2
A
352 * per each application task
353 */
cb323159 354 if (create_flags & WORK_INTERVAL_FLAG_GROUP) {
0a7de745
A
355 return KERN_FAILURE;
356 }
357 if (!task_is_app(creating_task)) {
358 return KERN_NOT_SUPPORTED;
359 }
360 if (task_set_ca_client_wi(creating_task, true) == false) {
361 return KERN_FAILURE;
362 }
a39ff7e2 363 }
5ba3f43e
A
364
365 *work_interval = (struct work_interval) {
366 .wi_id = work_interval_id,
367 .wi_ref_count = 1,
368 .wi_create_flags = create_flags,
369 .wi_creator_pid = pid_from_task(creating_task),
370 .wi_creator_uniqueid = get_task_uniqueid(creating_task),
371 .wi_creator_pidversion = get_task_version(creating_task),
372 };
373
374
375 if (create_flags & WORK_INTERVAL_FLAG_JOINABLE) {
5ba3f43e
A
376 mach_port_name_t name = MACH_PORT_NULL;
377
cb323159
A
378 /* work_interval has a +1 ref, moves to the port */
379 work_interval->wi_port = ipc_kobject_alloc_port(
380 (ipc_kobject_t)work_interval, IKOT_WORK_INTERVAL,
381 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
382
383 name = ipc_port_copyout_send(work_interval->wi_port, current_space());
5ba3f43e
A
384
385 if (!MACH_PORT_VALID(name)) {
386 /*
387 * copyout failed (port is already deallocated)
388 * Because of the port-destroyed magic,
389 * the work interval is already deallocated too.
390 */
391 return KERN_RESOURCE_SHORTAGE;
392 }
393
394 create_params->wica_port = name;
395 } else {
396 /* work_interval has a +1 ref, moves to the thread */
397 thread_set_work_interval(thread, work_interval);
398 create_params->wica_port = MACH_PORT_NULL;
399 }
400
401 create_params->wica_id = work_interval_id;
5ba3f43e
A
402 return KERN_SUCCESS;
403}
404
a39ff7e2 405
5ba3f43e 406kern_return_t
a39ff7e2 407kern_work_interval_destroy(thread_t thread, uint64_t work_interval_id)
5ba3f43e 408{
0a7de745 409 if (work_interval_id == 0) {
5ba3f43e 410 return KERN_INVALID_ARGUMENT;
0a7de745 411 }
5ba3f43e
A
412
413 if (thread->th_work_interval == NULL ||
414 thread->th_work_interval->wi_id != work_interval_id) {
415 /* work ID isn't valid or doesn't match joined work interval ID */
0a7de745 416 return KERN_INVALID_ARGUMENT;
5ba3f43e
A
417 }
418
419 thread_set_work_interval(thread, NULL);
420
421 return KERN_SUCCESS;
422}
423
424kern_return_t
425kern_work_interval_join(thread_t thread,
0a7de745 426 mach_port_name_t port_name)
5ba3f43e
A
427{
428 struct work_interval *work_interval = NULL;
429 kern_return_t kr;
430
431 if (port_name == MACH_PORT_NULL) {
432 /* 'Un-join' the current work interval */
433 thread_set_work_interval(thread, NULL);
434 return KERN_SUCCESS;
435 }
436
437 kr = port_name_to_work_interval(port_name, &work_interval);
0a7de745 438 if (kr != KERN_SUCCESS) {
5ba3f43e 439 return kr;
0a7de745 440 }
5ba3f43e
A
441 /* work_interval has a +1 ref */
442
443 assert(work_interval != NULL);
444
445 thread_set_work_interval(thread, work_interval);
446
447 /* ref was consumed by passing it to the thread */
448
449 return KERN_SUCCESS;
450}