/*
 * Copyright (c) 2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
30 #include <sys/work_interval.h>
32 #include <kern/work_interval.h>
34 #include <kern/thread.h>
35 #include <kern/sched_prim.h>
36 #include <kern/machine.h>
37 #include <kern/thread_group.h>
38 #include <kern/ipc_kobject.h>
39 #include <kern/task.h>
40 #include <kern/coalition.h>
41 #include <kern/policy_internal.h>
43 #include <mach/kern_return.h>
44 #include <mach/notify.h>
46 #include <stdatomic.h>
49 * Work Interval structs
51 * This struct represents a thread group and/or work interval context
52 * in a mechanism that is represented with a kobject.
54 * Every thread that has joined a WI has a +1 ref, and the port
55 * has a +1 ref as well.
57 * TODO: groups need to have a 'is for WI' flag
58 * and they need a flag to create that says 'for WI'
59 * This would allow CLPC to avoid allocating WI support
60 * data unless it is needed
62 * TODO: Enforce not having more than one non-group joinable work
63 * interval per thread group.
64 * CLPC only wants to see one WI-notify callout per group.
67 struct work_interval
{
69 _Atomic
uint32_t wi_ref_count
;
70 uint32_t wi_create_flags
;
72 /* for debugging purposes only, does not hold a ref on port */
76 * holds uniqueid and version of creating process,
77 * used to permission-gate notify
78 * TODO: you'd think there would be a better way to do this
80 uint64_t wi_creator_uniqueid
;
81 uint32_t wi_creator_pid
;
82 int wi_creator_pidversion
;
87 wi_retain(struct work_interval
*work_interval
)
90 old_count
= atomic_fetch_add_explicit(&work_interval
->wi_ref_count
,
91 1, memory_order_relaxed
);
92 assert(old_count
> 0);
96 wi_release(struct work_interval
*work_interval
)
99 old_count
= atomic_fetch_sub_explicit(&work_interval
->wi_ref_count
,
100 1, memory_order_relaxed
);
101 assert(old_count
> 0);
103 if (old_count
== 1) {
106 kfree(work_interval
, sizeof(struct work_interval
));
111 * work_interval_port_alloc
113 * Description: Obtain a send right for the given work interval struct.
115 * Parameters: work_interval - A work_interval struct
116 * Consumes a +1 ref count on work_interval, now owned by the port.
118 * Returns: Port of type IKOT_WORK_INTERVAL with work_interval set as its kobject.
119 * Returned with a +1 send right and no-senders notification armed.
120 * Work interval struct reference is held by the port.
123 work_interval_port_alloc(struct work_interval
*work_interval
)
125 ipc_port_t work_interval_port
= ipc_port_alloc_kernel();
127 if (work_interval_port
== IP_NULL
)
128 panic("failed to allocate work interval port");
130 assert(work_interval
->wi_port
== IP_NULL
);
132 ip_lock(work_interval_port
);
133 ipc_kobject_set_atomically(work_interval_port
, (ipc_kobject_t
)work_interval
,
136 ipc_port_t notify_port
= ipc_port_make_sonce_locked(work_interval_port
);
137 ipc_port_t old_notify_port
= IP_NULL
;
138 ipc_port_nsrequest(work_interval_port
, 1, notify_port
, &old_notify_port
);
141 assert(old_notify_port
== IP_NULL
);
143 /* This is the only make-send that will happen on this port */
144 ipc_port_t send_port
= ipc_port_make_send(work_interval_port
);
145 assert(IP_VALID(send_port
));
147 work_interval
->wi_port
= work_interval_port
;
153 * work_interval_port_convert
155 * Called with port locked, returns reference to work interval
156 * if indeed the port is a work interval kobject port
158 static struct work_interval
*
159 work_interval_port_convert_locked(ipc_port_t port
)
161 struct work_interval
*work_interval
= NULL
;
166 if (!ip_active(port
))
169 if (IKOT_WORK_INTERVAL
!= ip_kotype(port
))
172 work_interval
= (struct work_interval
*)port
->ip_kobject
;
174 wi_retain(work_interval
);
176 return work_interval
;
180 * port_name_to_work_interval
182 * Description: Obtain a reference to the work_interval associated with a given port.
184 * Parameters: name A Mach port name to translate.
186 * Returns: NULL The given Mach port did not reference a work_interval.
187 * !NULL The work_interval that is associated with the Mach port.
190 port_name_to_work_interval(mach_port_name_t name
,
191 struct work_interval
**work_interval
)
193 if (!MACH_PORT_VALID(name
))
194 return KERN_INVALID_NAME
;
196 ipc_port_t port
= IPC_PORT_NULL
;
197 kern_return_t kr
= KERN_SUCCESS
;
199 kr
= ipc_port_translate_send(current_space(), name
, &port
);
200 if (kr
!= KERN_SUCCESS
)
204 assert(IP_VALID(port
));
206 struct work_interval
*converted_work_interval
;
208 converted_work_interval
= work_interval_port_convert_locked(port
);
210 /* the port is valid, but doesn't denote a work_interval */
211 if (converted_work_interval
== NULL
)
212 kr
= KERN_INVALID_CAPABILITY
;
216 if (kr
== KERN_SUCCESS
)
217 *work_interval
= converted_work_interval
;
225 * work_interval_port_notify
227 * Description: Handle a no-senders notification for a work interval port.
228 * Destroys the port and releases its reference on the work interval.
230 * Parameters: msg A Mach no-senders notification message.
232 * Note: This assumes that there is only one create-right-from-work-interval point,
233 * if the ability to extract another send right after creation is added,
234 * this will have to change to handle make-send counts correctly.
237 work_interval_port_notify(mach_msg_header_t
*msg
)
239 mach_no_senders_notification_t
*notification
= (void *)msg
;
240 ipc_port_t port
= notification
->not_header
.msgh_remote_port
;
241 struct work_interval
*work_interval
= NULL
;
244 panic("work_interval_port_notify(): invalid port");
248 if (!ip_active(port
))
249 panic("work_interval_port_notify(): inactive port %p", port
);
251 if (ip_kotype(port
) != IKOT_WORK_INTERVAL
)
252 panic("work_interval_port_notify(): not the right kobject: %p, %d\n",
253 port
, ip_kotype(port
));
255 if (port
->ip_mscount
!= notification
->not_count
)
256 panic("work_interval_port_notify(): unexpected make-send count: %p, %d, %d",
257 port
, port
->ip_mscount
, notification
->not_count
);
259 if (port
->ip_srights
!= 0)
260 panic("work_interval_port_notify(): unexpected send right count: %p, %d",
261 port
, port
->ip_srights
);
263 work_interval
= (struct work_interval
*)port
->ip_kobject
;
265 if (work_interval
== NULL
)
266 panic("work_interval_port_notify(): missing kobject: %p", port
);
268 ipc_kobject_set_atomically(port
, IKO_NULL
, IKOT_NONE
);
270 work_interval
->wi_port
= MACH_PORT_NULL
;
274 ipc_port_dealloc_kernel(port
);
275 wi_release(work_interval
);
279 * Change thread's bound work interval to the passed-in work interval
280 * Consumes +1 ref on work_interval
282 * May also pass NULL to un-set work_interval on the thread
284 * Will deallocate any old work interval on the thread
287 thread_set_work_interval(thread_t thread
,
288 struct work_interval
*work_interval
)
290 assert(thread
== current_thread());
292 struct work_interval
*old_th_wi
= thread
->th_work_interval
;
294 /* transfer +1 ref to thread */
295 thread
->th_work_interval
= work_interval
;
298 if (old_th_wi
!= NULL
)
299 wi_release(old_th_wi
);
303 work_interval_thread_terminate(thread_t thread
)
305 if (thread
->th_work_interval
!= NULL
)
306 thread_set_work_interval(thread
, NULL
);
312 kern_work_interval_notify(thread_t thread
, struct kern_work_interval_args
* kwi_args
)
314 assert(thread
== current_thread());
315 assert(kwi_args
->work_interval_id
!= 0);
317 struct work_interval
*work_interval
= thread
->th_work_interval
;
319 if (work_interval
== NULL
||
320 work_interval
->wi_id
!= kwi_args
->work_interval_id
) {
321 /* This thread must have adopted the work interval to be able to notify */
322 return (KERN_INVALID_ARGUMENT
);
325 task_t notifying_task
= current_task();
327 if (work_interval
->wi_creator_uniqueid
!= get_task_uniqueid(notifying_task
) ||
328 work_interval
->wi_creator_pidversion
!= get_task_version(notifying_task
)) {
329 /* Only the creating task can do a notify */
330 return (KERN_INVALID_ARGUMENT
);
333 spl_t s
= splsched();
336 uint64_t urgency_param1
, urgency_param2
;
337 kwi_args
->urgency
= thread_get_urgency(thread
, &urgency_param1
, &urgency_param2
);
341 /* called without interrupts disabled */
342 machine_work_interval_notify(thread
, kwi_args
);
344 return (KERN_SUCCESS
);
347 /* Start at 1, 0 is not a valid work interval ID */
348 static _Atomic
uint64_t unique_work_interval_id
= 1;
351 kern_work_interval_create(thread_t thread
,
352 struct kern_work_interval_create_args
*create_params
)
354 assert(thread
== current_thread());
356 if (thread
->th_work_interval
!= NULL
) {
357 /* already assigned a work interval */
358 return (KERN_FAILURE
);
361 struct work_interval
*work_interval
= kalloc(sizeof(*work_interval
));
363 if (work_interval
== NULL
)
364 panic("failed to allocate work_interval");
366 bzero(work_interval
, sizeof(*work_interval
));
368 uint64_t old_value
= atomic_fetch_add_explicit(&unique_work_interval_id
, 1,
369 memory_order_relaxed
);
371 uint64_t work_interval_id
= old_value
+ 1;
373 uint32_t create_flags
= create_params
->wica_create_flags
;
375 task_t creating_task
= current_task();
376 if ((create_flags
& WORK_INTERVAL_TYPE_MASK
) == WORK_INTERVAL_TYPE_CA_CLIENT
) {
378 * CA_CLIENT work intervals do not create new thread groups
379 * and are non-joinable.
380 * There can only be one CA_CLIENT work interval (created by UIKit)
381 * per each application task
383 if (create_flags
& (WORK_INTERVAL_FLAG_JOINABLE
| WORK_INTERVAL_FLAG_GROUP
))
384 return (KERN_FAILURE
);
385 if (!task_is_app(creating_task
))
386 return (KERN_NOT_SUPPORTED
);
387 if (task_set_ca_client_wi(creating_task
, true) == false)
388 return (KERN_FAILURE
);
391 *work_interval
= (struct work_interval
) {
392 .wi_id
= work_interval_id
,
394 .wi_create_flags
= create_flags
,
395 .wi_creator_pid
= pid_from_task(creating_task
),
396 .wi_creator_uniqueid
= get_task_uniqueid(creating_task
),
397 .wi_creator_pidversion
= get_task_version(creating_task
),
401 if (create_flags
& WORK_INTERVAL_FLAG_JOINABLE
) {
402 /* work_interval has a +1 ref, moves to the port */
403 ipc_port_t port
= work_interval_port_alloc(work_interval
);
404 mach_port_name_t name
= MACH_PORT_NULL
;
406 name
= ipc_port_copyout_send(port
, current_space());
408 if (!MACH_PORT_VALID(name
)) {
410 * copyout failed (port is already deallocated)
411 * Because of the port-destroyed magic,
412 * the work interval is already deallocated too.
414 return KERN_RESOURCE_SHORTAGE
;
417 create_params
->wica_port
= name
;
419 /* work_interval has a +1 ref, moves to the thread */
420 thread_set_work_interval(thread
, work_interval
);
421 create_params
->wica_port
= MACH_PORT_NULL
;
424 create_params
->wica_id
= work_interval_id
;
430 kern_work_interval_destroy(thread_t thread
, uint64_t work_interval_id
)
432 if (work_interval_id
== 0)
433 return KERN_INVALID_ARGUMENT
;
435 if (thread
->th_work_interval
== NULL
||
436 thread
->th_work_interval
->wi_id
!= work_interval_id
) {
437 /* work ID isn't valid or doesn't match joined work interval ID */
438 return (KERN_INVALID_ARGUMENT
);
441 thread_set_work_interval(thread
, NULL
);
447 kern_work_interval_join(thread_t thread
,
448 mach_port_name_t port_name
)
450 struct work_interval
*work_interval
= NULL
;
453 if (port_name
== MACH_PORT_NULL
) {
454 /* 'Un-join' the current work interval */
455 thread_set_work_interval(thread
, NULL
);
459 kr
= port_name_to_work_interval(port_name
, &work_interval
);
460 if (kr
!= KERN_SUCCESS
)
462 /* work_interval has a +1 ref */
464 assert(work_interval
!= NULL
);
466 thread_set_work_interval(thread
, work_interval
);
468 /* ref was consumed by passing it to the thread */