/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/task.h>

#include <kern/ast.h>
#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/restartable.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/waitq.h>

#include <os/hash.h>
#include <os/refcnt.h>
/**
 * @file osfmk/kern/restartable.c
 *
 * @brief
 * This module implements restartable userspace functions.
 *
 * @discussion
 * task_restartable_ranges_register() allows a task to configure
 * its restartable ranges, only once per task,
 * before it has made its second thread.
 *
 * task_restartable_ranges_synchronize() can later be used to trigger
 * restarts for threads with a PC in a restartable region.
 *
 * It is implemented with an AST (AST_RESET_PCS) that will cause threads,
 * as they return to userspace, to reset PCs in a restartable region
 * to the recovery offset of this region.
 *
 * Because signal delivery would mask the proper saved PC for threads,
 * sigreturn also forcefully sets the AST and goes through this logic
 * every single time.
 */
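
/*
 * Illustrative userspace usage (a minimal sketch, not part of this file).
 * It assumes the userspace MiG stubs for the two routines below, plus
 * hypothetical symbols crit_begin/crit_end/crit_recover delimiting the
 * restartable critical section:
 *
 *	task_restartable_range_t range = {
 *		.location      = (mach_vm_address_t)crit_begin,
 *		.length        = (unsigned short)(crit_end - crit_begin),
 *		.recovery_offs = (unsigned short)(crit_recover - crit_begin),
 *	};
 *	kern_return_t kr;
 *
 *	// register once, while the process is still single-threaded
 *	kr = task_restartable_ranges_register(mach_task_self(), &range, 1);
 *	// ... later: restart any thread whose PC lies inside the range
 *	kr = task_restartable_ranges_synchronize(mach_task_self());
 */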
typedef int (*cmpfunc_t)(const void *a, const void *b);
extern void qsort(void *a, size_t n, size_t es, cmpfunc_t cmp);
struct restartable_ranges {
	queue_chain_t            rr_link;
	os_refcnt_t              rr_ref;
	uint32_t                 rr_count;
	uint32_t                 rr_hash;
	task_restartable_range_t rr_ranges[];
};
#if DEBUG || DEVELOPMENT
#define RR_HASH_SIZE 256
#else
// Release kernel userspace should have shared caches and a single registration
#define RR_HASH_SIZE 16
#endif
static queue_head_t rr_hash[RR_HASH_SIZE];
LCK_GRP_DECLARE(rr_lock_grp, "restartable ranges");
LCK_SPIN_DECLARE(rr_spinlock, &rr_lock_grp);
#define rr_lock()   lck_spin_lock_grp(&rr_spinlock, &rr_lock_grp)
#define rr_unlock() lck_spin_unlock(&rr_spinlock)
#pragma mark internals
/**
 * @function _ranges_cmp
 *
 * @brief
 * Compares two ranges together.
 */
static int
_ranges_cmp(const void *_r1, const void *_r2)
{
	const task_restartable_range_t *r1 = _r1;
	const task_restartable_range_t *r2 = _r2;

	if (r1->location != r2->location) {
		return r1->location < r2->location ? -1 : 1;
	}
	if (r1->length == r2->length) {
		return 0;
	}
	return r1->length < r2->length ? -1 : 1;
}
/**
 * @function _ranges_validate
 *
 * @brief
 * Validates an array of PC ranges for wraps and intersections.
 *
 * @discussion
 * This sorts and modifies the input.
 *
 * The ranges must:
 * - not wrap around,
 * - have a length/recovery offset within a page of the range start
 *
 * @returns
 * - KERN_SUCCESS:          ranges are valid
 * - KERN_INVALID_ARGUMENT: ranges are invalid
 */
static kern_return_t
_ranges_validate(task_t task, task_restartable_range_t *ranges, uint32_t count)
{
	qsort(ranges, count, sizeof(task_restartable_range_t), _ranges_cmp);
	uint64_t limit = task_has_64Bit_data(task) ? UINT64_MAX : UINT32_MAX;
	uint64_t end, recovery;

	for (size_t i = 0; i < count; i++) {
		if (ranges[i].length > TASK_RESTARTABLE_OFFSET_MAX ||
		    ranges[i].recovery_offs > TASK_RESTARTABLE_OFFSET_MAX) {
			return KERN_INVALID_ARGUMENT;
		}
		if (ranges[i].flags) {
			return KERN_INVALID_ARGUMENT;
		}
		if (os_add_overflow(ranges[i].location, ranges[i].length, &end)) {
			return KERN_INVALID_ARGUMENT;
		}
		if (os_add_overflow(ranges[i].location, ranges[i].recovery_offs,
		    &recovery)) {
			return KERN_INVALID_ARGUMENT;
		}
		if (ranges[i].location > limit || end > limit || recovery > limit) {
			return KERN_INVALID_ARGUMENT;
		}
		if (i + 1 < count && end > ranges[i + 1].location) {
			return KERN_INVALID_ARGUMENT;
		}
	}

	return KERN_SUCCESS;
}
/**
 * @function _ranges_lookup
 *
 * @brief
 * Lookup the range enclosing a given PC within a set of ranges.
 *
 * @returns
 * - 0: no PC range found
 * - the recovery address (range start + recovery offset) otherwise.
 */
__attribute__((always_inline))
static mach_vm_address_t
_ranges_lookup(struct restartable_ranges *rr, mach_vm_address_t pc)
{
	task_restartable_range_t *ranges = rr->rr_ranges;
	uint32_t l = 0, r = rr->rr_count;

	if (pc <= ranges[0].location) {
		return 0;
	}
	if (pc >= ranges[r - 1].location + ranges[r - 1].length) {
		return 0;
	}

	while (l < r) {
		uint32_t i = (r + l) / 2;
		mach_vm_address_t location = ranges[i].location;

		if (pc <= location) {
			/* if the PC is exactly at pc_start, no reset is needed */
			r = i;
		} else if (location + ranges[i].length <= pc) {
			/* if the PC is exactly at the end, it's out of the function */
			l = i + 1;
		} else {
			/* else it's strictly in the range, return the recovery pc */
			return location + ranges[i].recovery_offs;
		}
	}

	return 0;
}
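
/*
 * Illustrative behavior (assumed values, not from the original source),
 * for a single registered range
 * { .location = 0x1000, .length = 0x40, .recovery_offs = 0x00 }:
 *
 *	_ranges_lookup(rr, 0x1000) == 0;       // at start: no reset needed
 *	_ranges_lookup(rr, 0x1040) == 0;       // at end: already out
 *	_ranges_lookup(rr, 0x1018) == 0x1000;  // strictly inside: recovery PC
 */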
/**
 * @function _restartable_ranges_dispose
 *
 * @brief
 * Helper to dispose of a range that has reached a 0 refcount.
 */
__attribute__((noinline))
static void
_restartable_ranges_dispose(struct restartable_ranges *rr, bool hash_remove)
{
	if (hash_remove) {
		rr_lock();
		remqueue(&rr->rr_link);
		rr_unlock();
	}
	kfree(rr, sizeof(*rr) + rr->rr_count * sizeof(task_restartable_range_t));
}
/**
 * @function _restartable_ranges_equals
 *
 * @brief
 * Helper to compare two restartable ranges.
 */
static bool
_restartable_ranges_equals(
	const struct restartable_ranges *rr1,
	const struct restartable_ranges *rr2)
{
	size_t rr1_size = rr1->rr_count * sizeof(task_restartable_range_t);
	return rr1->rr_hash == rr2->rr_hash &&
	       rr1->rr_count == rr2->rr_count &&
	       memcmp(rr1->rr_ranges, rr2->rr_ranges, rr1_size) == 0;
}
/**
 * @function _restartable_ranges_create
 *
 * @brief
 * Helper to create a uniqued restartable range.
 *
 * @returns
 * - KERN_SUCCESS
 * - KERN_INVALID_ARGUMENT:  the validation of the new ranges failed.
 * - KERN_RESOURCE_SHORTAGE: too many ranges, out of memory
 */
static kern_return_t
_restartable_ranges_create(task_t task, task_restartable_range_t *ranges,
    uint32_t count, struct restartable_ranges **rr_storage)
{
	struct restartable_ranges *rr, *rr_found, *rr_base;
	queue_head_t *head;
	uint32_t base_count, total_count;
	size_t base_size, size;
	kern_return_t kr;

	rr_base    = *rr_storage;
	base_count = rr_base ? rr_base->rr_count : 0;
	base_size  = sizeof(task_restartable_range_t) * base_count;
	size       = sizeof(task_restartable_range_t) * count;

	if (os_add_overflow(base_count, count, &total_count)) {
		return KERN_INVALID_ARGUMENT;
	}
	if (total_count > 1024) {
		return KERN_RESOURCE_SHORTAGE;
	}

	rr = kalloc(sizeof(*rr) + base_size + size);
	if (rr == NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	queue_chain_init(rr->rr_link);
	os_ref_init(&rr->rr_ref, NULL);
	rr->rr_count = total_count;
	if (base_size) {
		memcpy(rr->rr_ranges, rr_base->rr_ranges, base_size);
	}
	memcpy(rr->rr_ranges + base_count, ranges, size);
	kr = _ranges_validate(task, rr->rr_ranges, total_count);
	if (kr) {
		_restartable_ranges_dispose(rr, false);
		return kr;
	}
	rr->rr_hash = os_hash_jenkins(rr->rr_ranges,
	    rr->rr_count * sizeof(task_restartable_range_t));

	head = &rr_hash[rr->rr_hash % RR_HASH_SIZE];

	rr_lock();
	queue_iterate(head, rr_found, struct restartable_ranges *, rr_link) {
		if (_restartable_ranges_equals(rr, rr_found) &&
		    os_ref_retain_try(&rr_found->rr_ref)) {
			goto found;
		}
	}

	enqueue_tail(head, &rr->rr_link);
	rr_found = rr;

found:
	if (rr_base && os_ref_release_relaxed(&rr_base->rr_ref) == 0) {
		remqueue(&rr_base->rr_link);
	} else {
		rr_base = NULL;
	}
	rr_unlock();

	*rr_storage = rr_found;

	if (rr_found != rr) {
		_restartable_ranges_dispose(rr, false);
	}
	if (rr_base) {
		_restartable_ranges_dispose(rr_base, false);
	}
	return KERN_SUCCESS;
}
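
/*
 * Uniquing note (illustrative): if two tasks register byte-identical range
 * arrays, the second registration finds the first entry in rr_hash[] via
 * _restartable_ranges_equals() and merely retains it, so both tasks end up
 * sharing the same struct restartable_ranges.
 */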
#pragma mark extern interfaces
void
restartable_ranges_release(struct restartable_ranges *rr)
{
	if (os_ref_release_relaxed(&rr->rr_ref) == 0) {
		_restartable_ranges_dispose(rr, true);
	}
}
void
thread_reset_pcs_ast(thread_t thread)
{
	task_t task = thread->task;
	struct restartable_ranges *rr;
	mach_vm_address_t pc;

	/*
	 * Because restartable_ranges are set while the task only has one thread
	 * and can't be mutated outside of this, no lock is required to read this.
	 */
	rr = task->restartable_ranges;
	if (rr) {
		/* pairs with the barrier in task_restartable_ranges_synchronize() */
		os_atomic_thread_fence(acquire);

		pc = _ranges_lookup(rr, machine_thread_pc(thread));

		if (pc) {
			machine_thread_reset_pc(thread, pc);
		}
	}
}
void
restartable_init(void)
{
	for (size_t i = 0; i < RR_HASH_SIZE; i++) {
		queue_head_init(rr_hash[i]);
	}
}
#pragma mark MiG interfaces
kern_return_t
task_restartable_ranges_register(
	task_t                    task,
	task_restartable_range_t *ranges,
	mach_msg_type_number_t    count)
{
	kern_return_t kr;
	thread_t th;

	if (task != current_task()) {
		return KERN_FAILURE;
	}

	kr = _ranges_validate(task, ranges, count);

	if (kr == KERN_SUCCESS) {
		task_lock(task);

		queue_iterate(&task->threads, th, thread_t, task_threads) {
			if (th != current_thread()) {
				kr = KERN_NOT_SUPPORTED;
				break;
			}
		}
#if !DEBUG && !DEVELOPMENT
		/*
		 * For security reasons, on release kernels, only allow for this to
		 * be configured once.
		 *
		 * But to be able to test the feature we need to relax this for
		 * development kernels.
		 */
		if (task->restartable_ranges) {
			kr = KERN_NOT_SUPPORTED;
		}
#endif
		if (kr == KERN_SUCCESS) {
			kr = _restartable_ranges_create(task, ranges, count,
			    &task->restartable_ranges);
		}
		task_unlock(task);
	}

	return kr;
}
kern_return_t
task_restartable_ranges_synchronize(task_t task)
{
	thread_t thread;

	if (task != current_task()) {
		return KERN_FAILURE;
	}

	/* pairs with the barrier in thread_reset_pcs_ast() */
	os_atomic_thread_fence(release);

	task_lock(task);

	if (task->restartable_ranges) {
		queue_iterate(&task->threads, thread, thread_t, task_threads) {
			if (thread != current_thread()) {
				thread_mtx_lock(thread);
				act_set_ast_reset_pcs(thread);
				thread_mtx_unlock(thread);
			}
		}
	}

	task_unlock(task);

	return KERN_SUCCESS;
}