/* apple/xnu (xnu-1504.15.3): osfmk/kern/syscall_subr.c */

/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/counters.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>

#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>

#ifdef MACH_BSD
extern void workqueue_thread_yielded(void);
#endif /* MACH_BSD */

/* Called from the commpage to take a delayed preemption when exiting
 * the "Preemption Free Zone" (PFZ).
 */
kern_return_t
pfz_exit(
	__unused struct pfz_exit_args *args)
{
	/* For now, nothing special to do.  We'll pick up the ASTs on kernel exit. */

	return (KERN_SUCCESS);
}

/*
 * swtch and swtch_pri both attempt to context switch (logic in
 * thread_block no-ops the context switch if nothing would happen).
 * A boolean is returned that indicates whether there is anything
 * else runnable.
 *
 * This boolean can be used by a thread waiting on a
 * lock or condition:  if FALSE is returned, the thread is justified
 * in becoming a resource hog by continuing to spin because there's
 * nothing else useful that the processor could do.  If TRUE is
 * returned, the thread should make one more check on the
 * lock and then be a good citizen and really suspend.
 */

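/*
 * Illustrative sketch (editorial addition, not part of the original
 * file): how a waiter might use the swtch() return value to choose
 * between spinning and really blocking.  try_lock() and lock_sleep()
 * are hypothetical stand-ins for the caller's own primitives.
 *
 *	for (;;) {
 *		if (try_lock(l))
 *			break;		// got the lock
 *		if (!swtch())
 *			continue;	// nothing else runnable: spin on
 *		if (try_lock(l))	// something else was runnable:
 *			break;		// one more check, then
 *		lock_sleep(l);		// be a good citizen and suspend
 *	}
 */
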
/*
 *	Continuation for swtch(): report whether anything else is runnable.
 */
static void
swtch_continue(void)
{
	register processor_t	myprocessor;
	boolean_t		result;

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 || rt_runq.count > 0;
	enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}

125boolean_t
91447636
A
126swtch(
127 __unused struct swtch_args *args)
1c79356b
A
128{
129 register processor_t myprocessor;
130 boolean_t result;
131
91447636 132 disable_preemption();
1c79356b 133 myprocessor = current_processor();
2d21ac55 134 if (myprocessor->runq.count == 0 && rt_runq.count == 0) {
1c79356b
A
135 mp_enable_preemption();
136
137 return (FALSE);
138 }
91447636 139 enable_preemption();
1c79356b
A
140
141 counter(c_swtch_block++);
142
91447636 143 thread_block_reason((thread_continue_t)swtch_continue, NULL, AST_YIELD);
1c79356b 144
91447636 145 disable_preemption();
1c79356b 146 myprocessor = current_processor();
2d21ac55 147 result = myprocessor->runq.count > 0 || rt_runq.count > 0;
91447636 148 enable_preemption();
1c79356b
A
149
150 return (result);
151}
152
/*
 *	Continuation for swtch_pri(): abort the priority depression, then
 *	report whether anything else is runnable.
 */
static void
swtch_pri_continue(void)
{
	register processor_t	myprocessor;
	boolean_t		result;

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 || rt_runq.count > 0;
	mp_enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}

/*
 *	Like swtch(), but depress the caller's priority for one standard
 *	quantum before yielding.
 */
boolean_t
swtch_pri(
	__unused struct swtch_pri_args *args)
{
	register processor_t	myprocessor;
	boolean_t		result;

	disable_preemption();
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 && rt_runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(std_quantum);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 || rt_runq.count > 0;
	enable_preemption();

	return (result);
}

/*
 *	Continuation for thread_switch(): undo any priority depression,
 *	then return to user space.
 */
static void
thread_switch_continue(void)
{
	register thread_t	self = current_thread();
	int			option = self->saved.swtch.option;

	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_abort_internal(self);

	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * thread_switch:
 *
 *	Context switch.  User may supply thread hint.
 */
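
/*
 * Illustrative user-space sketch (editorial addition): the trap takes a
 * thread port as the handoff hint plus an option and a time in
 * milliseconds.  "hint" is a hypothetical send right for the intended
 * successor; MACH_PORT_NULL means "no hint, just yield".
 *
 *	#include <mach/mach.h>
 *	#include <mach/thread_switch.h>
 *
 *	// Hand off to a known thread, depressing our priority
 *	// for up to 10 ms if we end up blocking instead.
 *	thread_switch(hint, SWITCH_OPTION_DEPRESS, 10);
 */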
kern_return_t
thread_switch(
	struct thread_switch_args *args)
{
	register thread_t	thread, self = current_thread();
	mach_port_name_t	thread_name = args->thread_name;
	int			option = args->option;
	mach_msg_timeout_t	option_time = args->option_time;

	/*
	 *	Process option.
	 */
	switch (option) {

	case SWITCH_OPTION_NONE:
	case SWITCH_OPTION_DEPRESS:
	case SWITCH_OPTION_WAIT:
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	workqueue_thread_yielded();

	/*
	 * Translate the port name if supplied.
	 */
	if (thread_name != MACH_PORT_NULL) {
		ipc_port_t	port;

		if (ipc_port_translate_send(self->task->itk_space,
					    thread_name, &port) == KERN_SUCCESS) {
			ip_reference(port);
			ip_unlock(port);

			thread = convert_port_to_thread(port);
			ipc_port_release(port);

			if (thread == self) {
				(void)thread_deallocate_internal(thread);
				thread = THREAD_NULL;
			}
		}
		else
			thread = THREAD_NULL;
	}
	else
		thread = THREAD_NULL;

	/*
	 * Try to handoff if supplied.
	 */
	if (thread != THREAD_NULL) {
		processor_t	processor;
		spl_t		s;

		s = splsched();
		thread_lock(thread);

		/*
		 *	Check that the thread is not bound
		 *	to a different processor, and that realtime
		 *	is not involved.
		 *
		 *	Next, pull it off its run queue.  If it
		 *	doesn't come, it's not eligible.
		 */
		processor = current_processor();
		if (processor->current_pri < BASEPRI_RTQUEUES	&&
		    thread->sched_pri < BASEPRI_RTQUEUES	&&
		    (thread->bound_processor == PROCESSOR_NULL	||
		     thread->bound_processor == processor)	&&
		    run_queue_remove(thread)) {
			/*
			 *	Hah, got it!!
			 */
			thread_unlock(thread);

			(void)thread_deallocate_internal(thread);

			if (option == SWITCH_OPTION_WAIT)
				assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
						    option_time, 1000*NSEC_PER_USEC);
			else
			if (option == SWITCH_OPTION_DEPRESS)
				thread_depress_ms(option_time);

			self->saved.swtch.option = option;

			thread_run(self, (thread_continue_t)thread_switch_continue, NULL, thread);
			/* NOTREACHED */
		}

		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (option == SWITCH_OPTION_WAIT)
		assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE, option_time, 1000*NSEC_PER_USEC);
	else
	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_ms(option_time);

	self->saved.swtch.option = option;

	thread_block_reason((thread_continue_t)thread_switch_continue, NULL, AST_YIELD);

	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_abort_internal(self);

	return (KERN_SUCCESS);
}

/*
 * Depress the thread's priority to the lowest possible for the specified
 * interval, with an interval of zero resulting in no timeout being
 * scheduled.
 */
void
thread_depress_abstime(
	uint64_t		interval)
{
	register thread_t	self = current_thread();
	uint64_t		deadline;
	spl_t			s;

	s = splsched();
	thread_lock(self);
	if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
		processor_t	myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		myprocessor->current_pri = self->sched_pri;
		self->sched_mode |= TH_MODE_DEPRESS;

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}

/*
 *	As above, but with the interval given in milliseconds.
 */
void
thread_depress_ms(
	mach_msg_timeout_t	interval)
{
	uint64_t		abstime;

	clock_interval_to_absolutetime_interval(
		interval, 1000*NSEC_PER_USEC, &abstime);
	thread_depress_abstime(abstime);
}

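/*
 * Worked timeline (editorial addition): thread_depress_ms(10) converts
 * 10 ms to absolute time units, drops the caller to DEPRESSPRI at once,
 * and arms depress_timer roughly 10 ms out; when the timer fires,
 * thread_depress_expire() below restores the normal priority via
 * compute_priority().
 */
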
/*
 *	Priority depression expiration.
 */
void
thread_depress_expire(
	void		*p0,
	__unused void	*p1)
{
	thread_t	thread = p0;
	spl_t		s;

	s = splsched();
	thread_lock(thread);
	if (--thread->depress_timer_active == 0) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
		compute_priority(thread, FALSE);
	}
	thread_unlock(thread);
	splx(s);
}

/*
 *	Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_internal(
	thread_t		thread)
{
	kern_return_t		result = KERN_NOT_DEPRESSED;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (!(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
			compute_priority(thread, FALSE);
			result = KERN_SUCCESS;
		}

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	thread_unlock(thread);
	splx(s);

	return (result);
}
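
/*
 * Editorial note: a thread in TH_MODE_POLLDEPRESS is not aborted here;
 * a poll-induced depression (see thread_poll_yield() below) ends only
 * when its timer expires.
 */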

/*
 *	Called for a fixed-priority (neither realtime nor timeshare) thread
 *	that is polling: once it has accumulated enough computation, depress
 *	it (TH_MODE_POLLDEPRESS) for a span proportional to that computation.
 */
void
thread_poll_yield(
	thread_t	self)
{
	spl_t		s;

	assert(self == current_thread());

	s = splsched();
	if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
		uint64_t	total_computation, abstime;

		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t	myprocessor = current_processor();
			ast_t		preempt;

			thread_lock(self);
			if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_mode |= TH_MODE_POLLDEPRESS;

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime))
				self->depress_timer_active++;
			thread_unlock(self);

			if ((preempt = csw_check(myprocessor)) != AST_NONE)
				ast_on(preempt);
		}
	}
	splx(s);
}
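
/*
 * Worked example (editorial addition, assuming sched_poll_yield_shift
 * == 4 for illustration; the actual value is set by the scheduler): a
 * polling thread that has accumulated 16 ms of computation is depressed
 * for 16 ms >> 4 = 1 ms, i.e. for 1/16th of the CPU time it consumed.
 */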

/*
 *	In-kernel yield: if anything else is runnable, depress priority for
 *	"ms" milliseconds and block; otherwise return immediately.
 */
void
thread_yield_internal(
	mach_msg_timeout_t	ms)
{
	processor_t	myprocessor;

	disable_preemption();
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 && rt_runq.count == 0) {
		mp_enable_preemption();

		return;
	}
	enable_preemption();

	thread_depress_ms(ms);

	thread_block_reason(THREAD_CONTINUE_NULL, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());
}