/*
 * osfmk/kern/syscall_subr.c (from xnu-1228.12.14)
 */
/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
58
1c79356b
A
59#include <mach/boolean.h>
60#include <mach/thread_switch.h>
61#include <ipc/ipc_port.h>
62#include <ipc/ipc_space.h>
63#include <kern/counters.h>
1c79356b
A
64#include <kern/ipc_kobject.h>
65#include <kern/processor.h>
66#include <kern/sched.h>
67#include <kern/sched_prim.h>
68#include <kern/spl.h>
69#include <kern/task.h>
70#include <kern/thread.h>
1c79356b
A
71#include <mach/policy.h>
72
73#include <kern/syscall_subr.h>
74#include <mach/mach_host_server.h>
75#include <mach/mach_syscalls.h>
76
/*
 *	swtch and swtch_pri both attempt to context switch (logic in
 *	thread_block no-ops the context switch if nothing would happen).
 *	A boolean is returned that indicates whether there is anything
 *	else runnable.
 *
 *	This boolean can be used by a thread waiting on a
 *	lock or condition:  If FALSE is returned, the thread is justified
 *	in becoming a resource hog by continuing to spin because there's
 *	nothing else useful that the processor could do.  If TRUE is
 *	returned, the thread should make one more check on the
 *	lock and then be a good citizen and really suspend.
 */
91447636 91static void
0b4e3aa0 92swtch_continue(void)
1c79356b 93{
0b4e3aa0
A
94 register processor_t myprocessor;
95 boolean_t result;
1c79356b 96
91447636 97 disable_preemption();
1c79356b 98 myprocessor = current_processor();
2d21ac55 99 result = myprocessor->runq.count > 0 || rt_runq.count > 0;
91447636 100 enable_preemption();
1c79356b 101
0b4e3aa0
A
102 thread_syscall_return(result);
103 /*NOTREACHED*/
104}
/*
 *	swtch:
 *
 *	Attempt to context switch to any other runnable thread.
 *	Returns TRUE if anything else appears runnable on this
 *	processor's run queue or the realtime run queue, FALSE
 *	otherwise (the "keep spinning" hint described above).
 */
boolean_t
swtch(
	__unused struct swtch_args *args)
{
	register processor_t	myprocessor;
	boolean_t				result;

	/*
	 * Fast path: with preemption disabled so the processor can't
	 * change underneath us, bail out when both the local and the
	 * realtime run queues are empty.
	 */
	disable_preemption();
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 && rt_runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_block++);

	/*
	 * Yield the processor; if the block actually switches, we resume
	 * in swtch_continue() and never reach the code below.
	 */
	thread_block_reason((thread_continue_t)swtch_continue, NULL, AST_YIELD);

	/* No continuation was taken: re-sample for the return value. */
	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 || rt_runq.count > 0;
	enable_preemption();

	return (result);
}
/*
 *	Continuation for swtch_pri(): runs when the blocked thread
 *	resumes.  Cancels the priority depression set up by swtch_pri()
 *	and returns the "anything else runnable?" hint to user space.
 */
static void
swtch_pri_continue(void)
{
	register processor_t	myprocessor;
	boolean_t				result;

	/* Undo the depression requested before blocking. */
	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 || rt_runq.count > 0;
	mp_enable_preemption();

	/* Continuations never return; complete the trap directly. */
	thread_syscall_return(result);
	/*NOTREACHED*/
}
/*
 *	swtch_pri:
 *
 *	Like swtch(), but additionally depresses the caller's priority
 *	for one standard quantum before yielding, giving lower-priority
 *	threads a better chance to run.  Returns TRUE if anything else
 *	appears runnable, FALSE otherwise.
 */
boolean_t
swtch_pri(
__unused struct swtch_pri_args *args)
{
	register processor_t	myprocessor;
	boolean_t				result;

	/* Fast path: nothing runnable anywhere, don't bother blocking. */
	disable_preemption();
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 && rt_runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	/* Depress our priority for one quantum, then yield. */
	thread_depress_abstime(std_quantum);

	/* On a successful block we resume in swtch_pri_continue(). */
	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	/* Block didn't take the continuation path: undo the depression. */
	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 || rt_runq.count > 0;
	enable_preemption();

	return (result);
}
182
91447636
A
183static void
184thread_switch_continue(void)
185{
186 register thread_t self = current_thread();
187 int option = self->saved.swtch.option;
188
189 if (option == SWITCH_OPTION_DEPRESS)
190 thread_depress_abort_internal(self);
191
192 thread_syscall_return(KERN_SUCCESS);
193 /*NOTREACHED*/
194}
/*
 *	thread_switch:
 *
 *	Context switch.  User may supply thread hint.
 *
 *	Options: SWITCH_OPTION_NONE, SWITCH_OPTION_DEPRESS (depress
 *	priority for option_time ms), or SWITCH_OPTION_WAIT (wait up to
 *	option_time ms).  If a valid thread hint is supplied and the
 *	target is eligible, the processor is handed off directly.
 */
kern_return_t
thread_switch(
	struct thread_switch_args *args)
{
	register thread_t		thread, self = current_thread();
	mach_port_name_t		thread_name = args->thread_name;
	int						option = args->option;
	mach_msg_timeout_t		option_time = args->option_time;

	/*
	 * Process option.
	 */
	switch (option) {

	case SWITCH_OPTION_NONE:
	case SWITCH_OPTION_DEPRESS:
	case SWITCH_OPTION_WAIT:
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Translate the port name if supplied.
	 * On success we hold a thread reference (from
	 * convert_port_to_thread); a hint naming ourselves
	 * is treated the same as no hint.
	 */
	if (thread_name != MACH_PORT_NULL) {
		ipc_port_t			port;

		if (ipc_port_translate_send(self->task->itk_space,
					thread_name, &port) == KERN_SUCCESS) {
			ip_reference(port);
			ip_unlock(port);

			thread = convert_port_to_thread(port);
			ipc_port_release(port);

			if (thread == self) {
				(void)thread_deallocate_internal(thread);
				thread = THREAD_NULL;
			}
		}
		else
			thread = THREAD_NULL;
	}
	else
		thread = THREAD_NULL;

	/*
	 * Try to handoff if supplied.
	 */
	if (thread != THREAD_NULL) {
		processor_t		processor;
		spl_t			s;

		s = splsched();
		thread_lock(thread);

		/*
		 *	Check that the thread is not bound
		 *	to a different processor, and that realtime
		 *	is not involved.
		 *
		 *	Next, pull it off its run queue.  If it
		 *	doesn't come, it's not eligible.
		 */
		processor = current_processor();
		if (processor->current_pri < BASEPRI_RTQUEUES			&&
			thread->sched_pri < BASEPRI_RTQUEUES		&&
			(thread->bound_processor == PROCESSOR_NULL	||
			 thread->bound_processor == processor)		&&
				run_queue_remove(thread)					) {
			/*
			 *	Hah, got it!!
			 */
			thread_unlock(thread);

			/* The handoff consumes the reference we took above. */
			(void)thread_deallocate_internal(thread);

			if (option == SWITCH_OPTION_WAIT)
				assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
														option_time, 1000*NSEC_PER_USEC);
			else
			if (option == SWITCH_OPTION_DEPRESS)
				thread_depress_ms(option_time);

			/* Remembered so the continuation can undo a depression. */
			self->saved.swtch.option = option;

			/* Direct handoff: run `thread', resume in the continuation. */
			thread_run(self, (thread_continue_t)thread_switch_continue, NULL, thread);
			/* NOTREACHED */
		}

		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	/* No handoff possible: fall back to an ordinary yield. */
	if (option == SWITCH_OPTION_WAIT)
		assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE, option_time, 1000*NSEC_PER_USEC);
	else
	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_ms(option_time);

	self->saved.swtch.option = option;

	thread_block_reason((thread_continue_t)thread_switch_continue, NULL, AST_YIELD);

	/* Block returned without the continuation: undo any depression. */
	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_abort_internal(self);

	return (KERN_SUCCESS);
}
/*
 *	Depress thread's priority to lowest possible for the specified interval,
 *	with a value of zero resulting in no timeout being scheduled.
 *	Interval is in absolute time units.
 */
void
thread_depress_abstime(
	uint64_t				interval)
{
	register thread_t		self = current_thread();
	uint64_t				deadline;
	spl_t					s;

	s = splsched();
	thread_lock(self);
	/* Don't stack depressions: skip if one is already in effect. */
	if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
		processor_t		myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		/* Keep the processor's cached priority in sync. */
		myprocessor->current_pri = self->sched_pri;
		self->sched_mode |= TH_MODE_DEPRESS;

		if (interval != 0) {
			/* Arm the expiry timer; take a timer ref if newly entered. */
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}
345
346void
347thread_depress_ms(
348 mach_msg_timeout_t interval)
349{
350 uint64_t abstime;
351
352 clock_interval_to_absolutetime_interval(
353 interval, 1000*NSEC_PER_USEC, &abstime);
354 thread_depress_abstime(abstime);
355}
/*
 *	Priority depression expiration (depress_timer callout).
 *	p0 is the depressed thread; normal priority is restored once the
 *	last outstanding timer reference drains.
 */
void
thread_depress_expire(
	void			*p0,
	__unused void	*p1)
{
	thread_t		thread = p0;
	spl_t			s;

	s = splsched();
	thread_lock(thread);
	if (--thread->depress_timer_active == 0) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
		/* Recompute sched_pri now that the depression is lifted. */
		compute_priority(thread, FALSE);
	}
	thread_unlock(thread);
	splx(s);
}
/*
 *	Prematurely abort priority depression if there is one.
 *	Returns KERN_SUCCESS if a depression was cancelled,
 *	KERN_NOT_DEPRESSED otherwise.  Poll-depressions
 *	(TH_MODE_POLLDEPRESS) are left alone to expire on their own.
 */
kern_return_t
thread_depress_abort_internal(
	thread_t				thread)
{
	kern_return_t 			result = KERN_NOT_DEPRESSED;
	spl_t					s;

	s = splsched();
	thread_lock(thread);
	if (!(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
			compute_priority(thread, FALSE);
			result = KERN_SUCCESS;
		}

		/* Cancel the pending expiry; drop its ref if it hadn't fired. */
		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	thread_unlock(thread);
	splx(s);

	return (result);
}
/*
 *	thread_poll_yield:
 *
 *	Called for a thread that appears to be polling.  If a
 *	non-realtime, non-timeshare thread has accumulated enough
 *	uninterrupted computation, depress its priority for a while
 *	(TH_MODE_POLLDEPRESS) and let anything else runnable preempt.
 */
void
thread_poll_yield(
	thread_t		self)
{
	spl_t			s;

	assert(self == current_thread());

	s = splsched();
	if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
		uint64_t			total_computation, abstime;

		/* Computation since last epoch, plus any metered carryover. */
		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t		myprocessor = current_processor();
			ast_t			preempt;

			thread_lock(self);
			if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
			}
			/* Restart the computation clock for the next window. */
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_mode |= TH_MODE_POLLDEPRESS;

			/* Depression length scales with the computation burned. */
			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime))
				self->depress_timer_active++;
			thread_unlock(self);

			/* Give anything else runnable a chance to preempt us. */
			if ((preempt = csw_check(myprocessor)) != AST_NONE)
				ast_on(preempt);
		}
	}
	splx(s);
}
2d21ac55
A
445
446
447void
448thread_yield_internal(
449 mach_msg_timeout_t ms)
450{
451 processor_t myprocessor;
452
453 disable_preemption();
454 myprocessor = current_processor();
455 if (myprocessor->runq.count == 0 && rt_runq.count == 0) {
456 mp_enable_preemption();
457
458 return;
459 }
460 enable_preemption();
461
462 thread_depress_ms(ms);
463
464 thread_block_reason(THREAD_CONTINUE_NULL, NULL, AST_YIELD);
465
466 thread_depress_abort_internal(current_thread());
467}
468