/* apple/xnu: osfmk/kern/syscall_subr.c */
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/counters.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>

#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>

/*
 * swtch and swtch_pri both attempt to context switch (logic in
 * thread_block no-ops the context switch if nothing would happen).
 * A boolean is returned that indicates whether there is anything
 * else runnable.
 *
 * This boolean can be used by a thread waiting on a
 * lock or condition:  If FALSE is returned, the thread is justified
 * in becoming a resource hog by continuing to spin because there's
 * nothing else useful that the processor could do.  If TRUE is
 * returned, the thread should make one more check on the
 * lock and then be a good citizen and really suspend.
 */

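/*
 * Illustrative sketch (not part of the original source): user-level code
 * waiting on a lock might consume the boolean described above roughly as
 * below.  try_lock() and really_suspend() are hypothetical helpers;
 * swtch_pri() is the Mach trap serviced by swtch_pri() in this file (its
 * pri argument is ignored by this implementation).
 *
 *	while (!try_lock(&lock)) {
 *		if (swtch_pri(0)) {
 *			// TRUE: something else is runnable -- make one
 *			// more check, then really block.
 *			if (try_lock(&lock))
 *				break;
 *			really_suspend(&lock);
 *			break;
 *		}
 *		// FALSE: nothing else to run, spinning is justified.
 *	}
 */
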
static void
swtch_continue(void)
{
	register processor_t myprocessor;
	boolean_t result;

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 ||
			myprocessor->processor_set->runq.count > 0;
	enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}

boolean_t
swtch(
	__unused struct swtch_args *args)
{
	register processor_t myprocessor;
	boolean_t result;

	disable_preemption();
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 &&
	    myprocessor->processor_set->runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_block++);

	thread_block_reason((thread_continue_t)swtch_continue, NULL, AST_YIELD);

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 ||
			myprocessor->processor_set->runq.count > 0;
	enable_preemption();

	return (result);
}

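/*
 * Continuation for swtch_pri():  runs when the thread resumes after
 * thread_block_reason() has discarded its kernel stack.  It aborts the
 * priority depression set up by swtch_pri(), recomputes whether anything
 * else is runnable, and returns that result directly to user space via
 * thread_syscall_return().
 */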
static void
swtch_pri_continue(void)
{
	register processor_t myprocessor;
	boolean_t result;

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 ||
			myprocessor->processor_set->runq.count > 0;
	mp_enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}

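/*
 * swtch_pri:  like swtch(), but the caller's priority is first depressed
 * to DEPRESSPRI for one standard quantum (std_quantum) so that other
 * runnable threads get a chance to execute; the depression is aborted
 * when the caller resumes.
 */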
boolean_t
swtch_pri(
	__unused struct swtch_pri_args *args)
{
	register processor_t myprocessor;
	boolean_t result;

	disable_preemption();
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 &&
	    myprocessor->processor_set->runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(std_quantum);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 ||
			myprocessor->processor_set->runq.count > 0;
	enable_preemption();

	return (result);
}

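/*
 * Continuation for thread_switch():  if the trap was invoked with
 * SWITCH_OPTION_DEPRESS, the depression is aborted when the caller
 * resumes; the trap then returns KERN_SUCCESS to user space.
 */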
static void
thread_switch_continue(void)
{
	register thread_t self = current_thread();
	int option = self->saved.swtch.option;

	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_abort_internal(self);

	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * thread_switch:
 *
 * Context switch.  User may supply thread hint.
 */
kern_return_t
thread_switch(
	struct thread_switch_args *args)
{
	register thread_t thread, self = current_thread();
	mach_port_name_t thread_name = args->thread_name;
	int option = args->option;
	mach_msg_timeout_t option_time = args->option_time;

	/*
	 * Process option.
	 */
	switch (option) {

	case SWITCH_OPTION_NONE:
	case SWITCH_OPTION_DEPRESS:
	case SWITCH_OPTION_WAIT:
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Translate the port name if supplied.
	 */
	if (thread_name != MACH_PORT_NULL) {
		ipc_port_t port;

		if (ipc_port_translate_send(self->task->itk_space,
				thread_name, &port) == KERN_SUCCESS) {
			ip_reference(port);
			ip_unlock(port);

			thread = convert_port_to_thread(port);
			ipc_port_release(port);

			if (thread == self) {
				thread_deallocate_internal(thread);
				thread = THREAD_NULL;
			}
		}
		else
			thread = THREAD_NULL;
	}
	else
		thread = THREAD_NULL;

	/*
	 * Try to handoff if supplied.
	 */
	if (thread != THREAD_NULL) {
		processor_t processor;
		spl_t s;

		s = splsched();
		thread_lock(thread);

		/*
		 * Check if the thread is in the right pset,
		 * is not bound to a different processor,
		 * and that realtime is not involved.
		 *
		 * Next, pull it off its run queue.  If it
		 * doesn't come, it's not eligible.
		 */
		processor = current_processor();
		if (processor->current_pri < BASEPRI_RTQUEUES &&
		    thread->sched_pri < BASEPRI_RTQUEUES &&
		    thread->processor_set == processor->processor_set &&
		    (thread->bound_processor == PROCESSOR_NULL ||
		     thread->bound_processor == processor) &&
		    run_queue_remove(thread) != RUN_QUEUE_NULL) {
			/*
			 * Hah, got it!!
			 */
			thread_unlock(thread);

			thread_deallocate_internal(thread);

			if (option == SWITCH_OPTION_WAIT)
				assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
						option_time, 1000*NSEC_PER_USEC);
			else
			if (option == SWITCH_OPTION_DEPRESS)
				thread_depress_ms(option_time);

			self->saved.swtch.option = option;

			thread_run(self, (thread_continue_t)thread_switch_continue, NULL, thread);
			/* NOTREACHED */
		}

		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (option == SWITCH_OPTION_WAIT)
		assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
				option_time, 1000*NSEC_PER_USEC);
	else
	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_ms(option_time);

	self->saved.swtch.option = option;

	thread_block_reason((thread_continue_t)thread_switch_continue, NULL, AST_YIELD);

	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_abort_internal(self);

	return (KERN_SUCCESS);
}

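/*
 * Illustrative sketch (not part of the original source): from user space,
 * the trap handled above might be used to hand off to a specific thread
 * and simultaneously depress the caller for 10 ms.  The peer port name is
 * hypothetical; the SWITCH_OPTION_* constants come from
 * <mach/thread_switch.h>, and option_time is in milliseconds.
 *
 *	kern_return_t kr;
 *	mach_port_t peer;	// send right for the target thread, obtained elsewhere
 *
 *	kr = thread_switch(peer, SWITCH_OPTION_DEPRESS, 10);
 *
 *	// Or, with no hint, just yield and wait up to 10 ms:
 *	kr = thread_switch(MACH_PORT_NULL, SWITCH_OPTION_WAIT, 10);
 */
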
/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
thread_depress_abstime(
	uint64_t interval)
{
	register thread_t self = current_thread();
	uint64_t deadline;
	spl_t s;

	s = splsched();
	thread_lock(self);
	if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
		processor_t myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		myprocessor->current_pri = self->sched_pri;
		self->sched_mode &= ~TH_MODE_PREEMPT;
		self->sched_mode |= TH_MODE_DEPRESS;

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}

void
thread_depress_ms(
	mach_msg_timeout_t interval)
{
	uint64_t abstime;

	clock_interval_to_absolutetime_interval(
			interval, 1000*NSEC_PER_USEC, &abstime);
	thread_depress_abstime(abstime);
}

/*
 * Priority depression expiration.
 */
void
thread_depress_expire(
	void *p0,
	__unused void *p1)
{
	thread_t thread = p0;
	spl_t s;

	s = splsched();
	thread_lock(thread);
	if (--thread->depress_timer_active == 0) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
		compute_priority(thread, FALSE);
	}
	thread_unlock(thread);
	splx(s);
}

/*
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_internal(
	thread_t thread)
{
	kern_return_t result = KERN_NOT_DEPRESSED;
	spl_t s;

	s = splsched();
	thread_lock(thread);
	if (!(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
			compute_priority(thread, FALSE);
			result = KERN_SUCCESS;
		}

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	thread_unlock(thread);
	splx(s);

	return (result);
}

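/*
 * thread_poll_yield:
 *
 * Invoked on the current thread by callers that detect polling behavior.
 * For threads that are neither timeshare nor realtime, if the computation
 * accumulated since the last epoch exceeds max_poll_computation, the
 * priority is depressed to DEPRESSPRI and the thread is marked
 * TH_MODE_POLLDEPRESS for a period proportional to the computation
 * consumed (scaled by sched_poll_yield_shift); a context-switch check
 * then posts any indicated AST.
 */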
void
thread_poll_yield(
	thread_t self)
{
	spl_t s;

	assert(self == current_thread());

	s = splsched();
	if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
		uint64_t total_computation, abstime;

		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t myprocessor = current_processor();
			ast_t preempt;

			thread_lock(self);
			if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
				self->sched_mode &= ~TH_MODE_PREEMPT;
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_mode |= TH_MODE_POLLDEPRESS;

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime))
				self->depress_timer_active++;
			thread_unlock(self);

			if ((preempt = csw_check(self, myprocessor)) != AST_NONE)
				ast_on(preempt);
		}
	}
	splx(s);
}