/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <mach/boolean.h>
#include <mach/thread_switch.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <kern/counters.h>
#include <kern/ipc_kobject.h>
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <mach/policy.h>

#include <kern/syscall_subr.h>
#include <mach/mach_host_server.h>
#include <mach/mach_syscalls.h>

/*
 * swtch and swtch_pri both attempt to context switch (logic in
 * thread_block no-ops the context switch if nothing would happen).
 * A boolean is returned that indicates whether there is anything
 * else runnable.
 *
 * This boolean can be used by a thread waiting on a
 * lock or condition:  If FALSE is returned, the thread is justified
 * in becoming a resource hog by continuing to spin because there's
 * nothing else useful that the processor could do.  If TRUE is
 * returned, the thread should make one more check on the
 * lock and then be a good citizen and really suspend.
 */

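/*
 * For illustration, a user-level spin/wait loop of the kind described
 * above might use the swtch() trap like this (a sketch only;
 * my_try_lock() and my_lock_wait() are hypothetical placeholders, not
 * kernel or libc interfaces):
 *
 *	while (!my_try_lock(&l)) {
 *		if (!swtch())
 *			continue;	-- FALSE: nothing else runnable, spin on
 *		if (my_try_lock(&l))
 *			break;		-- TRUE: one last check succeeded
 *		my_lock_wait(&l);	-- otherwise really suspend
 *	}
 */
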
static void
swtch_continue(void)
{
	register processor_t	myprocessor;
	boolean_t		result;

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 ||
			myprocessor->processor_set->runq.count > 0;
	enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}

boolean_t
swtch(
	__unused struct swtch_args *args)
{
	register processor_t	myprocessor;
	boolean_t		result;

	disable_preemption();
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 &&
			myprocessor->processor_set->runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_block++);

	thread_block_reason((thread_continue_t)swtch_continue, NULL, AST_YIELD);

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 ||
			myprocessor->processor_set->runq.count > 0;
	enable_preemption();

	return (result);
}

static void
swtch_pri_continue(void)
{
	register processor_t	myprocessor;
	boolean_t		result;

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 ||
			myprocessor->processor_set->runq.count > 0;
	mp_enable_preemption();

	thread_syscall_return(result);
	/*NOTREACHED*/
}

boolean_t
swtch_pri(
	__unused struct swtch_pri_args *args)
{
	register processor_t	myprocessor;
	boolean_t		result;

	disable_preemption();
	myprocessor = current_processor();
	if (myprocessor->runq.count == 0 &&
			myprocessor->processor_set->runq.count == 0) {
		mp_enable_preemption();

		return (FALSE);
	}
	enable_preemption();

	counter(c_swtch_pri_block++);

	thread_depress_abstime(std_quantum);

	thread_block_reason((thread_continue_t)swtch_pri_continue, NULL, AST_YIELD);

	thread_depress_abort_internal(current_thread());

	disable_preemption();
	myprocessor = current_processor();
	result = myprocessor->runq.count > 0 ||
			myprocessor->processor_set->runq.count > 0;
	enable_preemption();

	return (result);
}
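
/*
 * From user space this trap is reached as boolean_t swtch_pri(int pri)
 * (declared in <mach/mach_traps.h>); an illustrative call is shown
 * below as a sketch.  Note that the pri argument is ignored by this
 * implementation: the caller's priority is depressed for one standard
 * quantum (std_quantum) regardless, and the depression is aborted when
 * the thread resumes.
 *
 *	if (!swtch_pri(0)) {
 *		-- nothing else was runnable; spinning on is justified
 *	}
 */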

static void
thread_switch_continue(void)
{
	register thread_t	self = current_thread();
	int			option = self->saved.swtch.option;

	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_abort_internal(self);

	thread_syscall_return(KERN_SUCCESS);
	/*NOTREACHED*/
}

/*
 * thread_switch:
 *
 * Context switch.  User may supply thread hint.
 */
kern_return_t
thread_switch(
	struct thread_switch_args *args)
{
	register thread_t	thread, self = current_thread();
	mach_port_name_t	thread_name = args->thread_name;
	int			option = args->option;
	mach_msg_timeout_t	option_time = args->option_time;

	/*
	 * Process option.
	 */
	switch (option) {

	case SWITCH_OPTION_NONE:
	case SWITCH_OPTION_DEPRESS:
	case SWITCH_OPTION_WAIT:
		break;

	default:
		return (KERN_INVALID_ARGUMENT);
	}

	/*
	 * Translate the port name if supplied.
	 */
	if (thread_name != MACH_PORT_NULL) {
		ipc_port_t	port;

		if (ipc_port_translate_send(self->task->itk_space,
					thread_name, &port) == KERN_SUCCESS) {
			ip_reference(port);
			ip_unlock(port);

			thread = convert_port_to_thread(port);
			ipc_port_release(port);

			if (thread == self) {
				thread_deallocate_internal(thread);
				thread = THREAD_NULL;
			}
		}
		else
			thread = THREAD_NULL;
	}
	else
		thread = THREAD_NULL;

	/*
	 * Try to hand off if a thread was supplied.
	 */
	if (thread != THREAD_NULL) {
		processor_t	processor;
		spl_t		s;

		s = splsched();
		thread_lock(thread);

		/*
		 * Check that the thread is in the right pset,
		 * is not bound to a different processor,
		 * and that realtime is not involved.
		 *
		 * Next, pull it off its run queue.  If it
		 * doesn't come, it's not eligible.
		 */
		processor = current_processor();
		if (processor->current_pri < BASEPRI_RTQUEUES &&
			thread->sched_pri < BASEPRI_RTQUEUES &&
			thread->processor_set == processor->processor_set &&
			(thread->bound_processor == PROCESSOR_NULL ||
			 thread->bound_processor == processor) &&
			run_queue_remove(thread) != RUN_QUEUE_NULL) {
			/*
			 * Hah, got it!!
			 */
			thread_unlock(thread);

			thread_deallocate_internal(thread);

			if (option == SWITCH_OPTION_WAIT)
				assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
								option_time, 1000*NSEC_PER_USEC);
			else
			if (option == SWITCH_OPTION_DEPRESS)
				thread_depress_ms(option_time);

			self->saved.swtch.option = option;

			thread_run(self, (thread_continue_t)thread_switch_continue, NULL, thread);
			/* NOTREACHED */
		}

		thread_unlock(thread);
		splx(s);

		thread_deallocate(thread);
	}

	if (option == SWITCH_OPTION_WAIT)
		assert_wait_timeout((event_t)assert_wait_timeout, THREAD_ABORTSAFE,
						option_time, 1000*NSEC_PER_USEC);
	else
	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_ms(option_time);

	self->saved.swtch.option = option;

	thread_block_reason((thread_continue_t)thread_switch_continue, NULL, AST_YIELD);

	if (option == SWITCH_OPTION_DEPRESS)
		thread_depress_abort_internal(self);

	return (KERN_SUCCESS);
}
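
/*
 * Illustrative user-space use of this trap (a sketch; holder is
 * assumed to be a send right naming the thread believed to hold a
 * contended lock):
 *
 *	#include <mach/mach_traps.h>
 *	#include <mach/thread_switch.h>
 *
 *	kern_return_t kr;
 *
 *	kr = thread_switch(holder, SWITCH_OPTION_DEPRESS, 10);
 *
 * This attempts a direct handoff to holder; whether or not the handoff
 * succeeds, the caller's priority is depressed for up to 10 ms, and
 * the depression is aborted when the caller runs again.
 */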

/*
 * Depress thread's priority to lowest possible for the specified interval,
 * with a value of zero resulting in no timeout being scheduled.
 */
void
thread_depress_abstime(
	uint64_t		interval)
{
	register thread_t	self = current_thread();
	uint64_t		deadline;
	spl_t			s;

	s = splsched();
	thread_lock(self);
	if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
		processor_t	myprocessor = self->last_processor;

		self->sched_pri = DEPRESSPRI;
		myprocessor->current_pri = self->sched_pri;
		self->sched_mode &= ~TH_MODE_PREEMPT;
		self->sched_mode |= TH_MODE_DEPRESS;

		if (interval != 0) {
			clock_absolutetime_interval_to_deadline(interval, &deadline);
			if (!timer_call_enter(&self->depress_timer, deadline))
				self->depress_timer_active++;
		}
	}
	thread_unlock(self);
	splx(s);
}

void
thread_depress_ms(
	mach_msg_timeout_t	interval)
{
	uint64_t	abstime;

	clock_interval_to_absolutetime_interval(
			interval, 1000*NSEC_PER_USEC, &abstime);
	thread_depress_abstime(abstime);
}
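
/*
 * Worked example (a sketch, assuming for concreteness a machine whose
 * absolute-time base is nanoseconds): thread_depress_ms(10) scales
 * 10 ms by 1000*NSEC_PER_USEC (one millisecond in nanoseconds),
 * yielding 10,000,000 absolute-time units, and passes that to
 * thread_depress_abstime(), which arms depress_timer that far in the
 * future.
 */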

/*
 * Priority depression expiration.
 */
void
thread_depress_expire(
	void		*p0,
	__unused void	*p1)
{
	thread_t	thread = p0;
	spl_t		s;

	s = splsched();
	thread_lock(thread);
	if (--thread->depress_timer_active == 0) {
		thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
		compute_priority(thread, FALSE);
	}
	thread_unlock(thread);
	splx(s);
}

/*
 * Prematurely abort priority depression if there is one.
 */
kern_return_t
thread_depress_abort_internal(
	thread_t	thread)
{
	kern_return_t	result = KERN_NOT_DEPRESSED;
	spl_t		s;

	s = splsched();
	thread_lock(thread);
	if (!(thread->sched_mode & TH_MODE_POLLDEPRESS)) {
		if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
			thread->sched_mode &= ~TH_MODE_ISDEPRESSED;
			compute_priority(thread, FALSE);
			result = KERN_SUCCESS;
		}

		if (timer_call_cancel(&thread->depress_timer))
			thread->depress_timer_active--;
	}
	thread_unlock(thread);
	splx(s);

	return (result);
}

void
thread_poll_yield(
	thread_t	self)
{
	spl_t		s;

	assert(self == current_thread());

	s = splsched();
	if (!(self->sched_mode & (TH_MODE_REALTIME|TH_MODE_TIMESHARE))) {
		uint64_t	total_computation, abstime;

		abstime = mach_absolute_time();
		total_computation = abstime - self->computation_epoch;
		total_computation += self->computation_metered;
		if (total_computation >= max_poll_computation) {
			processor_t	myprocessor = current_processor();
			ast_t		preempt;

			thread_lock(self);
			if (!(self->sched_mode & TH_MODE_ISDEPRESSED)) {
				self->sched_pri = DEPRESSPRI;
				myprocessor->current_pri = self->sched_pri;
				self->sched_mode &= ~TH_MODE_PREEMPT;
			}
			self->computation_epoch = abstime;
			self->computation_metered = 0;
			self->sched_mode |= TH_MODE_POLLDEPRESS;

			abstime += (total_computation >> sched_poll_yield_shift);
			if (!timer_call_enter(&self->depress_timer, abstime))
				self->depress_timer_active++;
			thread_unlock(self);

			if ((preempt = csw_check(self, myprocessor)) != AST_NONE)
				ast_on(preempt);
		}
	}
	splx(s);
}
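
/*
 * Back-off arithmetic, worked through (assuming, for illustration,
 * that sched_poll_yield_shift has its default value of 4): a polling
 * thread whose total_computation has reached 16 ms of absolute time is
 * depressed for 16 ms >> 4 = 1 ms before depress_timer fires and
 * compute_priority() restores it, so the poll depression grows in
 * proportion to the length of the polling burst.
 */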