/*
 * osfmk/kern/ast.c — Apple XNU release xnu-124.13
 * (recovered from a git-blame view of the apple/xnu mirror at git.saurik.com)
 */
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22/*
23 * @OSF_COPYRIGHT@
24 */
25/*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50/*
51 */
52
53/*
54 *
55 * This file contains routines to check whether an ast is needed.
56 *
57 * ast_check() - check whether ast is needed for interrupt or context
58 * switch. Usually called by clock interrupt handler.
59 *
60 */
61
62#include <cputypes.h>
63#include <cpus.h>
64#include <platforms.h>
65#include <task_swapper.h>
66
67#include <kern/ast.h>
68#include <kern/counters.h>
69#include <kern/cpu_number.h>
70#include <kern/misc_protos.h>
71#include <kern/queue.h>
72#include <kern/sched.h>
73#include <kern/sched_prim.h>
74#include <kern/thread.h>
75#include <kern/thread_act.h>
76#include <kern/thread_swap.h>
77#include <kern/processor.h>
78#include <kern/spl.h>
79#include <mach/policy.h>
80#if TASK_SWAPPER
81#include <kern/task_swap.h>
82#endif /* TASK_SWAPPER */
83
84volatile ast_t need_ast[NCPUS];
85
86void
87ast_init(void)
88{
89#ifndef MACHINE_AST
90 register int i;
91
92 for (i=0; i<NCPUS; i++) {
93 need_ast[i] = AST_NONE;
94 }
95#endif /* MACHINE_AST */
96}
97
98void
99ast_taken(
100 boolean_t preemption,
101 ast_t mask,
102 boolean_t interrupt
103)
104{
105 register thread_t self = current_thread();
106 register processor_t mypr;
107 register ast_t reasons;
108 register int mycpu;
109 thread_act_t act = self->top_act;
110#ifdef MACH_BSD
111 extern void bsd_ast(thread_act_t);
112 extern void bsdinit_task(void);
113#endif
114
115 mp_disable_preemption();
116 mycpu = cpu_number();
117 reasons = need_ast[mycpu] & mask;
118 need_ast[mycpu] &= ~reasons;
119 mp_enable_preemption();
120
121 ml_set_interrupts_enabled(interrupt);
122
123 /*
124 * No ast for an idle thread
125 */
126 if (self->state & TH_IDLE)
127 return;
128
129 /*
130 * Check for preemption
131 */
132 if ((reasons & AST_URGENT) && (wait_queue_assert_possible(self))) {
133 reasons &= ~AST_URGENT;
134 if ((reasons & (AST_BLOCK|AST_QUANTUM)) == 0) {
135 mp_disable_preemption();
136 mypr = current_processor();
137 if (csw_needed(self, mypr)) {
138 reasons |= (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
139 }
140 mp_enable_preemption();
141 }
142 if (reasons & (AST_BLOCK | AST_QUANTUM)) {
143 counter(c_ast_taken_block++);
144 thread_block_reason((void (*)(void))0,
145 (reasons & (AST_BLOCK | AST_QUANTUM)));
146 }
147 if (reasons == 0)
148 return;
149 }
150
151#ifdef MACH_BSD
152 /*
153 * Check for BSD hardcoded hooks
154 */
155 if (reasons & AST_BSD) {
156 thread_ast_clear(act,AST_BSD);
157 bsd_ast(act);
158 }
159 if (reasons & AST_BSD_INIT) {
160 thread_ast_clear(act,AST_BSD_INIT);
161 bsdinit_task();
162 }
163#endif
164
165#if TASK_SWAPPER
166 /* must be before AST_APC */
167 if (reasons & AST_SWAPOUT) {
168 spl_t s;
169 swapout_ast();
170 s = splsched();
171 mp_disable_preemption();
172 mycpu = cpu_number();
173 if (need_ast[mycpu] & AST_APC) {
174 /* generated in swapout_ast() to get suspended */
175 reasons |= AST_APC; /* process now ... */
176 need_ast[mycpu] &= ~AST_APC; /* ... and not later */
177 }
178 mp_enable_preemption();
179 splx(s);
180 }
181#endif /* TASK_SWAPPER */
182
183 /*
184 * migration APC hook
185 */
186 if (reasons & AST_APC) {
187 act_execute_returnhandlers();
188 }
189
190 /*
191 * thread_block needs to know if the thread's quantum
192 * expired so the thread can be put on the tail of
193 * run queue. One of the previous actions might well
194 * have woken a high-priority thread, so we also use
195 * csw_needed check.
196 */
197 reasons &= (AST_BLOCK | AST_QUANTUM);
198 if (reasons == 0) {
199 mp_disable_preemption();
200 mypr = current_processor();
201 if (csw_needed(self, mypr)) {
202 reasons = (mypr->first_quantum ? AST_BLOCK : AST_QUANTUM);
203 }
204 mp_enable_preemption();
205 }
206 if ((reasons & (AST_BLOCK | AST_QUANTUM)) &&
207 (wait_queue_assert_possible(self))) {
208 counter(c_ast_taken_block++);
209 /*
210 * JMM - SMP machines don't like blocking at a continuation
211 * here - why not? Could be a combination of set_state and
212 * suspension on the thread_create_running API?
213 *
214 * thread_block_reason(thread_exception_return, reasons);
215 */
216 thread_block_reason((void (*)(void))0, reasons);
217 }
218}
219
220void
221ast_check(void)
222{
223 register int mycpu;
224 register processor_t myprocessor;
225 register thread_t thread = current_thread();
226 spl_t s = splsched();
227
228 mp_disable_preemption();
229 mycpu = cpu_number();
230
231 /*
232 * Check processor state for ast conditions.
233 */
234 myprocessor = cpu_to_processor(mycpu);
235 switch(myprocessor->state) {
236 case PROCESSOR_OFF_LINE:
237 case PROCESSOR_IDLE:
238 case PROCESSOR_DISPATCHING:
239 /*
240 * No ast.
241 */
242 break;
243
244#if NCPUS > 1
245 case PROCESSOR_ASSIGN:
246 /*
247 * Need ast to force action thread onto processor.
248 *
249 * XXX Should check if action thread is already there.
250 */
251 ast_on(AST_BLOCK);
252 break;
253#endif /* NCPUS > 1 */
254
255 case PROCESSOR_RUNNING:
256 case PROCESSOR_SHUTDOWN:
257 /*
258 * Propagate thread ast to processor. If we already
259 * need an ast, don't look for more reasons.
260 */
261 ast_propagate(current_act()->ast);
262 if (ast_needed(mycpu))
263 break;
264
265 /*
266 * Context switch check.
267 */
268 if (csw_needed(thread, myprocessor)) {
269 ast_on((myprocessor->first_quantum ?
270 AST_BLOCK : AST_QUANTUM));
271 }
272 break;
273
274 default:
275 panic("ast_check: Bad processor state");
276 }
277 mp_enable_preemption();
278 splx(s);
279}
280
281/*
282 * JMM - Temporary exports to other components
283 */
284#undef ast_on
285#undef ast_off
286
287void
288ast_on(ast_t reason)
289{
290 boolean_t intr;
291
292 intr = ml_set_interrupts_enabled(FALSE);
293 ast_on_fast(reason);
294 (void *)ml_set_interrupts_enabled(intr);
295}
296
297void
298ast_off(ast_t reason)
299{
300 ast_off_fast(reason);
301}