/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    kern/machine.c
 * Author:  Avadis Tevanian, Jr.
 * Date:    1987
 *
 * Support for machine independent machine abstraction.
 */

#include <cpus.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/host_info.h>
#include <mach/host_reboot.h>
#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/ipc_host.h>
#include <kern/host.h>
#include <kern/lock.h>
#include <kern/machine.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/misc_protos.h>

#include <kern/mk_sp.h>

/*
 * Exported variables:
 */

struct machine_info         machine_info;
struct machine_slot         machine_slot[NCPUS];

static queue_head_t         processor_action_queue;
static boolean_t            processor_action_active;
static thread_call_t        processor_action_call;
static thread_call_data_t   processor_action_call_data;
decl_simple_lock_data(static, processor_action_lock)

thread_t                    machine_wake_thread;

/* Forwards */
processor_set_t     processor_request_action(
                        processor_t         processor,
                        processor_set_t     new_pset);

void                processor_doaction(
                        processor_t         processor);

void                processor_doshutdown(
                        processor_t         processor);

/*
 * cpu_up:
 *
 * Flag specified cpu as up and running.  Called when a processor comes
 * online.
 */
void
cpu_up(
    int                 cpu)
{
    processor_t         processor = cpu_to_processor(cpu);
    processor_set_t     pset = &default_pset;
    struct machine_slot *ms;
    spl_t               s;

    /*
     * Just twiddle our thumbs; we've got nothing better to do
     * yet, anyway.
     */
    while (!simple_lock_try(&pset->processors_lock))
        continue;

    s = splsched();
    processor_lock(processor);
    init_ast_check(processor);
    ms = &machine_slot[cpu];
    ms->running = TRUE;
    machine_info.avail_cpus++;
    pset_add_processor(pset, processor);
    simple_lock(&pset->sched_lock);
    enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
    processor->state = PROCESSOR_RUNNING;
    simple_unlock(&pset->sched_lock);
    processor_unlock(processor);
    splx(s);

    simple_unlock(&pset->processors_lock);
}
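
/*
 * cpu_up() always brings a processor online into default_pset, marks it
 * PROCESSOR_RUNNING and places it on the set's active queue; the teardown
 * path is split between processor_doaction()/processor_doshutdown() and
 * cpu_down() below.
 */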

/*
 * cpu_down:
 *
 * Flag specified cpu as down.  Called when a processor is about to
 * go offline.
 */
void
cpu_down(
    int                 cpu)
{
    processor_t         processor;
    struct machine_slot *ms;
    spl_t               s;

    processor = cpu_to_processor(cpu);

    s = splsched();
    processor_lock(processor);
    ms = &machine_slot[cpu];
    ms->running = FALSE;
    machine_info.avail_cpus--;
    /*
     * processor has already been removed from pset.
     */
    processor->processor_set_next = PROCESSOR_SET_NULL;
    processor->state = PROCESSOR_OFF_LINE;
    processor_unlock(processor);
    splx(s);
}
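
/*
 * cpu_down() is reached from processor_doshutdown() below, after
 * processor_doaction() has already called pset_remove_processor(); it only
 * updates the per-slot bookkeeping and marks the processor PROCESSOR_OFF_LINE.
 */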

kern_return_t
host_reboot(
    host_priv_t         host_priv,
    int                 options)
{
    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    assert(host_priv == &realhost);

    if (options & HOST_REBOOT_DEBUGGER) {
        Debugger("Debugger");
    }
    else
        halt_all_cpus(!(options & HOST_REBOOT_HALT));

    return (KERN_SUCCESS);
}
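
/*
 * Option semantics, as implemented above: HOST_REBOOT_DEBUGGER drops into the
 * kernel debugger; otherwise halt_all_cpus() is invoked with reboot enabled
 * unless HOST_REBOOT_HALT is set.  A minimal illustrative caller (hypothetical,
 * not part of this file; assumes host_priv is a valid host privilege port):
 */
#if 0
    kern_return_t kr;

    kr = host_reboot(host_priv, HOST_REBOOT_DEBUGGER);     /* enter debugger */
    kr = host_reboot(host_priv, HOST_REBOOT_HALT);         /* halt, do not reboot */
    kr = host_reboot(host_priv, 0);                        /* ordinary reboot */
#endif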

/*
 * processor_request_action:
 *
 * Common internals of processor_assign and processor_shutdown.
 * If new_pset is null, this is a shutdown, else it's an assign
 * and caller must donate a reference.
 * For assign operations, it returns an old pset that must be deallocated
 * if it's not NULL.
 * For shutdown operations, it always returns PROCESSOR_SET_NULL.
 */
processor_set_t
processor_request_action(
    processor_t         processor,
    processor_set_t     new_pset)
{
    processor_set_t     pset, old_pset;

    /*
     * Processor must be in a processor set.  Take the set's scheduling
     * lock to examine and change the processor state.
     */
    pset = processor->processor_set;
    simple_lock(&pset->sched_lock);

    /*
     * If the processor is dispatching, let it finish - it will set its
     * state to running very soon.
     */
    while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) {
        simple_unlock(&pset->sched_lock);

        simple_lock(&pset->sched_lock);
    }

    assert( processor->state == PROCESSOR_IDLE      ||
            processor->state == PROCESSOR_RUNNING   ||
            processor->state == PROCESSOR_ASSIGN    );

    /*
     * Now lock the action queue and do the dirty work.
     */
    simple_lock(&processor_action_lock);

    if (processor->state == PROCESSOR_IDLE) {
        remqueue(&pset->idle_queue, (queue_entry_t)processor);
        pset->idle_count--;
    }
    else
    if (processor->state == PROCESSOR_RUNNING)
        remqueue(&pset->active_queue, (queue_entry_t)processor);

    if (processor->state != PROCESSOR_ASSIGN)
        enqueue_tail(&processor_action_queue, (queue_entry_t)processor);

    /*
     * And hand the work off to the action thread call.
     */
    if (new_pset != PROCESSOR_SET_NULL) {
        processor->state = PROCESSOR_ASSIGN;
        old_pset = processor->processor_set_next;
        processor->processor_set_next = new_pset;
    }
    else {
        processor->state = PROCESSOR_SHUTDOWN;
        old_pset = PROCESSOR_SET_NULL;
    }

    simple_unlock(&pset->sched_lock);

    if (processor_action_active) {
        simple_unlock(&processor_action_lock);

        return (old_pset);
    }

    processor_action_active = TRUE;
    simple_unlock(&processor_action_lock);

    processor_unlock(processor);

    thread_call_enter(processor_action_call);
    processor_lock(processor);

    return (old_pset);
}
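
/*
 * Restating the caller contract: for an assign the caller donates a reference
 * on new_pset and must deallocate any non-NULL pset returned; for a shutdown
 * the return value is always PROCESSOR_SET_NULL.  A sketch of the assign-side
 * pattern (hypothetical here, since processor_assign() below is unsupported):
 */
#if 0
    old_pset = processor_request_action(processor, new_pset);
    if (old_pset != PROCESSOR_SET_NULL)
        pset_deallocate(old_pset);      /* release the displaced reference */
#endif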

kern_return_t
processor_assign(
    processor_t         processor,
    processor_set_t     new_pset,
    boolean_t           wait)
{
#ifdef  lint
    processor++; new_pset++; wait++;
#endif  /* lint */
    return (KERN_FAILURE);
}
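
/*
 * Processor reassignment between processor sets is not supported by this
 * kernel: the routine fails unconditionally, and the lint block above exists
 * only to silence unused-argument warnings.
 */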

/*
 * processor_shutdown() queues a processor up for shutdown.
 * Any assignment in progress is overridden.
 */
kern_return_t
processor_shutdown(
    processor_t         processor)
{
    spl_t               s;

    s = splsched();
    processor_lock(processor);
    if (    processor->state == PROCESSOR_OFF_LINE     ||
            processor->state == PROCESSOR_SHUTDOWN     ) {
        /*
         * Already shutdown or being shutdown -- nothing to do.
         */
        processor_unlock(processor);
        splx(s);

        return (KERN_SUCCESS);
    }

    processor_request_action(processor, PROCESSOR_SET_NULL);

    assert_wait((event_t)processor, THREAD_UNINT);

    processor_unlock(processor);
    splx(s);

    thread_block(THREAD_CONTINUE_NULL);

    return (KERN_SUCCESS);
}
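
/*
 * processor_shutdown() is the shutdown-side caller of
 * processor_request_action(): it queues the request, then sleeps
 * uninterruptibly on the processor event until processor_doaction() issues
 * the matching thread_wakeup() once the processor is offline.
 */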

/*
 * _processor_action() shuts down processors or changes their assignment;
 * it runs as the thread call established by processor_action() below.
 */
static void
_processor_action(
    thread_call_param_t     p0,
    thread_call_param_t     p1)
{
    register processor_t    processor;
    spl_t                   s;

    s = splsched();
    simple_lock(&processor_action_lock);

    while (!queue_empty(&processor_action_queue)) {
        processor = (processor_t)dequeue_head(&processor_action_queue);
        simple_unlock(&processor_action_lock);
        splx(s);

        processor_doaction(processor);

        s = splsched();
        simple_lock(&processor_action_lock);
    }

    processor_action_active = FALSE;
    simple_unlock(&processor_action_lock);
    splx(s);
}

void
processor_action(void)
{
    queue_init(&processor_action_queue);
    simple_lock_init(&processor_action_lock, ETAP_THREAD_ACTION);
    processor_action_active = FALSE;

    thread_call_setup(&processor_action_call_data, _processor_action, NULL);
    processor_action_call = &processor_action_call_data;
}
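
/*
 * processor_action() is the one-time set-up for this machinery (presumably
 * run during bootstrap): it initializes the action queue and lock and binds
 * the thread call to _processor_action(), which drains the queue by calling
 * processor_doaction() on each queued processor.
 */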

/*
 * processor_doaction actually does the shutdown.  The trick here
 * is to schedule ourselves onto a cpu and then save our
 * context back into the runqs before taking out the cpu.
 */
void
processor_doaction(
    processor_t         processor)
{
    thread_t            self = current_thread();
    processor_set_t     pset;
    thread_t            old_thread;
    spl_t               s;

    /*
     * Get onto the processor to shutdown
     */
    thread_bind(self, processor);
    thread_block(THREAD_CONTINUE_NULL);

    pset = processor->processor_set;
    simple_lock(&pset->processors_lock);

    if (pset->processor_count == 1) {
        thread_t        thread;
        extern void     start_cpu_thread(void);

        simple_unlock(&pset->processors_lock);

        /*
         * Create the thread, and point it at the routine.
         */
        thread = kernel_thread_with_priority(
                        kernel_task, MAXPRI_KERNEL,
                        start_cpu_thread, TRUE, FALSE);

        disable_preemption();

        s = splsched();
        thread_lock(thread);
        machine_wake_thread = thread;
        thread_go_locked(thread, THREAD_AWAKENED);
        (void)rem_runq(thread);
        thread_unlock(thread);
        splx(s);

        simple_lock(&pset->processors_lock);
        enable_preemption();
    }

    s = splsched();
    processor_lock(processor);

    /*
     * Do shutdown, make sure we live when processor dies.
     */
    if (processor->state != PROCESSOR_SHUTDOWN) {
        panic("action_thread -- bad processor state");
    }

    pset_remove_processor(pset, processor);
    processor_unlock(processor);
    simple_unlock(&pset->processors_lock);

    /*
     * Clean up.
     */
    thread_bind(self, PROCESSOR_NULL);
    self->continuation = 0;
    old_thread = switch_to_shutdown_context(self,
                        processor_doshutdown, processor);
    if (processor != current_processor())
        timer_call_shutdown(processor);
    thread_dispatch(old_thread);
    thread_wakeup((event_t)processor);
    splx(s);
}
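
/*
 * Sequence above, in brief: bind the current thread to the target processor
 * so the work runs there, arrange a wake thread if this is the last processor
 * in its set, remove the processor from the set, switch onto the shutdown
 * context, and finally wake anyone blocked in processor_shutdown().
 */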

/*
 * Actually do the processor shutdown.  This is called at splsched,
 * running on the processor's shutdown stack.
 */

void
processor_doshutdown(
    processor_t         processor)
{
    register int        cpu = processor->slot_num;

    timer_call_cancel(&processor->quantum_timer);
    thread_dispatch(current_thread());
    timer_switch(&kernel_timer[cpu]);

    /*
     * OK, now exit this cpu.
     */
    PMAP_DEACTIVATE_KERNEL(cpu);
    thread_machine_set_current(processor->idle_thread);
    cpu_down(cpu);
    cpu_sleep();
    panic("zombie processor");
    /*NOTREACHED*/
}
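
/*
 * cpu_sleep() is expected never to return; the trailing panic() fires only if
 * the processor somehow resumes execution on its shutdown stack.
 */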

kern_return_t
host_get_boot_info(
    host_priv_t             host_priv,
    kernel_boot_info_t      boot_info)
{
    char                    *src = "";
    extern char             *machine_boot_info(
                                kernel_boot_info_t  boot_info,
                                vm_size_t           buf_len);

    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    assert(host_priv == &realhost);

    /*
     * Copy first operator string terminated by '\0' followed by
     * standardized strings generated from boot string.
     */
    src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
    if (src != boot_info)
        (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);

    return (KERN_SUCCESS);
}
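
/*
 * Note: strncpy() does not guarantee NUL termination if the string returned
 * by machine_boot_info() completely fills KERNEL_BOOT_INFO_MAX bytes, so
 * callers should be prepared for an unterminated buffer in that edge case.
 */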