apple/xnu (xnu-344.23): osfmk/kern/machine.c
/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    kern/machine.c
 * Author:  Avadis Tevanian, Jr.
 * Date:    1987
 *
 * Support for machine independent machine abstraction.
 */

#include <cpus.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/host_info.h>
#include <mach/host_reboot.h>
#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/ipc_host.h>
#include <kern/host.h>
#include <kern/lock.h>
#include <kern/machine.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/misc_protos.h>

#include <kern/mk_sp.h>

/*
 * Exported variables:
 */

struct machine_info     machine_info;
struct machine_slot     machine_slot[NCPUS];

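/*
 * Deferred processor shutdown/assignment state: processors queued here by
 * processor_request_action() are drained by a thread call that runs
 * _processor_action().
 */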
92 static queue_head_t processor_action_queue;
93 static boolean_t processor_action_active;
94 static thread_call_t processor_action_call;
95 static thread_call_data_t processor_action_call_data;
96 decl_simple_lock_data(static,processor_action_lock)
97
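/*
 * Thread stashed by processor_doaction() to be run when a processor is
 * next brought up.
 */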
thread_t                        machine_wake_thread;

/* Forwards */
processor_set_t processor_request_action(
                        processor_t             processor,
                        processor_set_t         new_pset);

void            processor_doaction(
                        processor_t             processor);

void            processor_doshutdown(
                        processor_t             processor);

/*
 * cpu_up:
 *
 * Flag specified cpu as up and running.  Called when a processor comes
 * online.
 */
void
cpu_up(
        int             cpu)
{
        processor_t             processor = cpu_to_processor(cpu);
        processor_set_t         pset = &default_pset;
        struct machine_slot     *ms;
        spl_t                   s;

        /*
         * Just twiddle our thumbs; we've got nothing better to do
         * yet, anyway.
         */
        while (!simple_lock_try(&pset->processors_lock))
                continue;

        s = splsched();
        processor_lock(processor);
        init_ast_check(processor);
        ms = &machine_slot[cpu];
        ms->running = TRUE;
        machine_info.avail_cpus++;
        pset_add_processor(pset, processor);
        simple_lock(&pset->sched_lock);
        enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
        processor->state = PROCESSOR_RUNNING;
        simple_unlock(&pset->sched_lock);
        processor_unlock(processor);
        splx(s);

        simple_unlock(&pset->processors_lock);
}

/*
 * cpu_down:
 *
 * Flag specified cpu as down.  Called when a processor is about to
 * go offline.
 */
void
cpu_down(
        int             cpu)
{
        processor_t             processor;
        struct machine_slot     *ms;
        spl_t                   s;

        processor = cpu_to_processor(cpu);

        s = splsched();
        processor_lock(processor);
        ms = &machine_slot[cpu];
        ms->running = FALSE;
        machine_info.avail_cpus--;
        /*
         * processor has already been removed from pset.
         */
        processor->processor_set_next = PROCESSOR_SET_NULL;
        processor->state = PROCESSOR_OFF_LINE;
        processor_unlock(processor);
        splx(s);
}

kern_return_t
host_reboot(
        host_priv_t             host_priv,
        int                     options)
{
        if (host_priv == HOST_PRIV_NULL)
                return (KERN_INVALID_HOST);

        assert(host_priv == &realhost);

        if (options & HOST_REBOOT_DEBUGGER) {
                Debugger("Debugger");
        }
        else
                halt_all_cpus(!(options & HOST_REBOOT_HALT));

        return (KERN_SUCCESS);
}
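
/*
 * Usage sketch (hypothetical caller, not part of this file): a privileged
 * client holding the host-priv port can request a halt rather than a
 * reboot by passing HOST_REBOOT_HALT, assuming host_priv_self() from
 * kern/host.h:
 *
 *      kern_return_t kr = host_reboot(host_priv_self(), HOST_REBOOT_HALT);
 */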

/*
 * processor_request_action:
 *
 * Common internals of processor_assign and processor_shutdown.
 * If new_pset is null, this is a shutdown, else it's an assign
 * and caller must donate a reference.
 * For assign operations, it returns an old pset that must be deallocated
 * if it's not NULL.
 * For shutdown operations, it always returns PROCESSOR_SET_NULL.
 */
processor_set_t
processor_request_action(
        processor_t             processor,
        processor_set_t         new_pset)
{
        processor_set_t         pset, old_pset;

        /*
         * Processor must be in a processor set.  Must take the pset's
         * sched lock to look at the processor state.
         */
        pset = processor->processor_set;
        simple_lock(&pset->sched_lock);

        /*
         * If the processor is dispatching, let it finish - it will set its
         * state to running very soon.
         */
        while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) {
                simple_unlock(&pset->sched_lock);

                simple_lock(&pset->sched_lock);
        }

        assert( processor->state == PROCESSOR_IDLE      ||
                processor->state == PROCESSOR_RUNNING   ||
                processor->state == PROCESSOR_ASSIGN    );

        /*
         * Now lock the action queue and do the dirty work.
         */
        simple_lock(&processor_action_lock);

        if (processor->state == PROCESSOR_IDLE) {
                remqueue(&pset->idle_queue, (queue_entry_t)processor);
                pset->idle_count--;
        }
        else
        if (processor->state == PROCESSOR_RUNNING)
                remqueue(&pset->active_queue, (queue_entry_t)processor);

        if (processor->state != PROCESSOR_ASSIGN)
                enqueue_tail(&processor_action_queue, (queue_entry_t)processor);

        /*
         * And ask the action thread call to do the work.
         */
        if (new_pset != PROCESSOR_SET_NULL) {
                processor->state = PROCESSOR_ASSIGN;
                old_pset = processor->processor_set_next;
                processor->processor_set_next = new_pset;
        }
        else {
                processor->state = PROCESSOR_SHUTDOWN;
                old_pset = PROCESSOR_SET_NULL;
        }

        simple_unlock(&pset->sched_lock);

        if (processor_action_active) {
                simple_unlock(&processor_action_lock);

                return (old_pset);
        }

        processor_action_active = TRUE;
        simple_unlock(&processor_action_lock);

        processor_unlock(processor);

        thread_call_enter(processor_action_call);
        processor_lock(processor);

        return (old_pset);
}

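/*
 * processor_assign() is not supported here: the stub below fails
 * unconditionally, and the lint block merely quiets unused-argument
 * warnings.
 */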
kern_return_t
processor_assign(
        processor_t             processor,
        processor_set_t         new_pset,
        boolean_t               wait)
{
#ifdef  lint
        processor++; new_pset++; wait++;
#endif  /* lint */
        return (KERN_FAILURE);
}

/*
 * processor_shutdown() queues a processor up for shutdown.
 * Any assignment in progress is overridden.
 */
kern_return_t
processor_shutdown(
        processor_t             processor)
{
        spl_t           s;

        s = splsched();
        processor_lock(processor);
        if (    processor->state == PROCESSOR_OFF_LINE  ||
                processor->state == PROCESSOR_SHUTDOWN  ) {
                /*
                 * Already shut down or being shut down -- nothing to do.
                 */
                processor_unlock(processor);
                splx(s);

                return (KERN_SUCCESS);
        }

        processor_request_action(processor, PROCESSOR_SET_NULL);

        assert_wait((event_t)processor, THREAD_UNINT);

        processor_unlock(processor);
        splx(s);

        thread_block(THREAD_CONTINUE_NULL);

        return (KERN_SUCCESS);
}

/*
 * _processor_action() shuts down processors or changes their assignment;
 * it runs as a thread call and drains the processor action queue.
 */
static void
_processor_action(
        thread_call_param_t     p0,
        thread_call_param_t     p1)
{
        register processor_t    processor;
        spl_t                   s;

        s = splsched();
        simple_lock(&processor_action_lock);

        while (!queue_empty(&processor_action_queue)) {
                processor = (processor_t)dequeue_head(&processor_action_queue);
                simple_unlock(&processor_action_lock);
                splx(s);

                processor_doaction(processor);

                s = splsched();
                simple_lock(&processor_action_lock);
        }

        processor_action_active = FALSE;
        simple_unlock(&processor_action_lock);
        splx(s);
}

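/*
 * processor_action() initializes the deferred-action machinery: the queue
 * of pending processors, its lock, and the thread call that runs
 * _processor_action().
 */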
void
processor_action(void)
{
        queue_init(&processor_action_queue);
        simple_lock_init(&processor_action_lock, ETAP_THREAD_ACTION);
        processor_action_active = FALSE;

        thread_call_setup(&processor_action_call_data, _processor_action, NULL);
        processor_action_call = &processor_action_call_data;
}

/*
 * processor_doaction actually does the shutdown.  The trick here
 * is to schedule ourselves onto a cpu and then save our
 * context back into the runqs before taking out the cpu.
 */
void
processor_doaction(
        processor_t             processor)
{
        thread_t                self = current_thread();
        processor_set_t         pset;
        thread_t                old_thread;
        spl_t                   s;

        /*
         * Get onto the processor to shut down.
         */
        thread_bind(self, processor);
        thread_block(THREAD_CONTINUE_NULL);

        pset = processor->processor_set;
        simple_lock(&pset->processors_lock);

        if (pset->processor_count == 1) {
                thread_t                thread;
                extern void             start_cpu_thread(void);

                simple_unlock(&pset->processors_lock);

                /*
                 * Create the thread, and point it at the routine.
                 */
                thread = kernel_thread_with_priority(
                                        kernel_task, MAXPRI_KERNEL,
                                        start_cpu_thread, TRUE, FALSE);

                disable_preemption();

                s = splsched();
                thread_lock(thread);
                machine_wake_thread = thread;
                thread_go_locked(thread, THREAD_AWAKENED);
                (void)rem_runq(thread);
                thread_unlock(thread);
                splx(s);

                simple_lock(&pset->processors_lock);
                enable_preemption();
        }

        s = splsched();
        processor_lock(processor);

        /*
         * Do the shutdown; make sure we live when the processor dies.
         */
        if (processor->state != PROCESSOR_SHUTDOWN) {
                panic("action_thread -- bad processor state");
        }

        pset_remove_processor(pset, processor);
        processor_unlock(processor);
        simple_unlock(&pset->processors_lock);

        /*
         * Clean up.
         */
        thread_bind(self, PROCESSOR_NULL);
        self->continuation = 0;
        old_thread = switch_to_shutdown_context(self,
                                        processor_doshutdown, processor);
        if (processor != current_processor())
                timer_call_shutdown(processor);
        thread_dispatch(old_thread);
        thread_wakeup((event_t)processor);
        splx(s);
}

/*
 * Actually do the processor shutdown.  This is called at splsched,
 * running on the processor's shutdown stack.
 */

void
processor_doshutdown(
        processor_t             processor)
{
        register int            cpu = processor->slot_num;

        timer_call_cancel(&processor->quantum_timer);
        thread_dispatch(current_thread());
        timer_switch(&kernel_timer[cpu]);

        /*
         * OK, now exit this cpu.
         */
        PMAP_DEACTIVATE_KERNEL(cpu);
        thread_machine_set_current(processor->idle_thread);
        cpu_down(cpu);
        cpu_sleep();
        panic("zombie processor");
        /*NOTREACHED*/
}

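/*
 * host_get_boot_info:
 *
 * Return the machine-dependent boot info string in the caller's buffer.
 */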
kern_return_t
host_get_boot_info(
        host_priv_t             host_priv,
        kernel_boot_info_t      boot_info)
{
        char *src = "";
        extern char *machine_boot_info(
                        kernel_boot_info_t      boot_info,
                        vm_size_t               buf_len);

        if (host_priv == HOST_PRIV_NULL)
                return (KERN_INVALID_HOST);

        assert(host_priv == &realhost);

        /*
         * Copy the operator string (terminated by '\0'), followed by the
         * standardized strings generated from the boot string.
         */
        src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
        if (src != boot_info)
                (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);

        return (KERN_SUCCESS);
}