/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	kern/machine.c
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1987
 *
 *	Support for machine independent machine abstraction.
 */

#include <cpus.h>

#include <string.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/machine.h>
#include <mach/host_info.h>
#include <mach/host_reboot.h>
#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/ipc_host.h>
#include <kern/host.h>
#include <kern/lock.h>
#include <kern/machine.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/misc_protos.h>

#include <kern/mk_sp.h>

/*
 *	Exported variables:
 */

struct machine_info	machine_info;
struct machine_slot	machine_slot[NCPUS];

static queue_head_t		processor_action_queue;
static boolean_t		processor_action_active;
static thread_call_t		processor_action_call;
static thread_call_data_t	processor_action_call_data;
decl_simple_lock_data(static,processor_action_lock)

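/*
 * Processors pending shutdown (or reassignment) are queued on
 * processor_action_queue and handed to _processor_action() via the
 * processor_action_call thread call; processor_action_active records
 * whether that call is already queued or running.  processor_action_lock
 * protects all of this state.
 *
 * machine_wake_thread is the thread stashed by processor_doaction() for
 * the machine-dependent layer to run when it next brings a processor up
 * (an inference from its use below; the consumer lives outside this file).
 */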
thread_t		machine_wake_thread;

/* Forwards */
processor_set_t	processor_request_action(
			processor_t		processor,
			processor_set_t		new_pset);

void		processor_doaction(
			processor_t		processor);

void		processor_doshutdown(
			processor_t		processor);

/*
 *	cpu_up:
 *
 *	Flag specified cpu as up and running.  Called when a processor comes
 *	online.
 */
void
cpu_up(
	int		cpu)
{
	processor_t		processor = cpu_to_processor(cpu);
	struct machine_slot	*ms;
	spl_t			s;

	/*
	 * Just twiddle our thumbs; we've got nothing better to do
	 * yet, anyway.
	 */
	while (!simple_lock_try(&default_pset.processors_lock))
		continue;

	s = splsched();
	processor_lock(processor);
	init_ast_check(processor);
	ms = &machine_slot[cpu];
	ms->running = TRUE;
	machine_info.avail_cpus++;
	pset_add_processor(&default_pset, processor);
	processor->state = PROCESSOR_RUNNING;
	processor_unlock(processor);
	splx(s);

	simple_unlock(&default_pset.processors_lock);
}

/*
 *	cpu_down:
 *
 *	Flag specified cpu as down.  Called when a processor is about to
 *	go offline.
 */
void
cpu_down(
	int		cpu)
{
	processor_t		processor;
	struct machine_slot	*ms;
	spl_t			s;

	processor = cpu_to_processor(cpu);

	s = splsched();
	processor_lock(processor);
	ms = &machine_slot[cpu];
	ms->running = FALSE;
	machine_info.avail_cpus--;
	/*
	 * processor has already been removed from pset.
	 */
	processor->processor_set_next = PROCESSOR_SET_NULL;
	processor->state = PROCESSOR_OFF_LINE;
	processor_unlock(processor);
	splx(s);
}

kern_return_t
host_reboot(
	host_priv_t		host_priv,
	int			options)
{
	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	assert(host_priv == &realhost);

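	/*
	 * HOST_REBOOT_DEBUGGER drops into the kernel debugger; otherwise the
	 * machine is halted or rebooted.  halt_all_cpus() takes a "reboot"
	 * flag, so it is passed TRUE unless HOST_REBOOT_HALT was requested.
	 */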
	if (options & HOST_REBOOT_DEBUGGER) {
		Debugger("Debugger");
	}
	else
		halt_all_cpus(!(options & HOST_REBOOT_HALT));

	return (KERN_SUCCESS);
}

/*
 *	processor_request_action:
 *
 *	Common internals of processor_assign and processor_shutdown.
 *	If new_pset is null, this is a shutdown, else it's an assign
 *	and caller must donate a reference.
 *	For assign operations, it returns an old pset that must be deallocated
 *	if it's not NULL.
 *	For shutdown operations, it always returns PROCESSOR_SET_NULL.
 */
processor_set_t
processor_request_action(
	processor_t		processor,
	processor_set_t		new_pset)
{
	processor_set_t pset, old_next_pset;

	/*
	 * Processor must be in a processor set.  Must lock its idle lock to
	 * get at processor state.
	 */
	pset = processor->processor_set;
	simple_lock(&pset->idle_lock);

	/*
	 * If the processor is dispatching, let it finish - it will set its
	 * state to running very soon.
	 */
	while (*(volatile int *)&processor->state == PROCESSOR_DISPATCHING) {
		simple_unlock(&pset->idle_lock);
		simple_lock(&pset->idle_lock);
	}
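	/*
	 * (The loop above drops and retakes idle_lock on each iteration so
	 * that the wait does not hold the lock continuously while the
	 * dispatching processor finishes its transition to PROCESSOR_RUNNING.)
	 */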

	/*
	 * Now lock the action queue and do the dirty work.
	 */
	simple_lock(&processor_action_lock);

	switch (processor->state) {

	case PROCESSOR_IDLE:
		/*
		 * Remove from idle queue.
		 */
		queue_remove(&pset->idle_queue, processor,
					processor_t, processor_queue);
		pset->idle_count--;

		/* fall through ... */
	case PROCESSOR_RUNNING:
		/*
		 * Put it on the action queue.
		 */
		queue_enter(&processor_action_queue, processor,
					processor_t, processor_queue);

		/* fall through ... */
	case PROCESSOR_ASSIGN:
		/*
		 * And ask the action call to do the work.
		 */

		if (new_pset == PROCESSOR_SET_NULL) {
			processor->state = PROCESSOR_SHUTDOWN;
			old_next_pset = PROCESSOR_SET_NULL;
		} else {
			processor->state = PROCESSOR_ASSIGN;
			old_next_pset = processor->processor_set_next;
			processor->processor_set_next = new_pset;
		}
		break;

	default:
		printf("state: %d\n", processor->state);
		panic("processor_request_action: bad state");
	}

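	/*
	 * If the action call is not already pending, kick it off.  The idle
	 * and action locks are dropped first, and the processor lock is
	 * dropped around thread_call_enter() and then retaken, since the
	 * caller holds it on entry and expects it still held on return.
	 */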
	if (processor_action_active == FALSE) {
		processor_action_active = TRUE;
		simple_unlock(&processor_action_lock);
		simple_unlock(&pset->idle_lock);
		processor_unlock(processor);
		thread_call_enter(processor_action_call);
		processor_lock(processor);
	} else {
		simple_unlock(&processor_action_lock);
		simple_unlock(&pset->idle_lock);
	}

	return (old_next_pset);
}

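/*
 *	processor_assign:
 *
 *	Processor set assignment is not supported by this kernel; the request
 *	always fails.  The lint block below merely silences unused-argument
 *	warnings.
 */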
kern_return_t
processor_assign(
	processor_t		processor,
	processor_set_t		new_pset,
	boolean_t		wait)
{
#ifdef	lint
	processor++; new_pset++; wait++;
#endif	/* lint */
	return (KERN_FAILURE);
}

/*
 *	processor_shutdown() queues a processor up for shutdown.
 *	Any assignment in progress is overridden.
 */
kern_return_t
processor_shutdown(
	processor_t		processor)
{
	spl_t		s;

	s = splsched();
	processor_lock(processor);
	if ((processor->state == PROCESSOR_OFF_LINE) ||
	    (processor->state == PROCESSOR_SHUTDOWN)) {
		/*
		 * Already shutdown or being shutdown -- nothing to do.
		 */
		processor_unlock(processor);
		splx(s);

		return (KERN_SUCCESS);
	}

	(void) processor_request_action(processor, PROCESSOR_SET_NULL);

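	/*
	 * Wait for the shutdown to complete.  The matching thread_wakeup()
	 * is issued by processor_doaction() once the processor has been
	 * removed from its pset and switched to its shutdown context.
	 */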
	assert_wait((event_t)processor, THREAD_UNINT);

	processor_unlock(processor);
	splx(s);

	thread_block((void (*)(void)) 0);

	return (KERN_SUCCESS);
}

/*
 *	_processor_action() is the thread-call handler that drains the
 *	processor action queue, shutting down processors or changing their
 *	assignment; processor_action() below initializes the mechanism.
 */
static void
_processor_action(
	thread_call_param_t	p0,
	thread_call_param_t	p1)
{
	register processor_t	processor;
	spl_t			s;

	s = splsched();
	simple_lock(&processor_action_lock);

	while (!queue_empty(&processor_action_queue)) {
		processor = (processor_t) queue_first(&processor_action_queue);
		queue_remove(&processor_action_queue, processor,
					processor_t, processor_queue);
		simple_unlock(&processor_action_lock);
		splx(s);

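		/*
		 * The action lock and spl are dropped while
		 * processor_doaction() runs, since it blocks (it binds the
		 * calling thread to the target processor and waits to get
		 * there).
		 */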
		processor_doaction(processor);

		s = splsched();
		simple_lock(&processor_action_lock);
	}

	processor_action_active = FALSE;
	simple_unlock(&processor_action_lock);
	splx(s);
}

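/*
 *	processor_action:
 *
 *	Initialize the processor-action mechanism: the request queue, its
 *	lock, and the thread call that runs _processor_action().  Presumably
 *	called once during system startup, before any shutdown or assignment
 *	request can be made.
 */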
void
processor_action(void)
{
	queue_init(&processor_action_queue);
	simple_lock_init(&processor_action_lock, ETAP_THREAD_ACTION);
	processor_action_active = FALSE;

	thread_call_setup(&processor_action_call_data, _processor_action, NULL);
	processor_action_call = &processor_action_call_data;
}

/*
 *	processor_doaction actually does the shutdown.  The trick here
 *	is to schedule ourselves onto a cpu and then save our
 *	context back into the runqs before taking out the cpu.
 */
void
processor_doaction(
	processor_t		processor)
{
	thread_t		self = current_thread();
	processor_set_t		pset;
	thread_t		old_thread;
	spl_t			s;

	/*
	 * Get onto the processor to shutdown
	 */
	thread_bind(self, processor);
	thread_block((void (*)(void)) 0);

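	/*
	 * After the block above we are running on the target processor,
	 * since the bind forces the scheduler to place this thread there.
	 */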
	pset = processor->processor_set;
	simple_lock(&pset->processors_lock);

	if (pset->processor_count == 1) {
		thread_t	thread;
		extern void	start_cpu_thread(void);

		simple_unlock(&pset->processors_lock);

		/*
		 * Create the thread, and point it at the routine.
		 */
		thread = kernel_thread_with_priority(
					kernel_task, MAXPRI_KERNEL,
					start_cpu_thread, TRUE, FALSE);

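		/*
		 * Mark the new thread runnable, pull it off the run queue,
		 * and leave it in machine_wake_thread; presumably the
		 * machine-dependent layer hands it directly to the next
		 * processor it brings up, rather than letting the scheduler
		 * find it.
		 */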
		disable_preemption();

		s = splsched();
		thread_lock(thread);
		thread->state |= TH_RUN;
		_mk_sp_thread_unblock(thread);
		(void)rem_runq(thread);
		machine_wake_thread = thread;
		thread_unlock(thread);
		splx(s);

		simple_lock(&pset->processors_lock);
		enable_preemption();
	}

	s = splsched();
	processor_lock(processor);

	/*
	 * Do shutdown, make sure we live when processor dies.
	 */
	if (processor->state != PROCESSOR_SHUTDOWN) {
		panic("action_thread -- bad processor state");
	}

	pset_remove_processor(pset, processor);
	processor_unlock(processor);
	simple_unlock(&pset->processors_lock);

	/*
	 * Clean up: unbind from the dying processor and switch to its
	 * shutdown context, where processor_doshutdown() takes the processor
	 * down; then wake up anyone waiting in processor_shutdown() for
	 * this processor.
	 */
	thread_bind(self, PROCESSOR_NULL);
	self->continuation = 0;
	old_thread = switch_to_shutdown_context(self,
					processor_doshutdown, processor);
	thread_dispatch(old_thread);
	thread_wakeup((event_t)processor);
	splx(s);
}

/*
 *	Actually do the processor shutdown.  This is called at splsched,
 *	running on the processor's shutdown stack.
 */

void
processor_doshutdown(
	processor_t		processor)
{
	register int	cpu = processor->slot_num;

	timer_call_cancel(&processor->quantum_timer);
	thread_dispatch(current_thread());
	timer_switch(&kernel_timer[cpu]);

	/*
	 * OK, now exit this cpu.
	 */
	PMAP_DEACTIVATE_KERNEL(cpu);
	cpu_data[cpu].active_thread = THREAD_NULL;
	active_kloaded[cpu] = THR_ACT_NULL;
	cpu_down(cpu);
	cpu_sleep();
	panic("zombie processor");
	/*NOTREACHED*/
}

kern_return_t
host_get_boot_info(
	host_priv_t		host_priv,
	kernel_boot_info_t	boot_info)
{
	char *src = "";
	extern char *machine_boot_info(
				kernel_boot_info_t	boot_info,
				vm_size_t		buf_len);

	if (host_priv == HOST_PRIV_NULL)
		return (KERN_INVALID_HOST);

	assert(host_priv == &realhost);

	/*
	 * Copy the first operator string (terminated by '\0'), followed by
	 * the standardized strings generated from the boot string.
	 */
	src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
	if (src != boot_info)
		(void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);
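	/*
	 * (strncpy() will not NUL-terminate the copy if the source is
	 * KERNEL_BOOT_INFO_MAX bytes or longer; machine_boot_info() is
	 * presumably expected to stay within that bound.)
	 */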

	return (KERN_SUCCESS);
}