/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    kern/machine.c
 * Author:  Avadis Tevanian, Jr.
 * Date:    1987
 *
 * Support for the machine-independent machine abstraction.
 */

#include <string.h>

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/machine.h>
#include <mach/host_info.h>
#include <mach/host_reboot.h>
#include <mach/host_priv_server.h>
#include <mach/processor_server.h>

#include <kern/kern_types.h>
#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/ipc_host.h>
#include <kern/host.h>
#include <kern/machine.h>
#include <kern/misc_protos.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
#include <kern/task.h>
#include <kern/thread.h>

#include <atm/atm_internal.h>

#include <machine/commpage.h>

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif
#include <IOKit/IOPlatformExpert.h>

#if CONFIG_DTRACE
extern void (*dtrace_cpu_state_changed_hook)(int, boolean_t);
#endif

/*
 * Exported variables:
 */

struct machine_info machine_info;

/* Forwards */
void processor_doshutdown(
    processor_t processor);

/*
 * processor_up:
 *
 * Flag processor as up and running, and available
 * for scheduling.
 */
void
processor_up(
    processor_t processor)
{
    processor_set_t pset;
    spl_t s;

    s = splsched();
    init_ast_check(processor);
    pset = processor->processor_set;
    pset_lock(pset);
    ++pset->online_processor_count;
    enqueue_tail(&pset->active_queue, (queue_entry_t)processor);
    processor->state = PROCESSOR_RUNNING;
    pset->active_processor_count++;
    sched_update_pset_load_average(pset);
    (void)hw_atomic_add(&processor_avail_count, 1);
    commpage_update_active_cpus();
    pset_unlock(pset);
    ml_cpu_up();
    splx(s);

#if CONFIG_DTRACE
    if (dtrace_cpu_state_changed_hook)
        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, TRUE);
#endif
}
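
/*
 * Illustrative sketch (not part of the build): machine-dependent bring-up
 * code is expected to call processor_up() once a core is able to run.
 * The function names below are hypothetical stand-ins for the platform
 * layer, shown only to make the calling context concrete.
 *
 *	void
 *	my_platform_cpu_started(int cpu_id)	// hypothetical
 *	{
 *		processor_t processor = my_cpu_to_processor(cpu_id);	// hypothetical lookup
 *
 *		processor_up(processor);	// now visible to the scheduler
 *	}
 */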

kern_return_t
host_reboot(
    host_priv_t host_priv,
    int options)
{
    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    assert(host_priv == &realhost);

#if DEVELOPMENT || DEBUG
    if (options & HOST_REBOOT_DEBUGGER) {
        Debugger("Debugger");
        return (KERN_SUCCESS);
    }
#endif

    if (options & HOST_REBOOT_UPSDELAY) {
        // UPS power cutoff path
        PEHaltRestart(kPEUPSDelayHaltCPU);
    } else {
        halt_all_cpus(!(options & HOST_REBOOT_HALT));
    }

    return (KERN_SUCCESS);
}
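
/*
 * Illustrative sketch (user space, not part of this file): a caller that
 * already holds the host-priv port can halt rather than reboot by passing
 * HOST_REBOOT_HALT.  `priv_port' is a hypothetical, previously acquired
 * host-priv right; obtaining it requires root.
 *
 *	mach_port_t priv_port;			// assumed already acquired
 *	(void) host_reboot(priv_port, HOST_REBOOT_HALT);
 */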

kern_return_t
processor_assign(
    __unused processor_t processor,
    __unused processor_set_t new_pset,
    __unused boolean_t wait)
{
    return (KERN_FAILURE);
}

kern_return_t
processor_shutdown(
    processor_t processor)
{
    processor_set_t pset;
    spl_t s;

    s = splsched();
    pset = processor->processor_set;
    pset_lock(pset);
    if (processor->state == PROCESSOR_OFF_LINE) {
        /*
         * Success if already shut down.
         */
        pset_unlock(pset);
        splx(s);

        return (KERN_SUCCESS);
    }

    if (processor->state == PROCESSOR_START) {
        /*
         * Failure if currently being started.
         */
        pset_unlock(pset);
        splx(s);

        return (KERN_FAILURE);
    }

    /*
     * If the processor is dispatching, let it finish.
     */
    while (processor->state == PROCESSOR_DISPATCHING) {
        pset_unlock(pset);
        splx(s);
        delay(1);
        s = splsched();
        pset_lock(pset);
    }

    /*
     * Success if shutdown is already underway.
     */
    if (processor->state == PROCESSOR_SHUTDOWN) {
        pset_unlock(pset);
        splx(s);

        return (KERN_SUCCESS);
    }

    if (processor->state == PROCESSOR_IDLE) {
        remqueue((queue_entry_t)processor);
    } else if (processor->state == PROCESSOR_RUNNING) {
        remqueue((queue_entry_t)processor);
        pset->active_processor_count--;
        sched_update_pset_load_average(pset);
    }

    processor->state = PROCESSOR_SHUTDOWN;

    pset_unlock(pset);

    processor_doshutdown(processor);
    splx(s);

    cpu_exit_wait(processor->cpu_id);

    return (KERN_SUCCESS);
}
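
/*
 * Summary of the state handling above, derived from the code:
 *
 *	PROCESSOR_OFF_LINE	-> KERN_SUCCESS (already down)
 *	PROCESSOR_START		-> KERN_FAILURE (bring-up in progress)
 *	PROCESSOR_DISPATCHING	-> poll with delay(1) until it settles
 *	PROCESSOR_SHUTDOWN	-> KERN_SUCCESS (shutdown already underway)
 *	PROCESSOR_IDLE/RUNNING	-> dequeue, mark PROCESSOR_SHUTDOWN,
 *				   then continue into processor_doshutdown()
 */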

/*
 * Called with interrupts disabled.
 */
void
processor_doshutdown(
    processor_t processor)
{
    thread_t old_thread, self = current_thread();
    processor_t prev;
    processor_set_t pset;

    /*
     * Get onto the processor to shut down.
     */
    prev = thread_bind(processor);
    thread_block(THREAD_CONTINUE_NULL);

    assert(processor->state == PROCESSOR_SHUTDOWN);

#if CONFIG_DTRACE
    if (dtrace_cpu_state_changed_hook)
        (*dtrace_cpu_state_changed_hook)(processor->cpu_id, FALSE);
#endif

    ml_cpu_down();

#if HIBERNATION
    if (processor_avail_count < 2) {
        hibernate_vm_lock();
        hibernate_vm_unlock();
    }
#endif

    pset = processor->processor_set;
    pset_lock(pset);
    processor->state = PROCESSOR_OFF_LINE;
    --pset->online_processor_count;
    (void)hw_atomic_sub(&processor_avail_count, 1);
    commpage_update_active_cpus();
    SCHED(processor_queue_shutdown)(processor);
    /* pset lock dropped */
    SCHED(rt_queue_shutdown)(processor);

    /*
     * Continue processor shutdown in shutdown context.
     *
     * We save the current context in machine_processor_shutdown() in such a way
     * that when this thread is next invoked it will return from here instead of
     * from the machine_switch_context() in thread_invoke() like a normal context switch.
     *
     * As such, 'old_thread' is neither the idle thread nor the current thread - it's whatever
     * thread invoked back to this one.  (Usually, it's another processor's idle thread.)
     *
     * TODO: Make this a real thread_run() of the idle_thread, so we don't have to keep
     * this in sync with thread_invoke().
     */
    thread_bind(prev);
    old_thread = machine_processor_shutdown(self, processor_offline, processor);

    thread_dispatch(old_thread, self);
}
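
/*
 * The thread_bind()/thread_block() pair above is the standard pattern for
 * migrating the calling thread onto a specific processor.  A minimal sketch
 * of the pattern in isolation (hypothetical caller; `target' is assumed):
 *
 *	processor_t prev = thread_bind(target);
 *	thread_block(THREAD_CONTINUE_NULL);	// resumes on `target'
 *	// ... per-CPU work ...
 *	thread_bind(prev);			// restore the old binding
 *	thread_block(THREAD_CONTINUE_NULL);
 */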

/*
 * Complete the shutdown and place the processor offline.
 *
 * Called at splsched in the shutdown context.
 * This performs a minimal thread_invoke() to the idle thread,
 * so it needs to be kept in sync with what thread_invoke() does.
 *
 * The onlining half of this is done in load_context().
 */
void
processor_offline(
    processor_t processor)
{
    assert(processor == current_processor());
    assert(processor->active_thread == current_thread());

    thread_t old_thread = processor->active_thread;
    thread_t new_thread = processor->idle_thread;

    processor->active_thread = new_thread;
    processor_state_update_idle(processor);
    processor->starting_pri = IDLEPRI;
    processor->deadline = UINT64_MAX;
    new_thread->last_processor = processor;

    uint64_t ctime = mach_absolute_time();

    processor->last_dispatch = ctime;
    old_thread->last_run_time = ctime;

    /* Update processor->thread_timer and ->kernel_timer to point to the new thread */
    thread_timer_event(ctime, &new_thread->system_timer);
    PROCESSOR_DATA(processor, kernel_timer) = &new_thread->system_timer;
    timer_stop(PROCESSOR_DATA(processor, current_state), ctime);

    KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
        MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED) | DBG_FUNC_NONE,
        old_thread->reason, (uintptr_t)thread_tid(new_thread),
        old_thread->sched_pri, new_thread->sched_pri, 0);

    machine_set_current_thread(new_thread);

    thread_dispatch(old_thread, new_thread);

    PMAP_DEACTIVATE_KERNEL(processor->cpu_id);

    cpu_sleep();
    panic("zombie processor");
    /*NOTREACHED*/
}

kern_return_t
host_get_boot_info(
    host_priv_t host_priv,
    kernel_boot_info_t boot_info)
{
    const char *src = "";
    if (host_priv == HOST_PRIV_NULL)
        return (KERN_INVALID_HOST);

    assert(host_priv == &realhost);

    /*
     * Copy the operator boot string, terminated by '\0', followed by
     * standardized strings generated from the boot string.
     */
    src = machine_boot_info(boot_info, KERNEL_BOOT_INFO_MAX);
    if (src != boot_info)
        (void) strncpy(boot_info, src, KERNEL_BOOT_INFO_MAX);

    return (KERN_SUCCESS);
}
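
/*
 * Illustrative sketch (user space, not part of this file): fetching the
 * boot info string through the host-priv interface.  `priv_port' is a
 * hypothetical, previously acquired host-priv right.
 *
 *	kernel_boot_info_t info;	// char buffer of KERNEL_BOOT_INFO_MAX
 *	if (host_get_boot_info(priv_port, info) == KERN_SUCCESS)
 *		printf("boot info: %s\n", info);
 */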

#if CONFIG_DTRACE
#include <mach/sdt.h>
#endif

unsigned long long
ml_io_read(uintptr_t vaddr, int size)
{
    unsigned long long result = 0;
    unsigned char s1;
    unsigned short s2;

#if defined(__x86_64__)
    uint64_t sabs, eabs;
    boolean_t istate, timeread = FALSE;
#if DEVELOPMENT || DEBUG
    pmap_verify_noncacheable(vaddr);
#endif /* DEVELOPMENT || DEBUG */
    /*
     * reportphyreaddelayabs and phyreadpanic are latency-diagnostic
     * tunables supplied by the machine-dependent layer; this block is
     * x86_64-only.
     */
    if (__improbable(reportphyreaddelayabs != 0)) {
        istate = ml_set_interrupts_enabled(FALSE);
        sabs = mach_absolute_time();
        timeread = TRUE;
    }
#endif /* defined(__x86_64__) */

    switch (size) {
    case 1:
        s1 = *(volatile unsigned char *)vaddr;
        result = s1;
        break;
    case 2:
        s2 = *(volatile unsigned short *)vaddr;
        result = s2;
        break;
    case 4:
        result = *(volatile unsigned int *)vaddr;
        break;
    case 8:
        result = *(volatile unsigned long long *)vaddr;
        break;
    default:
        panic("Invalid size %d for ml_io_read(%p)\n", size, (void *)vaddr);
        break;
    }

#if defined(__x86_64__)
    if (__improbable(timeread == TRUE)) {
        eabs = mach_absolute_time();
        (void)ml_set_interrupts_enabled(istate);

        if (__improbable((eabs - sabs) > reportphyreaddelayabs)) {
            if (phyreadpanic && (machine_timeout_suspended() == FALSE)) {
                panic("Read from IO virtual addr 0x%lx took %llu ns, "
                    "result: 0x%llx (start: %llu, end: %llu), ceiling: %llu",
                    vaddr, (eabs - sabs), result, sabs, eabs,
                    reportphyreaddelayabs);
            }
#if CONFIG_DTRACE
            DTRACE_PHYSLAT3(physread, uint64_t, (eabs - sabs),
                uint64_t, vaddr, uint32_t, size);
#endif /* CONFIG_DTRACE */
        }
    }
#endif /* defined(__x86_64__) */
    return result;
}

unsigned int
ml_io_read8(uintptr_t vaddr)
{
    return (unsigned) ml_io_read(vaddr, 1);
}

unsigned int
ml_io_read16(uintptr_t vaddr)
{
    return (unsigned) ml_io_read(vaddr, 2);
}

unsigned int
ml_io_read32(uintptr_t vaddr)
{
    return (unsigned) ml_io_read(vaddr, 4);
}

unsigned long long
ml_io_read64(uintptr_t vaddr)
{
    return ml_io_read(vaddr, 8);
}
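
/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * reading a 32-bit device register through the timing-checked accessor.
 * `regs' and STATUS_REG_OFFSET are hypothetical; `regs' stands for an
 * uncached kernel virtual mapping of a device's register block.
 *
 *	uintptr_t regs = ...;			// assumed MMIO mapping
 *	unsigned int status = ml_io_read32(regs + STATUS_REG_OFFSET);
 *	if (status & 0x1)			// hypothetical error bit
 *		...;
 */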