/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * File:	kern/machine.c
 * Author:	Avadis Tevanian, Jr.
 *
 * Support for machine independent machine abstraction.
 */
68 #include <mach/mach_types.h>
69 #include <mach/boolean.h>
70 #include <mach/kern_return.h>
71 #include <mach/machine.h>
72 #include <mach/host_info.h>
73 #include <mach/host_reboot.h>
74 #include <mach/host_priv_server.h>
75 #include <mach/processor_server.h>
77 #include <kern/kern_types.h>
78 #include <kern/counters.h>
79 #include <kern/cpu_data.h>
80 #include <kern/cpu_quiesce.h>
81 #include <kern/ipc_host.h>
82 #include <kern/host.h>
83 #include <kern/machine.h>
84 #include <kern/misc_protos.h>
85 #include <kern/processor.h>
86 #include <kern/queue.h>
87 #include <kern/sched.h>
88 #include <kern/task.h>
89 #include <kern/thread.h>
91 #include <machine/commpage.h>
94 #include <IOKit/IOHibernatePrivate.h>
96 #include <IOKit/IOPlatformExpert.h>
/*
 * NOTE(review): this file is a garbled extraction -- each line carries its
 * original source line number fused into the text, statements are split
 * across lines, and some lines are missing entirely.  Comments below
 * annotate only what is visible; code bytes are left untouched.
 */
/* Optional DTrace callback invoked on CPU state changes: (cpu_id, online?). */
99 extern void (*dtrace_cpu_state_changed_hook
)(int, boolean_t
);
103 * Exported variables:
/* Machine/CPU information exported to the rest of the kernel. */
106 struct machine_info machine_info
;
/* Forward declaration: shutdown work executed on the target processor. */
109 void processor_doshutdown(
110 processor_t processor
);
115 * Flag processor as up and running, and available
/*
 * processor_up() -- signature line is missing from this extraction.
 * Marks the processor RUNNING in its pset, increments the online and
 * available counters, refreshes the commpage active-CPU count, and
 * notifies the DTrace hook (if installed) that the CPU came online.
 * NOTE(review): the splsched/pset-lock bracketing present in the full
 * source is not visible here -- confirm against the original file.
 */
120 processor_t processor
)
122 processor_set_t pset
;
/* Initialize per-processor AST-check state. */
126 init_ast_check(processor
);
127 pset
= processor
->processor_set
;
/* Account one more online CPU in this processor set. */
129 ++pset
->online_processor_count
;
130 pset_update_processor_state(pset
, processor
, PROCESSOR_RUNNING
);
/* Atomically publish the new count of schedulable processors. */
131 (void)hw_atomic_add(&processor_avail_count
, 1);
132 commpage_update_active_cpus();
/* Tell DTrace (if hooked) that this cpu_id is now online (TRUE). */
138 if (dtrace_cpu_state_changed_hook
)
139 (*dtrace_cpu_state_changed_hook
)(processor
->cpu_id
, TRUE
);
142 #include <atm/atm_internal.h>
/*
 * host_reboot() -- signature line is missing from this extraction.
 * Privileged host control: reboot, halt, UPS-delay halt, or (on
 * DEVELOPMENT/DEBUG kernels) enter the debugger, selected by 'options'.
 * Returns KERN_INVALID_HOST when the host-priv port is null.
 */
146 host_priv_t host_priv
,
/* Reject callers lacking the host-priv right. */
149 if (host_priv
== HOST_PRIV_NULL
)
150 return (KERN_INVALID_HOST
);
152 assert(host_priv
== &realhost
);
/* Debug kernels only: HOST_REBOOT_DEBUGGER drops into the kernel debugger. */
154 #if DEVELOPMENT || DEBUG
155 if (options
& HOST_REBOOT_DEBUGGER
) {
156 Debugger("Debugger");
157 return (KERN_SUCCESS
);
161 if (options
& HOST_REBOOT_UPSDELAY
) {
162 // UPS power cutoff path
163 PEHaltRestart( kPEUPSDelayHaltCPU
);
/* Halt all CPUs; reboot unless HOST_REBOOT_HALT was requested. */
165 halt_all_cpus(!(options
& HOST_REBOOT_HALT
));
168 return (KERN_SUCCESS
);
/*
 * Processor re-assignment between processor sets is not supported;
 * all parameters are ignored and KERN_FAILURE is always returned.
 * NOTE(review): the function's signature line is missing from this
 * extraction.
 */
173 __unused processor_t processor
,
174 __unused processor_set_t new_pset
,
175 __unused boolean_t wait
)
177 return (KERN_FAILURE
);
/*
 * processor_shutdown() -- signature line is missing from this extraction.
 * Takes a processor offline: returns success immediately if it is
 * already OFF_LINE or SHUTDOWN, fails while the processor is still in
 * START, otherwise transitions it to SHUTDOWN, performs the shutdown on
 * that CPU, and waits for the CPU to exit.
 * NOTE(review): the splsched/pset-lock bracketing present in the full
 * source is not visible here -- confirm against the original file.
 */
182 processor_t processor
)
184 processor_set_t pset
;
188 pset
= processor
->processor_set
;
/* Already offline: nothing to do. */
190 if (processor
->state
== PROCESSOR_OFF_LINE
) {
192 * Success if already shutdown.
197 return (KERN_SUCCESS
);
/* A processor mid-startup cannot be shut down. */
200 if (processor
->state
== PROCESSOR_START
) {
202 * Failure if currently being started.
207 return (KERN_FAILURE
);
211 * If the processor is dispatching, let it finish.
213 while (processor
->state
== PROCESSOR_DISPATCHING
) {
222 * Success if already being shutdown.
224 if (processor
->state
== PROCESSOR_SHUTDOWN
) {
228 return (KERN_SUCCESS
);
/* Transition to SHUTDOWN and run the shutdown work on the target CPU. */
231 pset_update_processor_state(pset
, processor
, PROCESSOR_SHUTDOWN
);
235 processor_doshutdown(processor
);
/* Block until the CPU has actually stopped executing. */
238 cpu_exit_wait(processor
->cpu_id
);
240 return (KERN_SUCCESS
);
244 * Called with interrupts disabled.
/*
 * processor_doshutdown(): executes on (after binding to) the processor
 * being shut down.  Removes it from the available/online accounting,
 * drains its scheduler queues, then saves context and switches away via
 * machine_processor_shutdown() -> processor_offline().
 */
247 processor_doshutdown(
248 processor_t processor
)
250 thread_t old_thread
, self
= current_thread();
252 processor_set_t pset
;
255 * Get onto the processor to shutdown
/* Bind this thread to the target CPU and block so we migrate there. */
257 prev
= thread_bind(processor
);
258 thread_block(THREAD_CONTINUE_NULL
);
260 assert(processor
->state
== PROCESSOR_SHUTDOWN
);
/* Tell DTrace (if hooked) that this cpu_id is going offline (FALSE). */
263 if (dtrace_cpu_state_changed_hook
)
264 (*dtrace_cpu_state_changed_hook
)(processor
->cpu_id
, FALSE
);
/* Fewer than two CPUs remaining: release the hibernation VM lock. */
270 if (processor_avail_count
< 2) {
272 hibernate_vm_unlock();
276 pset
= processor
->processor_set
;
/* Take the processor out of its pset's online accounting. */
278 pset_update_processor_state(pset
, processor
, PROCESSOR_OFF_LINE
);
279 --pset
->online_processor_count
;
280 (void)hw_atomic_sub(&processor_avail_count
, 1);
281 commpage_update_active_cpus();
/* Push queued threads off this processor's run queues. */
282 SCHED(processor_queue_shutdown
)(processor
);
283 /* pset lock dropped */
284 SCHED(rt_queue_shutdown
)(processor
);
287 * Continue processor shutdown in shutdown context.
289 * We save the current context in machine_processor_shutdown in such a way
290 * that when this thread is next invoked it will return from here instead of
291 * from the machine_switch_context() in thread_invoke like a normal context switch.
293 * As such, 'old_thread' is neither the idle thread nor the current thread - it's whatever
294 * thread invoked back to this one. (Usually, it's another processor's idle thread.)
296 * TODO: Make this a real thread_run of the idle_thread, so we don't have to keep this in sync
297 * with thread_invoke.
300 old_thread
= machine_processor_shutdown(self
, processor_offline
, processor
);
/* Finish bookkeeping for whichever thread switched back to us. */
302 thread_dispatch(old_thread
, self
);
306 * Complete the shutdown and place the processor offline.
308 * Called at splsched in the shutdown context.
309 * This performs a minimal thread_invoke() to the idle thread,
310 * so it needs to be kept in sync with what thread_invoke() does.
312 * The onlining half of this is done in load_context().
/*
 * processor_offline() -- signature line is missing from this extraction.
 * Hand-switches from the current (shutdown) thread to the idle thread,
 * updates the processor's dispatch/timer bookkeeping, emits a sched
 * tracepoint, then never returns ("zombie processor" panic if resumed).
 */
316 processor_t processor
)
318 assert(processor
== current_processor());
319 assert(processor
->active_thread
== current_thread());
321 thread_t old_thread
= processor
->active_thread
;
322 thread_t new_thread
= processor
->idle_thread
;
/* Ensure the idle thread has a kernel stack before switching to it. */
324 if (!new_thread
->kernel_stack
) {
325 /* the idle thread has a reserved stack, so this will never fail */
326 if (!stack_alloc_try(new_thread
))
327 panic("processor_offline");
/* Install the idle thread as this processor's active thread. */
330 processor
->active_thread
= new_thread
;
331 processor_state_update_idle(processor
);
332 processor
->starting_pri
= IDLEPRI
;
333 processor
->deadline
= UINT64_MAX
;
334 new_thread
->last_processor
= processor
;
/* Single timestamp for all of the dispatch/run-time bookkeeping below. */
336 uint64_t ctime
= mach_absolute_time();
338 processor
->last_dispatch
= ctime
;
339 old_thread
->last_run_time
= ctime
;
341 /* Update processor->thread_timer and ->kernel_timer to point to the new thread */
342 processor_timer_switch_thread(ctime
, &new_thread
->system_timer
);
343 PROCESSOR_DATA(processor
, kernel_timer
) = &new_thread
->system_timer
;
344 timer_stop(PROCESSOR_DATA(processor
, current_state
), ctime
);
/* Trace the hand-off: old thread's reason/pri -> idle thread's tid/pri. */
346 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
,
347 MACHDBG_CODE(DBG_MACH_SCHED
, MACH_SCHED
) | DBG_FUNC_NONE
,
348 old_thread
->reason
, (uintptr_t)thread_tid(new_thread
),
349 old_thread
->sched_pri
, new_thread
->sched_pri
, 0);
351 machine_set_current_thread(new_thread
);
/* Complete dispatch of the outgoing thread (minimal thread_invoke()). */
353 thread_dispatch(old_thread
, new_thread
);
355 cpu_quiescent_counter_leave(processor
->last_dispatch
);
357 PMAP_DEACTIVATE_KERNEL(processor
->cpu_id
);
/* Control must never come back here once the CPU is offline. */
360 panic("zombie processor");
/*
 * host_get_boot_info() -- signature line is missing from this extraction.
 * Copies the machine's boot-info string into 'boot_info' (a buffer of
 * KERNEL_BOOT_INFO_MAX bytes).  Requires the host-priv right.
 */
366 host_priv_t host_priv
,
367 kernel_boot_info_t boot_info
)
369 const char *src
= "";
/* Reject callers lacking the host-priv right. */
370 if (host_priv
== HOST_PRIV_NULL
)
371 return (KERN_INVALID_HOST
);
373 assert(host_priv
== &realhost
);
376 * Copy first operator string terminated by '\0' followed by
377 * standardized strings generated from boot string.
379 src
= machine_boot_info(boot_info
, KERNEL_BOOT_INFO_MAX
);
/* machine_boot_info() may fill boot_info in place; copy only if not. */
380 if (src
!= boot_info
)
381 (void) strncpy(boot_info
, src
, KERNEL_BOOT_INFO_MAX
);
383 return (KERN_SUCCESS
);
387 #include <mach/sdt.h>
/*
 * ml_io_read(): read a device register of 'size' bytes (1/2/4/8) from the
 * uncached I/O virtual address 'vaddr', returning the value zero-extended
 * to 64 bits; panics on any other size.  On x86_64, when
 * reportphyreaddelayabs is non-zero, the access is timed with interrupts
 * disabled, and reads exceeding the threshold panic (if armed) or are
 * reported via DTrace.
 * NOTE(review): the switch dispatching on 'size', the declarations of
 * sabs/eabs/s1/s2, and several #if/#endif lines are missing from this
 * extraction -- annotations cover only the visible fragments.
 */
390 unsigned long long ml_io_read(uintptr_t vaddr
, int size
) {
391 unsigned long long result
= 0;
395 #if defined(__x86_64__)
397 boolean_t istate
, timeread
= FALSE
;
/* Debug kernels: trap reads through cacheable mappings (invalid for MMIO). */
398 #if DEVELOPMENT || DEBUG
399 pmap_verify_noncacheable(vaddr
);
400 #endif /* x86_64 DEVELOPMENT || DEBUG */
/* Latency reporting enabled: timestamp the read with interrupts off. */
401 if (__improbable(reportphyreaddelayabs
!= 0)) {
402 istate
= ml_set_interrupts_enabled(FALSE
);
403 sabs
= mach_absolute_time();
/* Size-dispatched volatile loads (enclosing switch not visible here). */
410 s1
= *(volatile unsigned char *)vaddr
;
414 s2
= *(volatile unsigned short *)vaddr
;
418 result
= *(volatile unsigned int *)vaddr
;
421 result
= *(volatile unsigned long long *)vaddr
;
/* Any size other than 1/2/4/8 is a caller bug. */
424 panic("Invalid size %d for ml_io_read(%p)\n", size
, (void *)vaddr
);
428 #if defined(__x86_64__)
/* If the read was timed, restore interrupts and check the elapsed time. */
429 if (__improbable(timeread
== TRUE
)) {
430 eabs
= mach_absolute_time();
431 (void)ml_set_interrupts_enabled(istate
);
433 if (__improbable((eabs
- sabs
) > reportphyreaddelayabs
)) {
/* Threshold exceeded: panic if armed and timeouts aren't suspended. */
434 if (phyreadpanic
&& (machine_timeout_suspended() == FALSE
)) {
435 panic("Read from IO virtual addr 0x%lx took %llu ns, result: 0x%llx (start: %llu, end: %llu), ceiling: %llu", vaddr
, (eabs
- sabs
), result
, sabs
, eabs
, reportphyreaddelayabs
);
/* Otherwise surface the slow read through the DTrace physlat probe. */
438 DTRACE_PHYSLAT3(physread
, uint64_t, (eabs
- sabs
),
439 uint64_t, vaddr
, uint32_t, size
);
440 #endif /* CONFIG_DTRACE */
/*
 * Narrow-width convenience wrappers around ml_io_read().
 * Each performs an uncached I/O read of the given size from 'vaddr' and
 * truncates the result to the caller's width.
 * (Reconstructed: the extracted text had source line numbers fused into
 * the code and the closing braces were lost.)
 */
unsigned int
ml_io_read8(uintptr_t vaddr)
{
	return (unsigned) ml_io_read(vaddr, 1);
}

unsigned int
ml_io_read16(uintptr_t vaddr)
{
	return (unsigned) ml_io_read(vaddr, 2);
}

unsigned int
ml_io_read32(uintptr_t vaddr)
{
	return (unsigned) ml_io_read(vaddr, 4);
}

unsigned long long
ml_io_read64(uintptr_t vaddr)
{
	return ml_io_read(vaddr, 8);
}