/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <i386/machine_routines.h>
#include <i386/io_map_entries.h>
#include <i386/cpuid.h>
#include <i386/fpu.h>
#include <mach/processor.h>
#include <kern/processor.h>
#include <kern/machine.h>
#include <kern/cpu_data.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <i386/cpu_data.h>
#include <i386/machine_cpu.h>
#include <i386/mp.h>
#include <i386/mp_events.h>
#include <i386/pmap.h>
#include <i386/misc_protos.h>
#include <i386/pmCPU.h>
#include <i386/proc_reg.h>
#include <i386/tsc.h>
#include <i386/cpu_threads.h>
#include <mach/vm_param.h>
#if MACH_KDB
#include <i386/db_machdep.h>
#include <ddb/db_aout.h>
#include <ddb/db_access.h>
#include <ddb/db_sym.h>
#include <ddb/db_variables.h>
#include <ddb/db_command.h>
#include <ddb/db_output.h>
#include <ddb/db_expr.h>
#endif

#if DEBUG
#define DBG(x...)	kprintf("DBG: " x)
#else
#define DBG(x...)
#endif

extern thread_t	Shutdown_context(thread_t thread, void (*doshutdown)(processor_t), processor_t processor);
extern void	wakeup(void *);
extern unsigned	KernelRelocOffset;

static int max_cpus_initialized = 0;

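/*
 * Global lock-timeout values, initialized in ml_init_lock_timeout()
 * below: LockTimeOut is in absolute-time units, LockTimeOutTSC is the
 * same interval expressed in TSC ticks, and MutexSpin bounds how long
 * a mutex may be spun on before blocking.
 */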
unsigned int LockTimeOut;
unsigned int LockTimeOutTSC;
unsigned int MutexSpin;

#define MAX_CPUS_SET	0x1
#define MAX_CPUS_WAIT	0x2

/* IO memory map services */

/* Map memory-mapped I/O space */
vm_offset_t ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size, VM_WIMG_IO));
}

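/*
 * Illustrative use only: a driver mapping one page of memory-mapped
 * device registers, e.g. the local APIC at its conventional physical
 * base address:
 *
 *	vm_offset_t regs = ml_io_map((vm_offset_t)0xFEE00000, PAGE_SIZE);
 *
 * VM_WIMG_IO requests cache-inhibited attributes suitable for device
 * memory.
 */
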
/* boot memory allocation */
vm_offset_t ml_static_malloc(
	__unused vm_size_t size)
{
	return((vm_offset_t)NULL);
}


void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = bounce_pool_base;
	*size = bounce_pool_size;
}


vm_offset_t
ml_boot_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((paddr - KernelRelocOffset) | LINEAR_KERNEL_ADDRESS);
}

vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	return (vm_offset_t)((unsigned) paddr | LINEAR_KERNEL_ADDRESS);
}

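/*
 * Both ml_boot_ptovirt() and ml_static_ptovirt() above form a kernel
 * virtual address by OR-ing a physical address into the
 * LINEAR_KERNEL_ADDRESS window, so they are meaningful only for
 * memory covered by the kernel's static linear mapping.
 */
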
/*
 *	Routine:        ml_static_mfree
 *	Function:	Return statically (boot-time) allocated memory
 *			to the VM system's free list.
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t vaddr_cur;
	ppnum_t ppn;

//	if (vaddr < VM_MIN_KERNEL_ADDRESS) return;

	assert((vaddr & (PAGE_SIZE-1)) == 0);	/* must be page aligned */

	for (vaddr_cur = vaddr;
	     vaddr_cur < round_page_32(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		ppn = pmap_find_phys(kernel_pmap, (addr64_t)vaddr_cur);
		if (ppn != (vm_offset_t)NULL) {
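			/*
			 * pmap_remove() below will decrement resident_count,
			 * which these boot-time mappings were presumably
			 * never counted in; pre-increment it (tracking
			 * resident_max) so the statistics stay balanced.
			 */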
			kernel_pmap->stats.resident_count++;
			if (kernel_pmap->stats.resident_count >
			    kernel_pmap->stats.resident_max) {
				kernel_pmap->stats.resident_max =
					kernel_pmap->stats.resident_count;
			}
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
			vm_page_create(ppn, (ppn+1));
			vm_page_wire_count--;
		}
	}
}


/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return kvtophys(vaddr);
}

/*
 *	Routine:        ml_nofault_copy
 *	Function:	Perform a physical mode copy if the source and
 *			destination have valid translations in the kernel pmap.
 *			If translations are present, they are assumed to be
 *			wired; that is, no attempt is made to guarantee that
 *			the translations obtained remain valid for the
 *			duration of the copy process.
 */

vm_size_t ml_nofault_copy(
	vm_offset_t virtsrc, vm_offset_t virtdst, vm_size_t size)
{
	addr64_t cur_phys_dst, cur_phys_src;
	uint32_t count, nbytes = 0;

	while (size > 0) {
		if (!(cur_phys_src = kvtophys(virtsrc)))
			break;
		if (!(cur_phys_dst = kvtophys(virtdst)))
			break;
		if (!pmap_valid_page(i386_btop(cur_phys_dst)) ||
		    !pmap_valid_page(i386_btop(cur_phys_src)))
			break;
		count = PAGE_SIZE - (cur_phys_src & PAGE_MASK);
		if (count > (PAGE_SIZE - (cur_phys_dst & PAGE_MASK)))
			count = PAGE_SIZE - (cur_phys_dst & PAGE_MASK);
		if (count > size)
			count = size;

		bcopy_phys(cur_phys_src, cur_phys_dst, count);

		nbytes += count;
		virtsrc += count;
		virtdst += count;
		size -= count;
	}

	return nbytes;
}

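/*
 * Worked example of the clamping above, with PAGE_SIZE 4096: if the
 * source sits at offset 0xF00 in its page and the destination at
 * offset 0x800, count starts at 0x100 (the bytes left in the source
 * page), is not reduced by the destination (0x800 bytes remain
 * there), and is finally capped at the bytes still requested.
 */
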
/* Interrupt handling */

/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	(void) ml_set_interrupts_enabled(TRUE);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));
	return (flags & EFL_IF) != 0;
}

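/*
 * EFL_IF is the interrupt-enable bit in EFLAGS, read via pushf/popl.
 * When re-enabling interrupts with an urgent AST pending (and
 * preemption not disabled), the "int $0xff" below appears to force a
 * trip through the interrupt return path so the AST is acted on
 * immediately rather than at the next hardware interrupt.
 */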
/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	unsigned long flags;

	__asm__ volatile("pushf; popl %0" : "=r" (flags));

	if (enable) {
		ast_t	*myast;

		myast = ast_pending();

		if ( (get_preemption_level() == 0) && (*myast & AST_URGENT) ) {
			__asm__ volatile("sti");
			__asm__ volatile ("int $0xff");
		} else {
			__asm__ volatile ("sti");
		}
	}
	else {
		__asm__ volatile("cli");
	}

	return (flags & EFL_IF) != 0;
}

/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	return get_interrupt_level() != 0;
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	panic("ml_cause_interrupt not defined yet on Intel");
}

void ml_thread_policy(
	thread_t thread,
	__unused unsigned policy_id,
	unsigned policy_info)
{
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}

/* Install an interrupt handler via the platform expert */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	PE_install_interrupt_handler(nub, source, target,
				     (IOInterruptHandler) handler, refCon);

	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(NULL, kPEAcquireScreen);
}

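/*
 * machine_idle() runs on the idle thread with interrupts disabled
 * (note the unconditional "sti" on exit).  The core's active_lcpus
 * count gates halting: halting the last active logical processor in
 * a core can let the whole core drop into a low-power state.
 */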
void
machine_idle(void)
{
	x86_core_t	*my_core = x86_core();
	cpu_data_t	*my_cpu  = current_cpu_datap();
	int		others_active;

	/*
	 * Halt this cpu thread unless the kernel parameter "idlehalt" is
	 * false and no other thread in the same core is active - in that
	 * case don't halt, so that this core doesn't go into a low-power
	 * mode.
	 * For 4/4, we set a null "active cr3" while idle.
	 */
	if (my_core == NULL || my_cpu == NULL)
		goto out;

	others_active = !atomic_decl_and_test(
				(long *) &my_core->active_lcpus, 1);
	my_cpu->lcpu.idle = TRUE;
	if (idlehalt || others_active) {
		DBGLOG(cpu_handle, cpu_number(), MP_IDLE);
		MARK_CPU_IDLE(cpu_number());
		machine_idle_cstate(FALSE);
		MARK_CPU_ACTIVE(cpu_number());
		DBGLOG(cpu_handle, cpu_number(), MP_UNIDLE);
	}
	my_cpu->lcpu.idle = FALSE;
	atomic_incl((long *) &my_core->active_lcpus, 1);
out:
	__asm__ volatile("sti");
}

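/*
 * Kick an idle (possibly halted) processor with an interprocessor
 * interrupt so it resumes from its halt.
 */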
void
machine_signal_idle(
	processor_t processor)
{
	cpu_interrupt(PROCESSOR_DATA(processor, slot_num));
}

thread_t
machine_processor_shutdown(
	thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	vmx_suspend();
	fpu_save_context(thread);
	return(Shutdown_context(thread, doshutdown, processor));
}

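/*
 * Called as each cpu is registered by the platform code: allocates
 * the per-cpu data structures, maps the local APIC id to the logical
 * cpu number, and - once the last processor has registered - calls
 * cpu_topology_start() to finalize and activate the topology.
 */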
kern_return_t
ml_processor_register(
	cpu_id_t	cpu_id,
	uint32_t	lapic_id,
	processor_t	*processor_out,
	ipi_handler_t	*ipi_handler,
	boolean_t	boot_cpu)
{
	int		target_cpu;
	cpu_data_t	*this_cpu_datap;

	this_cpu_datap = cpu_data_alloc(boot_cpu);
	if (this_cpu_datap == NULL) {
		return KERN_FAILURE;
	}
	target_cpu = this_cpu_datap->cpu_number;
	assert((boot_cpu && (target_cpu == 0)) ||
	       (!boot_cpu && (target_cpu != 0)));

	lapic_cpu_map(lapic_id, target_cpu);

	this_cpu_datap->cpu_id = cpu_id;
	this_cpu_datap->cpu_phys_number = lapic_id;

	this_cpu_datap->cpu_console_buf = console_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_console_buf == NULL)
		goto failed;

	this_cpu_datap->cpu_chud = chudxnu_cpu_alloc(boot_cpu);
	if (this_cpu_datap->cpu_chud == NULL)
		goto failed;

	if (!boot_cpu) {
		this_cpu_datap->lcpu.core = cpu_thread_alloc(this_cpu_datap->cpu_number);
		if (this_cpu_datap->lcpu.core == NULL)
			goto failed;

		pmCPUStateInit();

		this_cpu_datap->cpu_pmap = pmap_cpu_alloc(boot_cpu);
		if (this_cpu_datap->cpu_pmap == NULL)
			goto failed;

		this_cpu_datap->cpu_processor = cpu_processor_alloc(boot_cpu);
		if (this_cpu_datap->cpu_processor == NULL)
			goto failed;
		/*
		 * processor_init() is deferred to topology start because
		 * "slot numbers", a.k.a. logical processor numbers, are
		 * not yet finalized.
		 */
	}

	*processor_out = this_cpu_datap->cpu_processor;
	*ipi_handler = NULL;

	if (target_cpu == machine_info.max_cpus - 1) {
		/*
		 * All processors are now registered but not started (except
		 * for this "in-limbo" boot processor). We call into the
		 * machine topology code to finalize and activate the
		 * topology.
		 */
		cpu_topology_start();
	}

	return KERN_SUCCESS;

failed:
	cpu_processor_free(this_cpu_datap->cpu_processor);
	pmap_cpu_free(this_cpu_datap->cpu_pmap);
	chudxnu_cpu_free(this_cpu_datap->cpu_chud);
	console_cpu_free(this_cpu_datap->cpu_console_buf);
	return KERN_FAILURE;
}

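/*
 * vector_unit encodes the best vector extension that is present in
 * the cpu (and, for the SSE variants, enabled by the OS): 8 = SSE4.2,
 * 7 = SSE4.1, 6 = SSSE3, 5 = SSE3, 4 = SSE2, 3 = SSE, 2 = MMX,
 * 0 = none.
 */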
void
ml_cpu_get_info(ml_cpu_info_t *cpu_infop)
{
	boolean_t	os_supports_sse;
	i386_cpu_info_t	*cpuid_infop;

	if (cpu_infop == NULL)
		return;

	/*
	 * Are we supporting MMX/SSE/SSE2/SSE3/SSSE3/SSE4.x?
	 * As distinct from whether the cpu has these capabilities.
	 */
	os_supports_sse = get_cr4() & CR4_XMM;
	if ((cpuid_features() & CPUID_FEATURE_SSE4_2) && os_supports_sse)
		cpu_infop->vector_unit = 8;
	else if ((cpuid_features() & CPUID_FEATURE_SSE4_1) && os_supports_sse)
		cpu_infop->vector_unit = 7;
	else if ((cpuid_features() & CPUID_FEATURE_SSSE3) && os_supports_sse)
		cpu_infop->vector_unit = 6;
	else if ((cpuid_features() & CPUID_FEATURE_SSE3) && os_supports_sse)
		cpu_infop->vector_unit = 5;
	else if ((cpuid_features() & CPUID_FEATURE_SSE2) && os_supports_sse)
		cpu_infop->vector_unit = 4;
	else if ((cpuid_features() & CPUID_FEATURE_SSE) && os_supports_sse)
		cpu_infop->vector_unit = 3;
	else if (cpuid_features() & CPUID_FEATURE_MMX)
		cpu_infop->vector_unit = 2;
	else
		cpu_infop->vector_unit = 0;

	cpuid_infop = cpuid_info();

	cpu_infop->cache_line_size = cpuid_infop->cache_linesize;

	cpu_infop->l1_icache_size = cpuid_infop->cache_size[L1I];
	cpu_infop->l1_dcache_size = cpuid_infop->cache_size[L1D];

	if (cpuid_infop->cache_size[L2U] > 0) {
		cpu_infop->l2_settings = 1;
		cpu_infop->l2_cache_size = cpuid_infop->cache_size[L2U];
	} else {
		cpu_infop->l2_settings = 0;
		cpu_infop->l2_cache_size = 0xFFFFFFFF;
	}

	if (cpuid_infop->cache_size[L3U] > 0) {
		cpu_infop->l3_settings = 1;
		cpu_infop->l3_cache_size = cpuid_infop->cache_size[L3U];
	} else {
		cpu_infop->l3_settings = 0;
		cpu_infop->l3_cache_size = 0xFFFFFFFF;
	}
}

void
ml_init_max_cpus(unsigned long max_cpus)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		if (max_cpus > 0 && max_cpus <= MAX_CPUS) {
			/*
			 * Note: max_cpus is the number of enabled processors
			 * that ACPI found; max_ncpus is the maximum number
			 * that the kernel supports or that the "cpus="
			 * boot-arg has set. Here we take the minimum.
			 */
			machine_info.max_cpus = MIN(max_cpus, max_ncpus);
		}
		if (max_cpus_initialized == MAX_CPUS_WAIT)
			wakeup((event_t)&max_cpus_initialized);
		max_cpus_initialized = MAX_CPUS_SET;
	}
	(void) ml_set_interrupts_enabled(current_state);
}

int
ml_get_max_cpus(void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	if (max_cpus_initialized != MAX_CPUS_SET) {
		max_cpus_initialized = MAX_CPUS_WAIT;
		assert_wait((event_t)&max_cpus_initialized, THREAD_UNINT);
		(void)thread_block(THREAD_CONTINUE_NULL);
	}
	(void) ml_set_interrupts_enabled(current_state);
	return(machine_info.max_cpus);
}

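/*
 * Worked values for the initialization below: NSEC_PER_SEC>>2 is
 * 250,000,000 ns, so LockTimeOut defaults to a quarter second (also
 * converted to TSC ticks).  The "mtxspin" boot-arg is in microseconds
 * and is capped at USEC_PER_SEC>>4 (62,500 us); absent the boot-arg,
 * MutexSpin defaults to 10 us.
 */
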
/*
 *	Routine:        ml_init_lock_timeout
 *	Function:	Initialize the lock timeout and mutex spin values.
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	/* LockTimeOut is in absolute time, LockTimeOutTSC in TSC ticks */
	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (uint32_t) abstime;
	LockTimeOutTSC = (uint32_t) tmrCvt(abstime, tscFCvtn2t);

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates. Defer to cpu_thread_init().
 */
void
ml_cpu_up(void)
{
	return;
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	return;
}

/*
 * The following are required for parts of the kernel
 * that cannot resolve these functions as inlines:
 */
extern thread_t current_act(void);
thread_t
current_act(void)
{
	return(current_thread_fast());
}

#undef current_thread
extern thread_t current_thread(void);
thread_t
current_thread(void)
{
	return(current_thread_fast());
}


boolean_t ml_is64bit(void) {

	return (cpu_mode_is64bit());
}


boolean_t ml_thread_is64bit(thread_t thread) {

	return (thread_is_64bit(thread));
}


boolean_t ml_state_is64bit(void *saved_state) {

	return is_saved_state64(saved_state);
}

void ml_cpu_set_ldt(int selector)
{
	/*
	 * Avoid loading the LDT
	 * if we're setting the KERNEL LDT and it's already set.
	 */
	if (selector == KERNEL_LDT &&
	    current_cpu_datap()->cpu_ldt == KERNEL_LDT)
		return;

	/*
	 * If 64bit this requires a mode switch (and back).
	 */
	if (cpu_mode_is64bit())
		ml_64bit_lldt(selector);
	else
		lldt(selector);
	current_cpu_datap()->cpu_ldt = selector;
}

void ml_fp_setvalid(boolean_t value)
{
	fp_setvalid(value);
}

uint64_t ml_cpu_int_event_time(void)
{
	return current_cpu_datap()->cpu_int_event_time;
}


#if MACH_KDB

/*
 *	Display the global MSRs.
 *
 *	ddb command: ms
 */
void
db_msr(__unused db_expr_t addr,
       __unused int have_addr,
       __unused db_expr_t count,
       __unused char *modif)
{

	uint32_t i, msrlow, msrhigh;

	/* Try all of the first 4096 msrs */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n", i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0x0C000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0x0C000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
				  0x0C000000 | i, msrhigh, msrlow);
		}
	}

	/* Try all of the 4096 msrs at 0xC0000000 */
	for (i = 0; i < 4096; i++) {
		if (!rdmsr_carefully(0xC0000000 | i, &msrlow, &msrhigh)) {
			db_printf("%08X - %08X.%08X\n",
				  0xC0000000 | i, msrhigh, msrlow);
		}
	}
}

#endif