/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the
 * License may not be used to create, or enable the creation or
 * redistribution of, unlawful or unlicensed copies of an Apple operating
 * system, or to circumvent, violate, or enable the circumvention or
 * violation of, any terms of an Apple operating system software license
 * agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
 */

#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>

unsigned int LockTimeOut = 12500000;
unsigned int MutexSpin = 0;

decl_mutex_data(static, mcpus_lock);
unsigned int mcpus_lock_initialized = 0;
unsigned int mcpus_state = 0;

uint32_t warFlags = 0;
#define warDisMBpoff	0x80000000
#define MAX_CPUS_SET	0x01
#define MAX_CPUS_WAIT	0x02

decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;

extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;
#if !MACH_LDEBUG
extern unsigned int entfsectPatch_isync;
extern unsigned int retfsectPatch_isync;
extern unsigned int retfsectPatch_eieio;
#endif

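/*
 * Lock-path sync points to patch out on uniprocessor configurations.
 * 0x60000000 is the PowerPC no-op instruction (ori r0,r0,0): when only
 * one logical CPU is configured, ml_init_max_cpus() below overwrites
 * each listed isync/eieio with a nop, on the grounds that those
 * barriers only matter when more than one processor can run.
 */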
struct patch_up {
	unsigned int	*addr;
	unsigned int	data;
};

typedef struct patch_up patch_up_t;

patch_up_t patch_up_table[] = {
	{&hwllckPatch_isync,		0x60000000},
	{&hwulckPatch_isync,		0x60000000},
	{&hwulckbPatch_isync,		0x60000000},
	{&hwlmlckPatch_isync,		0x60000000},
	{&hwltlckPatch_isync,		0x60000000},
	{&hwcsatomicPatch_isync,	0x60000000},
	{&mlckePatch_isync,		0x60000000},
	{&mlckPatch_isync,		0x60000000},
	{&mltelckPatch_isync,		0x60000000},
	{&mltlckPatch_isync,		0x60000000},
	{&mulckePatch_isync,		0x60000000},
	{&mulckPatch_isync,		0x60000000},
	{&slckPatch_isync,		0x60000000},
	{&stlckPatch_isync,		0x60000000},
	{&sulckPatch_isync,		0x60000000},
	{&rwlePatch_isync,		0x60000000},
	{&rwlsPatch_isync,		0x60000000},
	{&rwlsePatch_isync,		0x60000000},
	{&rwlesPatch_isync,		0x60000000},
	{&rwtlePatch_isync,		0x60000000},
	{&rwtlsPatch_isync,		0x60000000},
	{&rwldPatch_isync,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&mulckPatch_eieio,		0x60000000},
	{&mulckePatch_eieio,		0x60000000},
	{&sulckPatch_eieio,		0x60000000},
	{&rwlesPatch_eieio,		0x60000000},
	{&rwldPatch_eieio,		0x60000000},
#if !MACH_LDEBUG
	{&entfsectPatch_isync,		0x60000000},
	{&retfsectPatch_isync,		0x60000000},
	{&retfsectPatch_eieio,		0x60000000},
#endif
	{NULL,				0x00000000}
};

extern int		forcenap;
extern boolean_t	pmap_initialized;

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size));
}

/*
 * Routine: ml_static_malloc
 * Function: static memory allocation
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr+size);
		return(vaddr);
	}
}

/*
 * Routine: ml_static_ptovirt
 * Function: return the V=R virtual address of statically mapped physical memory
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}

/*
 * Routine: ml_static_mfree
 * Function: free static memory, returning its pages to the VM free list
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

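	/*
	 * Walk only the pages fully contained in [vaddr, vaddr+size):
	 * unwire each one, remove its V=R mapping, and hand the frame
	 * back to the VM layer.  The >>12 shifts below convert physical
	 * addresses to 4KB page numbers for vm_page_create().
	 */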
	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
			vm_page_create(paddr_cur>>12, (paddr_cur+PAGE_SIZE)>>12);
		}
	}
}

/*
 * Routine: ml_vtophys
 * Function: virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}

/*
 * Routine: ml_install_interrupt_handler
 * Function: install the interrupt handler for the current processor
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info	*proc_info;
	boolean_t		current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub = nub;
	proc_info->interrupt_source = source;
	proc_info->interrupt_target = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}

/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 * Routine: ml_get_interrupts_enabled
 * Function: Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t	ret;
	boolean_t	current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (getPerProc()->istackptr == 0);
	ml_set_interrupts_enabled(current_state);
	return(ret);
}

/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

/*
 * Routine: ml_thread_policy
 * Function: apply hinted thread policies (processor binding, priority boost)
 */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{

	if ((policy_id == MACHINE_GROUP) &&
	    ((PerProcTable[master_cpu].ppe_vaddr->pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);

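	/*
	 * Network workloop threads run one priority step above their
	 * current priority.
	 */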
	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}

/*
 * Routine: machine_signal_idle
 * Function: wake a napping or dozing processor
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}

/*
 * Routine: ml_processor_register
 * Function: register a new processor with the machine layer
 */
kern_return_t
ml_processor_register(
	ml_processor_info_t	*in_processor_info,
	processor_t		*processor_out,
	ipi_handler_t		*ipi_handler)
{
	struct per_proc_info	*proc_info;
	int			donap;
	boolean_t		current_state;
	boolean_t		boot_processor;

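	/*
	 * The boot processor uses the statically allocated master
	 * per_proc entry; every other processor gets freshly allocated
	 * per_proc and console buffers, which are released again on any
	 * failure below (see processor_register_error).
	 */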
	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (!boot_processor)
		if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
			goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if(in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;

	if((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if(forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if((proc_info->pf.Available & pfCanNap)
	   && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if(proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add((uint32_t *)&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor, proc_info->cpu_number);
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	if (!boot_processor)
		cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}

/*
 * Routine: ml_enable_nap
 * Function: enable or disable nap on a processor, returning the previous setting
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info	*proc_info;
	boolean_t		prev_value;
	boolean_t		current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if(forcenap) nap_enabled = forcenap - 1;		/* If we are to force nap on or off, do it */

	if(proc_info->pf.Available & pfCanNap) {		/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Is nap supported on this machine? */
		else proc_info->pf.Available &= ~pfWillNap;	/* Clear if not */
	}

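	/*
	 * If we changed our own flags, refresh the live copy kept in
	 * SPRG2 (presumed to be read by the low-level idle/nap code).
	 */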
	current_state = ml_set_interrupts_enabled(FALSE);
	if(proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}

/*
 * Routine: ml_init_max_cpus
 * Function: record the maximum number of CPUs, patching out MP barriers for UP
 */
void
ml_init_max_cpus(unsigned int mcpus)
{

	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if ((mcpus_state & MAX_CPUS_SET)
	    || (mcpus == 0)
	    || (mcpus > MAX_CPUS))
		panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

	machine_info.max_cpus = mcpus;
	machine_info.physical_cpu_max = mcpus;
	machine_info.logical_cpu_max = mcpus;
	mcpus_state |= MAX_CPUS_SET;

	if (mcpus_state & MAX_CPUS_WAIT) {
		mcpus_state &= ~MAX_CPUS_WAIT;
		thread_wakeup((event_t)&mcpus_state);
	}
	mutex_unlock(&mcpus_lock);

	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr;
		boolean_t current_state;

		patch_up_ptr = &patch_up_table[0];

		current_state = ml_set_interrupts_enabled(FALSE);
		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
				   (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			patch_up_ptr++;
		}
		(void) ml_set_interrupts_enabled(current_state);
	}
}

/*
 * Routine: ml_get_max_cpus
 * Function: return the maximum number of CPUs, waiting until it has been set
 */
unsigned int
ml_get_max_cpus(void)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if (!(mcpus_state & MAX_CPUS_SET)) {
		mcpus_state |= MAX_CPUS_WAIT;
		thread_sleep_mutex((event_t)&mcpus_state,
				   &mcpus_lock, THREAD_UNINT);
	} else
		mutex_unlock(&mcpus_lock);
	return(machine_info.max_cpus);
}

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);
}

/*
 * Routine: ml_cpu_get_info
 * Function: fill in cache and vector-unit information for the master CPU
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info	*proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

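	/*
	 * Absent cache levels are reported with zeroed settings and a
	 * size of 0xFFFFFFFF rather than 0.
	 */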
	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}

/*
 * Routine: ml_enable_cache_level
 * Function: enable or disable the L2 or L3 cache, returning the previous mode
 */
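/*
 * l2em/l3em mask the cache-enable bit (the high-order bit of the
 * saved L2CR/L3CR value), letting the current on/off mode of a
 * cache level be read back from proc_info->pf.l2cr / pf.l3cr.
 */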
#define l2em 0x80000000
#define l3em 0x80000000
int
ml_enable_cache_level(int cache_level, int enable)
{
	int old_mode;
	unsigned long available, ccr;
	struct per_proc_info *proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			else ccr = 0;
			proc_info->pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			else ccr = 0;
			proc_info->pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}

/*
 * Routine: ml_set_processor_speed
 * Function: change processor speed using the platform's power-management scheme
 */
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info	*proc_info;
	uint32_t		cpu;
	kern_return_t		result;
	boolean_t		current_state;
	unsigned int		i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;

	switch (proc_info->pf.pfPowerModes & pmType) {	/* Figure specific type */
	case pmDualPLL:

		ml_set_processor_speed_dpll(speed);
		break;

	case pmDFS:

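		/*
		 * Each processor applies the DFS change to itself: every
		 * other CPU is signaled with a CPRQsps request and we
		 * sleep on spsLock until ml_set_processor_speed_slave()
		 * on that CPU wakes us.
		 */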
		for (cpu = 0; cpu < real_ncpus; cpu++) {
			/*
			 * cpu_signal() returns after .5ms if it fails to signal a running cpu
			 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
			 */
			for (i = 200; i > 0; i--) {
				current_state = ml_set_interrupts_enabled(FALSE);
				if (cpu != cpu_number()) {
					if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
						/*
						 * Target cpu is off-line, skip
						 */
						result = KERN_SUCCESS;
					else {
						simple_lock(&spsLock);
						result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
						if (result == KERN_SUCCESS)
							thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
						simple_unlock(&spsLock);
					}
				} else {
					ml_set_processor_speed_dfs(speed);
					result = KERN_SUCCESS;
				}
				(void) ml_set_interrupts_enabled(current_state);
				if (result == KERN_SUCCESS)
					break;
			}
			if (result != KERN_SUCCESS)
				panic("ml_set_processor_speed(): Fail to set cpu%d speed\n", cpu);
		}
		break;

	case pmPowerTune:

		ml_set_processor_speed_powertune(speed);
		break;

	default:
		break;

	}
	return;
}

/*
 * Routine: ml_set_processor_speed_slave
 * Function: perform this processor's part of a DFS speed change and wake the master
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}

/*
 * Routine: ml_init_lock_timeout
 * Function: convert the lock timeout and mutex spin limits to absolute time
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

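	/*
	 * The default lock timeout is a quarter second (NSEC_PER_SEC>>2)
	 * in absolute-time units.  A "mtxspin" boot-arg, given in
	 * microseconds, overrides the 10us default mutex spin and is
	 * capped at USEC_PER_SEC>>4 (62500us).
	 */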
	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}

/*
 * Routine: init_ast_check
 * Function: no-op on PowerPC
 */
void
init_ast_check(
	__unused processor_t processor)
{}

/*
 * Routine: cause_ast_check
 * Function: signal another processor to check for pending ASTs
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}

/*
 * Routine: machine_processor_shutdown
 * Function: switch to the shutdown context
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	__unused void (*doshutdown)(processor_t),
	__unused processor_t processor)
{
	CreateShutdownCTX();
	return((thread_t)(getPerProc()->old_thread));
}

/*
 * Routine: set_be_bit
 * Function: set the branch-trace (traceBE) flag for this processor
 */
int
set_be_bit(
	void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

/*
 * Routine: clr_be_bit
 * Function: clear the branch-trace (traceBE) flag for this processor
 */
int
clr_be_bit(
	void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

/*
 * Routine: be_tracing
 * Function: report whether branch tracing is enabled on this processor
 */
int
be_tracing(
	void)
{
	return(getPerProc()->cpu_flags & traceBE);
}

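/*
 * Routine: ml_mem_backoff
 * Function: memory access backoff; issues sync/isync unless the
 *	warDisMBpoff workaround flag has disabled it
 */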
void ml_mem_backoff(void) {

	if(warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	__asm__ volatile("sync");
	__asm__ volatile("isync");

	return;
}