/* apple/xnu.git: osfmk/ppc/machine_routines.c (xnu-792.24.17) */
/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>

unsigned int LockTimeOut = 12500000;
unsigned int MutexSpin = 0;

decl_mutex_data(static, mcpus_lock);
unsigned int mcpus_lock_initialized = 0;
unsigned int mcpus_state = 0;

uint32_t warFlags = 0;
#define warDisMBpoff	0x80000000
#define MAX_CPUS_SET	0x01
#define MAX_CPUS_WAIT	0x02

decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;

extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;
#if !MACH_LDEBUG
extern unsigned int entfsectPatch_isync;
extern unsigned int retfsectPatch_isync;
extern unsigned int retfsectPatch_eieio;
#endif

struct patch_up {
	unsigned int	*addr;
	unsigned int	data;
};

typedef struct patch_up patch_up_t;
patch_up_t patch_up_table[] = {
	{&hwllckPatch_isync,		0x60000000},
	{&hwulckPatch_isync,		0x60000000},
	{&hwulckbPatch_isync,		0x60000000},
	{&hwlmlckPatch_isync,		0x60000000},
	{&hwltlckPatch_isync,		0x60000000},
	{&hwcsatomicPatch_isync,	0x60000000},
	{&mlckePatch_isync,		0x60000000},
	{&mlckPatch_isync,		0x60000000},
	{&mltelckPatch_isync,		0x60000000},
	{&mltlckPatch_isync,		0x60000000},
	{&mulckePatch_isync,		0x60000000},
	{&mulckPatch_isync,		0x60000000},
	{&slckPatch_isync,		0x60000000},
	{&stlckPatch_isync,		0x60000000},
	{&sulckPatch_isync,		0x60000000},
	{&rwlePatch_isync,		0x60000000},
	{&rwlsPatch_isync,		0x60000000},
	{&rwlsePatch_isync,		0x60000000},
	{&rwlesPatch_isync,		0x60000000},
	{&rwtlePatch_isync,		0x60000000},
	{&rwtlsPatch_isync,		0x60000000},
	{&rwldPatch_isync,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&mulckPatch_eieio,		0x60000000},
	{&mulckePatch_eieio,		0x60000000},
	{&sulckPatch_eieio,		0x60000000},
	{&rwlesPatch_eieio,		0x60000000},
	{&rwldPatch_eieio,		0x60000000},
#if !MACH_LDEBUG
	{&entfsectPatch_isync,		0x60000000},
	{&retfsectPatch_isync,		0x60000000},
	{&retfsectPatch_eieio,		0x60000000},
#endif
	{NULL,				0x00000000}
};

extern int forcenap;
extern boolean_t pmap_initialized;

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return (io_map(phys_addr, size));
}
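
/*
 * Example (hypothetical, for illustration only -- the physical address
 * and register layout are made up):
 *
 *	vm_offset_t regs = ml_io_map(0xf3000000, PAGE_SIZE);
 *	if (regs != 0)
 *		status = *(volatile unsigned int *)regs;
 */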

/*
 * Routine: ml_static_malloc
 * Function: static memory allocation
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	vm_offset_t vaddr;

	if (pmap_initialized)
		return ((vm_offset_t)NULL);
	else {
		/* Carve the block off the end of the static memory region */
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr + size);
		return (vaddr);
	}
}
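
/*
 * Note: ml_static_malloc() only succeeds during early boot; once
 * pmap_initialized is set it returns NULL, so callers must check the
 * result (and use the regular kernel allocators after boot).
 */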

/*
 * Routine: ml_static_ptovirt
 * Function: physical to virtual for statically mapped memory
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ((vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr) == paddr))
		return (vaddr);
	else
		return ((vm_offset_t)NULL);
}

/*
 * Routine: ml_static_mfree
 * Function: free up pages from the static memory region
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr + size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur + PAGE_SIZE));
			/* Hand the page back to the VM as a free page (>>12 converts to a 4K page number) */
			vm_page_create(paddr_cur >> 12, (paddr_cur + PAGE_SIZE) >> 12);
		}
	}
}

/*
 * Routine: ml_vtophys
 * Function: virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return (pmap_extract(kernel_pmap, vaddr));
}

/*
 * Routine: ml_install_interrupt_handler
 * Function: Initialize Interrupt Handler
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info *proc_info;
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub = nub;
	proc_info->interrupt_source = source;
	proc_info->interrupt_target = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}

/*
 * Routine: ml_init_interrupt
 * Function: Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 * Routine: ml_get_interrupts_enabled
 * Function: Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return ((mfmsr() & MASK(MSR_EE)) != 0);
}

/*
 * Routine: ml_at_interrupt_context
 * Function: Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t ret;
	boolean_t current_state;

	/* Disable interrupts so istackptr can be sampled consistently */
	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (getPerProc()->istackptr == 0);	/* istackptr is 0 while the interrupt stack is in use */
	ml_set_interrupts_enabled(current_state);
	return (ret);
}

/*
 * Routine: ml_cause_interrupt
 * Function: Generate a fake interrupt
 */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

/*
 * Routine: ml_thread_policy
 * Function: apply machine-dependent scheduling policy hints to a thread
 */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{

	if ((policy_id == MACHINE_GROUP) &&
	    ((PerProcTable[master_cpu].ppe_vaddr->pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}
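
/*
 * Example (hypothetical caller, mirroring how the network stack uses this
 * hook -- the exact flag combination here is an assumption):
 *
 *	ml_thread_policy(current_thread(), MACHINE_GROUP,
 *			 MACHINE_NETWORK_GROUP | MACHINE_NETWORK_WORKLOOP);
 */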

/*
 * Routine: machine_signal_idle
 * Function: wake an idle processor that may be dozing or napping
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info *proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}

/*
 * Routine: ml_processor_register
 * Function: register a processor, allocating its per_proc data as needed
 */
kern_return_t
ml_processor_register(
	ml_processor_info_t *in_processor_info,
	processor_t *processor_out,
	ipi_handler_t *ipi_handler)
{
	struct per_proc_info *proc_info;
	int donap;
	boolean_t current_state;
	boolean_t boot_processor;

	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (!boot_processor)
		if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
			goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if (in_processor_info->time_base_enable != (void (*)(cpu_id_t, boolean_t))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void (*)(cpu_id_t, boolean_t))NULL;

	if ((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if (forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if ((proc_info->pf.Available & pfCanNap)
	    && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if (proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add((uint32_t *)&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor, proc_info->cpu_number);
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	if (!boot_processor)
		cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}

/*
 * Routine: ml_enable_nap
 * Function: enable or disable nap on a cpu; returns the previous setting
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info *proc_info;
	boolean_t prev_value;
	boolean_t current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if (forcenap) nap_enabled = forcenap - 1;	/* If we are to force nap on or off, do it */

	if (proc_info->pf.Available & pfCanNap) {			/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Is nap supported on this machine? */
		else proc_info->pf.Available &= ~pfWillNap;		/* Clear if not */
	}

	current_state = ml_set_interrupts_enabled(FALSE);
	if (proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}

/*
 * Routine: ml_init_max_cpus
 * Function: record the maximum cpu count; on uniprocessors, nop out lock barriers
 */
void
ml_init_max_cpus(unsigned int mcpus)
{

	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if ((mcpus_state & MAX_CPUS_SET)
	    || (mcpus == 0)
	    || (mcpus > MAX_CPUS))
		panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

	machine_info.max_cpus = mcpus;
	machine_info.physical_cpu_max = mcpus;
	machine_info.logical_cpu_max = mcpus;
	mcpus_state |= MAX_CPUS_SET;

	if (mcpus_state & MAX_CPUS_WAIT) {
		mcpus_state &= ~MAX_CPUS_WAIT;	/* Clear the wait flag and wake any waiter */
		thread_wakeup((event_t)&mcpus_state);
	}
	mutex_unlock(&mcpus_lock);

	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr;
		boolean_t current_state;

		patch_up_ptr = &patch_up_table[0];

		current_state = ml_set_interrupts_enabled(FALSE);
		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
				   (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			/* Flush so the patched nop is visible to the instruction stream */
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			patch_up_ptr++;
		}
		(void) ml_set_interrupts_enabled(current_state);
	}
}

/*
 * Routine: ml_get_max_cpus
 * Function: return max_cpus, blocking until ml_init_max_cpus() has set it
 */
unsigned int
ml_get_max_cpus(void)
{
	if (hw_compare_and_store(0, 1, &mcpus_lock_initialized))
		mutex_init(&mcpus_lock, 0);
	mutex_lock(&mcpus_lock);
	if (!(mcpus_state & MAX_CPUS_SET)) {
		mcpus_state |= MAX_CPUS_WAIT;
		thread_sleep_mutex((event_t)&mcpus_state,
				   &mcpus_lock, THREAD_UNINT);
	} else
		mutex_unlock(&mcpus_lock);
	return (machine_info.max_cpus);
}
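
/*
 * Note: thread_sleep_mutex() drops mcpus_lock while blocking, which is why
 * only the non-waiting path above unlocks explicitly; the wakeup comes from
 * ml_init_max_cpus() once MAX_CPUS_SET is established.
 */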

/*
 * This is called from the machine-independent routine cpu_up()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 * This is called from the machine-independent routine cpu_down()
 * to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);
}

/*
 * Routine: ml_cpu_get_info
 * Function: fill in an ml_cpu_info_t from the master cpu's feature data
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info *proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}

/*
 * Routine: ml_enable_cache_level
 * Function: enable or disable the L2 or L3; returns the previous state, or -1
 */
#define l2em 0x80000000		/* L2 enable bit in L2CR */
#define l3em 0x80000000		/* L3 enable bit in L3CR */
int
ml_enable_cache_level(int cache_level, int enable)
{
	int old_mode;
	unsigned long available, ccr;
	struct per_proc_info *proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			else ccr = 0;
			proc_info->pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			else ccr = 0;
			proc_info->pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}
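
/*
 * Example (hypothetical): disable the L2 around a diagnostic and then
 * restore the previous state:
 *
 *	int old = ml_enable_cache_level(2, FALSE);
 *	if (old >= 0) {
 *		... run diagnostic ...
 *		(void) ml_enable_cache_level(2, old);
 *	}
 */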
/*
 * Routine: ml_set_processor_speed
 * Function: change cpu speed using this machine's power-management style
 */
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info *proc_info;
	uint32_t cpu;
	kern_return_t result;
	boolean_t current_state;
	unsigned int i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;

	switch (proc_info->pf.pfPowerModes & pmType) {	/* Figure specific type */
	case pmDualPLL:

		ml_set_processor_speed_dpll(speed);
		break;

	case pmDFS:

		for (cpu = 0; cpu < real_ncpus; cpu++) {
			/*
			 * cpu_signal() returns after .5ms if it fails to signal a running cpu
			 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
			 */
			for (i = 200; i > 0; i--) {
				current_state = ml_set_interrupts_enabled(FALSE);
				if (cpu != cpu_number()) {
					if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
						/*
						 * Target cpu is off-line, skip
						 */
						result = KERN_SUCCESS;
					else {
						simple_lock(&spsLock);
						result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
						if (result == KERN_SUCCESS)
							thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
						simple_unlock(&spsLock);
					}
				} else {
					ml_set_processor_speed_dfs(speed);
					result = KERN_SUCCESS;
				}
				(void) ml_set_interrupts_enabled(current_state);
				if (result == KERN_SUCCESS)
					break;
			}
			if (result != KERN_SUCCESS)
				panic("ml_set_processor_speed(): Failed to set cpu%d speed\n", cpu);
		}
		break;

	case pmPowerTune:

		ml_set_processor_speed_powertune(speed);
		break;

	default:
		break;

	}
	return;
}

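/*
 * Note on the pmDFS handshake: the requester above signals each remote cpu
 * with SIGPcpureq/CPRQsps and sleeps on spsLock; the target cpu switches
 * its own speed in ml_set_processor_speed_slave() below and then wakes the
 * requester with thread_wakeup(&spsLock).
 */
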
/*
 * Routine: ml_set_processor_speed_slave
 * Function: target-side half of the DFS speed change
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}

/*
 * Routine: ml_init_lock_timeout
 * Function: compute LockTimeOut and MutexSpin in absolute-time units
 */
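/*
 * Note: NSEC_PER_SEC>>2 is 250 ms, so the static LockTimeOut initializer
 * (12500000) only covers the window before the timebase conversion is
 * available. The "mtxspin" boot-arg is given in microseconds and clamped
 * to USEC_PER_SEC>>4 (62500 us); without it the spin time is 10 us.
 */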
void
ml_init_lock_timeout(void)
{
	uint64_t abstime;
	uint32_t mtxspin;

	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}

/*
 * Routine: init_ast_check
 * Function: nothing to do on PPC
 */
void
init_ast_check(
	__unused processor_t processor)
{}

/*
 * Routine: cause_ast_check
 * Function: signal another cpu to check for pending ASTs
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info *proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}

/*
 * Routine: machine_processor_shutdown
 * Function: switch to the shutdown context; returns the previously running thread
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	__unused void (*doshutdown)(processor_t),
	__unused processor_t processor)
{
	CreateShutdownCTX();
	return ((thread_t)(getPerProc()->old_thread));
}

/*
 * Routine: set_be_bit
 * Function: set this cpu's branch-trace (BE) flag
 */
int
set_be_bit(
	void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return (1);
}

/*
 * Routine: clr_be_bit
 * Function: clear this cpu's branch-trace (BE) flag
 */
int
clr_be_bit(
	void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return (1);
}

/*
 * Routine: be_tracing
 * Function: report whether this cpu's branch-trace flag is set
 */
int
be_tracing(
	void)
{
	return (getPerProc()->cpu_flags & traceBE);
}

void ml_mem_backoff(void) {

	if (warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	/* sync waits for all prior memory accesses to complete; isync then discards prefetched instructions */
	__asm__ volatile("sync");
	__asm__ volatile("isync");

	return;
}