/*
 * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>

#include <ppc/machine_routines.h>
#include <ppc/cpu_internal.h>
#include <ppc/exception.h>
#include <ppc/io_map_entries.h>
#include <ppc/misc_protos.h>
#include <ppc/savearea.h>
#include <ppc/Firmware.h>
#include <ppc/pmap.h>
#include <ppc/mem.h>
#include <ppc/new_screen.h>
#include <ppc/proc_reg.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <kern/machine.h>

#include <vm/vm_page.h>

unsigned int LockTimeOut = 1250000000;		/* Overridden in ml_init_lock_timeout() */
unsigned int MutexSpin = 0;

decl_mutex_data(static, mcpus_lock);
unsigned int mcpus_lock_initialized = 0;
unsigned int mcpus_state = 0;

uint32_t warFlags = 0;
#define warDisMBpoff	0x80000000		/* If set, ml_mem_backoff() is a no-op */

#define MAX_CPUS_SET	0x01
#define MAX_CPUS_WAIT	0x02

decl_simple_lock_data(, spsLock);
unsigned int spsLockInit = 0;

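/*
 * The following externs name barrier instructions (isync/eieio) embedded in
 * the low-level lock paths; on a uniprocessor those barriers are unnecessary
 * and are patched out at boot by ml_init_max_cpus() below, via
 * patch_up_table.
 */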
extern unsigned int hwllckPatch_isync;
extern unsigned int hwulckPatch_isync;
extern unsigned int hwulckbPatch_isync;
extern unsigned int hwlmlckPatch_isync;
extern unsigned int hwltlckPatch_isync;
extern unsigned int hwcsatomicPatch_isync;
extern unsigned int mlckePatch_isync;
extern unsigned int mlckPatch_isync;
extern unsigned int mltelckPatch_isync;
extern unsigned int mltlckPatch_isync;
extern unsigned int mulckePatch_isync;
extern unsigned int mulckPatch_isync;
extern unsigned int slckPatch_isync;
extern unsigned int stlckPatch_isync;
extern unsigned int sulckPatch_isync;
extern unsigned int rwlePatch_isync;
extern unsigned int rwlsPatch_isync;
extern unsigned int rwlsePatch_isync;
extern unsigned int rwlesPatch_isync;
extern unsigned int rwtlePatch_isync;
extern unsigned int rwtlsPatch_isync;
extern unsigned int rwldPatch_isync;
extern unsigned int hwulckPatch_eieio;
extern unsigned int mulckPatch_eieio;
extern unsigned int mulckePatch_eieio;
extern unsigned int sulckPatch_eieio;
extern unsigned int rwlesPatch_eieio;
extern unsigned int rwldPatch_eieio;
#if !MACH_LDEBUG
extern unsigned int entfsectPatch_isync;
extern unsigned int retfsectPatch_isync;
extern unsigned int retfsectPatch_eieio;
#endif

struct patch_up {
	unsigned int	*addr;
	unsigned int	data;
};

typedef struct patch_up patch_up_t;

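/*
 * Uniprocessor patch table.  Each entry pairs the address of a patchable
 * barrier instruction with the data to store there: 0x60000000 is
 * "ori r0,r0,0", the canonical PowerPC nop, since isync/eieio ordering
 * only matters when more than one CPU is present.
 */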
patch_up_t patch_up_table[] = {
	{&hwllckPatch_isync,		0x60000000},
	{&hwulckPatch_isync,		0x60000000},
	{&hwulckbPatch_isync,		0x60000000},
	{&hwlmlckPatch_isync,		0x60000000},
	{&hwltlckPatch_isync,		0x60000000},
	{&hwcsatomicPatch_isync,	0x60000000},
	{&mlckePatch_isync,		0x60000000},
	{&mlckPatch_isync,		0x60000000},
	{&mltelckPatch_isync,		0x60000000},
	{&mltlckPatch_isync,		0x60000000},
	{&mulckePatch_isync,		0x60000000},
	{&mulckPatch_isync,		0x60000000},
	{&slckPatch_isync,		0x60000000},
	{&stlckPatch_isync,		0x60000000},
	{&sulckPatch_isync,		0x60000000},
	{&rwlePatch_isync,		0x60000000},
	{&rwlsPatch_isync,		0x60000000},
	{&rwlsePatch_isync,		0x60000000},
	{&rwlesPatch_isync,		0x60000000},
	{&rwtlePatch_isync,		0x60000000},
	{&rwtlsPatch_isync,		0x60000000},
	{&rwldPatch_isync,		0x60000000},
	{&hwulckPatch_eieio,		0x60000000},
	{&mulckPatch_eieio,		0x60000000},
	{&mulckePatch_eieio,		0x60000000},
	{&sulckPatch_eieio,		0x60000000},
	{&rwlesPatch_eieio,		0x60000000},
	{&rwldPatch_eieio,		0x60000000},
#if !MACH_LDEBUG
	{&entfsectPatch_isync,		0x60000000},
	{&retfsectPatch_isync,		0x60000000},
	{&retfsectPatch_eieio,		0x60000000},
#endif
	{NULL,				0x00000000}
};

extern int forcenap;			/* Boot-arg nap override: 0 = none, 1 = force off, 2 = force on */
extern boolean_t pmap_initialized;

/* Map memory-mapped I/O space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size, VM_WIMG_IO));
}

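/*
 * No bounce pool is maintained on PPC, so an empty range is reported.
 */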
void ml_get_bouncepool_info(vm_offset_t *phys_addr, vm_size_t *size)
{
	*phys_addr = 0;
	*size = 0;
}

/*
 *	Routine:	ml_static_malloc
 *	Function:	static memory allocation
 */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr+size);
		return(vaddr);
	}
}

/*
 *	Routine:	ml_static_ptovirt
 *	Function:	Return the virtual address corresponding to a
 *			physical address in static (V=R) memory
 */
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}

/*
 *	Routine:	ml_static_mfree
 *	Function:	Release pages from static (V=R) memory back to
 *			the VM free list
 */
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page_32(vaddr);
	     vaddr_cur < trunc_page_32(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, (addr64_t)vaddr_cur, (addr64_t)(vaddr_cur+PAGE_SIZE));
			vm_page_create(paddr_cur>>12, (paddr_cur+PAGE_SIZE)>>12);	/* vm_page_create() takes 4KB page numbers */
		}
	}
}

/*
 *	Routine:	ml_vtophys
 *	Function:	virtual to physical on static pages
 */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}

/*
 *	Routine:	ml_install_interrupt_handler
 *	Function:	Install an interrupt handler on the current processor
 */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	struct per_proc_info	*proc_info;
	boolean_t		current_state;

	current_state = ml_get_interrupts_enabled();
	proc_info = getPerProc();

	proc_info->interrupt_nub = nub;
	proc_info->interrupt_source = source;
	proc_info->interrupt_target = target;
	proc_info->interrupt_handler = handler;
	proc_info->interrupt_refCon = refCon;

	proc_info->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);

	initialize_screen(0, kPEAcquireScreen);
}

/*
 *	Routine:	ml_init_interrupt
 *	Function:	Initialize Interrupts
 */
void ml_init_interrupt(void)
{
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	getPerProc()->interrupts_enabled = TRUE;
	(void) ml_set_interrupts_enabled(current_state);
}

/*
 *	Routine:	ml_get_interrupts_enabled
 *	Function:	Get Interrupts Enabled
 */
boolean_t ml_get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);	/* Interrupts are enabled iff MSR[EE] is set */
}

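/*
 * Interrupt state is manipulated with the save/disable/restore pattern
 * used throughout this file, for example:
 *
 *	boolean_t state;
 *
 *	state = ml_set_interrupts_enabled(FALSE);
 *	... critical section ...
 *	(void) ml_set_interrupts_enabled(state);
 */
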
/*
 *	Routine:	ml_at_interrupt_context
 *	Function:	Check if running at interrupt context
 */
boolean_t ml_at_interrupt_context(void)
{
	boolean_t	ret;
	boolean_t	current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	ret = (getPerProc()->istackptr == 0);	/* istackptr is 0 while the interrupt stack is in use */
	ml_set_interrupts_enabled(current_state);
	return(ret);
}

/*
 *	Routine:	ml_cause_interrupt
 *	Function:	Generate a fake interrupt
 */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

/*
 *	Routine:	ml_thread_policy
 *	Function:	Apply machine-layer scheduling policy hints to a thread
 */
void ml_thread_policy(
	thread_t thread,
	unsigned policy_id,
	unsigned policy_info)
{
	if ((policy_id == MACHINE_GROUP) &&
	    ((PerProcTable[master_cpu].ppe_vaddr->pf.Available) & pfSMPcap))
		thread_bind(thread, master_processor);

	if (policy_info & MACHINE_NETWORK_WORKLOOP) {
		spl_t s = splsched();

		thread_lock(thread);

		set_priority(thread, thread->priority + 1);

		thread_unlock(thread);
		splx(s);
	}
}

/*
 *	Routine:	machine_signal_idle
 *	Function:	Wake a processor that may be napping or dozing
 */
void
machine_signal_idle(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info->pf.Available & (pfCanDoze|pfWillNap))
		(void)cpu_signal(proc_info->cpu_number, SIGPwake, 0, 0);
}

/*
 *	Routine:	ml_processor_register
 *	Function:	Allocate and initialize the per_proc structures for a
 *			processor and register it with the machine layer
 */
kern_return_t
ml_processor_register(
	ml_processor_info_t	*in_processor_info,
	processor_t		*processor_out,
	ipi_handler_t		*ipi_handler)
{
	struct per_proc_info	*proc_info;
	int			donap;
	boolean_t		current_state;
	boolean_t		boot_processor;

	if (in_processor_info->boot_cpu == FALSE) {
		if (spsLockInit == 0) {
			spsLockInit = 1;
			simple_lock_init(&spsLock, 0);
		}
		boot_processor = FALSE;
		proc_info = cpu_per_proc_alloc();
		if (proc_info == (struct per_proc_info *)NULL)
			return KERN_FAILURE;
		proc_info->pp_cbfr = console_per_proc_alloc(FALSE);
		if (proc_info->pp_cbfr == (void *)NULL)
			goto processor_register_error;
	} else {
		boot_processor = TRUE;
		proc_info = PerProcTable[master_cpu].ppe_vaddr;
	}

	proc_info->pp_chud = chudxnu_per_proc_alloc(boot_processor);
	if (proc_info->pp_chud == (void *)NULL)
		goto processor_register_error;

	if (!boot_processor)
		if (cpu_per_proc_register(proc_info) != KERN_SUCCESS)
			goto processor_register_error;

	proc_info->cpu_id = in_processor_info->cpu_id;
	proc_info->start_paddr = in_processor_info->start_paddr;
	if(in_processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t ))NULL)
		proc_info->time_base_enable = in_processor_info->time_base_enable;
	else
		proc_info->time_base_enable = (void(*)(cpu_id_t, boolean_t ))NULL;

	if((proc_info->pf.pfPowerModes & pmType) == pmPowerTune) {
		proc_info->pf.pfPowerTune0 = in_processor_info->power_mode_0;
		proc_info->pf.pfPowerTune1 = in_processor_info->power_mode_1;
	}

	donap = in_processor_info->supports_nap;	/* Assume we use requested nap */
	if(forcenap) donap = forcenap - 1;		/* If there was an override, use that */

	if((proc_info->pf.Available & pfCanNap)
	   && (donap)) {
		proc_info->pf.Available |= pfWillNap;
		current_state = ml_set_interrupts_enabled(FALSE);
		if(proc_info == getPerProc())
			__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
		(void) ml_set_interrupts_enabled(current_state);
	}

	if (!boot_processor) {
		(void)hw_atomic_add((uint32_t *)&saveanchor.savetarget, FreeListMin);	/* saveareas for this processor */
		processor_init((struct processor *)proc_info->processor, proc_info->cpu_number);
	}

	*processor_out = (struct processor *)proc_info->processor;
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;

processor_register_error:
	if (proc_info->pp_cbfr != (void *)NULL)
		console_per_proc_free(proc_info->pp_cbfr);
	if (proc_info->pp_chud != (void *)NULL)
		chudxnu_per_proc_free(proc_info->pp_chud);
	if (!boot_processor)
		cpu_per_proc_free(proc_info);
	return KERN_FAILURE;
}

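/*
 * pf.Available is mirrored into SPRG2 on the running processor (the
 * "mtsprg 2" above and in ml_enable_nap()), presumably so low-level
 * handlers can test feature bits without touching memory.
 */
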
/*
 *	Routine:	ml_enable_nap
 *	Function:	Enable or disable nap mode for a processor; returns
 *			the previous setting
 */
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	struct per_proc_info	*proc_info;
	boolean_t		prev_value;
	boolean_t		current_state;

	proc_info = PerProcTable[target_cpu].ppe_vaddr;

	prev_value = (proc_info->pf.Available & pfCanNap) && (proc_info->pf.Available & pfWillNap);

	if(forcenap) nap_enabled = forcenap - 1;		/* If we are to force nap on or off, do it */

	if(proc_info->pf.Available & pfCanNap) {		/* Can the processor nap? */
		if (nap_enabled) proc_info->pf.Available |= pfWillNap;	/* Set nap enabled if requested */
		else proc_info->pf.Available &= ~pfWillNap;	/* Clear if not */
	}

	current_state = ml_set_interrupts_enabled(FALSE);
	if(proc_info == getPerProc())
		__asm__ volatile("mtsprg 2,%0" : : "r" (proc_info->pf.Available));	/* Set live value */
	(void) ml_set_interrupts_enabled(current_state);

	return (prev_value);
}

/*
 *	Routine:	ml_init_max_cpus
 *	Function:	Record the maximum number of CPUs; on a uniprocessor,
 *			also patch out unneeded barrier instructions
 */
void
ml_init_max_cpus(unsigned int mcpus)
{
	if (hw_compare_and_store(0,1,&mcpus_lock_initialized))
		mutex_init(&mcpus_lock,0);
	mutex_lock(&mcpus_lock);
	if ((mcpus_state & MAX_CPUS_SET)
	    || (mcpus == 0)
	    || (mcpus > MAX_CPUS))
		panic("ml_init_max_cpus(): Invalid call, max_cpus: %d\n", mcpus);

	machine_info.max_cpus = mcpus;
	machine_info.physical_cpu_max = mcpus;
	machine_info.logical_cpu_max = mcpus;
	mcpus_state |= MAX_CPUS_SET;

	if (mcpus_state & MAX_CPUS_WAIT) {
		mcpus_state &= ~MAX_CPUS_WAIT;		/* Clear the wait flag before waking waiters */
		thread_wakeup((event_t)&mcpus_state);
	}
	mutex_unlock(&mcpus_lock);

	if (machine_info.logical_cpu_max == 1) {
		struct patch_up *patch_up_ptr;
		boolean_t current_state;

		patch_up_ptr = &patch_up_table[0];

		current_state = ml_set_interrupts_enabled(FALSE);
		while (patch_up_ptr->addr != NULL) {
			/*
			 * Patch for V=R kernel text section
			 */
			bcopy_phys((addr64_t)((unsigned int)(&patch_up_ptr->data)),
				   (addr64_t)((unsigned int)(patch_up_ptr->addr)), 4);
			sync_cache64((addr64_t)((unsigned int)(patch_up_ptr->addr)),4);
			patch_up_ptr++;
		}
		(void) ml_set_interrupts_enabled(current_state);
	}
}

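/*
 * ml_init_max_cpus() and ml_get_max_cpus() form a simple handshake: a
 * caller arriving before max_cpus is known sets MAX_CPUS_WAIT and sleeps
 * on &mcpus_state; ml_init_max_cpus() clears the flag and issues the
 * matching thread_wakeup().
 */
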
/*
 *	Routine:	ml_get_max_cpus
 *	Function:	Return max_cpus, sleeping until ml_init_max_cpus()
 *			has been called
 */
unsigned int
ml_get_max_cpus(void)
{
	if (hw_compare_and_store(0,1,&mcpus_lock_initialized))
		mutex_init(&mcpus_lock,0);
	mutex_lock(&mcpus_lock);
	if (!(mcpus_state & MAX_CPUS_SET)) {
		mcpus_state |= MAX_CPUS_WAIT;
		thread_sleep_mutex((event_t)&mcpus_state,
				   &mcpus_lock, THREAD_UNINT);	/* Drops mcpus_lock */
	} else
		mutex_unlock(&mcpus_lock);
	return(machine_info.max_cpus);
}

/*
 *	This is called from the machine-independent routine cpu_up()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_up(void)
{
	hw_atomic_add(&machine_info.physical_cpu, 1);
	hw_atomic_add(&machine_info.logical_cpu, 1);
}

/*
 *	This is called from the machine-independent routine cpu_down()
 *	to perform machine-dependent info updates.
 */
void
ml_cpu_down(void)
{
	hw_atomic_sub(&machine_info.physical_cpu, 1);
	hw_atomic_sub(&machine_info.logical_cpu, 1);
}

/*
 *	Routine:	ml_cpu_get_info
 *	Function:	Report the boot processor's cache and feature
 *			information
 */
void
ml_cpu_get_info(ml_cpu_info_t *ml_cpu_info)
{
	struct per_proc_info	*proc_info;

	if (ml_cpu_info == 0) return;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	ml_cpu_info->vector_unit = (proc_info->pf.Available & pfAltivec) != 0;
	ml_cpu_info->cache_line_size = proc_info->pf.lineSize;
	ml_cpu_info->l1_icache_size = proc_info->pf.l1iSize;
	ml_cpu_info->l1_dcache_size = proc_info->pf.l1dSize;

	if (proc_info->pf.Available & pfL2) {
		ml_cpu_info->l2_settings = proc_info->pf.l2cr;
		ml_cpu_info->l2_cache_size = proc_info->pf.l2Size;
	} else {
		ml_cpu_info->l2_settings = 0;
		ml_cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (proc_info->pf.Available & pfL3) {
		ml_cpu_info->l3_settings = proc_info->pf.l3cr;
		ml_cpu_info->l3_cache_size = proc_info->pf.l3Size;
	} else {
		ml_cpu_info->l3_settings = 0;
		ml_cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}

/*
 *	Routine:	ml_enable_cache_level
 *	Function:	Enable or disable the L2 or L3 cache; returns the
 *			previous enable state, or -1 on error
 */
#define l2em 0x80000000		/* Cache enable bit in L2CR */
#define l3em 0x80000000		/* Cache enable bit in L3CR */
int
ml_enable_cache_level(int cache_level, int enable)
{
	int		old_mode;
	unsigned long	available, ccr;
	struct per_proc_info *proc_info;

	if (real_ncpus != 1) return -1;	/* XXX: This test is not safe */

	proc_info = PerProcTable[master_cpu].ppe_vaddr;
	available = proc_info->pf.Available;

	if ((cache_level == 2) && (available & pfL2)) {
		ccr = proc_info->pf.l2cr;
		old_mode = (ccr & l2em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l2crOriginal;
			else ccr = 0;
			proc_info->pf.l2cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	if ((cache_level == 3) && (available & pfL3)) {
		ccr = proc_info->pf.l3cr;
		old_mode = (ccr & l3em) ? TRUE : FALSE;
		if (old_mode != enable) {
			if (enable) ccr = proc_info->pf.l3crOriginal;
			else ccr = 0;
			proc_info->pf.l3cr = ccr;
			cacheInit();
		}

		return old_mode;
	}

	return -1;
}
/*
 *	Routine:	ml_set_processor_speed
 *	Function:	Set the processor speed using the power-management
 *			scheme the processor supports
 */
void
ml_set_processor_speed(unsigned long speed)
{
	struct per_proc_info	*proc_info;
	uint32_t		cpu;
	kern_return_t		result;
	boolean_t		current_state;
	unsigned int		i;

	proc_info = PerProcTable[master_cpu].ppe_vaddr;

	switch (proc_info->pf.pfPowerModes & pmType) {	/* Figure specific type */
	case pmDualPLL:

		ml_set_processor_speed_dpll(speed);
		break;

	case pmDFS:

		for (cpu = 0; cpu < real_ncpus; cpu++) {
			/*
			 * cpu_signal() returns after .5ms if it fails to signal a running cpu
			 * retry cpu_signal() for .1s to deal with long interrupt latency at boot
			 */
			for (i=200; i>0; i--) {
				current_state = ml_set_interrupts_enabled(FALSE);
				if (cpu != cpu_number()) {
					if (!(PerProcTable[cpu].ppe_vaddr->cpu_flags & SignalReady))
						/*
						 * Target cpu is off-line, skip
						 */
						result = KERN_SUCCESS;
					else {
						simple_lock(&spsLock);
						result = cpu_signal(cpu, SIGPcpureq, CPRQsps, speed);
						if (result == KERN_SUCCESS)
							thread_sleep_simple_lock(&spsLock, &spsLock, THREAD_UNINT);
						simple_unlock(&spsLock);
					}
				} else {
					ml_set_processor_speed_dfs(speed);
					result = KERN_SUCCESS;
				}
				(void) ml_set_interrupts_enabled(current_state);
				if (result == KERN_SUCCESS)
					break;
			}
			if (result != KERN_SUCCESS)
				panic("ml_set_processor_speed(): Failed to set cpu%d speed\n", cpu);
		}
		break;

	case pmPowerTune:

		ml_set_processor_speed_powertune(speed);
		break;

	default:
		break;

	}
	return;
}

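/*
 * For pmDFS the master cpu asks each other cpu to change its own speed:
 * ml_set_processor_speed() sends SIGPcpureq/CPRQsps and sleeps on spsLock,
 * and the target cpu runs ml_set_processor_speed_slave() below, which
 * applies the DFS setting and posts the wakeup.
 */
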
/*
 *	Routine:	ml_set_processor_speed_slave
 *	Function:	Apply a DFS speed change on a slave processor and
 *			wake the waiting master
 */
void
ml_set_processor_speed_slave(unsigned long speed)
{
	ml_set_processor_speed_dfs(speed);

	simple_lock(&spsLock);
	thread_wakeup(&spsLock);
	simple_unlock(&spsLock);
}

/*
 *	Routine:	ml_init_lock_timeout
 *	Function:	Set LockTimeOut to 1/4 second of timebase ticks and
 *			MutexSpin to 10us, or to the "mtxspin" boot-arg
 *			(capped at 62500us)
 */
void
ml_init_lock_timeout(void)
{
	uint64_t	abstime;
	uint32_t	mtxspin;

	nanoseconds_to_absolutetime(NSEC_PER_SEC>>2, &abstime);
	LockTimeOut = (unsigned int)abstime;

	if (PE_parse_boot_arg("mtxspin", &mtxspin)) {
		if (mtxspin > USEC_PER_SEC>>4)
			mtxspin = USEC_PER_SEC>>4;
		nanoseconds_to_absolutetime(mtxspin*NSEC_PER_USEC, &abstime);
	} else {
		nanoseconds_to_absolutetime(10*NSEC_PER_USEC, &abstime);
	}
	MutexSpin = (unsigned int)abstime;
}

/*
 *	Routine:	init_ast_check
 *	Function:	Nothing to do here on PPC
 */
void
init_ast_check(
	__unused processor_t processor)
{}

/*
 *	Routine:	cause_ast_check
 *	Function:	Signal another processor to check for pending ASTs
 */
void
cause_ast_check(
	processor_t processor)
{
	struct per_proc_info	*proc_info;

	proc_info = PROCESSOR_TO_PER_PROC(processor);

	if (proc_info != getPerProc()
	    && proc_info->interrupts_enabled == TRUE)
		cpu_signal(proc_info->cpu_number, SIGPast, (unsigned int)NULL, (unsigned int)NULL);
}

/*
 *	Routine:	machine_processor_shutdown
 *	Function:	Switch to the shutdown context and return the thread
 *			that was running
 */
thread_t
machine_processor_shutdown(
	__unused thread_t thread,
	__unused void (*doshutdown)(processor_t),
	__unused processor_t processor)
{
	CreateShutdownCTX();
	return((thread_t)(getPerProc()->old_thread));
}

/*
 *	Routine:	set_be_bit
 *	Function:	Mark branch-trace (BE) tracing enabled for the
 *			current processor; always returns 1
 */
int
set_be_bit(
	void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

/*
 *	Routine:	clr_be_bit
 *	Function:	Mark branch-trace (BE) tracing disabled for the
 *			current processor; always returns 1
 */
int
clr_be_bit(
	void)
{
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);
	getPerProc()->cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

/*
 *	Routine:	be_tracing
 *	Function:	Return nonzero if branch-trace tracing is enabled
 *			on the current processor
 */
int
be_tracing(
	void)
{
	return(getPerProc()->cpu_flags & traceBE);
}


void ml_mem_backoff(void) {

	if(warFlags & warDisMBpoff) return;	/* If backoff disabled, exit */

	__asm__ volatile("sync");
	__asm__ volatile("isync");

	return;
}

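/*
 * ml_mem_backoff() is presumably a memory-subsystem workaround hook (the
 * "war" prefix on warFlags suggests "workaround"): sync waits for all
 * preceding memory operations to complete, and isync keeps instructions
 * from executing past it until then.
 */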