/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#include <ppc/machine_routines.h>
#include <ppc/machine_cpu.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/Firmware.h>
#include <vm/vm_page.h>
#include <ppc/pmap.h>
#include <ppc/proc_reg.h>
#include <kern/processor.h>

boolean_t get_interrupts_enabled(void);
extern boolean_t set_interrupts_enabled(boolean_t);

/* Map memory-mapped IO space */
vm_offset_t
ml_io_map(
	vm_offset_t phys_addr,
	vm_size_t size)
{
	return(io_map(phys_addr, size));
}

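/*
 * Illustrative usage sketch (not from the original source); the device
 * base address below is hypothetical:
 *
 *	vm_offset_t regs = ml_io_map(device_phys_base, PAGE_SIZE);
 *	... access device registers through regs ...
 */
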
/* static memory allocation */
vm_offset_t
ml_static_malloc(
	vm_size_t size)
{
	extern vm_offset_t static_memory_end;
	extern boolean_t pmap_initialized;
	vm_offset_t vaddr;

	if (pmap_initialized)
		return((vm_offset_t)NULL);
	else {
		vaddr = static_memory_end;
		static_memory_end = round_page(vaddr+size);
		return(vaddr);
	}
}

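/*
 * Added commentary (not in the original source): this is a bump
 * allocator that carves wired, V=R memory off the end of the static
 * region. It is only usable during early boot; once pmap_initialized
 * is set it refuses the request and returns NULL.
 */
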
vm_offset_t
ml_static_ptovirt(
	vm_offset_t paddr)
{
	extern vm_offset_t static_memory_end;
	vm_offset_t vaddr;

	/* Static memory is mapped V=R */
	vaddr = paddr;
	if ( (vaddr < static_memory_end) && (pmap_extract(kernel_pmap, vaddr)==paddr) )
		return(vaddr);
	else
		return((vm_offset_t)NULL);
}

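/*
 * Added commentary (not in the original source): because the static
 * region is identity-mapped, a physical address doubles as its own
 * virtual address provided it lies below static_memory_end and the
 * kernel pmap confirms the V=R translation.
 */
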
void
ml_static_mfree(
	vm_offset_t vaddr,
	vm_size_t size)
{
	vm_offset_t paddr_cur, vaddr_cur;

	for (vaddr_cur = round_page(vaddr);
	     vaddr_cur < trunc_page(vaddr+size);
	     vaddr_cur += PAGE_SIZE) {
		paddr_cur = pmap_extract(kernel_pmap, vaddr_cur);
		if (paddr_cur != (vm_offset_t)NULL) {
			vm_page_wire_count--;
			pmap_remove(kernel_pmap, vaddr_cur, vaddr_cur+PAGE_SIZE);
			vm_page_create(paddr_cur, paddr_cur+PAGE_SIZE);
		}
	}
}

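/*
 * Added commentary (not in the original source): only whole pages fully
 * contained in [vaddr, vaddr+size) are released (round_page/trunc_page
 * shrink the range inward). Each freed page is unwired, unmapped from
 * the kernel pmap, and handed back to the VM system via vm_page_create().
 */
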
/* virtual to physical on wired pages */
vm_offset_t ml_vtophys(
	vm_offset_t vaddr)
{
	return(pmap_extract(kernel_pmap, vaddr));
}

/* Initialize Interrupt Handler */
void ml_install_interrupt_handler(
	void *nub,
	int source,
	void *target,
	IOInterruptHandler handler,
	void *refCon)
{
	int current_cpu;
	boolean_t current_state;

	current_cpu = cpu_number();
	current_state = ml_get_interrupts_enabled();

	per_proc_info[current_cpu].interrupt_nub = nub;
	per_proc_info[current_cpu].interrupt_source = source;
	per_proc_info[current_cpu].interrupt_target = target;
	per_proc_info[current_cpu].interrupt_handler = handler;
	per_proc_info[current_cpu].interrupt_refCon = refCon;

	per_proc_info[current_cpu].get_interrupts_enabled
		= get_interrupts_enabled;
	per_proc_info[current_cpu].set_interrupts_enabled
		= set_interrupts_enabled;
	(void) ml_set_interrupts_enabled(current_state);
}

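/*
 * Added commentary (not in the original source): installing a handler
 * also switches this cpu from the "fake" pre-scheduler interrupt-enable
 * shims to the real MSR[EE]-based get/set routines; the saved enable
 * state is then replayed through the newly installed pair.
 */
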
/* Initialize Interrupts */
void ml_init_interrupt(void)
{
	int current_cpu;
	boolean_t current_state;

	current_state = ml_get_interrupts_enabled();

	current_cpu = cpu_number();
	per_proc_info[current_cpu].get_interrupts_enabled
		= get_interrupts_enabled;
	per_proc_info[current_cpu].set_interrupts_enabled
		= set_interrupts_enabled;
	(void) ml_set_interrupts_enabled(current_state);
}

boolean_t fake_get_interrupts_enabled(void)
{
	/*
	 * The scheduler is not active on this cpu. There is no need to disable
	 * preemption. The current thread won't be dispatched on another cpu.
	 */
	return((per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0);
}

boolean_t fake_set_interrupts_enabled(boolean_t enable)
{
	boolean_t interrupt_state_prev;

	/*
	 * The scheduler is not active on this cpu. There is no need to disable
	 * preemption. The current thread won't be dispatched on another cpu.
	 */
	interrupt_state_prev =
		(per_proc_info[cpu_number()].cpu_flags & turnEEon) != 0;
	if (interrupt_state_prev != enable)
		per_proc_info[cpu_number()].cpu_flags ^= turnEEon;
	return(interrupt_state_prev);
}

/* Get Interrupts Enabled */
boolean_t ml_get_interrupts_enabled(void)
{
	return(per_proc_info[cpu_number()].get_interrupts_enabled());
}

boolean_t get_interrupts_enabled(void)
{
	return((mfmsr() & MASK(MSR_EE)) != 0);
}

/* Set Interrupts Enabled */
boolean_t ml_set_interrupts_enabled(boolean_t enable)
{
	return(per_proc_info[cpu_number()].set_interrupts_enabled(enable));
}

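/*
 * Illustrative usage sketch (not from the original source), showing the
 * save/disable/restore pattern that set_be_bit() below also uses:
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	... touch per-cpu state ...
 *	(void) ml_set_interrupts_enabled(istate);
 */
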
/* Check if running at interrupt context */
boolean_t ml_at_interrupt_context(void)
{
	/*
	 * If running at interrupt context, the current thread won't be
	 * dispatched on another cpu. There is no need to turn off preemption.
	 */
	return (per_proc_info[cpu_number()].istackptr == 0);
}

/* Generate a fake interrupt */
void ml_cause_interrupt(void)
{
	CreateFakeIO();
}

void machine_clock_assist(void)
{
	if (per_proc_info[cpu_number()].get_interrupts_enabled == fake_get_interrupts_enabled)
		CreateFakeDEC();
}

void machine_idle(void)
{
	if (per_proc_info[cpu_number()].get_interrupts_enabled != fake_get_interrupts_enabled) {
		int cur_decr;

		machine_idle_ppc();

		/*
		 * Protect against a lost decrementer trap:
		 * if the current decrementer value is negative
		 * by more than 10 ticks, re-arm it since it's
		 * unlikely to fire at this point... a hardware
		 * interrupt got us out of machine_idle and may
		 * also be contributing to this state.
		 */
		cur_decr = isync_mfdec();

		if (cur_decr < -10) {
			mtdec(1);
		}
	}
}

void
machine_signal_idle(
	processor_t processor)
{
	(void)cpu_signal(processor->slot_num, SIGPwake, 0, 0);
}

kern_return_t
ml_processor_register(
	ml_processor_info_t *processor_info,
	processor_t *processor,
	ipi_handler_t *ipi_handler)
{
	int target_cpu;

	if (processor_info->boot_cpu == FALSE) {
		if (cpu_register(&target_cpu) != KERN_SUCCESS)
			return KERN_FAILURE;
	} else {
		/* boot_cpu is always 0 */
		target_cpu = 0;
	}

	per_proc_info[target_cpu].cpu_id = processor_info->cpu_id;
	per_proc_info[target_cpu].start_paddr = processor_info->start_paddr;

	if (per_proc_info[target_cpu].pf.Available & pfCanNap)
		if (processor_info->supports_nap)
			per_proc_info[target_cpu].pf.Available |= pfWillNap;

	if (processor_info->time_base_enable != (void(*)(cpu_id_t, boolean_t))NULL)
		per_proc_info[target_cpu].time_base_enable = processor_info->time_base_enable;
	else
		per_proc_info[target_cpu].time_base_enable = (void(*)(cpu_id_t, boolean_t))NULL;

	if (target_cpu == cpu_number())
		__asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set live value */

	*processor = cpu_to_processor(target_cpu);
	*ipi_handler = cpu_signal_handler;

	return KERN_SUCCESS;
}

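/*
 * Illustrative registration sketch (not from the original source); the
 * field values are hypothetical:
 *
 *	ml_processor_info_t info;
 *	processor_t proc;
 *	ipi_handler_t ipi;
 *
 *	info.boot_cpu = FALSE;
 *	info.supports_nap = TRUE;
 *	if (ml_processor_register(&info, &proc, &ipi) == KERN_SUCCESS)
 *		... start the new cpu using the returned processor_t ...
 */
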
boolean_t
ml_enable_nap(int target_cpu, boolean_t nap_enabled)
{
	boolean_t prev_value = (per_proc_info[target_cpu].pf.Available & pfCanNap) && (per_proc_info[target_cpu].pf.Available & pfWillNap);

	if (per_proc_info[target_cpu].pf.Available & pfCanNap) {	/* Can the processor nap? */
		if (nap_enabled) per_proc_info[target_cpu].pf.Available |= pfWillNap;	/* Set nap enabled if requested */
		else per_proc_info[target_cpu].pf.Available &= ~pfWillNap;	/* Clear if not */
	}

	if (target_cpu == cpu_number())
		__asm__ volatile("mtsprg 2,%0" : : "r" (per_proc_info[target_cpu].pf.Available));	/* Set live value */

	return (prev_value);
}

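/*
 * Added commentary (not in the original source): the return value is the
 * previous effective nap state (pfCanNap && pfWillNap), so a caller can
 * temporarily forbid napping and restore it afterwards:
 *
 *	boolean_t was_napping = ml_enable_nap(cpu, FALSE);
 *	... section that must not nap ...
 *	(void) ml_enable_nap(cpu, was_napping);
 */
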
void
ml_ppc_get_info(ml_ppc_cpu_info_t *cpu_info)
{
	if (cpu_info == 0) return;

	cpu_info->vector_unit = (per_proc_info[0].pf.Available & pfAltivec) != 0;
	cpu_info->cache_line_size = per_proc_info[0].pf.lineSize;
	cpu_info->l1_icache_size = per_proc_info[0].pf.l1iSize;
	cpu_info->l1_dcache_size = per_proc_info[0].pf.l1dSize;

	if (per_proc_info[0].pf.Available & pfL2) {
		cpu_info->l2_settings = per_proc_info[0].pf.l2cr;
		cpu_info->l2_cache_size = per_proc_info[0].pf.l2Size;
	} else {
		cpu_info->l2_settings = 0;
		cpu_info->l2_cache_size = 0xFFFFFFFF;
	}
	if (per_proc_info[0].pf.Available & pfL3) {
		cpu_info->l3_settings = per_proc_info[0].pf.l3cr;
		cpu_info->l3_cache_size = per_proc_info[0].pf.l3Size;
	} else {
		cpu_info->l3_settings = 0;
		cpu_info->l3_cache_size = 0xFFFFFFFF;
	}
}

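/*
 * Illustrative usage sketch (not from the original source): a missing
 * cache level is reported with settings 0 and size 0xFFFFFFFF, so a
 * caller should test for that sentinel:
 *
 *	ml_ppc_cpu_info_t info;
 *
 *	ml_ppc_get_info(&info);
 *	if (info.l2_cache_size != 0xFFFFFFFF)
 *		... the cpu has an L2 of info.l2_cache_size bytes ...
 */
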
void
init_ast_check(processor_t processor)
{}

void
cause_ast_check(processor_t processor)
{
	if ((processor != current_processor())
	    && (per_proc_info[processor->slot_num].get_interrupts_enabled
	        != fake_get_interrupts_enabled))
		cpu_signal(processor->slot_num, SIGPast, NULL, NULL);
}

thread_t
switch_to_shutdown_context(
	thread_t thread,
	void (*doshutdown)(processor_t),
	processor_t processor)
{
	disable_preemption();
	CreateShutdownCTX();
	enable_preemption();
	return((thread_t)(per_proc_info[cpu_number()].old_thread));
}

int
set_be_bit(void)
{
	int mycpu;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
	mycpu = cpu_number();
	per_proc_info[mycpu].cpu_flags |= traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

int
clr_be_bit(void)
{
	int mycpu;
	boolean_t current_state;

	current_state = ml_set_interrupts_enabled(FALSE);	/* Can't allow interruptions when mucking with per_proc flags */
	mycpu = cpu_number();
	per_proc_info[mycpu].cpu_flags &= ~traceBE;
	(void) ml_set_interrupts_enabled(current_state);
	return(1);
}

int
be_tracing(void)
{
	int mycpu = cpu_number();
	return(per_proc_info[mycpu].cpu_flags & traceBE);
}
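
/*
 * Added commentary (not in the original source): set_be_bit() and
 * clr_be_bit() toggle the per-cpu traceBE flag with interrupts disabled,
 * while be_tracing() reads it without locking since cpu_flags is per-cpu
 * state. A hypothetical caller:
 *
 *	set_be_bit();
 *	... region to trace ...
 *	clr_be_bit();
 */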