apple/xnu.git: osfmk/ppc/PseudoKernel.c
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
    File:       PseudoKernel.c

    Contains:   BlueBox PseudoKernel calls
    Written by: Mark Gorlinsky
                Bill Angell

    Copyright:  1997 by Apple Computer, Inc., all rights reserved

*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/PseudoKernel.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

void bbSetRupt(ReturnHandler *rh, thread_t ct);

/*
** Function:    NotifyInterruption
**
** Inputs:
**      ppcInterruptHandler - interrupt handler to execute
**      interruptStatePtr   - current interrupt state
**
** Outputs:
**      kern_return_t       - KERN_SUCCESS, KERN_FAILURE if no Blue interrupt
**                            thread is found, or KERN_RESOURCE_SHORTAGE if the
**                            interrupt cannot be queued
**
** Notes:
**
*/
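/*
 * Flow, as implemented below (a reading of the code, not separate documentation):
 * the call scans the current task's thread list for the Blue thread whose
 * descriptor page advertises an InterruptVector. If the caller is that thread
 * itself, it is already in the PseudoKernel, so the pending interrupt is posted
 * directly into the backup CR2 bits of the InterruptControlWord. Otherwise a
 * bbRupt return handler is allocated, chained onto the target activation, and
 * delivered later via an APC AST, where bbSetRupt() edits the user savearea.
 */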
kern_return_t syscall_notify_interrupt ( void ) {

    UInt32      interruptState;
    task_t      task;
    thread_t    act, fact;
    bbRupt      *bbr;
    BTTD_t      *bttd;
    int         i;

    task = current_task();                                  /* Figure out who our task is */

    task_lock(task);                                        /* Lock our task */

    fact = (thread_t)task->threads.next;                    /* Get the first activation on task */
    act = 0;                                                /* Pretend we didn't find it yet */

    for(i = 0; i < task->thread_count; i++) {               /* Scan the whole list */
        if(fact->machine.bbDescAddr) {                      /* Is this a Blue thread? */
            bttd = (BTTD_t *)(fact->machine.bbDescAddr & -PAGE_SIZE);
            if(bttd->InterruptVector) {                     /* Is this the Blue interrupt thread? */
                act = fact;                                 /* Yeah... */
                break;                                      /* Found it, Bail the loop... */
            }
        }
        fact = (thread_t)fact->task_threads.next;           /* Go to the next one */
    }

    if(!act) {                                              /* Couldn't find a bluebox */
        task_unlock(task);                                  /* Release task lock */
        return KERN_FAILURE;                                /* No tickie, no shirtee... */
    }

    thread_reference(act);

    task_unlock(task);                                      /* Safe to release now */

    thread_mtx_lock(act);

    /* If the calling thread is the BlueBox thread that handles interrupts
     * we know that we are in the PseudoKernel and we can short circuit
     * setting up the asynchronous task by setting a pending interrupt.
     */

    if ( (unsigned int)act == (unsigned int)current_thread() ) {
        bttd->InterruptControlWord = bttd->InterruptControlWord |
            ((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask);

        thread_mtx_unlock(act);                             /* Unlock the activation */
        thread_deallocate(act);
        return KERN_SUCCESS;
    }

    if(act->machine.emPendRupts >= 16) {                    /* Have we hit the arbitrary maximum? */
        thread_mtx_unlock(act);                             /* Unlock the activation */
        thread_deallocate(act);
        return KERN_RESOURCE_SHORTAGE;                      /* Too many pending right now */
    }

    if(!(bbr = (bbRupt *)kalloc(sizeof(bbRupt)))) {         /* Get a return handler control block */
        thread_mtx_unlock(act);                             /* Unlock the activation */
        thread_deallocate(act);
        return KERN_RESOURCE_SHORTAGE;                      /* No storage... */
    }

    (void)hw_atomic_add(&act->machine.emPendRupts, 1);      /* Count this 'rupt */
    bbr->rh.handler = bbSetRupt;                            /* Set interruption routine */

    bbr->rh.next = act->handlers;                           /* Put our interrupt at the start of the list */
    act->handlers = &bbr->rh;

    act_set_apc(act);                                       /* Set an APC AST */

    thread_mtx_unlock(act);                                 /* Unlock the activation */
    thread_deallocate(act);
    return KERN_SUCCESS;                                    /* We're done... */
}

/*
 * This guy is fired off asynchronously to actually do the 'rupt.
 * We will find the user state savearea and modify it. If we can't,
 * we just leave after releasing our work area
 */

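/*
 * State handling in the switch below, summarized:
 *   kInSystemContext    - post the interrupt bit directly into CR2 of the
 *                         saved user state.
 *   kInAlternateContext - enter the PseudoKernel: save SRR0/SRR1/R1 into the
 *                         exceptionInfo area, point the PC at bbInterrupt,
 *                         clear the single-step and branch-trace MSR bits,
 *                         re-enable Mach system calls, then fall through to
 *                         post the backup CR2 bit.
 *   all other states    - just post the interrupt into the backup CR2 bits of
 *                         the InterruptControlWord.
 */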
void bbSetRupt(ReturnHandler *rh, thread_t act) {

    savearea    *sv;
    BTTD_t      *bttd;
    bbRupt      *bbr;
    UInt32      interruptState;

    bbr = (bbRupt *)rh;                                     /* Make our area convenient */

    if(!(act->machine.bbDescAddr)) {                        /* Is BlueBox still enabled? */
        kfree(bbr, sizeof(bbRupt));                         /* No, release the control block */
        return;
    }

    (void)hw_atomic_sub(&act->machine.emPendRupts, 1);      /* Uncount this 'rupt */

    if(!(sv = find_user_regs(act))) {                       /* Find the user state registers */
        kfree(bbr, sizeof(bbRupt));                         /* Couldn't find 'em, release the control block */
        return;
    }

    bttd = (BTTD_t *)(act->machine.bbDescAddr & -PAGE_SIZE);

    interruptState = (bttd->InterruptControlWord & kInterruptStateMask) >> kInterruptStateShift;

    switch (interruptState) {

        case kInSystemContext:
            sv->save_cr |= bttd->postIntMask;               /* post int in CR2 */
            break;

        case kInAlternateContext:
            bttd->InterruptControlWord = (bttd->InterruptControlWord & ~kInterruptStateMask) |
                (kInPseudoKernel << kInterruptStateShift);

            bttd->exceptionInfo.srr0 = (unsigned int)sv->save_srr0;    /* Save the current PC */
            sv->save_srr0 = (uint64_t)act->machine.bbInterrupt;        /* Set the new PC */
            bttd->exceptionInfo.sprg1 = (unsigned int)sv->save_r1;     /* Save the original R1 */
            sv->save_r1 = (uint64_t)bttd->exceptionInfo.sprg0;         /* Set the new R1 */
            bttd->exceptionInfo.srr1 = (unsigned int)sv->save_srr1;    /* Save the original MSR */
            sv->save_srr1 &= ~(MASK(MSR_BE)|MASK(MSR_SE));             /* Clear SE|BE bits in MSR */
            act->machine.specFlags &= ~bbNoMachSC;                     /* reactivate Mach SCs */
            disable_preemption();                           /* Don't move us around */
            getPerProc()->spcFlags = act->machine.specFlags;    /* Copy the flags */
            enable_preemption();                            /* Ok to move us around */
            /* drop through to post int in backup CR2 in ICW */

        case kInExceptionHandler:
        case kInPseudoKernel:
        case kOutsideBlue:
            bttd->InterruptControlWord = bttd->InterruptControlWord |
                ((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask);
            break;

        default:
            break;
    }

    kfree(bbr, sizeof(bbRupt));                             /* Release the control block */
    return;

}

/*
 * This function is used to enable the firmware assist code for bluebox traps, system calls
 * and interrupts.
 *
 * The assist code can be called from two types of threads: the blue thread, which handles
 * traps, system calls, and interrupts, and preemptive threads, which only issue system calls.
 *
 */

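/*
 * Setup sequence, as performed below: wire the caller-supplied descriptor page
 * into the user map, look up its physical page, double-map that page into the
 * kernel map, then cache the trap/syscall/interrupt/pending vectors and the
 * BlueBox flags on the calling thread. A descriptor with no InterruptVector
 * marks the caller as a preemptive (MP) BlueBox thread.
 */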
kern_return_t enable_bluebox(
    host_t host,
    void *taskID,                                           /* opaque task ID */
    void *TWI_TableStart,                                   /* Start of TWI table */
    char *Desc_TableStart                                   /* Start of descriptor table */
    ) {

    thread_t        th;
    vm_offset_t     kerndescaddr, origdescoffset;
    kern_return_t   ret;
    ppnum_t         physdescpage;
    BTTD_t          *bttd;

    th = current_thread();                                  /* Get our thread */

    if ( host == HOST_NULL ) return KERN_INVALID_HOST;
    if ( ! is_suser() ) return KERN_FAILURE;                /* We will only do this for the superuser */
    if ( th->machine.bbDescAddr ) return KERN_FAILURE;      /* Bail if already authorized... */
    if ( ! (unsigned int) Desc_TableStart ) return KERN_FAILURE;   /* There has to be a descriptor page */
    if ( ! TWI_TableStart ) return KERN_FAILURE;            /* There has to be a TWI table */

    /* Get the page offset of the descriptor */
    origdescoffset = (vm_offset_t)Desc_TableStart & (PAGE_SIZE - 1);

    /* Align the descriptor to a page */
    Desc_TableStart = (char *)((vm_offset_t)Desc_TableStart & -PAGE_SIZE);

    ret = vm_map_wire(th->map,                              /* Kernel wire the descriptor in the user's map */
        (vm_offset_t)Desc_TableStart,
        (vm_offset_t)Desc_TableStart + PAGE_SIZE,
        VM_PROT_READ | VM_PROT_WRITE,
        FALSE);

    if(ret != KERN_SUCCESS) {                               /* Couldn't wire it, spit on 'em... */
        return KERN_FAILURE;
    }

    physdescpage =                                          /* Get the physical page number of the page */
        pmap_find_phys(th->map->pmap, (addr64_t)Desc_TableStart);

    ret = kmem_alloc_pageable(kernel_map, &kerndescaddr, PAGE_SIZE);   /* Find a virtual address to use */
    if(ret != KERN_SUCCESS) {                               /* Could we get an address? */
        (void) vm_map_unwire(th->map,                       /* No, unwire the descriptor */
            (vm_offset_t)Desc_TableStart,
            (vm_offset_t)Desc_TableStart + PAGE_SIZE,
            TRUE);
        return KERN_FAILURE;                                /* Split... */
    }

    (void) pmap_enter(kernel_pmap,                          /* Map this into the kernel */
        kerndescaddr, physdescpage, VM_PROT_READ|VM_PROT_WRITE,
        VM_WIMG_USE_DEFAULT, TRUE);

    bttd = (BTTD_t *)kerndescaddr;                          /* Get the address in a convenient spot */

    th->machine.bbDescAddr = (unsigned int)kerndescaddr+origdescoffset;    /* Set kernel address of the table */
    th->machine.bbUserDA = (unsigned int)Desc_TableStart;   /* Set user address of the table */
    th->machine.bbTableStart = (unsigned int)TWI_TableStart;    /* Set address of the trap table */
    th->machine.bbTaskID = (unsigned int)taskID;            /* Assign opaque task ID */
    th->machine.bbTaskEnv = 0;                              /* Clean task environment data */
    th->machine.emPendRupts = 0;                            /* Clean pending 'rupt count */
    th->machine.bbTrap = bttd->TrapVector;                  /* Remember trap vector */
    th->machine.bbSysCall = bttd->SysCallVector;            /* Remember syscall vector */
    th->machine.bbInterrupt = bttd->InterruptVector;        /* Remember interrupt vector */
    th->machine.bbPending = bttd->PendingIntVector;         /* Remember pending vector */
    th->machine.specFlags &= ~(bbNoMachSC | bbPreemptive);  /* Make sure mach SCs are enabled and we are not marked preemptive */
    th->machine.specFlags |= bbThread;                      /* Set that we are a Classic thread */

    if(!(bttd->InterruptVector)) {                          /* See if this is a preemptive (MP) BlueBox thread */
        th->machine.specFlags |= bbPreemptive;              /* Yes, remember it */
    }

    disable_preemption();                                   /* Don't move us around */
    getPerProc()->spcFlags = th->machine.specFlags;         /* Copy the flags */
    enable_preemption();                                    /* Ok to move us around */

    {
        /* Mark the proc to indicate that this is a TBE proc */
        extern void tbeproc(void *proc);

        tbeproc(th->task->bsd_info);
    }

    return KERN_SUCCESS;
}

kern_return_t disable_bluebox( host_t host ) {              /* User call to terminate bluebox */

    thread_t act;

    act = current_thread();                                 /* Get our thread */

    if (host == HOST_NULL) return KERN_INVALID_HOST;

    if(!is_suser()) return KERN_FAILURE;                    /* We will only do this for the superuser */
    if(!act->machine.bbDescAddr) return KERN_FAILURE;       /* Bail if not authorized... */

    disable_bluebox_internal(act);                          /* Clean it all up */
    return KERN_SUCCESS;                                    /* Leave */
}

void disable_bluebox_internal(thread_t act) {               /* Terminate bluebox */

    (void) vm_map_unwire(act->map,                          /* Unwire the descriptor in user's address space */
        (vm_offset_t)act->machine.bbUserDA,
        (vm_offset_t)act->machine.bbUserDA + PAGE_SIZE,
        FALSE);

    kmem_free(kernel_map, (vm_offset_t)act->machine.bbDescAddr & -PAGE_SIZE, PAGE_SIZE);   /* Release the page */

    act->machine.bbDescAddr = 0;                            /* Clear kernel pointer to it */
    act->machine.bbUserDA = 0;                              /* Clear user pointer to it */
    act->machine.bbTableStart = 0;                          /* Clear user pointer to TWI table */
    act->machine.bbTaskID = 0;                              /* Clear opaque task ID */
    act->machine.bbTaskEnv = 0;                             /* Clean task environment data */
    act->machine.emPendRupts = 0;                           /* Clean pending 'rupt count */
    act->machine.specFlags &= ~(bbNoMachSC | bbPreemptive | bbThread);    /* Clean up Blue Box enables */
    disable_preemption();                                   /* Don't move us around */
    getPerProc()->spcFlags = act->machine.specFlags;        /* Copy the flags */
    enable_preemption();                                    /* Ok to move us around */
    return;
}

/*
 * Use the new PPCcall method to enable blue box threads
 *
 *  save->r3 = taskID
 *  save->r4 = TWI_TableStart
 *  save->r5 = Desc_TableStart
 *
 */
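/*
 * Note: the PPCcall path carries no host port, so a non-null placeholder of
 * 0xFFFFFFFF is passed below, apparently just to get past the HOST_NULL check
 * in enable_bluebox(); the real privilege gate is the is_suser() check there.
 */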
int bb_enable_bluebox( struct savearea *save )
{
    kern_return_t rc;

    rc = enable_bluebox( (host_t)0xFFFFFFFF, (void *)save->save_r3, (void *)save->save_r4, (char *)save->save_r5 );
    save->save_r3 = rc;
    return 1;                                               /* Return with normal AST checking */
}

/*
 * Use the new PPCcall method to disable blue box threads
 *
 */
int bb_disable_bluebox( struct savearea *save )
{
    kern_return_t rc;

    rc = disable_bluebox( (host_t)0xFFFFFFFF );
    save->save_r3 = rc;
    return 1;                                               /* Return with normal AST checking */
}

/*
 * Search through the list of threads to find the one with the matching taskID, then
 * set its task environment pointer. A task in this case is a preemptive thread
 * in MacOS 9.
 *
 *  save->r3 = taskID
 *  save->r4 = taskEnv
 */

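/*
 * Result convention, as read from the code below: save->r3 is set to -1 when
 * no live thread with a matching bbTaskID is found and to 0 on success; the
 * PPCcall itself always returns 1 so normal AST checking resumes.
 */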
int bb_settaskenv( struct savearea *save )
{
    int         i;
    task_t      task;
    thread_t    act, fact;


    task = current_task();                                  /* Figure out who our task is */

    task_lock(task);                                        /* Lock our task */
    fact = (thread_t)task->threads.next;                    /* Get the first activation on task */
    act = 0;                                                /* Pretend we didn't find it yet */

    for(i = 0; i < task->thread_count; i++) {               /* Scan the whole list */
        if(fact->machine.bbDescAddr) {                      /* Is this a Blue thread? */
            if ( fact->machine.bbTaskID == save->save_r3 ) {    /* Is this the task we are looking for? */
                act = fact;                                 /* Yeah... */
                break;                                      /* Found it, Bail the loop... */
            }
        }
        fact = (thread_t)fact->task_threads.next;           /* Go to the next one */
    }

    if ( !act || !act->active) {
        task_unlock(task);                                  /* Release task lock */
        save->save_r3 = -1;                                 /* We failed to find the taskID */
        return 1;
    }

    thread_reference(act);

    task_unlock(task);                                      /* Safe to release now */

    thread_mtx_lock(act);                                   /* Make sure this stays 'round */

    act->machine.bbTaskEnv = save->save_r4;
    if(act == current_thread()) {                           /* Are we setting our own? */
        disable_preemption();                               /* Don't move us around */
        getPerProc()->ppbbTaskEnv = act->machine.bbTaskEnv;     /* Remember the environment */
        enable_preemption();                                /* Ok to move us around */
    }

    thread_mtx_unlock(act);                                 /* Unlock the activation */
    thread_deallocate(act);
    save->save_r3 = 0;
    return 1;
}