/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
    File:        PseudoKernel.c

    Contains:    BlueBox PseudoKernel calls
    Written by:  Mark Gorlinsky
                 Bill Angell

    Copyright:   1997 by Apple Computer, Inc., all rights reserved

*/

#include <mach/mach_types.h>
#include <mach/kern_return.h>

#include <kern/kalloc.h>
#include <kern/kern_types.h>
#include <kern/host.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <ppc/PseudoKernel.h>
#include <ppc/exception.h>
#include <ppc/misc_protos.h>
#include <ppc/proc_reg.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>

void bbSetRupt(ReturnHandler *rh, thread_t ct);

/*
** Function:    NotifyInterruption
**
** Inputs:
**        ppcInterruptHandler - interrupt handler to execute
**        interruptStatePtr   - current interrupt state
**
** Outputs:
**        kern_return_t - KERN_SUCCESS, KERN_FAILURE if the current task has
**        no Blue interrupt thread, or KERN_RESOURCE_SHORTAGE if too many
**        interruptions are already pending or no storage is available.
**
** Notes:
**        The kernel entry point, syscall_notify_interrupt(), takes no
**        explicit arguments; it operates on the BlueBox trap table
**        descriptor (BTTD_t) of the interrupt thread it finds.
**
*/
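/*
 * Flow (summary of the code below): locate the Blue interrupt thread in the
 * current task, i.e. the Blue thread whose descriptor has a non-zero
 * InterruptVector.  If the caller is that thread we are already in the
 * PseudoKernel, so the interrupt is posted directly in the backup CR2 bits
 * of its InterruptControlWord; otherwise a bbRupt return handler is queued
 * and an APC AST is set so bbSetRupt() delivers the interruption
 * asynchronously on that thread.
 */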
kern_return_t syscall_notify_interrupt ( void ) {

    UInt32          interruptState;
    task_t          task;
    thread_t        act, fact;
    bbRupt          *bbr;
    BTTD_t          *bttd;
    int             i;

    task = current_task();                          /* Figure out who our task is */

    task_lock(task);                                /* Lock our task */

    fact = (thread_t)task->threads.next;            /* Get the first activation on task */
    act = 0;                                        /* Pretend we didn't find it yet */

    for(i = 0; i < task->thread_count; i++) {       /* Scan the whole list */
        if(fact->machine.bbDescAddr) {              /* Is this a Blue thread? */
            bttd = (BTTD_t *)(fact->machine.bbDescAddr & -PAGE_SIZE);
            if(bttd->InterruptVector) {             /* Is this the Blue interrupt thread? */
                act = fact;                         /* Yeah... */
                break;                              /* Found it, Bail the loop... */
            }
        }
        fact = (thread_t)fact->task_threads.next;   /* Go to the next one */
    }

    if(!act) {                                      /* Couldn't find a bluebox */
        task_unlock(task);                          /* Release task lock */
        return KERN_FAILURE;                        /* No tickie, no shirtee... */
    }
    thread_reference(act);                          /* Take a reference so the activation can't go away */

    task_unlock(task);                              /* Safe to release now */

    thread_mtx_lock(act);                           /* Hold the thread mutex while we set up the interruption */

    /* If the calling thread is the BlueBox thread that handles interrupts,
     * we know that we are in the PseudoKernel and we can short-circuit
     * setting up the asynchronous task by setting a pending interrupt.
     */

    if ( (unsigned int)act == (unsigned int)current_thread() ) {
        bttd->InterruptControlWord = bttd->InterruptControlWord |
            ((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask);

        thread_mtx_unlock(act);                     /* Unlock the activation */
        thread_deallocate(act);
        return KERN_SUCCESS;
    }

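    /* Not the interrupt thread itself: count the interruption, queue a bbRupt
     * return handler on the target activation, and set an APC AST so that
     * bbSetRupt() runs on that thread at AST time.
     */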
    if(act->machine.emPendRupts >= 16) {            /* Have we hit the arbitrary maximum? */
        thread_mtx_unlock(act);                     /* Unlock the activation */
        thread_deallocate(act);
        return KERN_RESOURCE_SHORTAGE;              /* Too many pending right now */
    }

    if(!(bbr = (bbRupt *)kalloc(sizeof(bbRupt)))) { /* Get a return handler control block */
        thread_mtx_unlock(act);                     /* Unlock the activation */
        thread_deallocate(act);
        return KERN_RESOURCE_SHORTAGE;              /* No storage... */
    }

    (void)hw_atomic_add(&act->machine.emPendRupts, 1); /* Count this 'rupt */
    bbr->rh.handler = bbSetRupt;                    /* Set interruption routine */

    bbr->rh.next = act->handlers;                   /* Put our interrupt at the start of the list */
    act->handlers = &bbr->rh;

    act_set_apc(act);                               /* Set an APC AST */

    thread_mtx_unlock(act);                         /* Unlock the activation */
    thread_deallocate(act);
    return KERN_SUCCESS;                            /* We're done... */
}

/*
 * This guy is fired off asynchronously to actually do the 'rupt.
 * We will find the user state savearea and modify it.  If we can't,
 * we just leave after releasing our work area
 */

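/*
 * Summary of the InterruptControlWord state dispatch below:
 *
 *   kInSystemContext    - post the interrupt directly in CR2 of the saved user state
 *   kInAlternateContext - switch the ICW state to kInPseudoKernel, save SRR0/SRR1/R1
 *                         into the exception info area, vector the thread to its
 *                         bbInterrupt handler, then fall through to post the
 *                         interrupt in the backup CR2 bits of the ICW
 *   kInExceptionHandler, kInPseudoKernel, kOutsideBlue
 *                       - post the interrupt in the backup CR2 bits of the ICW
 */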
void bbSetRupt(ReturnHandler *rh, thread_t act) {

    savearea    *sv;
    BTTD_t      *bttd;
    bbRupt      *bbr;
    UInt32      interruptState;

    bbr = (bbRupt *)rh;                             /* Make our area convenient */

    if(!(act->machine.bbDescAddr)) {                /* Is BlueBox still enabled? */
        kfree(bbr, sizeof(bbRupt));                 /* No, release the control block */
        return;
    }

    (void)hw_atomic_sub(&act->machine.emPendRupts, 1); /* Uncount this 'rupt */

    if(!(sv = find_user_regs(act))) {               /* Find the user state registers */
        kfree(bbr, sizeof(bbRupt));                 /* Couldn't find 'em, release the control block */
        return;
    }

    bttd = (BTTD_t *)(act->machine.bbDescAddr & -PAGE_SIZE);

    interruptState = (bttd->InterruptControlWord & kInterruptStateMask) >> kInterruptStateShift;

    switch (interruptState) {

        case kInSystemContext:
            sv->save_cr |= bttd->postIntMask;       /* post int in CR2 */
            break;

        case kInAlternateContext:
            bttd->InterruptControlWord = (bttd->InterruptControlWord & ~kInterruptStateMask) |
                (kInPseudoKernel << kInterruptStateShift);

            bttd->exceptionInfo.srr0 = (unsigned int)sv->save_srr0;     /* Save the current PC */
            sv->save_srr0 = (uint64_t)act->machine.bbInterrupt;         /* Set the new PC */
            bttd->exceptionInfo.sprg1 = (unsigned int)sv->save_r1;      /* Save the original R1 */
            sv->save_r1 = (uint64_t)bttd->exceptionInfo.sprg0;          /* Set the new R1 */
            bttd->exceptionInfo.srr1 = (unsigned int)sv->save_srr1;     /* Save the original MSR */
            sv->save_srr1 &= ~(MASK(MSR_BE)|MASK(MSR_SE));              /* Clear SE|BE bits in MSR */
            act->machine.specFlags &= ~bbNoMachSC;                      /* reactivate Mach SCs */
            disable_preemption();                                       /* Don't move us around */
            getPerProc()->spcFlags = act->machine.specFlags;            /* Copy the flags */
            enable_preemption();                                        /* Ok to move us around */
            /* drop through to post int in backup CR2 in ICW */

        case kInExceptionHandler:
        case kInPseudoKernel:
        case kOutsideBlue:
            bttd->InterruptControlWord = bttd->InterruptControlWord |
                ((bttd->postIntMask >> kCR2ToBackupShift) & kBackupCR2Mask);
            break;

        default:
            break;
    }

    kfree(bbr, sizeof(bbRupt));                     /* Release the control block */
    return;

}

/*
 * This function is used to enable the firmware assist code for bluebox traps,
 * system calls, and interrupts.
 *
 * The assist code can be called from two types of threads: the blue thread,
 * which handles traps, system calls, and interrupts, and preemptive threads
 * that only issue system calls.
 *
 */

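/*
 * Note (summary of the code below): the descriptor page supplied in
 * Desc_TableStart is wired in the caller's address space and also entered
 * into the kernel pmap at a kernel virtual address, presumably so the
 * trap/syscall/interrupt assist paths can reach it without faulting.
 * disable_bluebox_internal() undoes both mappings.
 */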
kern_return_t enable_bluebox(
    host_t  host,
    void    *taskID,                                /* opaque task ID */
    void    *TWI_TableStart,                        /* Start of TWI table */
    char    *Desc_TableStart                        /* Start of descriptor table */
    ) {

    thread_t        th;
    vm_offset_t     kerndescaddr, origdescoffset;
    kern_return_t   ret;
    ppnum_t         physdescpage;
    BTTD_t          *bttd;

    th = current_thread();                          /* Get our thread */

    if ( host == HOST_NULL ) return KERN_INVALID_HOST;
    if ( ! is_suser() ) return KERN_FAILURE;                        /* We will only do this for the superuser */
    if ( th->machine.bbDescAddr ) return KERN_FAILURE;              /* Bail if already authorized... */
    if ( ! (unsigned int) Desc_TableStart ) return KERN_FAILURE;    /* There has to be a descriptor page */
    if ( ! TWI_TableStart ) return KERN_FAILURE;                    /* There has to be a TWI table */

    /* Get the page offset of the descriptor */
    origdescoffset = (vm_offset_t)Desc_TableStart & (PAGE_SIZE - 1);

    /* Align the descriptor to a page */
    Desc_TableStart = (char *)((vm_offset_t)Desc_TableStart & -PAGE_SIZE);

    ret = vm_map_wire(th->map,                      /* Kernel wire the descriptor in the user's map */
        (vm_offset_t)Desc_TableStart,
        (vm_offset_t)Desc_TableStart + PAGE_SIZE,
        VM_PROT_READ | VM_PROT_WRITE,
        FALSE);

    if(ret != KERN_SUCCESS) {                       /* Couldn't wire it, spit on 'em... */
        return KERN_FAILURE;
    }

    physdescpage =                                  /* Get the physical page number of the page */
        pmap_find_phys(th->map->pmap, (addr64_t)Desc_TableStart);

    ret = kmem_alloc_pageable(kernel_map, &kerndescaddr, PAGE_SIZE);    /* Find a virtual address to use */
    if(ret != KERN_SUCCESS) {                       /* Could we get an address? */
        (void) vm_map_unwire(th->map,               /* No, unwire the descriptor */
            (vm_offset_t)Desc_TableStart,
            (vm_offset_t)Desc_TableStart + PAGE_SIZE,
            TRUE);
        return KERN_FAILURE;                        /* Split... */
    }

    (void) pmap_enter(kernel_pmap,                  /* Map this into the kernel */
        kerndescaddr, physdescpage, VM_PROT_READ|VM_PROT_WRITE,
        VM_WIMG_USE_DEFAULT, TRUE);

    bttd = (BTTD_t *)kerndescaddr;                  /* Get the address in a convenient spot */

    th->machine.bbDescAddr = (unsigned int)kerndescaddr+origdescoffset;    /* Set kernel address of the table */
    th->machine.bbUserDA = (unsigned int)Desc_TableStart;                  /* Set user address of the table */
    th->machine.bbTableStart = (unsigned int)TWI_TableStart;               /* Set address of the trap table */
    th->machine.bbTaskID = (unsigned int)taskID;                           /* Assign opaque task ID */
    th->machine.bbTaskEnv = 0;                      /* Clean task environment data */
    th->machine.emPendRupts = 0;                    /* Clean pending 'rupt count */
    th->machine.bbTrap = bttd->TrapVector;          /* Remember trap vector */
    th->machine.bbSysCall = bttd->SysCallVector;    /* Remember syscall vector */
    th->machine.bbInterrupt = bttd->InterruptVector;    /* Remember interrupt vector */
    th->machine.bbPending = bttd->PendingIntVector;     /* Remember pending vector */
    th->machine.specFlags &= ~(bbNoMachSC | bbPreemptive);  /* Make sure mach SCs are enabled and we are not marked preemptive */
    th->machine.specFlags |= bbThread;              /* Set that we are a Classic thread */

    if(!(bttd->InterruptVector)) {                  /* See if this is a preemptive (MP) BlueBox thread */
        th->machine.specFlags |= bbPreemptive;      /* Yes, remember it */
    }

    disable_preemption();                           /* Don't move us around */
    getPerProc()->spcFlags = th->machine.specFlags; /* Copy the flags */
    enable_preemption();                            /* Ok to move us around */

    {
        /* mark the proc to indicate that this is a TBE proc */
        extern void tbeproc(void *proc);

        tbeproc(th->task->bsd_info);
    }

    return KERN_SUCCESS;
}

kern_return_t disable_bluebox( host_t host ) {      /* User call to terminate bluebox */

    thread_t    act;

    act = current_thread();                         /* Get our thread */

    if (host == HOST_NULL) return KERN_INVALID_HOST;

    if(!is_suser()) return KERN_FAILURE;            /* We will only do this for the superuser */
    if(!act->machine.bbDescAddr) return KERN_FAILURE;   /* Bail if not authorized... */

    disable_bluebox_internal(act);                  /* Clean it all up */
    return KERN_SUCCESS;                            /* Leave */
}

void disable_bluebox_internal(thread_t act) {       /* Terminate bluebox */

    (void) vm_map_unwire(act->map,                  /* Unwire the descriptor in user's address space */
        (vm_offset_t)act->machine.bbUserDA,
        (vm_offset_t)act->machine.bbUserDA + PAGE_SIZE,
        FALSE);

    kmem_free(kernel_map, (vm_offset_t)act->machine.bbDescAddr & -PAGE_SIZE, PAGE_SIZE);    /* Release the page */

    act->machine.bbDescAddr = 0;                    /* Clear kernel pointer to it */
    act->machine.bbUserDA = 0;                      /* Clear user pointer to it */
    act->machine.bbTableStart = 0;                  /* Clear user pointer to TWI table */
    act->machine.bbTaskID = 0;                      /* Clear opaque task ID */
    act->machine.bbTaskEnv = 0;                     /* Clean task environment data */
    act->machine.emPendRupts = 0;                   /* Clean pending 'rupt count */
    act->machine.specFlags &= ~(bbNoMachSC | bbPreemptive | bbThread);  /* Clean up Blue Box enables */
    disable_preemption();                           /* Don't move us around */
    getPerProc()->spcFlags = act->machine.specFlags;    /* Copy the flags */
    enable_preemption();                            /* Ok to move us around */
    return;
}

/*
 * Use the new PPCcall method to enable blue box threads
 *
 *  save->r3 = taskID
 *  save->r4 = TWI_TableStart
 *  save->r5 = Desc_TableStart
 *
 */
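/*
 * Illustrative sketch (an assumption, not code from this file): a caller on
 * the PPCcall path arrives here with the savearea filled in roughly as
 *
 *     save->save_r3 = (uint64_t)taskID;
 *     save->save_r4 = (uint64_t)TWI_TableStart;
 *     save->save_r5 = (uint64_t)Desc_TableStart;
 *     (void) bb_enable_bluebox(save);     - result comes back in save->save_r3
 *
 * The host argument below is a non-null sentinel (0xFFFFFFFF); the savearea
 * carries no host port, and enable_bluebox() only rejects HOST_NULL.
 */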
int bb_enable_bluebox( struct savearea *save )
{
    kern_return_t rc;

    rc = enable_bluebox( (host_t)0xFFFFFFFF, (void *)save->save_r3, (void *)save->save_r4, (char *)save->save_r5 );
    save->save_r3 = rc;
    return 1;                                       /* Return with normal AST checking */
}

/*
 * Use the new PPCcall method to disable blue box threads
 *
 */
int bb_disable_bluebox( struct savearea *save )
{
    kern_return_t rc;

    rc = disable_bluebox( (host_t)0xFFFFFFFF );
    save->save_r3 = rc;
    return 1;                                       /* Return with normal AST checking */
}

/*
 * Search the task's thread list for the thread with the matching taskID, then
 * set its task environment pointer.  A "task" in this case is a preemptive
 * thread in Mac OS 9.
 *
 *  save->r3 = taskID
 *  save->r4 = taskEnv
 */

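/*
 * Return convention (from the code below): save->save_r3 is set to -1 if no
 * active thread with the given taskID is found, and to 0 on success.  The C
 * return value of 1 requests normal AST checking on the way out.
 */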
int bb_settaskenv( struct savearea *save )
{
    int         i;
    task_t      task;
    thread_t    act, fact;

    task = current_task();                          /* Figure out who our task is */

    task_lock(task);                                /* Lock our task */
    fact = (thread_t)task->threads.next;            /* Get the first activation on task */
    act = 0;                                        /* Pretend we didn't find it yet */

    for(i = 0; i < task->thread_count; i++) {       /* Scan the whole list */
        if(fact->machine.bbDescAddr) {              /* Is this a Blue thread? */
            if ( fact->machine.bbTaskID == save->save_r3 ) {    /* Is this the task we are looking for? */
                act = fact;                         /* Yeah... */
                break;                              /* Found it, Bail the loop... */
            }
        }
        fact = (thread_t)fact->task_threads.next;   /* Go to the next one */
    }

    if ( !act || !act->active) {                    /* Didn't find it, or it is no longer active */
        task_unlock(task);                          /* Release task lock */
        save->save_r3 = -1;                         /* we failed to find the taskID */
        return 1;
    }

    thread_reference(act);                          /* Take a reference before dropping the task lock */

    task_unlock(task);                              /* Safe to release now */

    thread_mtx_lock(act);                           /* Make sure this stays 'round */

    act->machine.bbTaskEnv = save->save_r4;         /* Set the thread's task environment value */
    if(act == current_thread()) {                   /* Are we setting our own? */
        disable_preemption();                       /* Don't move us around */
        getPerProc()->ppbbTaskEnv = act->machine.bbTaskEnv;     /* Remember the environment */
        enable_preemption();                        /* Ok to move us around */
    }

    thread_mtx_unlock(act);                         /* Unlock the activation */
    thread_deallocate(act);
    save->save_r3 = 0;                              /* Show success */
    return 1;                                       /* Return with normal AST checking */
}