/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* Please see the License for the specific language governing rights and
* limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
#include <debug.h>
#include <mach_assert.h>
#include <mach/exception_types.h>
+#include <mach/kern_return.h>
#include <mach/ppc/vm_param.h>
#include <assym.s>
#include <ppc/trap.h>
#include <ppc/exception.h>
#include <ppc/savearea.h>
-#include <ppc/spl.h>
#define VERIFYSAVE 0
#define FPVECDBG 0
+#define FPFLOOD 0
#define INSTRUMENT 0
/*
.globl EXT(thandler)
LEXT(thandler) ; Trap handler
- mfsprg r25,0 ; Get the per_proc
+ mfsprg r13,1 ; Get the current activation
+ lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
lwz r1,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
cmpwi cr0,r1,0 ; Are we on interrupt stack?
- lwz r6,PP_ACTIVE_THREAD(r25) ; Get the pointer to the currently active thread
+ mr r6,r13
beq- cr0,EXT(ihandler) ; If on interrupt stack, treat this as interrupt...
- lwz r13,THREAD_TOP_ACT(r6) ; Point to the active activation
lwz r26,ACT_MACT_SPF(r13) ; Get special flags
lwz r8,ACT_MACT_PCB(r13) ; Get the last savearea used
rlwinm. r26,r26,0,bbThreadbit,bbThreadbit ; Do we have Blue Box Assist active?
subi r1,r26,FM_REDZONE ; Make a red zone on interrupt time kernel stack
.L_kstackfree:
- lwz r7,savesrr1+4(r4) ; Pick up the entry MSR
+ lwz r31,savesrr1+4(r4) ; Pick up the entry MSR
sub r9,r1,r9 ; Get displacement into the kernel stack
li r0,0 ; Make this 0
rlwinm. r0,r9,0,28,31 ; Verify that we have a 16-byte aligned stack (and get a 0)
.L_state_on_kstack:
lwz r9,savevrsave(r4) ; Get the VRSAVE register
bne-- kernelStackUnaligned ; Stack is unaligned...
- rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
+ rlwinm. r6,r31,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
subi r1,r1,FM_SIZE ; Push a header onto the current stack
bgt-- cr2,kernelStackBad ; Kernel stack is bogus...
stwu r1, -FM_SIZE(r1) ; and make new frame
#endif /* DEBUG */
+ mr r30,r4
+ lwz r3,SAVtime(r4)
+ lwz r4,SAVtime+4(r4)
+ addi r5,r13,SYSTEM_TIMER
+ bl EXT(thread_timer_event)
+ addi r5,r25,SYSTEM_STATE
+ bl EXT(state_event)
+
+ lwz r7,ACT_TASK(r13)
+ lwz r8,TASK_VTIMERS(r7)
+ cmpwi r8,0
+ beq++ 0f
+
+ lwz r7,ACT_PER_PROC(r13)
+ li r4,AST_BSD
+ lwz r8,PP_PENDING_AST(r7)
+ or r8,r8,r4
+ stw r8,PP_PENDING_AST(r7)
+ addi r3,r13,ACT_AST
+ bl EXT(hw_atomic_or)
+0:
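/*
 * In C terms, the vtimer check above is roughly the following (a sketch only;
 * the field and constant names are taken from the assembly offsets, not from
 * a header):
 *
 *	if (task->vtimers != 0) {
 *		per_proc->pending_ast |= AST_BSD;	// PP_PENDING_AST
 *		hw_atomic_or(&thread->ast, AST_BSD);	// ACT_AST, atomic
 *	}
 *
 * A task with virtual timers armed gets AST_BSD posted both in the per_proc
 * pending mask and in the thread's own AST word, so the BSD AST handler runs
 * on the way back to user mode.
 */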
/* call trap handler proper, with
- * ARG0 = type (not yet, holds pcb ptr)
- * ARG1 = saved_state ptr (already there)
- * ARG2 = dsisr (already there)
- * ARG3 = dar (already there)
+ * ARG0 = type
+ * ARG1 = saved_state ptr
+ * ARG2 = dsisr
+ * ARG3 = dar
*/
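/*
 * For reference, a sketch of the C-side entry point this branches to (the
 * argument types are assumed from the register usage here; see
 * osfmk/ppc/trap.c for the real declaration):
 *
 *	struct savearea *trap(int trapno, struct savearea *ssp,
 *	                      unsigned int dsisr, addr64_t dar);
 *
 * The exception code goes in r3, the savearea pointer in r4, and trap()
 * returns (in r3) the savearea to resume from.
 */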
-
- lwz r3,saveexception(r4) ; Get the exception code
+ mr r4,r30
+ lwz r3,saveexception(r30) ; Get the exception code
lwz r0,ACT_MACT_SPF(r13) ; Get the special flags
addi r5,r3,-T_DATA_ACCESS ; Adjust to start of range
lwz r5,savedsisr(r4) ; Get the saved DSISR
crnor cr7_eq,cr0_eq,cr2_gt ; We should intercept if in VM and is a true trap (cr7_eq == 1 if yes)
- rlwinm. r0,r7,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? (cr0_eq == 1 if yes)
+ rlwinm. r0,r31,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state? (cr0_eq == 1 if yes)
cmpi cr2,r3,T_PREEMPT ; Is this a preemption?
+
+ beq-- .L_check_VM
+ stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
+.L_check_VM:
crandc cr0_eq,cr7_eq,cr0_eq ; Do not intercept if we are in the kernel (cr0_eq == 1 if yes)
.L_call_trap:
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
+
bl EXT(trap)
lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
andc r7,r7,r10 ; Turn off VEC, FP, and EE
mtmsr r7 ; Disable for interrupts
- mfsprg r10,0 ; Restore the per_proc info
+ mfsprg r8,1 ; Get the current activation
+ lwz r10,ACT_PER_PROC(r8) ; Get the per_proc block
/*
* This is also the point where new threads come when they are created.
* The new thread is set up to look like a thread that took an
lwz r11,SAVflags(r3) ; Get the flags of the current savearea
lwz r0,savesrr1+4(r3) ; Get the MSR we are going to
lwz r4,SAVprev+4(r3) ; Pick up the previous savearea
- mfsprg r8,1 ; Get the current activation
- lwz r1,PP_ACTIVE_THREAD(r10) ; Get the active thread
+ mfsprg r8,1 ; Get the current thread
rlwinm r11,r11,0,15,13 ; Clear the syscall flag
rlwinm. r0,r0,0,MSR_PR_BIT,MSR_PR_BIT ; Are we going to the user?
+ mr r1,r8
stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared)
-
+
lwz r5,THREAD_KERNEL_STACK(r1) ; Get the base pointer to the stack
stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
LEXT(shandler) ; System call handler
lwz r7,savesrr1+4(r4) ; Get the SRR1 value
- mfsprg r25,0 ; Get the per proc area
+ mfsprg r13,1 ; Get the current activation
+ lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
lwz r0,saver0+4(r4) ; Get the original syscall number
lwz r17,PP_ISTACKPTR(r25) ; Get interrupt stack pointer
rlwinm r15,r0,0,0,19 ; Clear the bottom of call number for fast check
lwz r9,savevrsave(r4) ; Get the VRsave register
beq-- EXT(ihandler) ; On interrupt stack, not allowed...
rlwinm. r6,r7,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
- lwz r16,PP_ACTIVE_THREAD(r25) ; Get the thread pointer
- mfsprg r13,1 ; Pick up the active thread
+ mr r16,r13
beq++ svecoff ; Vector off, do not save vrsave...
stw r9,liveVRS(r25) ; Set the live value
#endif /* DEBUG */
stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
+ stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
li r0,0 ; Clear this out
stw r14,SAVprev+4(r4) ; Queue the new save area in the front
stw r13,SAVact(r4) ; Point the savearea at its activation
mr r30,r4 ; Save pointer to the new context savearea
stw r0,ACT_MACT_KSP(r13) ; Mark stack as busy with 0 val
stw r15,FM_BACKPTR(r1) ; Link stack frame backwards
+
+ lwz r3,SAVtime(r30)
+ lwz r4,SAVtime+4(r30)
+ addi r5,r13,SYSTEM_TIMER
+ bl EXT(thread_timer_event)
+ addi r5,r25,SYSTEM_STATE
+ bl EXT(state_event)
+
+ lwz r7,ACT_TASK(r13)
+ lwz r8,TASK_VTIMERS(r7)
+ cmpwi r8,0
+ beq++ 0f
+
+ lwz r7,ACT_PER_PROC(r13)
+ li r4,AST_BSD
+ lwz r8,PP_PENDING_AST(r7)
+ or r8,r8,r4
+ stw r8,PP_PENDING_AST(r7)
+ addi r3,r13,ACT_AST
+ bl EXT(hw_atomic_or)
+0:
#if DEBUG
/* If debugging, we need two frames, the first being a dummy
stwu r1, -FM_SIZE(r1) ; and make new frame
#endif /* DEBUG */
+ mr r4,r30
+
lwz r15,SAVflags(r30) ; Get the savearea flags
lwz r0,saver0+4(r30) ; Get R0 back
mfmsr r11 ; Get the MSR
- stwu r1,-(FM_SIZE+ARG_SIZE)(r1) ; Make a stack frame
+ stwu r1,-(FM_SIZE+ARG_SIZE+MUNGE_ARGS_SIZE)(r1) ; Make a stack frame
ori r11,r11,lo16(MASK(MSR_EE)) ; Turn on interruption enabled bit
rlwinm r10,r0,0,0,19 ; Keep only the top part
oris r15,r15,SAVsyscall >> 16 ; Mark that this is a syscall
stw r15,SAVflags(r30) ; Save syscall marker
beq-- cr6,exitFromVM ; It is time to exit from alternate context...
- beq- ppcscall ; Call the ppc-only system call handler...
+ beq-- ppcscall ; Call the ppc-only system call handler...
mr. r0,r0 ; What kind is it?
mtmsr r11 ; Enable interruptions
blt-- .L_kernel_syscall ; System call number if negative, this is a mach call...
+ lwz r8,ACT_TASK(r13) ; Get our task
cmpwi cr0,r0,0x7FFA ; Special blue box call?
beq-- .L_notify_interrupt_syscall ; Yeah, call it...
- lwz r8,ACT_TASK(r13) ; Get our task
- lis r10,hi16(EXT(c_syscalls_unix)) ; Get top half of counter address
lwz r7,TASK_SYSCALLS_UNIX(r8) ; Get the current count
- ori r10,r10,lo16(EXT(c_syscalls_unix)) ; Get low half of counter address
- addi r7,r7,1 ; Bump it
- lwz r9,0(r10) ; Get counter
- stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it
mr r3,r30 ; Get PCB/savearea
mr r4,r13 ; current activation
- addi r9,r9,1 ; Add 1
- stw r9,0(r10) ; Save it back
+ addi r7,r7,1 ; Bump it
+ stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it
+
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
+
bl EXT(unix_syscall) ; Check out unix...
.L_call_server_syscall_exception:
.L_notify_interrupt_syscall:
lwz r3,saver3+4(r30) ; Get the new PC address to pass in
bl EXT(syscall_notify_interrupt)
- b .L_syscall_return
+/*
+ * Ok, return from C function, R3 = return value
+ *
+ * saved state is still in R30 and the active thread is in R16.
+ */
+ mr r31,r16 ; Move the current thread pointer
+ stw r3,saver3+4(r30) ; Stash the return code
+ b .L_thread_syscall_ret_check_ast
;
; Handle PPC-only system call interface
; and the savearea/pcb as the first parameter.
; It is up to the callee to enable interruptions if
; they should be. We are in a state here where
-; both interrupts and preemption is ok, but because we could
+; both interrupts and preemption are ok, but because we could
; be calling diagnostic code we will not enable.
;
; Also, the callee is responsible for finding any parameters
mr. r3,r3 ; See what we should do
mr r31,r16 ; Restore the current thread pointer
bgt+ .L_thread_syscall_ret_check_ast ; Take normal AST checking return....
- mfsprg r10,0 ; Get the per_proc
+ mfsprg r10,1 ; Get the current activation
+ lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
blt+ .L_thread_syscall_return ; Return, but no ASTs....
lwz r0,saver0+4(r30) ; Restore the system call number
b .L_call_server_syscall_exception ; Go to common exit...
+
+/*
+ * we get here for mach system calls
+ * when kdebug tracing is enabled
+ */
+
+ksystrace:
+ mr r4,r30 ; Pass in saved state
+ bl EXT(syscall_trace)
+
+ cmplw r31,r29 ; Is this syscall in the table?
+ add r31,r27,r28 ; Point right to the syscall table entry
+
+ bge- .L_call_server_syscall_exception ; The syscall number is invalid
+
+ lwz r0,savesrr1(r30) ; Get the saved srr1
+ rlwinm. r0,r0,0,MSR_SF_BIT,MSR_SF_BIT ; Test for 64 bit caller
+ lwz r0,MACH_TRAP_ARG_MUNGE32(r31) ; Pick up the 32 bit munge function address
+ beq-- .L_ksystrace_munge
+ lwz r0,MACH_TRAP_ARG_MUNGE64(r31) ; Pick up the 64 bit munge function address
+
+.L_ksystrace_munge:
+ cmplwi r0,0 ; do we have a munger to call?
+ mtctr r0 ; Set the function call address
+ addi r3,r30,saver3 ; Pointer to args from save area
+ addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
+ beq-- .L_ksystrace_trapcall ; just make the trap call
+ bctrl ; Call the munge function
+
+.L_ksystrace_trapcall:
+ lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
+ mtctr r0 ; Set the function call address
+ addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
+ bctrl
+
+ mr r4,r30 ; Pass in the savearea
+ bl EXT(syscall_trace_end) ; Trace the exit of the system call
+ b .L_mach_return
+
+
+
/* Once here, we know that the syscall was -ve
* we should still have r1=ksp,
* r16 = pointer to current thread,
* r30 = pointer to saved state (in pcb)
*/
- .align 5
+ .align 5
.L_kernel_syscall:
;
; Call a function that can print out our syscall info
; Note that we don't care about any volatiles yet
;
- lis r8,hi16(EXT(kdebug_enable)) ; Get top of kdebug_enable
- ori r8,r8,lo16(EXT(kdebug_enable)) ; Get bottom of kdebug_enable
+ lwz r10,ACT_TASK(r13) ; Get our task
lwz r0,saver0+4(r30)
- lwz r8,0(r8) ; Get kdebug_enable
- lis r29,hi16(EXT(mach_trap_count)) ; Get address of count
- neg r31,r0 ; Make this positive
- ori r29,r29,lo16(EXT(mach_trap_count)) ; Get address of count
+ lis r8,hi16(EXT(kdebug_enable)) ; Get top of kdebug_enable
lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
- cmplwi r8,0 ; Is kdebug_enable false?
- lwz r29,0(r29) ; Pick up the actual count of system calls
- slwi r27,r31,MACH_TRAP_OFFSET_POW2 ; Convert index to offset
- ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
- beq++ ksysnotrc ; No tracing...
- mr r4,r30 ; Pass in saved state
- bl EXT(syscall_trace)
-
-ksysnotrc: cmplw r31,r29 ; Is this syscall in the table?
- add r31,r27,r28 ; Point right to the syscall table entry
-
- bge- .L_call_server_syscall_exception ; The syscall number is invalid
-
- lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
+ ori r8,r8,lo16(EXT(kdebug_enable)) ; Get bottom of kdebug_enable
+ lwz r8,0(r8) ; Get kdebug_enable
-;
-; NOTE: We do not support more than 8 parameters for PPC. The only
-; system call to use more than 8 is mach_msg_overwrite_trap and it
-; uses 9. We pass a 0 in as number 9.
-;
- lwz r8,ACT_TASK(r13) ; Get our task
- lis r29,hi16(EXT(kern_invalid)) ; Get high half of invalid syscall function
- ori r29,r29,lo16(EXT(kern_invalid)) ; Get low half of invalid syscall function
- lwz r7,TASK_SYSCALLS_MACH(r8) ; Get the current count
- lis r10,hi16(EXT(c_syscalls_mach)) ; Get top half of counter address
- lwz r3,saver3+4(r30) ; Restore r3
- addi r7,r7,1 ; Bump it
- cmp cr0,r0,r29 ; Check if this is an invalid system call
- ori r10,r10,lo16(EXT(c_syscalls_mach)) ; Get low half of counter address
- beq- .L_call_server_syscall_exception ; We have a bad one...
- stw r7,TASK_SYSCALLS_MACH(r8) ; Save count
- lwz r4,saver4+4(r30) ; Restore r4
- lwz r9,0(r10) ; Get counter
- mtctr r0 ; Set the function call address
- lwz r5,saver5+4(r30) ; Restore r5
- lwz r6,saver6+4(r30) ; Restore r6
- addi r9,r9,1 ; Add 1
- lwz r7,saver7+4(r30) ; Restore r7
- li r0,0 ; Clear this out
- lwz r8,saver8+4(r30) ; Restore r8
- stw r9,0(r10) ; Save it back
- lwz r9,saver9+4(r30) ; Restore r9
- lwz r10,saver10+4(r30) ; Restore r10
- stw r0,FM_ARG0(r1) ; Clear that 9th parameter just in case some fool uses it
- bctrl ; perform the actual syscall
-
- lis r10,hi16(EXT(kdebug_enable)) ; Get top of kdebug_enable
- ori r10,r10,lo16(EXT(kdebug_enable)) ; Get bottom of kdebug_enable
- lwz r10,0(r10) ; Get kdebug_enable
- cmplwi r10,0 ; Is kdebug_enable false?
-
- beq++ .L_syscall_return ; No tracing...
- mr r4,r30 ; Pass in the savearea
- bl EXT(syscall_trace_end) ; Trace the exit of the system call
+ lwz r7,TASK_SYSCALLS_MACH(r10) ; Get the current count
+ neg r31,r0 ; Make this positive
+ mr r3,r31 ; save it
+ slwi r27,r3,4 ; multiply by 16
+ slwi r3,r3,2 ; and the original by 4
+ ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
+ add r27,r27,r3 ; for a total of 20x (5 words/entry)
+ addi r7,r7,1 ; Bump TASK_SYSCALLS_MACH count
+ cmplwi r8,0 ; Is kdebug_enable non-zero
+ stw r7,TASK_SYSCALLS_MACH(r10) ; Save count
+ bne-- ksystrace ; yes, tracing enabled
+
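/*
 * mach_trap_table entries are 5 words (20 bytes) each, hence the
 * (n << 4) + (n << 2) offset computed above. A sketch of the entry layout
 * this code relies on (field order assumed from the MACH_TRAP_* offsets used
 * below, not quoted from the real header):
 *
 *	typedef struct {
 *		int	mach_trap_arg_count;
 *		void	*mach_trap_function;		// MACH_TRAP_FUNCTION
 *		void	*mach_trap_arg_munge32;		// MACH_TRAP_ARG_MUNGE32
 *		void	*mach_trap_arg_munge64;		// MACH_TRAP_ARG_MUNGE64
 *		int	mach_trap_unused;
 *	} mach_trap_t;				// 20 bytes on 32-bit PPC
 */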
+ cmplwi r31,MACH_TRAP_TABLE_COUNT ; Is this syscall in the table?
+ add r31,r27,r28 ; Point right to the syscall table entry
+
+ bge-- .L_call_server_syscall_exception ; The syscall number is invalid
+
+ lwz r0,savesrr1(r30) ; Get the saved srr1
+ rlwinm. r0,r0,0,MSR_SF_BIT,MSR_SF_BIT ; Test for 64 bit caller
+ lwz r0,MACH_TRAP_ARG_MUNGE32(r31) ; Pick up the 32 bit munge function address
+ beq-- .L_kernel_syscall_munge
+ lwz r0,MACH_TRAP_ARG_MUNGE64(r31) ; Pick up the 64 bit munge function address
+
+.L_kernel_syscall_munge:
+ cmplwi r0,0 ; test for null munger
+ mtctr r0 ; Set the function call address
+ addi r3,r30,saver3 ; Pointer to args from save area
+ addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
+ beq-- .L_kernel_syscall_trapcall ; null munger - skip to trap call
+ bctrl ; Call the munge function
+
+.L_kernel_syscall_trapcall:
+ lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
+ mtctr r0 ; Set the function call address
+ addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
+
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
-/* 'standard' syscall returns here - INTERRUPTS ARE STILL ON */
+ bctrl
-/* r3 contains value that we're going to return to the user
- */
/*
* Ok, return from C function, R3 = return value
*
* get the active thread's PCB pointer and thus pointer to user state
- * saved state is still in R30 and the active thread is in R16 .
+ * saved state is still in R30 and the active thread is in R16
*/
-/* Store return value into saved state structure, since
- * we need to pick up the value from here later - the
- * syscall may perform a thread_set_syscall_return
+.L_mach_return:
+ srawi r0,r3,31 ; properly extend the return code
+ cmpi cr0,r3,KERN_INVALID_ARGUMENT ; deal with invalid system calls
+ mr r31,r16 ; Move the current thread pointer
+ stw r0, saver3(r30) ; stash the high part of the return code
+ stw r3,saver3+4(r30) ; Stash the low part of the return code
+ beq-- cr0,.L_mach_invalid_ret ; otherwise fall through into the normal return path
+.L_mach_invalid_arg:
+
+
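/*
 * The return-code store at .L_mach_return above, as a C sketch (the savearea
 * field names here are illustrative): the 32-bit kern_return_t is widened
 * into the 64-bit r3 image kept in the savearea.
 *
 *	int32_t  kr = ret;				// value handed back in r3
 *	uint32_t hi = (kr < 0) ? 0xFFFFFFFFu : 0;	// what srawi r0,r3,31 leaves in r0
 *	save->r3_high = hi;				// stw r0,saver3(r30)
 *	save->r3_low  = (uint32_t)kr;			// stw r3,saver3+4(r30)
 */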
+/* 'standard' syscall returns here - INTERRUPTS ARE STILL ON
+ * the syscall may perform a thread_set_syscall_return
* followed by a thread_exception_return, ending up
* at thread_syscall_return below, with SS_R3 having
* been set up already
- */
-
-/* When we are here, r16 should point to the current thread,
+ *
+ * When we are here, r31 should point to the current thread,
* r30 should point to the current pcb
+ * r3 contains value that we're going to return to the user
+ * which has already been stored back into the save area
*/
-
-/* save off return value, we must load it
- * back anyway for thread_exception_return
- */
-
-.L_syscall_return:
- mr r31,r16 ; Move the current thread pointer
- stw r3,saver3+4(r30) ; Stash the return code
-
+
.L_thread_syscall_ret_check_ast:
lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
mfmsr r12 ; Get the current MSR
andc r12,r12,r10 ; Turn off VEC, FP, and EE
mtmsr r12 ; Turn interruptions off
- mfsprg r10,0 ; Get the per_processor block
+ mfsprg r10,1 ; Get the current activation
+ lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
/* Check to see if there's an outstanding AST */
- lwz r4,PP_NEED_AST(r10) ; Get the pointer to the ast requests
- lwz r4,0(r4) ; Get the flags
+ lwz r4,PP_PENDING_AST(r10)
cmpi cr0,r4, 0 ; Any pending asts?
beq++ cr0,.L_syscall_no_ast ; Nope...
scrnotkern:
#endif /* DEBUG */
- li r3,AST_ALL ; Set ast flags
+ lis r3,hi16(AST_ALL) ; Set ast flags
li r4,1 ; Set interrupt allowed
+ ori r3,r3,lo16(AST_ALL)
bl EXT(ast_taken) ; Process the pending ast
b .L_thread_syscall_ret_check_ast ; Go see if there was another...
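/*
 * ast_taken is invoked here with the full AST mask and with interrupts
 * allowed; its assumed C interface (see osfmk/kern/ast.c) is roughly
 *
 *	void ast_taken(ast_t reasons, boolean_t enable);
 *
 * so AST_ALL is passed in r3 and TRUE (1) in r4 above.
 */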
+.L_mach_invalid_ret:
+/*
+ * We need to figure out why we got a KERN_INVALID_ARGUMENT.
+ * If it was due to a non-existent system call,
+ * then we want to throw an exception... otherwise
+ * we want to pass the error code back to the caller.
+ */
+ lwz r0,saver0+4(r30) ; reload the original syscall number
+ neg r28,r0 ; Make this positive
+ mr r4,r28 ; save a copy
+ slwi r27,r4,4 ; multiply by 16
+ slwi r4,r4,2 ; and the original by 4
+ lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
+ add r27,r27,r4 ; for a total of 20x (5 words/entry)
+ ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
+ add r28,r27,r28 ; Point right to the syscall table entry
+ lwz r27,MACH_TRAP_FUNCTION(r28) ; Pick up the function address
+ lis r28,hi16(EXT(kern_invalid)) ; Get high half of invalid syscall function
+ ori r28,r28,lo16(EXT(kern_invalid)) ; Get low half of invalid syscall function
+ cmpw cr0,r27,r28 ; Check if this is an invalid system call
+ beq-- .L_call_server_syscall_exception ; We have a bad system call
+ b .L_mach_invalid_arg ; a system call returned KERN_INVALID_ARG
+
+
/* thread_exception_return returns to here, almost all
* registers intact. It expects a full context restore
* of what it hasn't restored itself (ie. what we use).
.L_thread_syscall_return:
mr r3,r30 ; Get savearea to the correct register for common exit
- lwz r5,THREAD_KERNEL_STACK(r31) ; Get the base pointer to the stack
+
lwz r11,SAVflags(r30) ; Get the flags
+ lwz r5,THREAD_KERNEL_STACK(r31) ; Get the base pointer to the stack
lwz r4,SAVprev+4(r30) ; Get the previous save area
+ rlwinm r11,r11,0,15,13 ; Clear the syscall flag
mfsprg r8,1 ; Now find the current activation
addi r5,r5,KERNEL_STACK_SIZE-FM_SIZE ; Reset to empty
- stw r4,ACT_MACT_PCB(r8) ; Save previous save area
- rlwinm r11,r11,0,15,13 ; Clear the syscall flag
- stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
stw r11,SAVflags(r30) ; Stick back the flags
+ stw r5,ACT_MACT_KSP(r8) ; Save the empty stack pointer
+ stw r4,ACT_MACT_PCB(r8) ; Save previous save area
b chkfac ; Go end it all...
/*
frame, given that we're not going to return.
*/
- mfsprg r10,0 ; Get the per_processor block
- lwz r4,PP_NEED_AST(r10)
- li r3,AST_ALL
- lwz r4,0(r4)
+ mfsprg r10,1 ; Get the current activation
+ lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
+ lwz r4,PP_PENDING_AST(r10)
cmpi cr0,r4, 0
- li r4,1
beq+ cr0,.L_exc_ret_no_ast
/* Yes there is, call ast_taken
* pretending that the user thread took an AST exception here,
* ast_taken will save all state and bring us back here
*/
+
+ lis r3,hi16(AST_ALL)
+ li r4,1
+ ori r3,r3,lo16(AST_ALL)
bl EXT(ast_taken)
b .L_thread_exc_ret_check_ast ; check for a second AST (rare)
.L_exc_ret_no_ast:
mfsprg r30,1 ; Get the current activation
- lwz r31,ACT_THREAD(r30) ; Get the current thread
+ mr r31,r30
lwz r30,ACT_MACT_PCB(r30)
mr. r30,r30 ; Is there any context yet?
lwz r10,savesrr1+4(r4) ; Get SRR1
lwz r7,savevrsave(r4) ; Get the VRSAVE register
- mfsprg r25,0 ; Get the per_proc block
+ mfsprg r13,1 ; Get the current activation
+ lwz r25,ACT_PER_PROC(r13) ; Get the per_proc block
li r14,0 ; Zero this for now
- rlwinm. r13,r10,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
+ rlwinm. r16,r10,0,MSR_VEC_BIT,MSR_VEC_BIT ; Was vector on?
lwz r1,PP_ISTACKPTR(r25) ; Get the interrupt stack
- li r13,0 ; Zero this for now
- lwz r16,PP_ACTIVE_THREAD(r25) ; Get the thread pointer
+ li r16,0 ; Zero this for now
beq+ ivecoff ; Vector off, do not save vrsave...
stw r7,liveVRS(r25) ; Set the live value
-ivecoff: cmplwi cr1,r16,0 ; Are we still booting?
-
-ifpoff: mr. r1,r1 ; Is it active?
- beq- cr1,ihboot1 ; We are still coming up...
- lwz r13,THREAD_TOP_ACT(r16) ; Pick up the active thread
+ivecoff: li r0,0 ; Get a constant 0
+ rlwinm r5,r10,0,MSR_PR_BIT,MSR_PR_BIT ; Are we trapping from supervisor state?
+ mr. r1,r1 ; Is it active?
+ cmplwi cr2,r5,0 ; cr2_eq == 1 if yes
+ mr r16,r13
lwz r14,ACT_MACT_PCB(r13) ; Now point to the PCB
-
-ihboot1: lwz r9,saver1+4(r4) ; Pick up the rupt time stack
+ lwz r9,saver1+4(r4) ; Pick up the rupt time stack
stw r14,SAVprev+4(r4) ; Queue the new save area in the front
stw r13,SAVact(r4) ; Point the savearea at its activation
- beq- cr1,ihboot4 ; We are still coming up...
stw r4,ACT_MACT_PCB(r13) ; Point to our savearea
+ beq cr2,ifromk
+ stw r4,ACT_MACT_UPCB(r13) ; Store user savearea
-ihboot4: bne .L_istackfree ; Nope...
+ifromk: bne .L_istackfree ; Nope...
/* We're already on the interrupt stack, get back the old
* stack pointer and make room for a frame
stwu r1,-FM_SIZE(r1) ; Make another new frame for C routine
#endif /* DEBUG */
- lwz r5,savedsisr(r4) ; Get the DSISR
- lwz r6,savedar+4(r4) ; Get the DAR
-
- bl EXT(interrupt)
-
+ mr r31,r3
+ mr r30,r4
+
+ lwz r3,SAVtime(r4)
+ lwz r4,SAVtime+4(r4)
+ addi r5,r25,PP_PROCESSOR
+ lwz r5,KERNEL_TIMER(r5)
+ bl EXT(thread_timer_event)
+ addi r6,r25,PP_PROCESSOR
+ lwz r5,CURRENT_STATE(r6)
+ addi r7,r6,USER_STATE
+ cmplw r5,r7
+ bne 0f
+ addi r5,r6,SYSTEM_STATE
+ bl EXT(state_event)
+0:
+
+ lwz r7,ACT_TASK(r13)
+ lwz r8,TASK_VTIMERS(r7)
+ cmpwi r8,0
+ beq++ 0f
+
+ lwz r7,ACT_PER_PROC(r13)
+ li r4,AST_BSD
+ lwz r8,PP_PENDING_AST(r7)
+ or r8,r8,r4
+ stw r8,PP_PENDING_AST(r7)
+ addi r3,r13,ACT_AST
+ bl EXT(hw_atomic_or)
+0:
+
+ mr r3,r31
+ mr r4,r30
+ lwz r5,savedsisr(r30) ; Get the DSISR
+ lwz r6,savedar+4(r30) ; Get the DAR
+
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
-/* interrupt() returns a pointer to the saved state in r3
- *
- * Ok, back from C. Disable interrupts while we restore things
- */
- .globl EXT(ihandler_ret)
+ bl EXT(interrupt)
-LEXT(ihandler_ret) ; Marks our return point from debugger entry
+/* interrupt() returns a pointer to the saved state in r3 */
lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
mfmsr r0 ; Get our MSR
ori r10,r10,lo16(MASK(MSR_FP)|MASK(MSR_EE)) ; Add in FP and EE
andc r0,r0,r10 ; Turn off VEC, FP, and EE
mtmsr r0 ; Make sure interrupts are disabled
- mfsprg r10,0 ; Get the per_proc block
+ mfsprg r8,1 ; Get the current activation
+ lwz r10,ACT_PER_PROC(r8) ; Get the per_proc block
lwz r7,SAVflags(r3) ; Pick up the flags
- lwz r8,PP_ACTIVE_THREAD(r10) ; and the active thread
- lwz r9,SAVprev+4(r3) ; Get previous save area
+ lwz r9,SAVprev+4(r3) ; Get previous save area
cmplwi cr1,r8,0 ; Are we still initializing?
lwz r12,savesrr1+4(r3) ; Get the MSR we will load on return
- lwz r8,THREAD_TOP_ACT(r8) ; Pick up the active thread
andis. r11,r7,hi16(SAVrststk) ; Is this the first on the stack?
stw r9,ACT_MACT_PCB(r8) ; Point to previous context savearea
mr r4,r3 ; Move the savearea pointer
cmplwi r3, 0 ; Check for preemption
bne .L_no_int_ast ; Do not preempt if level is not zero
andi. r6,r12,MASK(MSR_PR) ; privilege mode
- lwz r11,PP_NEED_AST(r10) ; Get the AST request address
- lwz r11,0(r11) ; Get the request
+ lwz r11,PP_PENDING_AST(r10) ; Get the pending AST mask
beq- .L_kernel_int_ast ; In kernel space, AST_URGENT check
li r3,T_AST ; Assume the worst
mr. r11,r11 ; Are there any pending?
stw r19,FPUcpu(r20) ; Claim context for us
eieio ; Make sure this gets out before owner clear
-
-#if ppSize != 4096
-#error per_proc_info is not 4k in size
+
+#if ppeSize != 16
+#error per_proc_entry is not 16 bytes in size
#endif
- lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc
- slwi r22,r22,12 ; FInd offset to the owner per_proc
- ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc
- li r24,FPUowner ; Displacement to FPU owner
- add r22,r23,r22 ; Point to the owner per_proc
+ lis r23,hi16(EXT(PerProcTable)) ; Set base PerProcTable
+ slwi r22,r22,4 ; Find offset to the owner per_proc_entry
+ ori r23,r23,lo16(EXT(PerProcTable)) ; Set base PerProcTable
+ li r24,FPUowner ; Displacement to float owner
+ add r22,r23,r22 ; Point to the owner per_proc_entry
+ lwz r22,ppe_vaddr(r22) ; Point to the owner per_proc
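/*
 * PerProcTable lookup, as a C sketch (type and field names assumed from the
 * assembly): each per_proc_entry is 16 bytes, the table is indexed by CPU
 * number, and ppe_vaddr holds the virtual address of that CPU's per_proc.
 *
 *	struct per_proc_info *owner_pp = PerProcTable[cpu].ppe_vaddr;
 *
 * The FPUowner word inside that per_proc is then inspected, and conditionally
 * cleared, by the reservation (lwarx) loop that follows.
 */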
fpuinvothr: lwarx r23,r24,r22 ; Get the owner
lwz r24,FPUsave(r26) ; (TEST/DEBUG) Get the first savearea
mr. r23,r23 ; (TEST/DEBUG) Should be level 0
beq++ fpulvl0 ; (TEST/DEBUG) Yes...
- BREAKPOINT_TRAP ; (TEST/DEBUG)
+
+ lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
+ sc ; (TEST/DEBUG) System ABEND
fpulvl0: mr. r24,r24 ; (TEST/DEBUG) Any context?
beq fpunusrstt ; (TEST/DEBUG) No...
lwz r21,SAVprev+4(r24) ; (TEST/DEBUG) Get previous pointer
mr. r23,r23 ; (TEST/DEBUG) Is this our user context?
beq++ fpulvl0b ; (TEST/DEBUG) Yes...
- BREAKPOINT_TRAP ; (TEST/DEBUG)
+
+ lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
+ sc ; (TEST/DEBUG) System ABEND
fpulvl0b: mr. r21,r21 ; (TEST/DEBUG) Is there a forward chain?
beq++ fpunusrstt ; (TEST/DEBUG) Nope...
- BREAKPOINT_TRAP ; (TEST/DEBUG)
+
+ lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
+ sc ; (TEST/DEBUG) System ABEND
fpunusrstt: ; (TEST/DEBUG)
#endif
beq++ fpuena ; Nope...
lwz r25,SAVlevel(r24) ; Get the level of savearea
lwz r0,SAVprev+4(r24) ; Get the previous
+
cmplw r30,r25 ; Is savearea for the level we are launching?
bne++ fpuena ; No, just go enable...
eieio ; Make sure this gets out before owner clear
- lis r23,hi16(EXT(per_proc_info)) ; Set base per_proc
- slwi r22,r22,12 ; Find offset to the owner per_proc
- ori r23,r23,lo16(EXT(per_proc_info)) ; Set base per_proc
- li r24,VMXowner ; Displacement to VMX owner
- add r22,r23,r22 ; Point to the owner per_proc
+ lis r23,hi16(EXT(PerProcTable)) ; Set base PerProcTable
+ slwi r22,r22,4 ; Find offset to the owner per_proc_entry
+ ori r23,r23,lo16(EXT(PerProcTable)) ; Set base PerProcTable
+ li r24,VMXowner ; Displacement to vector owner
+ add r22,r23,r22 ; Point to the owner per_proc_entry
+ lwz r22,ppe_vaddr(r22) ; Point to the owner per_proc
vmxinvothr: lwarx r23,r24,r22 ; Get the owner
lwz r3,SACvrswap+4(r3) ; Get the virtual to real conversion (bottom)
stw r8,SAVprev(r22) ; Link the old in (top)
stw r9,SAVprev+4(r22) ; Link the old in (bottom)
- xor r3,r24,r3 ; Convert to physical
+ xor r3,r22,r3 ; Convert to physical
stw r2,quickfret(r31) ; Set the first in quickfret list (top)
stw r3,quickfret+4(r31) ; Set the first in quickfret list (bottom)
vmxena: oris r29,r29,hi16(MASK(MSR_VEC)) ; Enable facility
-setena: lwz r18,cioSpace(r28) ; Get the space ID in case we are launching user
+setena: lwz r18,umwSpace(r28) ; Get the space ID in case we are launching user
rlwinm. r0,r29,0,MSR_PR_BIT,MSR_PR_BIT ; Are we about to launch user state?
li r0,0 ; Get set to release quickfret holdoff
crmove cr7_eq,cr0_eq ; Remember if we are going to user state
rlwinm r20,r29,(((31-vectorCngbit)+(MSR_VEC_BIT+1))&31),vectorCngbit,vectorCngbit ; Set flag if we enabled vector
stw r29,savesrr1+4(r27) ; Turn facility on or off
stw r0,holdQFret(r31) ; Release quickfret
- oris r18,r18,hi16(cioSwitchAway) ; Set the switch-away bit in case we go to user
+ oris r18,r18,hi16(umwSwitchAway) ; Set the switch-away bit in case we go to user
beq setenaa ; Neither float nor vector turned on....
mtdec r13 ; Set our value
-chkifuser: beq-- cr7,chkenax ; Skip this if we are going to kernel...
- stw r18,cioSpace(r28) ; Half-invalidate to force MapUserAddressSpace to reload SRs
+chkifuser: bl EXT(mach_absolute_time)
+ lwz r5,ACT_PER_PROC(r28)
+ addi r6,r5,PP_PROCESSOR
+ lwz r5,KERNEL_TIMER(r6)
+ lwz r29,CURRENT_STATE(r6)
+ beq-- cr7,chkifuser1 ; Skip this if we are going to kernel...
+ stw r18,umwSpace(r28) ; Half-invalidate to force MapUserAddressWindow to reload SRs
+ addi r5,r28,USER_TIMER
+ addi r29,r6,USER_STATE
+
+chkifuser1: bl EXT(thread_timer_event)
+ mr r5,r29
+ bl EXT(state_event)
chkenax:
-
#if DEBUG
lwz r20,SAVact(r27) ; (TEST/DEBUG) Make sure our restore
- lwz r21,PP_ACTIVE_THREAD(r31) ; (TEST/DEBUG) with the current act.
+ mfsprg r21, 1 ; (TEST/DEBUG) with the current act.
cmpwi r21,0 ; (TEST/DEBUG)
- beq- yeswereok ; (TEST/DEBUG)
- lwz r21,THREAD_TOP_ACT(r21) ; (TEST/DEBUG)
+ beq-- yeswereok ; (TEST/DEBUG)
cmplw r21,r20 ; (TEST/DEBUG)
- beq+ yeswereok ; (TEST/DEBUG)
+ beq++ yeswereok ; (TEST/DEBUG)
lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
/*
* void cthread_set_self(cproc_t p)
*
- * set's thread state "user_value"
+ * Sets thread state "user_value". In practice this is the thread-local data pointer (TLDP),
+ * though we do not interpret it. This call is mostly used by 32-bit tasks, but we save all 64 bits
+ * in case a 64-bit task wants to use this facility. They normally do not, because the 64-bit
+ * ABI reserves r13 for the TLDP.
*
* This op is invoked as follows:
* li r0, CthreadSetSelfNumber // load the fast-trap number
* sc // invoke fast-trap
* blr
- *
*/
CthreadSetSelfNumber:
-
- lwz r5,saver3+4(r4) /* Retrieve the self number */
- stw r5,CTHREAD_SELF(r13) /* Remember it */
- stw r5,UAW(r25) /* Prime the per_proc_info with it */
+ lwz r3,saver3+0(r4) /* get the TLDP passed in r3 */
+ lwz r5,saver3+4(r4) /* (all 64 bits, in case this is a 64-bit task) */
+ stw r3,CTHREAD_SELF+0(r13) /* Remember it in the activation... */
+ stw r5,CTHREAD_SELF+4(r13)
+ stw r3,UAW+0(r25) /* ...and in the per-proc */
+ stw r5,UAW+4(r25)
.globl EXT(fastexit)
.globl EXT(retFromVM)
LEXT(retFromVM)
- mfsprg r10,0 ; Restore the per_proc info
+ mfsprg r10,1 ; Get the current activation
+ lwz r10,ACT_PER_PROC(r10) ; Get the per_proc block
mr r8,r3 ; Get the activation
lwz r4,SAVprev+4(r30) ; Pick up the previous savearea
mr r3,r30 ; Put savearea in proper register for common code
lwz r11,SAVflags(r30) ; Get the flags of the current savearea
rlwinm r11,r11,0,15,13 ; Clear the syscall flag
- lwz r1,ACT_THREAD(r8) ; and the active thread
+ mr r1,r8
stw r11,SAVflags(r3) ; Save back the flags (with reset stack cleared)
stw r4,ACT_MACT_PCB(r8) ; Point to the previous savearea (or 0 if none)
LEXT(chandler) ; Choke handler
li r31,0 ; Get a 0
- mfsprg r25,0 ; Get the per_proc
+ mfsprg r25,1 ; Get the current activation
+ lwz r25,ACT_PER_PROC(r25) ; Get the per_proc block
stw r31,traceMask(0) ; Force tracing off right now
lwz r23,0(r22) ; (TEST/DEBUG)
mr. r23,r23 ; (TEST/DEBUG)
beqlr- ; (TEST/DEBUG)
- mfsprg r20,0 ; (TEST/DEBUG)
+ mfsprg r20,1 ; Get the current activation
+ lwz r20,ACT_PER_PROC(r20) ; Get the per_proc block
lwz r21,pfAvailable(r20) ; (TEST/DEBUG)
mr. r21,r21 ; (TEST/DEBUG)
bnelr+ ; (TEST/DEBUG)
#endif
#if 0
+ ;; This code is broken and migration will make the matter even worse
;
; Make sure that all savearea chains have the right type on them
;