/*
- * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
#define VERIFYSAVE 0
#define FPVECDBG 0
+#define FPFLOOD 0
#define INSTRUMENT 0
/*
#endif /* DEBUG */
mr r30,r4
- lwz r3,SAVtime+4(r4)
- addi r4,r13,SYSTEM_TIMER
- bl EXT(timer_event)
+ lwz r3,SAVtime(r4) ; Get the high half of the exception time stamp
+ lwz r4,SAVtime+4(r4) ; Get the low half of the exception time stamp
+ addi r5,r13,SYSTEM_TIMER ; Point to the system timer in the activation
+ bl EXT(thread_timer_event) ; Switch the thread timer at that time
+ addi r5,r25,SYSTEM_STATE ; Point to the system state timer
+ bl EXT(state_event) ; Switch state accounting to system
+
+ lwz r7,ACT_TASK(r13) ; Get our task
+ lwz r8,TASK_VTIMERS(r7) ; Are any vtimers armed on the task?
+ cmpwi r8,0
+ beq++ 0f ; No vtimers, skip the AST...
+
+ lwz r7,ACT_PER_PROC(r13) ; Get the per_proc block
+ li r4,AST_BSD ; Get the BSD AST flag
+ lwz r8,PP_PENDING_AST(r7) ; Merge AST_BSD into the pending ASTs
+ or r8,r8,r4
+ stw r8,PP_PENDING_AST(r7) ; Save the updated pending ASTs
+ addi r3,r13,ACT_AST ; Point to the AST field in the activation
+ bl EXT(hw_atomic_or) ; Atomically set AST_BSD on the thread
+0:
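;
; Net effect of the block added above (and of the matching block on the
; syscall entry path below): the thread timer is switched to the system
; timer and state accounting to system state using the full 64-bit savearea
; time stamp (the old code handed only the low word to timer_event), and if
; the task has BSD vtimers armed an AST_BSD is posted so the vtimer gets
; serviced. A rough C sketch of the vtimer check, with field names assumed
; from the TASK_VTIMERS/PP_PENDING_AST/ACT_AST offsets rather than quoted
; from the kernel headers:
;
;     if (thread->task->vtimers != 0) {
;         per_proc->pending_ast |= AST_BSD;        /* mark it on the processor */
;         hw_atomic_or(&thread->ast, AST_BSD);     /* and atomically on the thread */
;     }
;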
/* call trap handler proper, with
* ARG0 = type
.L_call_trap:
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
+
bl EXT(trap)
lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
stw r0,ACT_MACT_KSP(r13) ; Mark stack as busy with 0 val
stw r15,FM_BACKPTR(r1) ; Link stack frame backwards
- lwz r3,SAVtime+4(r30)
- addi r4,r13,SYSTEM_TIMER
- bl EXT(timer_event)
+ lwz r3,SAVtime(r30) ; Get the high half of the syscall time stamp
+ lwz r4,SAVtime+4(r30) ; Get the low half of the syscall time stamp
+ addi r5,r13,SYSTEM_TIMER ; Point to the system timer in the activation
+ bl EXT(thread_timer_event) ; Switch the thread timer at that time
+ addi r5,r25,SYSTEM_STATE ; Point to the system state timer
+ bl EXT(state_event) ; Switch state accounting to system
+
+ lwz r7,ACT_TASK(r13) ; Get our task
+ lwz r8,TASK_VTIMERS(r7) ; Are any vtimers armed on the task?
+ cmpwi r8,0
+ beq++ 0f ; No vtimers, skip the AST...
+
+ lwz r7,ACT_PER_PROC(r13) ; Get the per_proc block
+ li r4,AST_BSD ; Get the BSD AST flag
+ lwz r8,PP_PENDING_AST(r7) ; Merge AST_BSD into the pending ASTs
+ or r8,r8,r4
+ stw r8,PP_PENDING_AST(r7) ; Save the updated pending ASTs
+ addi r3,r13,ACT_AST ; Point to the AST field in the activation
+ bl EXT(hw_atomic_or) ; Atomically set AST_BSD on the thread
+0:
#if DEBUG
/* If debugging, we need two frames, the first being a dummy
mr r4,r13 ; current activation
addi r7,r7,1 ; Bump it
stw r7,TASK_SYSCALLS_UNIX(r8) ; Save it
+
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
+
bl EXT(unix_syscall) ; Check out unix...
.L_call_server_syscall_exception:
; and the savearea/pcb as the first parameter.
; It is up to the callee to enable interruptions if
; they should be. We are in a state here where
-; both interrupts and preemption is ok, but because we could
+; both interrupts and preemption are ok, but because we could
; be calling diagnostic code we will not enable.
;
; Also, the callee is responsible for finding any parameters
*/
ksystrace:
- mr r4,r30 ; Pass in saved state
+ mr r4,r30 ; Pass in saved state
bl EXT(syscall_trace)
- cmplw r31,r29 ; Is this syscall in the table?
- add r31,r27,r28 ; Point right to the syscall table entry
+ cmplw r31,r29 ; Is this syscall in the table?
+ add r31,r27,r28 ; Point right to the syscall table entry
bge- .L_call_server_syscall_exception ; The syscall number is invalid
.L_ksystrace_munge:
cmplwi r0,0 ; do we have a munger to call?
- mtctr r0 ; Set the function call address
- addi r3,r30,saver3 ; Pointer to args from save area
- addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
+ mtctr r0 ; Set the function call address
+ addi r3,r30,saver3 ; Pointer to args from save area
+ addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
beq-- .L_ksystrace_trapcall ; just make the trap call
- bctrl ; Call the munge function
+ bctrl ; Call the munge function
.L_ksystrace_trapcall:
- lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
- mtctr r0 ; Set the function call address
- addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
+ lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
+ mtctr r0 ; Set the function call address
+ addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
bctrl
- mr r4,r30 ; Pass in the savearea
- bl EXT(syscall_trace_end) ; Trace the exit of the system call
+ mr r4,r30 ; Pass in the savearea
+ bl EXT(syscall_trace_end) ; Trace the exit of the system call
b .L_mach_return
; Call a function that can print out our syscall info
; Note that we don't care about any volatiles yet
;
- lwz r10,ACT_TASK(r13) ; Get our task
+ lwz r10,ACT_TASK(r13) ; Get our task
lwz r0,saver0+4(r30)
- lis r8,hi16(EXT(kdebug_enable)) ; Get top of kdebug_enable
+ lis r8,hi16(EXT(kdebug_enable)) ; Get top of kdebug_enable
lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
ori r8,r8,lo16(EXT(kdebug_enable)) ; Get bottom of kdebug_enable
- lwz r8,0(r8) ; Get kdebug_enable
+ lwz r8,0(r8) ; Get kdebug_enable
lwz r7,TASK_SYSCALLS_MACH(r10) ; Get the current count
neg r31,r0 ; Make this positive
- mr r3,r31 ; save it
- slwi r27,r3,4 ; multiply by 16
- slwi r3,r3,2 ; and the original by 4
+ mr r3,r31 ; save it
+ slwi r27,r3,4 ; multiply by 16
+ slwi r3,r3,2 ; and the original by 4
ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
- add r27,r27,r3 ; for a total of 20x (5 words/entry)
- addi r7,r7,1 ; Bump TASK_SYSCALLS_MACH count
- cmplwi r8,0 ; Is kdebug_enable non-zero
- stw r7,TASK_SYSCALLS_MACH(r10) ; Save count
- bne-- ksystrace ; yes, tracing enabled
+ add r27,r27,r3 ; for a total of 20x (5 words/entry)
+ addi r7,r7,1 ; Bump TASK_SYSCALLS_MACH count
+ cmplwi r8,0 ; Is kdebug_enable non-zero
+ stw r7,TASK_SYSCALLS_MACH(r10) ; Save count
+ bne-- ksystrace ; yes, tracing enabled
- cmplwi r31,MACH_TRAP_TABLE_COUNT ; Is this syscall in the table?
- add r31,r27,r28 ; Point right to the syscall table entry
+ cmplwi r31,MACH_TRAP_TABLE_COUNT ; Is this syscall in the table?
+ add r31,r27,r28 ; Point right to the syscall table entry
bge-- .L_call_server_syscall_exception ; The syscall number is invalid
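;
; The shifts above build the table offset as (n << 4) + (n << 2) = 16n + 4n
; = 20n bytes, i.e. five 4-byte words per mach_trap_table entry, so once the
; table base is added r31 points directly at the entry for trap number n.
;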
.L_kernel_syscall_munge:
cmplwi r0,0 ; test for null munger
- mtctr r0 ; Set the function call address
+ mtctr r0 ; Set the function call address
addi r3,r30,saver3 ; Pointer to args from save area
addi r4,r1,FM_ARG0+ARG_SIZE ; Pointer for munged args
beq-- .L_kernel_syscall_trapcall ; null munger - skip to trap call
bctrl ; Call the munge function
.L_kernel_syscall_trapcall:
- lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
- mtctr r0 ; Set the function call address
- addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
+ lwz r0,MACH_TRAP_FUNCTION(r31) ; Pick up the function address
+ mtctr r0 ; Set the function call address
+ addi r3,r1,FM_ARG0+ARG_SIZE ; Pointer to munged args
+
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
+
bctrl
*/
.L_mach_return:
- srawi r0,r3,31 ; properly extend the return code
- cmpi cr0,r3,KERN_INVALID_ARGUMENT ; deal with invalid system calls
- mr r31,r16 ; Move the current thread pointer
- stw r0, saver3(r30) ; stash the high part of the return code
- stw r3,saver3+4(r30) ; Stash the low part of the return code
- beq- cr0,.L_mach_invalid_ret ; otherwise fall through into the normal return path
+ srawi r0,r3,31 ; properly extend the return code
+ cmpi cr0,r3,KERN_INVALID_ARGUMENT ; deal with invalid system calls
+ mr r31,r16 ; Move the current thread pointer
+ stw r0, saver3(r30) ; stash the high part of the return code
+ stw r3,saver3+4(r30) ; Stash the low part of the return code
+ beq-- cr0,.L_mach_invalid_ret ; otherwise fall through into the normal return path
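;
; The srawi by 31 replicates the sign bit of the 32-bit return code across a
; full word, so r0 holds 0 for non-negative codes and 0xFFFFFFFF for negative
; ones; storing r0 into saver3 and r3 into saver3+4 yields the correctly
; sign-extended 64-bit return value.
;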
.L_mach_invalid_arg:
* we want to pass the error code back to the caller
*/
lwz r0,saver0+4(r30) ; reload the original syscall number
- neg r28,r0 ; Make this positive
- mr r4,r28 ; save a copy
- slwi r27,r4,4 ; multiply by 16
- slwi r4,r4,2 ; and another 4
+ neg r28,r0 ; Make this positive
+ mr r4,r28 ; save a copy
+ slwi r27,r4,4 ; multiply by 16
+ slwi r4,r4,2 ; and another 4
lis r28,hi16(EXT(mach_trap_table)) ; Get address of table
- add r27,r27,r4 ; for a total of 20x (5 words/entry)
+ add r27,r27,r4 ; for a total of 20x (5 words/entry)
ori r28,r28,lo16(EXT(mach_trap_table)) ; Get address of table
add r28,r27,r28 ; Point right to the syscall table entry
lwz r27,MACH_TRAP_FUNCTION(r28) ; Pick up the function address
mr r31,r3
mr r30,r4
- lwz r3,SAVtime+4(r4)
- addi r4,r13,SYSTEM_TIMER
- bl EXT(timer_event)
+ lwz r3,SAVtime(r4) ; Get the high half of the interrupt time stamp
+ lwz r4,SAVtime+4(r4) ; Get the low half of the interrupt time stamp
+ addi r5,r25,PP_PROCESSOR ; Point to the processor structure
+ lwz r5,KERNEL_TIMER(r5) ; Get the kernel timer
+ bl EXT(thread_timer_event) ; Charge the time up to now to the kernel timer
+ addi r6,r25,PP_PROCESSOR ; Point to the processor structure again
+ lwz r5,CURRENT_STATE(r6) ; Get the current state timer
+ addi r7,r6,USER_STATE ; Get the address of the user state timer
+ cmplw r5,r7 ; Interrupted out of user state?
+ bne 0f ; No, leave state accounting alone...
+ addi r5,r6,SYSTEM_STATE ; Yes, point to the system state timer
+ bl EXT(state_event) ; and switch state accounting to system
+0:
+
+ lwz r7,ACT_TASK(r13) ; Get our task
+ lwz r8,TASK_VTIMERS(r7) ; Are any vtimers armed on the task?
+ cmpwi r8,0
+ beq++ 0f ; No vtimers, skip the AST...
+
+ lwz r7,ACT_PER_PROC(r13) ; Get the per_proc block
+ li r4,AST_BSD ; Get the BSD AST flag
+ lwz r8,PP_PENDING_AST(r7) ; Merge AST_BSD into the pending ASTs
+ or r8,r8,r4
+ stw r8,PP_PENDING_AST(r7) ; Save the updated pending ASTs
+ addi r3,r13,ACT_AST ; Point to the AST field in the activation
+ bl EXT(hw_atomic_or) ; Atomically set AST_BSD on the thread
+0:
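;
; Here the time accumulated up to the interrupt is charged to the processor
; kernel timer, and state accounting is flipped to system state only when the
; processor was still accumulating user state at interrupt time. A rough C
; sketch of that test, with field names assumed from the CURRENT_STATE /
; USER_STATE / SYSTEM_STATE offsets rather than quoted from the headers:
;
;     boolean_t was_user = (processor->current_state == &processor->user_state);
;
; Only when was_user is true does state_event switch accounting over to the
; system state timer; this is followed by the same vtimer/AST_BSD check used
; on the trap entry path.
;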
mr r3,r31
mr r4,r30
lwz r5,savedsisr(r30) ; Get the DSISR
lwz r6,savedar+4(r30) ; Get the DAR
-
- bl EXT(interrupt)
+#if FPFLOOD
+ stfd f31,emfp31(r25) ; (TEST/DEBUG)
+#endif
-/* interrupt() returns a pointer to the saved state in r3
- *
- * Ok, back from C. Disable interrupts while we restore things
- */
- .globl EXT(ihandler_ret)
+ bl EXT(interrupt)
-LEXT(ihandler_ret) ; Marks our return point from debugger entry
+/* interrupt() returns a pointer to the saved state in r3 */
lis r10,hi16(MASK(MSR_VEC)) ; Get the vector enable
mfmsr r0 ; Get our MSR
lwz r10,ACT_PER_PROC(r8) ; Get the per_proc block
lwz r7,SAVflags(r3) ; Pick up the flags
- lwz r9,SAVprev+4(r3) ; Get previous save area
+ lwz r9,SAVprev+4(r3) ; Get previous save area
cmplwi cr1,r8,0 ; Are we still initializing?
lwz r12,savesrr1+4(r3) ; Get the MSR we will load on return
andis. r11,r7,hi16(SAVrststk) ; Is this the first on the stack?
lwz r24,FPUsave(r26) ; (TEST/DEBUG) Get the first savearea
mr. r23,r23 ; (TEST/DEBUG) Should be level 0
beq++ fpulvl0 ; (TEST/DEBUG) Yes...
- BREAKPOINT_TRAP ; (TEST/DEBUG)
+
+ lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
+ sc ; (TEST/DEBUG) System ABEND
fpulvl0: mr. r24,r24 ; (TEST/DEBUG) Any context?
beq fpunusrstt ; (TEST/DEBUG) No...
lwz r21,SAVprev+4(r24) ; (TEST/DEBUG) Get previous pointer
mr. r23,r23 ; (TEST/DEBUG) Is this our user context?
beq++ fpulvl0b ; (TEST/DEBUG) Yes...
- BREAKPOINT_TRAP ; (TEST/DEBUG)
+
+ lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
+ sc ; (TEST/DEBUG) System ABEND
fpulvl0b: mr. r21,r21 ; (TEST/DEBUG) Is there a forward chain?
beq++ fpunusrstt ; (TEST/DEBUG) Nope...
- BREAKPOINT_TRAP ; (TEST/DEBUG)
+
+ lis r0,hi16(Choke) ; (TEST/DEBUG) Choke code
+ ori r0,r0,lo16(Choke) ; (TEST/DEBUG) and the rest
+ sc ; (TEST/DEBUG) System ABEND
fpunusrstt: ; (TEST/DEBUG)
#endif
beq++ fpuena ; Nope...
lwz r25,SAVlevel(r24) ; Get the level of savearea
lwz r0,SAVprev+4(r24) ; Get the previous
+
cmplw r30,r25 ; Is savearea for the level we are launching?
bne++ fpuena ; No, just go enable...
mtdec r13 ; Set our value
-chkifuser: addi r4,r28,SYSTEM_TIMER
- mftb r3
+chkifuser: bl EXT(mach_absolute_time) ; Get the current time
+ lwz r5,ACT_PER_PROC(r28) ; Get the per_proc block
+ addi r6,r5,PP_PROCESSOR ; Point to the processor structure
+ lwz r5,KERNEL_TIMER(r6) ; Assume the kernel timer
+ lwz r29,CURRENT_STATE(r6) ; and the current state
beq-- cr7,chkifuser1 ; Skip this if we are going to kernel...
stw r18,umwSpace(r28) ; Half-invalidate to force MapUserAddressWindow to reload SRs
- addi r4,r28,USER_TIMER
+ addi r5,r28,USER_TIMER ; Returning to user: use the user timer
+ addi r29,r6,USER_STATE ; and the user state
-chkifuser1: bl EXT(timer_event)
+chkifuser1: bl EXT(thread_timer_event) ; Switch the thread timer
+ mr r5,r29 ; Pass the state timer chosen above
+ bl EXT(state_event) ; Switch state accounting
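;
; On the way back out, the current time now comes from mach_absolute_time()
; rather than a raw mftb, and the thread timer and state accounting are
; switched to the user timer and user state when returning to user mode, or
; kept on the kernel timer and current state when staying in the kernel.
; Sketch with placeholder names (returning_to_user is hypothetical shorthand
; for the cr7 test above; the field names are assumed from the offsets used):
;
;     uint64_t now = mach_absolute_time();
;     timer_t  t   = returning_to_user ? &thread->user_timer
;                                      : processor->kernel_timer;
;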
chkenax: