/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1992-1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
#include <platforms.h>

#include <mach/exception_types.h>
#include <mach/i386/thread_status.h>
#include <mach/i386/fp_reg.h>

#include <kern/mach_param.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/assert.h>

#include <i386/thread.h>
#include <i386/fpu.h>
#include <i386/trap.h>
#include <architecture/i386/pio.h>
#include <i386/cpuid.h>
#include <i386/misc_protos.h>
#include <i386/proc_reg.h>
int         fp_kind = FP_NO;    /* not inited */
zone_t      ifps_zone;          /* zone for FPU save area */
#define ALIGNED(addr,size)  (((unsigned)(addr)&((size)-1))==0)

/* Forward */

extern void fpinit(void);
extern void fp_save(thread_t thr_act);
extern void fp_load(thread_t thr_act);
static void configure_mxcsr_capability_mask(struct x86_fpsave_state *ifps);

struct x86_fpsave_state starting_fp_state;

/* Global MXCSR capability bitmask */
static unsigned int mxcsr_capability_mask;
/*
 * Determine the MXCSR capability mask, which allows us to mask off any
 * potentially unsafe "reserved" bits before restoring the FPU context.
 * *Not* per-cpu, assumes symmetry.
 */
static void
configure_mxcsr_capability_mask(struct x86_fpsave_state *ifps)
{
    /* FXSAVE requires a 16 byte aligned store */
    assert(ALIGNED(ifps, 16));
    /* Clear, to prepare for the diagnostic FXSAVE */
    bzero(ifps, sizeof(*ifps));
    /* Disable FPU/SSE Device Not Available exceptions */
    clear_ts();

    __asm__ volatile("fxsave %0" : "=m" (ifps->fx_save_state));
    mxcsr_capability_mask = ifps->fx_save_state.fx_MXCSR_MASK;

    /* Set default mask value if necessary */
    if (mxcsr_capability_mask == 0)
        mxcsr_capability_mask = 0xffbf;

    /* Re-enable FPU/SSE DNA exceptions */
    set_ts();
}
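/*
 * The mask computed above is applied to any MXCSR value that comes from
 * outside the kernel (see fpu_set_fxstate and fpu_dup_fxstate below), so
 * an FXRSTOR of user-supplied state cannot set reserved bits and fault.
 */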
/*
 * Allocate and initialize FP state for current thread.
 */
static struct x86_fpsave_state *
fp_state_alloc(void)
{
    struct x86_fpsave_state *ifps;

    ifps = (struct x86_fpsave_state *)zalloc(ifps_zone);
    assert(ALIGNED(ifps, 16));
    bzero((char *)ifps, sizeof *ifps);

    return ifps;
}

static void
fp_state_free(struct x86_fpsave_state *ifps)
{
    zfree(ifps_zone, ifps);
}
/*
 * Look for FPU and initialize it.
 * Called on each CPU.
 */
void
init_fpu(void)
{
    unsigned short status, control;

    /*
     * Check for FPU by initializing it,
     * then trying to read the correct bit patterns from
     * the control and status registers.
     */
    set_cr0((get_cr0() & ~(CR0_EM|CR0_TS)) | CR0_NE);   /* allow use of FPU */

    fninit();
    status = fnstsw();
    fnstcw(&control);

    if ((status & 0xff) == 0 &&
        (control & 0x103f) == 0x3f) {
        /* Use FPU save/restore instructions if available */
        if (cpuid_features() & CPUID_FEATURE_FXSR) {
            fp_kind = FP_FXSR;
            set_cr4(get_cr4() | CR4_FXS);
            printf("Enabling XMM register save/restore");
            /* And allow SIMD instructions if present */
            if (cpuid_features() & CPUID_FEATURE_SSE) {
                printf(" and SSE/SSE2");
                set_cr4(get_cr4() | CR4_XMM);
            }
            printf(" opcodes\n");
        } else
            panic("fpu is not FP_FXSR");

        /*
         * Initialize FPU to normal starting
         * position so that we can take a snapshot
         * of that state and store it for future use
         * when we're asked for the FPU state of a
         * thread, and it hasn't initiated any yet.
         */
        fpinit();
        fxsave(&starting_fp_state.fx_save_state);

        /*
         * Trap wait instructions.  Turn off FPU for now.
         */
        set_cr0(get_cr0() | CR0_TS | CR0_MP);
    } else {
        /*
         * No FPU.
         */
        panic("fpu is not FP_FXSR");
    }
}
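/*
 * Note: with CR0.TS left set above, the first floating-point or SSE
 * instruction a thread executes raises a Device Not Available fault;
 * the trap code then calls fpnoextflt() below, which loads that
 * thread's saved context on demand.
 */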
/*
 * Initialize FP handling.
 */
void
fpu_module_init(void)
{
    struct x86_fpsave_state *new_ifps;

    ifps_zone = zinit(sizeof(struct x86_fpsave_state),
                      THREAD_MAX * sizeof(struct x86_fpsave_state),
                      THREAD_CHUNK * sizeof(struct x86_fpsave_state),
                      "x86 fpsave state");
    new_ifps = fp_state_alloc();
    /* Determine MXCSR reserved bits */
    configure_mxcsr_capability_mask(new_ifps);
    fp_state_free(new_ifps);
}
/*
 * Free a FPU save area.
 * Called only when thread terminating - no locking necessary.
 */
void
fpu_free(struct x86_fpsave_state *fps)
{
    fp_state_free(fps);
}
/*
 * Set the floating-point state for a thread based
 * on the FXSave formatted data. This is basically
 * the same as fpu_set_state except it uses the
 * expanded data structure.
 * If the thread is not the current thread, it is
 * not running (held).  Locking needed against
 * concurrent fpu_set_state or fpu_get_state.
 */
kern_return_t
fpu_set_fxstate(
    thread_t        thr_act,
    thread_state_t  tstate)
{
    struct x86_fpsave_state *ifps;
    struct x86_fpsave_state *new_ifps;
    x86_float_state64_t     *state;
    pcb_t                   pcb;

    if (fp_kind == FP_NO)
        return KERN_FAILURE;

    state = (x86_float_state64_t *)tstate;

    assert(thr_act != THREAD_NULL);
    pcb = thr_act->machine.pcb;

    if (state == NULL) {
        /*
         * new FPU state is 'invalid'.
         * Deallocate the fp state if it exists.
         */
        simple_lock(&pcb->lock);

        ifps = pcb->ifps;
        pcb->ifps = 0;

        simple_unlock(&pcb->lock);

        if (ifps != 0)
            fp_state_free(ifps);
    } else {
        /*
         * Valid state.  Allocate the fp state if there is none.
         */
        new_ifps = 0;
    Retry:
        simple_lock(&pcb->lock);

        ifps = pcb->ifps;
        if (ifps == 0) {
            if (new_ifps == 0) {
                simple_unlock(&pcb->lock);
                new_ifps = fp_state_alloc();
                goto Retry;
            }
            ifps = new_ifps;
            new_ifps = 0;
            pcb->ifps = ifps;
        }
        /*
         * now copy over the new data.
         */
        bcopy((char *)&state->fpu_fcw,
              (char *)&ifps->fx_save_state, sizeof(struct x86_fx_save));

        /* XXX The layout of the state set from user-space may need to be
         * validated for consistency.
         */
        ifps->fp_save_layout = thread_is_64bit(thr_act) ? FXSAVE64 : FXSAVE32;

        /*
         * Clear any reserved bits in the MXCSR to prevent a GPF
         * when issuing an FXRSTOR.
         */
        ifps->fx_save_state.fx_MXCSR &= mxcsr_capability_mask;

        simple_unlock(&pcb->lock);

        if (new_ifps != 0)
            fp_state_free(new_ifps);
    }
    return KERN_SUCCESS;
}
/*
 * Get the floating-point state for a thread.
 * If the thread is not the current thread, it is
 * not running (held).  Locking needed against
 * concurrent fpu_set_state or fpu_get_state.
 */
kern_return_t
fpu_get_fxstate(
    thread_t        thr_act,
    thread_state_t  tstate)
{
    struct x86_fpsave_state *ifps;
    x86_float_state64_t     *state;
    kern_return_t           ret = KERN_FAILURE;
    pcb_t                   pcb;

    if (fp_kind == FP_NO)
        return KERN_FAILURE;

    state = (x86_float_state64_t *)tstate;

    assert(thr_act != THREAD_NULL);
    pcb = thr_act->machine.pcb;

    simple_lock(&pcb->lock);

    ifps = pcb->ifps;
    if (ifps == 0) {
        /*
         * No valid floating-point state.
         */
        bcopy((char *)&starting_fp_state.fx_save_state,
              (char *)&state->fpu_fcw, sizeof(struct x86_fx_save));

        simple_unlock(&pcb->lock);

        return KERN_SUCCESS;
    }
    /*
     * Make sure we`ve got the latest fp state info.
     * If the live fpu state belongs to our target...
     */
    if (thr_act == current_thread()) {
        boolean_t intr;

        intr = ml_set_interrupts_enabled(FALSE);

        clear_ts();
        fp_save(thr_act);
        clear_fpu();

        (void)ml_set_interrupts_enabled(intr);
    }
    if (ifps->fp_valid) {
        bcopy((char *)&ifps->fx_save_state,
              (char *)&state->fpu_fcw, sizeof(struct x86_fx_save));
        ret = KERN_SUCCESS;
    }
    simple_unlock(&pcb->lock);

    return ret;
}
/*
 * the child thread is 'stopped' with the thread
 * mutex held and is currently not known by anyone
 * so no way for fpu state to get manipulated by an
 * outside agency -> no need for pcb lock
 */
void
fpu_dup_fxstate(
    thread_t    parent,
    thread_t    child)
{
    struct x86_fpsave_state *new_ifps = NULL;
    boolean_t   intr;
    pcb_t       ppcb;

    ppcb = parent->machine.pcb;

    if (ppcb->ifps == NULL)
        return;

    if (child->machine.pcb->ifps)
        panic("fpu_dup_fxstate: child's ifps non-null");

    new_ifps = fp_state_alloc();

    simple_lock(&ppcb->lock);

    if (ppcb->ifps != NULL) {
        /*
         * Make sure we`ve got the latest fp state info.
         */
        intr = ml_set_interrupts_enabled(FALSE);

        clear_ts();
        fp_save(parent);
        clear_fpu();

        (void)ml_set_interrupts_enabled(intr);

        if (ppcb->ifps->fp_valid) {
            child->machine.pcb->ifps = new_ifps;

            bcopy((char *)&(ppcb->ifps->fx_save_state),
                  (char *)&(child->machine.pcb->ifps->fx_save_state),
                  sizeof(struct x86_fx_save));

            new_ifps->fp_save_layout = ppcb->ifps->fp_save_layout;
            /*
             * Clear any reserved bits in the MXCSR to prevent a GPF
             * when issuing an FXRSTOR.
             */
            new_ifps->fx_save_state.fx_MXCSR &= mxcsr_capability_mask;
            new_ifps = NULL;
        }
    }
    simple_unlock(&ppcb->lock);

    if (new_ifps != NULL)
        fp_state_free(new_ifps);
}
/*
 * Initialize FPU.
 */
void
fpinit(void)
{
    unsigned short control;

    clear_ts();
    fninit();
    fnstcw(&control);
    control &= ~(FPC_PC|FPC_RC);    /* Clear precision & rounding control */
    control |= (FPC_PC_64 |         /* Set precision */
                FPC_RC_RN |         /* round-to-nearest */
                FPC_ZE |            /* Suppress zero-divide */
                FPC_OE |            /*  and overflow */
                FPC_UE |            /* underflow */
                FPC_IE |            /* Allow NaNQs and +-INF */
                FPC_DE |            /* Allow denorms as operands */
                FPC_PE);            /* No trap for precision loss */
    fldcw(control);

    /* Initialize SSE/SSE2 */
    __builtin_ia32_ldmxcsr(0x1f80);
}
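/*
 * The MXCSR value 0x1f80 loaded in fpinit() is the architectural reset
 * default: all SIMD exception mask bits set, round-to-nearest, and
 * flush-to-zero/DAZ disabled.
 */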
/*
 * Coprocessor not present.
 */
void
fpnoextflt(void)
{
    boolean_t intr;

    intr = ml_set_interrupts_enabled(FALSE);

    clear_ts();     /* Enable FPU use */

    if (get_interrupt_level()) {
        /*
         * Save current coprocessor context if valid
         * Initialize coprocessor live context
         */
        fp_save(current_thread());
        fpinit();
    } else {
        /*
         * Load this thread`s state into coprocessor live context.
         */
        fp_load(current_thread());
    }
    (void)ml_set_interrupts_enabled(intr);
}
/*
 * FPU overran end of segment.
 * Re-initialize FPU.  Floating point state is not valid.
 */
void
fpextovrflt(void)
{
    thread_t    thr_act = current_thread();
    pcb_t       pcb;
    struct x86_fpsave_state *ifps;
    boolean_t   intr;

    intr = ml_set_interrupts_enabled(FALSE);

    if (get_interrupt_level())
        panic("FPU segment overrun exception at interrupt context\n");
    if (current_task() == kernel_task)
        panic("FPU segment overrun exception in kernel thread context\n");

    /*
     * This is a non-recoverable error.
     * Invalidate the thread`s FPU state.
     */
    pcb = thr_act->machine.pcb;
    simple_lock(&pcb->lock);
    ifps = pcb->ifps;
    pcb->ifps = 0;
    simple_unlock(&pcb->lock);

    /*
     * Re-initialize the FPU.
     */
    clear_ts();
    fninit();

    /*
     * And disable access.
     */
    clear_fpu();

    (void)ml_set_interrupts_enabled(intr);

    if (ifps)
        zfree(ifps_zone, ifps);

    /*
     * Raise exception.
     */
    i386_exception(EXC_BAD_ACCESS, VM_PROT_READ|VM_PROT_EXECUTE, 0);
    /*NOTREACHED*/
}
/*
 * FPU error. Called by AST.
 */
void
fpexterrflt(void)
{
    thread_t    thr_act = current_thread();
    struct x86_fpsave_state *ifps = thr_act->machine.pcb->ifps;
    boolean_t   intr;

    intr = ml_set_interrupts_enabled(FALSE);

    if (get_interrupt_level())
        panic("FPU error exception at interrupt context\n");
    if (current_task() == kernel_task)
        panic("FPU error exception in kernel thread context\n");

    /*
     * Save the FPU state and turn off the FPU.
     */
    fp_save(thr_act);

    (void)ml_set_interrupts_enabled(intr);

    /*
     * Raise FPU exception.
     * Locking not needed on pcb->ifps,
     * since thread is running.
     */
    i386_exception(EXC_ARITHMETIC,
                   EXC_I386_EXTERR,
                   ifps->fx_save_state.fx_status);
    /*NOTREACHED*/
}
/*
 * Locking not needed:
 * .    if called from fpu_get_state, pcb already locked.
 * .    if called from fpnoextflt or fp_intr, we are single-cpu
 * .    otherwise, thread is running.
 * N.B.: Must be called with interrupts disabled
 */
void
fp_save(
    thread_t    thr_act)
{
    pcb_t pcb = thr_act->machine.pcb;
    struct x86_fpsave_state *ifps = pcb->ifps;

    if (ifps != 0 && !ifps->fp_valid) {
        assert((get_cr0() & CR0_TS) == 0);
        /* registers are in FPU */
        ifps->fp_valid = TRUE;

        if (!thread_is_64bit(thr_act)) {
            /* save the compatibility/legacy mode XMM+x87 state */
            fxsave(&ifps->fx_save_state);
            ifps->fp_save_layout = FXSAVE32;
        } else {
            fxsave64(&ifps->fx_save_state);
            ifps->fp_save_layout = FXSAVE64;
        }
    }
}
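/*
 * fxsave64 issues FXSAVE with a REX.W prefix so the full 64-bit
 * instruction and data pointers are captured; fp_load() must then use
 * the matching fxrstor64 form, which is why fp_save_layout is recorded.
 */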
/*
 * Restore FPU state from PCB.
 *
 * Locking not needed; always called on the current thread.
 */
void
fp_load(
    thread_t    thr_act)
{
    pcb_t pcb = thr_act->machine.pcb;
    struct x86_fpsave_state *ifps;

    ifps = pcb->ifps;
    if (ifps == 0 || ifps->fp_valid == FALSE) {
        if (ifps == 0) {
            /* FIXME: This allocation mechanism should be revised
             * for scenarios where interrupts are disabled.
             */
            ifps = fp_state_alloc();
            pcb->ifps = ifps;
        }
        fpinit();
    } else {
        assert(ifps->fp_save_layout == FXSAVE32 || ifps->fp_save_layout == FXSAVE64);
        if (ifps->fp_save_layout == FXSAVE32) {
            /* Restore the compatibility/legacy mode XMM+x87 state */
            fxrstor(&ifps->fx_save_state);
        } else if (ifps->fp_save_layout == FXSAVE64) {
            fxrstor64(&ifps->fx_save_state);
        }
    }
    ifps->fp_valid = FALSE;     /* in FPU */
}
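/*
 * Marking fp_valid FALSE records that the live FPU registers now hold
 * the authoritative copy of this thread's state; a later fp_save() will
 * write them back into the save area.
 */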
/*
 * Flush the current act's state, if needed
 * (used by thread_terminate_self to ensure fp faults
 * aren't satisfied by overly general trap code in the
 * context of the reaper thread)
 */
void
fpflush(__unused thread_t thr_act)
{
    /* not needed on MP x86s; fp not lazily evaluated */
}
/*
 * SSE arithmetic exception handling code.
 * Basically the same as the x87 exception handler with a different subtype.
 */
void
fpSSEexterrflt(void)
{
    thread_t    thr_act = current_thread();
    struct x86_fpsave_state *ifps = thr_act->machine.pcb->ifps;
    boolean_t   intr;

    intr = ml_set_interrupts_enabled(FALSE);

    if (get_interrupt_level())
        panic("SSE exception at interrupt context\n");
    if (current_task() == kernel_task)
        panic("SSE exception in kernel thread context\n");

    /*
     * Save the FPU state and turn off the FPU.
     */
    fp_save(thr_act);

    (void)ml_set_interrupts_enabled(intr);

    /*
     * Raise FPU exception.
     * Locking not needed on pcb->ifps,
     * since thread is running.
     */
    assert(ifps->fp_save_layout == FXSAVE32 || ifps->fp_save_layout == FXSAVE64);
    i386_exception(EXC_ARITHMETIC,
                   EXC_I386_SSEEXTERR,
                   ifps->fx_save_state.fx_status);
    /*NOTREACHED*/
}
void
fp_setvalid(boolean_t value)
{
    thread_t    thr_act = current_thread();
    struct x86_fpsave_state *ifps = thr_act->machine.pcb->ifps;

    if (ifps)
        ifps->fp_valid = value;
}