+static void configure_mxcsr_capability_mask(struct x86_fpsave_state *ifps);
+
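+/*
+ * Snapshot of the FPU's initial state. (Assumption: captured once at
+ * boot, outside this hunk, and used as the template when a thread's
+ * FP context is first initialized.)
+ */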
+struct x86_fpsave_state starting_fp_state;
+
+/* Global MXCSR capability bitmask */
+static unsigned int mxcsr_capability_mask;
+
+/*
+ * Determine the MXCSR capability mask, which allows us to mask off any
+ * potentially unsafe "reserved" bits before restoring the FPU context.
+ * *Not* per-cpu, assumes symmetry.
+ */
+static void
+configure_mxcsr_capability_mask(struct x86_fpsave_state *ifps)
+{
+ /* FXSAVE requires a 16-byte-aligned store */
+ assert(ALIGNED(ifps,16));
+ /* Clear, to prepare for the diagnostic FXSAVE */
+ bzero(ifps, sizeof(*ifps));
+ /* Disable FPU/SSE Device Not Available exceptions */
+ clear_ts();
+
+ __asm__ volatile("fxsave %0" : "=m" (ifps->fx_save_state));
+ mxcsr_capability_mask = ifps->fx_save_state.fx_MXCSR_MASK;
+
+ /*
+  * A zero mask means this CPU does not report MXCSR_MASK; fall back
+  * to the Intel-documented default mask of 0xFFBF.
+  */
+ if (mxcsr_capability_mask == 0)
+  mxcsr_capability_mask = 0xffbf;
+
+ /* Re-enable FPU/SSE DNA exceptions */
+ set_ts();
+}
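+
+/*
+ * Illustrative sketch, not part of this change: the mask exists so that
+ * a restore path (outside this hunk) can sanitize a user-supplied MXCSR
+ * before it reaches FXRSTOR, since FXRSTOR raises #GP if reserved MXCSR
+ * bits are set. The helper name below is hypothetical.
+ */
+static inline unsigned int
+mxcsr_sanitize(unsigned int mxcsr)
+{
+ /* Drop any bits the CPU does not implement */
+ return mxcsr & mxcsr_capability_mask;
+}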
+
+/*
+ * Allocate and zero-initialize an FP save area for the current thread.
+ * The state is not loaded into the FPU.
+ */
+static struct x86_fpsave_state *
+fp_state_alloc(void)
+{
+ struct x86_fpsave_state *ifps;
+
+ ifps = (struct x86_fpsave_state *)zalloc(ifps_zone);
+ assert(ALIGNED(ifps,16));
+ bzero(ifps, sizeof(*ifps));
+
+ return ifps;
+}
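+
+/*
+ * Usage sketch (assumption: a per-thread pcb->ifps save-area pointer,
+ * allocated lazily on first FP use; the actual call sites are outside
+ * this hunk):
+ *
+ *	if (pcb->ifps == NULL)
+ *		pcb->ifps = fp_state_alloc();
+ */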
+
+static inline void
+fp_state_free(struct x86_fpsave_state *ifps)
+{
+ zfree(ifps_zone, ifps);
+}
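+
+/*
+ * Note (assumption): the caller must guarantee the save area is no
+ * longer referenced, e.g. freed at thread termination, before it is
+ * returned to ifps_zone.
+ */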
+
+