diff --git a/osfmk/i386/thread.h b/osfmk/i386/thread.h
index 85a82e8804aa8fdfa5726ea826e02d9fb5990b85..b75e36a990027b95528ea0c4b2170db3f2a71201 100644
--- a/osfmk/i386/thread.h
+++ b/osfmk/i386/thread.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  * 
 #include <mach/i386/fp_reg.h>
 #include <mach/thread_status.h>
 
-#include <kern/lock.h>
+#include <kern/simple_lock.h>
 
 #include <i386/iopb.h>
 #include <i386/seg.h>
 #include <i386/tss.h>
 #include <i386/eflags.h>
 
-/*
- *     i386_saved_state:
- *
- *     Has been exported to servers.  See: mach/i386/thread_status.h
- *
- *     This structure corresponds to the state of user registers
- *     as saved upon kernel entry.  It lives in the pcb.
- *     It is also pushed onto the stack for exceptions in the kernel.
- *     For performance, it is also used directly in syscall exceptions
- *     if the server has requested i386_THREAD_STATE flavor for the exception
- *     port.
- */
-
-/*
- *     Save area for user floating-point state.
- *     Allocated only when necessary.
- */
-
-struct x86_fpsave_state {
-       boolean_t               fp_valid;
-       enum {
-               FXSAVE32 = 1,
-               FXSAVE64 = 2
-       } fp_save_layout;
-        struct x86_fx_save     fx_save_state __attribute__ ((aligned (16)));
-};
+#include <i386/cpu_data.h>
 
+#include <machine/pal_routines.h>
 
 /*
- *     x86_kernel_state32:
+ *     x86_kernel_state:
  *
  *     This structure corresponds to the state of kernel registers
  *     as saved in a context-switch.  It lives at the base of the stack.
- *      kernel only runs in 32 bit mode for now
  */
 
-struct x86_kernel_state32 {
-       int                     k_ebx;  /* kernel context */
-       int                     k_esp;
-       int                     k_ebp;
-       int                     k_edi;
-       int                     k_esi;
-       int                     k_eip;
-       /*
-        * Kernel stacks are 16-byte aligned with a 4-byte i386_exception_link at
-        * the top, followed by an x86_kernel_state32.  After both structs have
-        * been pushed, we want to be 16-byte aligned.  A dummy int gets us there.
-        */
-       int                     dummy;
+struct x86_kernel_state {
+       uint64_t        k_rbx;  /* kernel context */
+       uint64_t        k_rsp;
+       uint64_t        k_rbp;
+       uint64_t        k_r12;
+       uint64_t        k_r13;
+       uint64_t        k_r14;
+       uint64_t        k_r15;
+       uint64_t        k_rip;
 };
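
The saved context of a thread that is not running sits at the very top of its kernel stack, which is exactly what the STACK_IKS() macro further down computes. Below is a minimal, hedged sketch of that pointer arithmetic in ordinary user-space C; "struct kstate", the malloc'd buffer standing in for a kernel stack, and the chosen size are all illustrative stand-ins, not xnu code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct x86_kernel_state above (illustrative only). */
struct kstate {
        uint64_t k_rbx, k_rsp, k_rbp;
        uint64_t k_r12, k_r13, k_r14, k_r15;
        uint64_t k_rip;
};

int main(void)
{
        size_t kernel_stack_size = 16 * 1024;       /* hypothetical stack size */
        char *stack = malloc(kernel_stack_size);    /* stand-in for a kernel stack base */

        if (stack == NULL)
                return 1;

        /*
         * Same arithmetic as STACK_IKS(): the state is the last thing that
         * fits below stack + kernel_stack_size.
         */
        struct kstate *iks = (struct kstate *)(stack + kernel_stack_size) - 1;

        iks->k_rip = 0xffffff8000123456ULL;         /* pretend saved RIP */
        printf("saved rip = 0x%llx\n", (unsigned long long)iks->k_rip);
        free(stack);
        return 0;
}

Keeping the state at a fixed offset from the stack top means it can be located from the stack base alone, without a separate per-thread pointer.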
 
-
-typedef struct pcb {
-       void                    *sf;
-       x86_saved_state_t       *iss;
-       struct x86_fpsave_state *ifps;
-#ifdef MACH_BSD
-       uint64_t        cthread_self;           /* for use of cthread package */
-        struct real_descriptor cthread_desc;
-       unsigned long  uldt_selector;          /* user ldt selector to set */
-       struct real_descriptor uldt_desc;      /* the actual user setable ldt data */
-#endif
-       decl_simple_lock_data(,lock);
-       uint64_t        iss_pte0;
-       uint64_t        iss_pte1;
-       void            *ids;
-       uint32_t        arg_store_valid;
-} *pcb_t;
-
-
 /*
  * Maps state flavor to number of words in the state:
  */
-__private_extern__ unsigned int _MachineStateCount[];
+extern unsigned int _MachineStateCount[];
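
_MachineStateCount[] maps a thread-state flavor to the number of words (natural_t units) in that flavor, so callers can validate the count supplied with a thread_get_state()/thread_set_state()-style request. A stand-alone analogue of that check is sketched below; the DEMO_* flavor names and word counts are made up for illustration and are not the kernel's actual table.

#include <stdio.h>

/* Illustrative stand-ins; real flavors live in mach/i386/thread_status.h. */
enum { DEMO_STATE32 = 1, DEMO_STATE64 = 4, DEMO_FLAVOR_MAX = 8 };

/* Word count per flavor, analogous to _MachineStateCount[]. */
static const unsigned int demo_state_count[DEMO_FLAVOR_MAX] = {
        [DEMO_STATE32] = 16,    /* hypothetical */
        [DEMO_STATE64] = 42,    /* hypothetical */
};

/* Reject a request whose caller-supplied count is too small for the flavor. */
static int demo_check_count(int flavor, unsigned int count)
{
        if (flavor < 0 || flavor >= DEMO_FLAVOR_MAX)
                return -1;
        return count >= demo_state_count[flavor] ? 0 : -1;
}

int main(void)
{
        printf("%d\n", demo_check_count(DEMO_STATE64, 42));  /* 0: large enough */
        printf("%d\n", demo_check_count(DEMO_STATE32, 8));   /* -1: too small   */
        return 0;
}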
 
-#define USER_STATE(ThrAct)     ((ThrAct)->machine.pcb->iss)
-#define USER_REGS32(ThrAct)    (saved_state32(USER_STATE(ThrAct)))
-#define USER_REGS64(ThrAct)    (saved_state64(USER_STATE(ThrAct)))
-
-#define        user_pc(ThrAct)         (is_saved_state32(USER_STATE(ThrAct)) ? \
-                                       USER_REGS32(ThrAct)->eip :      \
-                                       USER_REGS64(ThrAct)->isf.rip )
+/*
+ * The machine-dependent thread state - registers and all platform-dependent
+ * state - is saved in the machine thread structure which is embedded in
+ * the thread data structure. For historical reasons this is also referred to
+ * as the PCB.
+ */
+struct machine_thread {
+       x86_saved_state_t       *iss;
+       void                    *ifps;
+       void                    *ids;
+       decl_simple_lock_data(,lock);           /* protects ifps and ids */
+       uint64_t                iss_pte0;
+       uint64_t                iss_pte1;
 
+#ifdef MACH_BSD
+       uint64_t                cthread_self;   /* for use of cthread package */
+        struct real_descriptor cthread_desc;
+       unsigned long           uldt_selector;  /* user ldt selector to set */
+       struct real_descriptor  uldt_desc;      /* actual user settable ldt */
+#endif
 
-struct machine_thread {
-       /*
-        * pointer to process control block
-        *      (actual storage may as well be here, too)
-        */
-       struct pcb xxx_pcb;
-       pcb_t pcb;
-
-       uint32_t        specFlags;
-#define                OnProc  0x1
-#if CONFIG_DTRACE
-#define                CopyIOActive 0x2 /* Checked to ensure DTrace actions do not re-enter copyio(). */
-#endif /* CONFIG_DTRACE */
-  
+       struct pal_pcb          pal_pcb;
+       uint32_t                specFlags;
+#define                OnProc          0x1
+#define                CopyIOActive    0x2 /* Checked to ensure DTrace actions do not re-enter copyio(). */
+       uint64_t                thread_gpu_ns;
+#if NCOPY_WINDOWS > 0
         struct {
                user_addr_t     user_base;
        } copy_window[NCOPY_WINDOWS];
-        int            nxt_window;
-        int            copyio_state;
+        int                    nxt_window;
+        int                    copyio_state;
 #define                WINDOWS_DIRTY   0
 #define                WINDOWS_CLEAN   1
 #define                WINDOWS_CLOSED  2
 #define                WINDOWS_OPENED  3
-        uint64_t       physwindow_pte;
-        int            physwindow_busy;
+        uint64_t               physwindow_pte;
+        int                    physwindow_busy;
+#endif
 };
+typedef struct machine_thread *pcb_t;
 
+#define        THREAD_TO_PCB(Thr)      (&(Thr)->machine)
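
With this layout the machine-dependent state lives directly inside the thread structure rather than behind the old pcb pointer, so THREAD_TO_PCB() above is just an address-of on the embedded member. A stand-alone sketch of that embedding follows; the demo_* types and field names are illustrative stand-ins, not xnu's.

#include <stdio.h>

struct demo_machine_thread {
        unsigned long iss;                      /* pretend saved-state pointer */
};

struct demo_thread {
        int                             tid;
        struct demo_machine_thread      machine;        /* embedded, not pointed-to */
};

/* Same shape as THREAD_TO_PCB(): the "PCB" is simply the embedded member. */
#define DEMO_THREAD_TO_PCB(Thr) (&(Thr)->machine)

int main(void)
{
        struct demo_thread t = { .tid = 1, .machine = { .iss = 0xabcUL } };
        struct demo_machine_thread *pcb = DEMO_THREAD_TO_PCB(&t);

        printf("iss = 0x%lx\n", pcb->iss);
        return 0;
}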
+
+#define USER_STATE(Thr)                ((Thr)->machine.iss)
+#define USER_REGS32(Thr)       (saved_state32(USER_STATE(Thr)))
+#define USER_REGS64(Thr)       (saved_state64(USER_STATE(Thr)))
+
+#define        user_pc(Thr)            (is_saved_state32(USER_STATE(Thr)) ?    \
+                                       USER_REGS32(Thr)->eip :         \
+                                       USER_REGS64(Thr)->isf.rip )
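
user_pc() has to cope with a thread whose saved user state may be in either the 32-bit or the 64-bit layout, so it tests the flavor with is_saved_state32() before reading eip or isf.rip. The sketch below shows the same tagged-union dispatch with stand-in types; the demo_* names are illustrative, while the real x86_saved_state_t and its accessors come from the surrounding i386 headers.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the 32- and 64-bit trap frames. */
struct demo_state32 { uint32_t eip; };
struct demo_state64 { uint64_t rip; };

/* Tagged union in the spirit of x86_saved_state_t (illustrative only). */
typedef struct {
        int is64;
        union {
                struct demo_state32 s32;
                struct demo_state64 s64;
        } u;
} demo_saved_state_t;

/* Same shape as user_pc(): test the flavor, then read the matching PC field. */
static uint64_t demo_user_pc(const demo_saved_state_t *iss)
{
        return iss->is64 ? iss->u.s64.rip : (uint64_t)iss->u.s32.eip;
}

int main(void)
{
        demo_saved_state_t st = { .is64 = 1, .u.s64.rip = 0x100003f50ULL };

        printf("pc = 0x%llx\n", (unsigned long long)demo_user_pc(&st));
        return 0;
}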
 
 extern void *get_user_regs(thread_t);
 
@@ -196,41 +163,41 @@ extern void *act_thread_csave(void);
 extern void act_thread_catt(void *ctx);
 extern void act_thread_cfree(void *ctx);
 
-/*
- *     i386_exception_link:
- *
- *     This structure lives at the high end of the kernel stack.
- *     It points to the current thread`s user registers.
- */
-struct i386_exception_link {
-       x86_saved_state_t       *saved_state;
-};
-
 
 /*
  *     On the kernel stack is:
  *     stack:  ...
- *             struct i386_exception_link
- *             struct i386_kernel_state
- *     stack+KERNEL_STACK_SIZE
+ *             struct x86_kernel_state
+ *     stack+kernel_stack_size
  */
 
 #define STACK_IKS(stack)       \
-       ((struct x86_kernel_state32 *)((stack) + KERNEL_STACK_SIZE) - 1)
-#define STACK_IEL(stack)       \
-       ((struct i386_exception_link *)STACK_IKS(stack) - 1)
+       ((struct x86_kernel_state *)((stack) + kernel_stack_size) - 1)
 
 /*
- * Return address of the function that called current function, given
- *     address of the first parameter of current function.
+ * Return the current stack depth including x86_kernel_state
  */
-#define        GET_RETURN_PC(addr)     (*((vm_offset_t *)addr - 1))
+static inline vm_offset_t
+current_stack_depth(void)
+{
+       vm_offset_t     stack_ptr;
+
+       assert(get_preemption_level() > 0 || !ml_get_interrupts_enabled());
+
+#if defined(__x86_64__)
+       __asm__ volatile("mov %%rsp, %0" : "=m" (stack_ptr));
+#else
+       __asm__ volatile("mov %%esp, %0" : "=m" (stack_ptr));
+#endif
+       return (current_cpu_datap()->cpu_kernel_stack
+               + sizeof(struct x86_kernel_state)
+               - stack_ptr); 
+}
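
current_stack_depth() reads the stack pointer with inline assembly and subtracts it from a per-CPU record of the stack top (plus the x86_kernel_state that sits above it). The same register-reading trick can be tried in user space; the rough sketch below (GCC/Clang on x86 only) reports how far the stack pointer has dropped relative to a caller-supplied reference. read_sp() and report_depth() are hypothetical helpers, not xnu functions.

#include <stdint.h>
#include <stdio.h>

static inline uintptr_t read_sp(void)
{
        uintptr_t sp;
#if defined(__x86_64__)
        __asm__ volatile("mov %%rsp, %0" : "=r" (sp));
#else
        __asm__ volatile("mov %%esp, %0" : "=r" (sp));
#endif
        return sp;
}

/* The stack grows down, so depth relative to 'base' is base - current. */
__attribute__((noinline))
static void report_depth(uintptr_t base)
{
        printf("depth ~ %lu bytes\n", (unsigned long)(base - read_sp()));
}

int main(void)
{
        report_depth(read_sp());
        return 0;
}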
 
 /*
- * Defining this indicates that MD code will supply an exception()
- * routine, conformant with kern/exception.c (dependency alert!)
- * but which does wonderfully fast, machine-dependent magic.
+ * Return address of the function that called current function, given
+ *     address of the first parameter of current function.
  */
-#define MACHINE_FAST_EXCEPTION 1
+#define        GET_RETURN_PC(addr)     (__builtin_return_address(0))
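
The old GET_RETURN_PC() peeked at the word just below the first parameter's address, which only works when the return address happens to sit there; the replacement leans on the compiler's __builtin_return_address(). A tiny stand-alone example of what that builtin yields (GCC/Clang); show_return_pc() is a hypothetical function used only for illustration.

#include <stdio.h>

/* Prints the address this call will return to, i.e. the call site in main(). */
__attribute__((noinline))
static void show_return_pc(void)
{
        printf("returning to %p\n", __builtin_return_address(0));
}

int main(void)
{
        show_return_pc();
        return 0;
}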
 
 #endif /* _I386_THREAD_H_ */