/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
#include <mach/boolean.h>
#include <mach/i386/vm_types.h>
#include <mach/i386/fp_reg.h>
+#include <mach/thread_status.h>
#include <kern/lock.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/eflags.h>
-#include <i386/thread_act.h>
+
+#include <i386/cpu_data.h>
+
+#include <machine/pal_routines.h>
/*
- * i386_exception_link:
+ * x86_kernel_state:
*
- * This structure lives at the high end of the kernel stack.
- * It points to the current thread`s user registers.
+ * This structure corresponds to the state of kernel registers
+ * as saved in a context-switch. It lives at the base of the stack.
+ */
+
+#ifdef __i386__
+struct x86_kernel_state {
+ uint32_t k_ebx; /* kernel context */
+ uint32_t k_esp;
+ uint32_t k_ebp;
+ uint32_t k_edi;
+ uint32_t k_esi;
+ uint32_t k_eip;
+ /*
+ * Kernel stacks are 16-byte aligned with x86_kernel_state at the top,
+ * so we need a couple of dummy 32-bit words here.
+ */
+ uint32_t dummy[2];
+};
+#else
+struct x86_kernel_state {
+ uint64_t k_rbx; /* kernel context */
+ uint64_t k_rsp;
+ uint64_t k_rbp;
+ uint64_t k_r12;
+ uint64_t k_r13;
+ uint64_t k_r14;
+ uint64_t k_r15;
+ uint64_t k_rip;
+};
+#endif
+
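+/*
+ * Illustrative sketch (not from the original header): both layouts above are
+ * sized so that a 16-byte-aligned stack top keeps x86_kernel_state aligned as
+ * the comment on the dummy words requires; a check along these lines holds
+ * for the i386 (32 bytes) and x86_64 (64 bytes) variants:
+ *
+ *	assert((sizeof(struct x86_kernel_state) & 0xf) == 0);
+ */
+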
+/*
+ * Maps state flavor to number of words in the state:
*/
-struct i386_exception_link {
- struct i386_saved_state *saved_state;
+__private_extern__ unsigned int _MachineStateCount[];
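+
+/*
+ * Hedged usage sketch: the flavor constant comes from <mach/thread_status.h>;
+ * 'count' and the validation style below are assumptions for illustration,
+ * not code taken from this file:
+ *
+ *	if (count < _MachineStateCount[x86_THREAD_STATE32])
+ *		return KERN_INVALID_ARGUMENT;
+ */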
+
+/*
+ * The machine-dependent thread state - registers and all platform-dependent
+ * state - is saved in the machine thread structure which is embedded in
+ * the thread data structure. For historical reasons this is also referred to
+ * as the PCB.
+ */
+struct machine_thread {
+	void			*sf;		/* saved-state frame; iss points into it */
+	x86_saved_state_t	*iss;		/* saved user register state (trap/interrupt) */
+	void			*ifps;		/* x86 floating-point save state */
+	void			*ids;		/* x86 debug register state */
+ decl_simple_lock_data(,lock); /* protects ifps and ids */
+ uint64_t iss_pte0;
+ uint64_t iss_pte1;
+ uint32_t arg_store_valid;
+#ifdef MACH_BSD
+ uint64_t cthread_self; /* for use of cthread package */
+ struct real_descriptor cthread_desc;
+ unsigned long uldt_selector; /* user ldt selector to set */
+	struct real_descriptor	uldt_desc;	/* actual user settable ldt */
+#endif
+
+ struct pal_pcb pal_pcb;
+
+ uint32_t specFlags;
+#define OnProc 0x1
+#define CopyIOActive 0x2 /* Checked to ensure DTrace actions do not re-enter copyio(). */
+
+#if NCOPY_WINDOWS > 0
+ struct {
+ user_addr_t user_base;
+ } copy_window[NCOPY_WINDOWS];
+ int nxt_window;
+ int copyio_state;
+#define WINDOWS_DIRTY 0
+#define WINDOWS_CLEAN 1
+#define WINDOWS_CLOSED 2
+#define WINDOWS_OPENED 3
+ uint64_t physwindow_pte;
+ int physwindow_busy;
+#endif
};
+typedef struct machine_thread *pcb_t;
+
+#define THREAD_TO_PCB(Thr) (&(Thr)->machine)
+
+#define USER_STATE(Thr) ((Thr)->machine.iss)
+#define USER_REGS32(Thr) (saved_state32(USER_STATE(Thr)))
+#define USER_REGS64(Thr) (saved_state64(USER_STATE(Thr)))
+
+#define user_pc(Thr) (is_saved_state32(USER_STATE(Thr)) ? \
+ USER_REGS32(Thr)->eip : \
+ USER_REGS64(Thr)->isf.rip )
+
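+/*
+ * Hedged usage sketch (the calling context and any locking requirements are
+ * assumptions for illustration only); user_pc() yields eip for 32-bit saved
+ * state and rip for 64-bit saved state, and pcb is shown only to illustrate
+ * THREAD_TO_PCB:
+ *
+ *	thread_t	thread = current_thread();
+ *	pcb_t		pcb    = THREAD_TO_PCB(thread);
+ *	uint64_t	pc     = user_pc(thread);
+ */
+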
+extern void *get_user_regs(thread_t);
+
+extern void *act_thread_csave(void);
+extern void act_thread_catt(void *ctx);
+extern void act_thread_cfree(void *ctx);
/*
* On the kernel stack is:
* stack: ...
- * struct i386_exception_link
- * struct i386_kernel_state
- * stack+KERNEL_STACK_SIZE
+ * struct x86_kernel_state
+ * stack+kernel_stack_size
*/
#define STACK_IKS(stack) \
- ((struct i386_kernel_state *)((stack) + KERNEL_STACK_SIZE) - 1)
-#define STACK_IEL(stack) \
- ((struct i386_exception_link *)STACK_IKS(stack) - 1)
-
-#if NCPUS > 1
-#include <i386/mp_desc.h>
-#endif
+ ((struct x86_kernel_state *)((stack) + kernel_stack_size) - 1)
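+
+/*
+ * Hedged usage sketch: 'thread->kernel_stack' is assumed to be the stack base
+ * recorded in the generic thread structure (it is not defined in this file);
+ * the macro yields the x86_kernel_state saved at the high end of that stack:
+ *
+ *	struct x86_kernel_state *iks = STACK_IKS(thread->kernel_stack);
+ */
+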
/*
- * Boot-time data for master (or only) CPU
+ * Return the current stack depth including x86_kernel_state
*/
-extern struct fake_descriptor idt[IDTSZ];
-extern struct fake_descriptor gdt[GDTSZ];
-extern struct fake_descriptor ldt[LDTSZ];
-extern struct i386_tss ktss;
-#if MACH_KDB
-extern char db_stack_store[];
-extern char db_task_stack_store[];
-extern struct i386_tss dbtss;
-extern void db_task_start(void);
-#endif /* MACH_KDB */
-#if NCPUS > 1
-#define curr_gdt(mycpu) (mp_gdt[mycpu])
-#define curr_ktss(mycpu) (mp_ktss[mycpu])
+static inline vm_offset_t
+current_stack_depth(void)
+{
+ vm_offset_t stack_ptr;
+
+ assert(get_preemption_level() > 0 || !ml_get_interrupts_enabled());
+
+#if defined(__x86_64__)
+ __asm__ volatile("mov %%rsp, %0" : "=m" (stack_ptr));
#else
-#define curr_gdt(mycpu) (gdt)
-#define curr_ktss(mycpu) (&ktss)
+ __asm__ volatile("mov %%esp, %0" : "=m" (stack_ptr));
#endif
-
-#define gdt_desc_p(mycpu,sel) \
- ((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
+ return (current_cpu_datap()->cpu_kernel_stack
+ + sizeof(struct x86_kernel_state)
+ - stack_ptr);
+}
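+
+/*
+ * Hedged usage sketch (the 1024-byte margin is an arbitrary illustrative
+ * value, not a kernel constant):
+ *
+ *	if (current_stack_depth() > kernel_stack_size - 1024)
+ *		panic("kernel stack nearly exhausted");
+ */
+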
/*
* Return address of the function that called current function, given
* address of the first parameter of current function.
*/
-#define GET_RETURN_PC(addr) (*((vm_offset_t *)addr - 1))
-
-/*
- * Defining this indicates that MD code will supply an exception()
- * routine, conformant with kern/exception.c (dependency alert!)
- * but which does wonderfully fast, machine-dependent magic.
- */
-#define MACHINE_FAST_EXCEPTION 1
+#define GET_RETURN_PC(addr) (__builtin_return_address(0))
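+
+/*
+ * Hedged usage note: with the builtin-based definition above the 'addr'
+ * argument is ignored; it is retained for compatibility with callers that
+ * pass the address of their first parameter, e.g. ('arg0' is hypothetical):
+ *
+ *	vm_offset_t ret_pc = GET_RETURN_PC(&arg0);
+ */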
#endif /* _I386_THREAD_H_ */