/* osfmk/i386/cpu_data.h — Apple xnu-517.3.7 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 *
28 */
29
30 #ifndef I386_CPU_DATA
31 #define I386_CPU_DATA
32
33 #include <cpus.h>
34 #include <mach_assert.h>
35
36 #if defined(__GNUC__)
37
38 #include <kern/assert.h>
39 #include <kern/kern_types.h>
40 #include <pexpert/pexpert.h>
41
/*
 * Per-CPU data block.  Each processor's %gs segment base is set up so
 * that a %gs-relative load at a field's compile-time offset reaches
 * that CPU's own cpu_data_t (see CPU_DATA_GET below); cpu_data[] holds
 * one entry per possible CPU.
 *
 * NOTE(review): the field offsets are baked into the inline-assembly
 * accessors in this header — do not reorder or resize fields without
 * auditing every %gs-relative access.
 */
typedef struct
{
	thread_act_t	*active_thread;		/* activation running on this CPU */
	int		preemption_level;	/* nesting depth of disable_preemption() */
	int		simple_lock_count;	/* simple locks held (per field name) */
	int		interrupt_level;	/* interrupt nesting depth (per field name) */
	int		cpu_number;		/* Logical CPU number */
	int		cpu_phys_number;	/* Physical CPU Number */
	cpu_id_t	cpu_id;			/* Platform Expert handle */
	int		cpu_status;		/* Boot Status */
	int		cpu_signals;		/* IPI events */
	int		mcount_off;		/* mcount recursion flag */
} cpu_data_t;

extern cpu_data_t	cpu_data[NCPUS];	/* indexed by logical cpu_number */
57
/*
 * Macro to generate inline bodies to retrieve per-cpu data fields.
 *
 * The field is loaded %gs-relative: each CPU's %gs base points at its
 * own cpu_data_t, so the constant field offset ("i" constraint)
 * addresses the current CPU's copy.  The "%P1" operand modifier prints
 * the constant without the '$' immediate prefix so it can be used as a
 * memory displacement.
 */
/* <stddef.h> may already provide offsetof; avoid a conflicting redefinition. */
#ifndef offsetof
#define offsetof(TYPE,MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
#define CPU_DATA_GET(field,type)					\
	type ret;							\
	__asm__ volatile ("movl %%gs:%P1,%0"				\
		: "=r" (ret)						\
		: "i" (offsetof(cpu_data_t,field)));			\
	return ret;
66
/*
 * Everyone within the osfmk part of the kernel can use the fast
 * inline versions of these routines.  Everyone outside must call
 * the real thing.
 */
/*
 * Return the thread activation running on this CPU (%gs-relative load).
 *
 * NOTE(review): the struct field is declared "thread_act_t *" while this
 * getter returns "thread_act_t" — both are one 32-bit load so the asm is
 * unaffected, but confirm which type is actually intended.
 */
extern thread_act_t __inline__ get_active_thread(void)
{
	CPU_DATA_GET(active_thread,thread_act_t)
}
/* Fast in-kernel accessors built on the inline above. */
#define current_act_fast()	get_active_thread()
#define current_act()		current_act_fast()
#define current_thread()	current_act_fast()->thread
79
/* Return this CPU's preemption-disable nesting depth (0 = preemptible). */
extern int __inline__ get_preemption_level(void)
{
	CPU_DATA_GET(preemption_level,int)
}
/* Return this CPU's simple_lock_count field (lock nesting, per field name). */
extern int __inline__ get_simple_lock_count(void)
{
	CPU_DATA_GET(simple_lock_count,int)
}
/* Return this CPU's interrupt_level field (interrupt nesting, per field name). */
extern int __inline__ get_interrupt_level(void)
{
	CPU_DATA_GET(interrupt_level,int)
}
/* Return this CPU's logical CPU number. */
extern int __inline__ get_cpu_number(void)
{
	CPU_DATA_GET(cpu_number,int)
}
/* Return this CPU's physical CPU number. */
extern int __inline__ get_cpu_phys_number(void)
{
	CPU_DATA_GET(cpu_phys_number,int)
}
100
101 extern void __inline__ disable_preemption(void)
102 {
103 register int idx = (int)&((cpu_data_t *)0)->preemption_level;
104
105 __asm__ volatile (" incl %%gs:(%0)" : : "r" (idx));
106 }
107
/*
 * Drop one level of preemption disable.  When the count returns to
 * zero, call kernel_preempt_check() so any preemption deferred while
 * preemption was disabled can be taken now.
 *
 * In the asm: "decl" sets ZF iff the count reached 0, so "jne 1f"
 * skips the call while the nesting level is still positive.  The call
 * goes through a register ("r" (kpc)); %eax/%ecx/%edx are listed as
 * clobbers because the callee may use the caller-saved registers.
 */
extern void __inline__ enable_preemption(void)
{
	extern void kernel_preempt_check (void);
	register int idx = (int)&((cpu_data_t *)0)->preemption_level;
	register void (*kpc)(void)= kernel_preempt_check;

	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:(%0); jne 1f; \
		call %1; 1:"
		: /* no outputs */
		: "r" (idx), "r" (kpc)
		: "%eax", "%ecx", "%edx", "cc", "memory");
}
122
/*
 * Drop one level of preemption disable WITHOUT checking for a pending
 * preemption — for callers that know the check is unnecessary or must
 * not happen in their context.
 */
extern void __inline__ enable_preemption_no_check(void)
{
	register int idx = (int)&((cpu_data_t *)0)->preemption_level;

	assert(get_preemption_level() > 0);

	__asm__ volatile ("decl %%gs:(%0)"
		: /* no outputs */
		: "r" (idx)
		: "cc", "memory");
}
134
/*
 * MP variant of disable_preemption(): compiles to nothing on a
 * uniprocessor build (NCPUS == 1).
 */
extern void __inline__ mp_disable_preemption(void)
{
#if NCPUS > 1
	disable_preemption();
#endif	/* NCPUS > 1 */
}
141
/*
 * MP variant of enable_preemption(): compiles to nothing on a
 * uniprocessor build (NCPUS == 1).
 */
extern void __inline__ mp_enable_preemption(void)
{
#if NCPUS > 1
	enable_preemption();
#endif	/* NCPUS > 1 */
}
148
/*
 * MP variant of enable_preemption_no_check(): compiles to nothing on a
 * uniprocessor build (NCPUS == 1).
 */
extern void __inline__ mp_enable_preemption_no_check(void)
{
#if NCPUS > 1
	enable_preemption_no_check();
#endif	/* NCPUS > 1 */
}
155
156 #if 0
157 #ifndef __OPTIMIZE__
158 #undef extern
159 #endif
160 #endif
161
162 #else /* !defined(__GNUC__) */
163
164 #endif /* defined(__GNUC__) */
165
166 #endif /* I386_CPU_DATA */