/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <platforms.h>
#include <mach_kdb.h>
#include <mach_ldebug.h>
#include <stat_time.h>

/*
 * Pass field offsets to assembly code.
 */
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/lock.h>
#include <kern/locks.h>
#include <kern/host.h>
#include <kern/misc_protos.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <i386/Diagnostics.h>
#include <i386/mp_desc.h>
#include <i386/seg.h>
#include <i386/thread.h>
#include <i386/cpu_data.h>
#include <i386/tss.h>
#include <i386/cpu_capabilities.h>
#include <i386/cpuid.h>
#include <i386/pmCPU.h>
#include <mach/i386/vm_param.h>
#include <mach/i386/thread_status.h>
#include <machine/commpage.h>
#include <pexpert/i386/boot.h>

#if CONFIG_DTRACE
#define NEED_DTRACE_DEFS
#include <../bsd/sys/lockstat.h>
#endif

/*
 * genassym.c is used to produce an assembly file which, intermingled
 * with otherwise useless assembly code, has all the necessary
 * definitions emitted.  This assembly file is then postprocessed with
 * sed to extract only these definitions, producing the final assym.s.
 *
 * This convoluted approach is necessary because structure alignment
 * and packing may differ between the host machine and the target, so
 * we are forced to use the cross compiler to generate the values, yet
 * we cannot run anything on the target machine.
 */

#undef offsetof
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE)0)->MEMBER)
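/*
 * Note: unlike the standard offsetof(), this variant takes TYPE as a
 * pointer type (e.g. "lck_mtx_t *") or a pointer typedef (e.g.
 * "usimple_lock_t"), so the cast omits the usual "*".  The DECLARE()
 * invocations in main() below rely on this.
 */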

#if 0
#define DECLARE(SYM,VAL) \
	__asm("#DEFINITION#\t.set\t" SYM ",\t%0" : : "n" ((u_int)(VAL)))
#else
#define DECLARE(SYM,VAL) \
	__asm("#DEFINITION##define " SYM "\t%0" : : "n" ((u_int)(VAL)))
#endif
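/*
 * Illustrative sketch of the mechanism (build details assumed, not
 * defined in this file): a call such as
 *
 *	DECLARE("AST_URGENT", AST_URGENT);
 *
 * compiles, via the inline asm above, into a line of the generated
 * assembly output looking roughly like
 *
 *	#DEFINITION##define AST_URGENT	$<constant>
 *
 * The sed postprocessing step keeps only the lines carrying the
 * #DEFINITION# marker and strips the marker (and any immediate-operand
 * prefix), so assym.s ends up containing plain
 *
 *	#define AST_URGENT	<constant>
 *
 * definitions that assembly sources can #include.
 */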

int	main(
	int	argc,
	char	** argv);

int
main(
	int	argc,
	char	**argv)
{

	DECLARE("AST_URGENT",	AST_URGENT);
	DECLARE("AST_BSD",	AST_BSD);

	/* Simple Lock structure */
	DECLARE("SLOCK_ILK",	offsetof(usimple_lock_t, interlock));
#if	MACH_LDEBUG
	DECLARE("SLOCK_TYPE",	offsetof(usimple_lock_t, lock_type));
	DECLARE("SLOCK_PC",	offsetof(usimple_lock_t, debug.lock_pc));
	DECLARE("SLOCK_THREAD",	offsetof(usimple_lock_t, debug.lock_thread));
	DECLARE("SLOCK_DURATIONH", offsetof(usimple_lock_t, debug.duration[0]));
	DECLARE("SLOCK_DURATIONL", offsetof(usimple_lock_t, debug.duration[1]));
	DECLARE("USLOCK_TAG",	USLOCK_TAG);
#endif	/* MACH_LDEBUG */

	/* Mutex structure */
	DECLARE("MUTEX_OWNER",	offsetof(lck_mtx_t *, lck_mtx_owner));
	DECLARE("MUTEX_PTR",	offsetof(lck_mtx_t *, lck_mtx_ptr));
	DECLARE("MUTEX_STATE",	offsetof(lck_mtx_t *, lck_mtx_state));
#ifdef __i386__
	DECLARE("MUTEX_TYPE",	offsetof(lck_mtx_ext_t *, lck_mtx_deb.type));
	DECLARE("MUTEX_PC",	offsetof(lck_mtx_ext_t *, lck_mtx_deb.pc));
	DECLARE("MUTEX_THREAD",	offsetof(lck_mtx_ext_t *, lck_mtx_deb.thread));
	DECLARE("MUTEX_ATTR",	offsetof(lck_mtx_ext_t *, lck_mtx_attr));
	DECLARE("MUTEX_ATTR_DEBUG",	LCK_MTX_ATTR_DEBUG);
	DECLARE("MUTEX_ATTR_DEBUGb",	LCK_MTX_ATTR_DEBUGb);
	DECLARE("MUTEX_ATTR_STAT",	LCK_MTX_ATTR_STAT);
	DECLARE("MUTEX_ATTR_STATb",	LCK_MTX_ATTR_STATb);
	DECLARE("MUTEX_TAG",	MUTEX_TAG);
#endif
	DECLARE("MUTEX_IND",	LCK_MTX_TAG_INDIRECT);
	DECLARE("MUTEX_EXT",	LCK_MTX_PTR_EXTENDED);
	DECLARE("MUTEX_ITAG",	offsetof(lck_mtx_t *, lck_mtx_tag));
	DECLARE("MUTEX_PTR",	offsetof(lck_mtx_t *, lck_mtx_ptr));
	DECLARE("MUTEX_ASSERT_OWNED",	LCK_MTX_ASSERT_OWNED);
	DECLARE("MUTEX_ASSERT_NOTOWNED",LCK_MTX_ASSERT_NOTOWNED);
	DECLARE("GRP_MTX_STAT_UTIL",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt));
	DECLARE("GRP_MTX_STAT_MISS",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt));
	DECLARE("GRP_MTX_STAT_WAIT",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt));

	/* x86 only */
	DECLARE("MUTEX_DESTROYED",	LCK_MTX_TAG_DESTROYED);

	/* Per-mutex statistic element */
	DECLARE("MTX_ACQ_TSC",	offsetof(lck_mtx_ext_t *, lck_mtx_stat));

	/* Mutex group statistics elements */
	DECLARE("MUTEX_GRP",	offsetof(lck_mtx_ext_t *, lck_mtx_grp));

	/*
	 * The use of this field (lck_grp_mtx_held_cnt) is somewhat at
	 * variance with its name: it is exported here as a direct-wait count.
	 */
	DECLARE("GRP_MTX_STAT_DIRECT_WAIT",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt));

	DECLARE("GRP_MTX_STAT_HELD_MAX",	offsetof(lck_grp_t *, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max));
	/* Reader writer lock types */
	DECLARE("RW_SHARED",	LCK_RW_TYPE_SHARED);
	DECLARE("RW_EXCL",	LCK_RW_TYPE_EXCLUSIVE);

	DECLARE("TH_RECOVER",		offsetof(thread_t, recover));
	DECLARE("TH_CONTINUATION",	offsetof(thread_t, continuation));
	DECLARE("TH_KERNEL_STACK",	offsetof(thread_t, kernel_stack));

	DECLARE("TASK_MACH_EXC_PORT",
		offsetof(task_t, exc_actions[EXC_MACH_SYSCALL].port));
	DECLARE("TASK_SYSCALLS_MACH",	offsetof(struct task *, syscalls_mach));
	DECLARE("TASK_SYSCALLS_UNIX",	offsetof(struct task *, syscalls_unix));

	DECLARE("TASK_VTIMERS",		offsetof(struct task *, vtimers));

	/* These fields are being added on demand */
	DECLARE("ACT_MACH_EXC_PORT",
		offsetof(thread_t, exc_actions[EXC_MACH_SYSCALL].port));

	DECLARE("ACT_TASK",	offsetof(thread_t, task));
	DECLARE("ACT_AST",	offsetof(thread_t, ast));
	DECLARE("ACT_PCB",	offsetof(thread_t, machine.pcb));
	DECLARE("ACT_SPF",	offsetof(thread_t, machine.specFlags));
	DECLARE("ACT_MAP",	offsetof(thread_t, map));
	DECLARE("ACT_PCB_ISS",	offsetof(thread_t, machine.xxx_pcb.iss));
	DECLARE("ACT_PCB_IDS",	offsetof(thread_t, machine.xxx_pcb.ids));
#if NCOPY_WINDOWS > 0
	DECLARE("ACT_COPYIO_STATE", offsetof(thread_t, machine.copyio_state));
	DECLARE("WINDOWS_CLEAN", WINDOWS_CLEAN);
#endif

	DECLARE("MAP_PMAP",	offsetof(vm_map_t, pmap));

#define IEL_SIZE	(sizeof(struct i386_exception_link *))
	DECLARE("IEL_SIZE",	IEL_SIZE);
	DECLARE("IKS_SIZE",	sizeof(struct x86_kernel_state));

	/*
	 * KSS_* are offsets from the top of the kernel stack (cpu_kernel_stack)
	 */
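	/*
	 * They let assembly code (e.g. the kernel stack save/switch paths)
	 * address the saved callee state as KSS_xxx displacements from the
	 * stack top rather than hard-coding the x86_kernel_state layout.
	 */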
#if defined(__i386__)
	DECLARE("KSS_EBX",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_ebx));
	DECLARE("KSS_ESP",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_esp));
	DECLARE("KSS_EBP",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_ebp));
	DECLARE("KSS_EDI",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_edi));
	DECLARE("KSS_ESI",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_esi));
	DECLARE("KSS_EIP",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_eip));
#elif defined(__x86_64__)
	DECLARE("KSS_RBX",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_rbx));
	DECLARE("KSS_RSP",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_rsp));
	DECLARE("KSS_RBP",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_rbp));
	DECLARE("KSS_R12",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_r12));
	DECLARE("KSS_R13",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_r13));
	DECLARE("KSS_R14",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_r14));
	DECLARE("KSS_R15",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_r15));
	DECLARE("KSS_RIP",	IEL_SIZE + offsetof(struct x86_kernel_state *, k_rip));
#else
#error Unsupported architecture
#endif

	DECLARE("PCB_FPS",	offsetof(pcb_t, ifps));
	DECLARE("PCB_ISS",	offsetof(pcb_t, iss));

	DECLARE("DS_DR0",	offsetof(struct x86_debug_state32 *, dr0));
	DECLARE("DS_DR1",	offsetof(struct x86_debug_state32 *, dr1));
	DECLARE("DS_DR2",	offsetof(struct x86_debug_state32 *, dr2));
	DECLARE("DS_DR3",	offsetof(struct x86_debug_state32 *, dr3));
	DECLARE("DS_DR4",	offsetof(struct x86_debug_state32 *, dr4));
	DECLARE("DS_DR5",	offsetof(struct x86_debug_state32 *, dr5));
	DECLARE("DS_DR6",	offsetof(struct x86_debug_state32 *, dr6));
	DECLARE("DS_DR7",	offsetof(struct x86_debug_state32 *, dr7));

	DECLARE("DS64_DR0",	offsetof(struct x86_debug_state64 *, dr0));
	DECLARE("DS64_DR1",	offsetof(struct x86_debug_state64 *, dr1));
	DECLARE("DS64_DR2",	offsetof(struct x86_debug_state64 *, dr2));
	DECLARE("DS64_DR3",	offsetof(struct x86_debug_state64 *, dr3));
	DECLARE("DS64_DR4",	offsetof(struct x86_debug_state64 *, dr4));
	DECLARE("DS64_DR5",	offsetof(struct x86_debug_state64 *, dr5));
	DECLARE("DS64_DR6",	offsetof(struct x86_debug_state64 *, dr6));
	DECLARE("DS64_DR7",	offsetof(struct x86_debug_state64 *, dr7));

	DECLARE("FP_VALID",	offsetof(struct x86_fpsave_state *, fp_valid));

	DECLARE("SS_FLAVOR",	offsetof(x86_saved_state_t *, flavor));
	DECLARE("SS_32",	x86_SAVED_STATE32);
	DECLARE("SS_64",	x86_SAVED_STATE64);

#define R_(x)	offsetof(x86_saved_state_t *, ss_32.x)
	DECLARE("R32_CS",	R_(cs));
	DECLARE("R32_SS",	R_(ss));
	DECLARE("R32_DS",	R_(ds));
	DECLARE("R32_ES",	R_(es));
	DECLARE("R32_FS",	R_(fs));
	DECLARE("R32_GS",	R_(gs));
	DECLARE("R32_UESP",	R_(uesp));
	DECLARE("R32_EBP",	R_(ebp));
	DECLARE("R32_EAX",	R_(eax));
	DECLARE("R32_EBX",	R_(ebx));
	DECLARE("R32_ECX",	R_(ecx));
	DECLARE("R32_EDX",	R_(edx));
	DECLARE("R32_ESI",	R_(esi));
	DECLARE("R32_EDI",	R_(edi));
	DECLARE("R32_TRAPNO",	R_(trapno));
	DECLARE("R32_ERR",	R_(err));
	DECLARE("R32_EFLAGS",	R_(efl));
	DECLARE("R32_EIP",	R_(eip));
	DECLARE("R32_CR2",	R_(cr2));
	DECLARE("ISS32_SIZE",	sizeof (x86_saved_state32_t));

#define R64_(x)	offsetof(x86_saved_state_t *, ss_64.x)
	DECLARE("R64_FS",	R64_(fs));
	DECLARE("R64_GS",	R64_(gs));
	DECLARE("R64_R8",	R64_(r8));
	DECLARE("R64_R9",	R64_(r9));
	DECLARE("R64_R10",	R64_(r10));
	DECLARE("R64_R11",	R64_(r11));
	DECLARE("R64_R12",	R64_(r12));
	DECLARE("R64_R13",	R64_(r13));
	DECLARE("R64_R14",	R64_(r14));
	DECLARE("R64_R15",	R64_(r15));
	DECLARE("R64_RBP",	R64_(rbp));
	DECLARE("R64_RAX",	R64_(rax));
	DECLARE("R64_RBX",	R64_(rbx));
	DECLARE("R64_RCX",	R64_(rcx));
	DECLARE("R64_RDX",	R64_(rdx));
	DECLARE("R64_RSI",	R64_(rsi));
	DECLARE("R64_RDI",	R64_(rdi));
	DECLARE("R64_V_ARG6",	R64_(v_arg6));
	DECLARE("R64_V_ARG7",	R64_(v_arg7));
	DECLARE("R64_V_ARG8",	R64_(v_arg8));
	DECLARE("R64_CS",	R64_(isf.cs));
	DECLARE("R64_SS",	R64_(isf.ss));
	DECLARE("R64_RSP",	R64_(isf.rsp));
	DECLARE("R64_TRAPNO",	R64_(isf.trapno));
	DECLARE("R64_TRAPFN",	R64_(isf.trapfn));
	DECLARE("R64_ERR",	R64_(isf.err));
	DECLARE("R64_RFLAGS",	R64_(isf.rflags));
	DECLARE("R64_RIP",	R64_(isf.rip));
	DECLARE("R64_CR2",	R64_(cr2));
	DECLARE("ISS64_OFFSET",	R64_(isf));
	DECLARE("ISS64_SIZE",	sizeof (x86_saved_state64_t));

#define ISF64_(x)	offsetof(x86_64_intr_stack_frame_t *, x)
	DECLARE("ISF64_TRAPNO",	ISF64_(trapno));
	DECLARE("ISF64_TRAPFN",	ISF64_(trapfn));
	DECLARE("ISF64_ERR",	ISF64_(err));
	DECLARE("ISF64_RIP",	ISF64_(rip));
	DECLARE("ISF64_CS",	ISF64_(cs));
	DECLARE("ISF64_RFLAGS",	ISF64_(rflags));
	DECLARE("ISF64_RSP",	ISF64_(rsp));
	DECLARE("ISF64_SS",	ISF64_(ss));
	DECLARE("ISF64_SIZE",	sizeof(x86_64_intr_stack_frame_t));

	DECLARE("ISC32_OFFSET",	offsetof(x86_saved_state_compat32_t *, isf64));
#define ISC32_(x)	offsetof(x86_saved_state_compat32_t *, isf64.x)
	DECLARE("ISC32_TRAPNO",	ISC32_(trapno));
	DECLARE("ISC32_TRAPFN",	ISC32_(trapfn));
	DECLARE("ISC32_ERR",	ISC32_(err));
	DECLARE("ISC32_RIP",	ISC32_(rip));
	DECLARE("ISC32_CS",	ISC32_(cs));
	DECLARE("ISC32_RFLAGS",	ISC32_(rflags));
	DECLARE("ISC32_RSP",	ISC32_(rsp));
	DECLARE("ISC32_SS",	ISC32_(ss));

	DECLARE("NBPG",			I386_PGBYTES);
	DECLARE("PAGE_SIZE",		I386_PGBYTES);
	DECLARE("PAGE_MASK",		I386_PGBYTES-1);
	DECLARE("PAGE_SHIFT",		12);
	DECLARE("NKPT",			NKPT);
#ifdef __i386__
	DECLARE("KPTDI",		KPTDI);
#endif
	DECLARE("VM_MIN_ADDRESS",	VM_MIN_ADDRESS);
	DECLARE("VM_MAX_ADDRESS",	VM_MAX_ADDRESS);
	DECLARE("KERNELBASE",		VM_MIN_KERNEL_ADDRESS);
	DECLARE("LINEAR_KERNELBASE",	LINEAR_KERNEL_ADDRESS);
	DECLARE("KERNEL_STACK_SIZE",	KERNEL_STACK_SIZE);
#ifdef __i386__
	DECLARE("KERNEL_UBER_BASE_HI32", KERNEL_UBER_BASE_HI32);
#endif

	DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS",  _COMM_PAGE32_BASE_ADDRESS);
	DECLARE("ASM_COMM_PAGE32_START_ADDRESS", _COMM_PAGE32_START_ADDRESS);
	DECLARE("ASM_COMM_PAGE_SCHED_GEN",       _COMM_PAGE_SCHED_GEN);

	DECLARE("PDESHIFT",	PDESHIFT);
	DECLARE("PTEMASK",	PTEMASK);
	DECLARE("PTEINDX",	PTEINDX);
	DECLARE("INTEL_PTE_PFN",	INTEL_PTE_PFN);
	DECLARE("INTEL_PTE_VALID",	INTEL_PTE_VALID);
	DECLARE("INTEL_PTE_WRITE",	INTEL_PTE_WRITE);
	DECLARE("INTEL_PTE_PS",		INTEL_PTE_PS);
	DECLARE("INTEL_PTE_USER",	INTEL_PTE_USER);
	DECLARE("INTEL_PTE_INVALID",	INTEL_PTE_INVALID);
	DECLARE("NPGPTD",	NPGPTD);
#if defined(__x86_64__)
	DECLARE("INITPT_SEG_BASE",	INITPT_SEG_BASE);
	DECLARE("INITGDT_SEG_BASE",	INITGDT_SEG_BASE);
	DECLARE("SLEEP_SEG_BASE",	SLEEP_SEG_BASE);
	DECLARE("PROT_MODE_GDT_SIZE",	PROT_MODE_GDT_SIZE);
	DECLARE("KERNEL_PML4_INDEX",	KERNEL_PML4_INDEX);
#endif
	DECLARE("IDTSZ",	IDTSZ);
	DECLARE("GDTSZ",	GDTSZ);
	DECLARE("LDTSZ",	LDTSZ);

	DECLARE("KERNEL_DS",	KERNEL_DS);
	DECLARE("USER_CS",	USER_CS);
	DECLARE("USER_DS",	USER_DS);
	DECLARE("KERNEL32_CS",	KERNEL32_CS);
	DECLARE("KERNEL64_CS",	KERNEL64_CS);
	DECLARE("USER64_CS",	USER64_CS);
	DECLARE("KERNEL_TSS",	KERNEL_TSS);
	DECLARE("KERNEL_LDT",	KERNEL_LDT);
#ifdef __i386__
	DECLARE("DF_TSS",	DF_TSS);
	DECLARE("MC_TSS",	MC_TSS);
#if MACH_KDB
	DECLARE("DEBUG_TSS",	DEBUG_TSS);
#endif /* MACH_KDB */
	DECLARE("CPU_DATA_GS",	CPU_DATA_GS);
#endif /* __i386__ */
	DECLARE("SYSENTER_CS",	SYSENTER_CS);
	DECLARE("SYSENTER_TF_CS", SYSENTER_TF_CS);
	DECLARE("SYSENTER_DS",	SYSENTER_DS);
	DECLARE("SYSCALL_CS",	SYSCALL_CS);
#ifdef __i386__
	DECLARE("USER_WINDOW_SEL",	USER_WINDOW_SEL);
	DECLARE("PHYS_WINDOW_SEL",	PHYS_WINDOW_SEL);
#endif

	DECLARE("CPU_THIS",
		offsetof(cpu_data_t *, cpu_this));
	DECLARE("CPU_ACTIVE_THREAD",
		offsetof(cpu_data_t *, cpu_active_thread));
	DECLARE("CPU_ACTIVE_STACK",
		offsetof(cpu_data_t *, cpu_active_stack));
	DECLARE("CPU_KERNEL_STACK",
		offsetof(cpu_data_t *, cpu_kernel_stack));
	DECLARE("CPU_INT_STACK_TOP",
		offsetof(cpu_data_t *, cpu_int_stack_top));
#if	MACH_RT
	DECLARE("CPU_PREEMPTION_LEVEL",
		offsetof(cpu_data_t *, cpu_preemption_level));
#endif	/* MACH_RT */
	DECLARE("CPU_HIBERNATE",
		offsetof(cpu_data_t *, cpu_hibernate));
	DECLARE("CPU_INTERRUPT_LEVEL",
		offsetof(cpu_data_t *, cpu_interrupt_level));
	DECLARE("CPU_SIMPLE_LOCK_COUNT",
		offsetof(cpu_data_t *, cpu_simple_lock_count));
	DECLARE("CPU_NUMBER_GS",
		offsetof(cpu_data_t *, cpu_number));
	DECLARE("CPU_RUNNING",
		offsetof(cpu_data_t *, cpu_running));
	DECLARE("CPU_MCOUNT_OFF",
		offsetof(cpu_data_t *, cpu_mcount_off));
	DECLARE("CPU_PENDING_AST",
		offsetof(cpu_data_t *, cpu_pending_ast));
	DECLARE("CPU_DESC_TABLEP",
		offsetof(cpu_data_t *, cpu_desc_tablep));
	DECLARE("CPU_DESC_INDEX",
		offsetof(cpu_data_t *, cpu_desc_index));
	DECLARE("CDI_GDT",
		offsetof(cpu_desc_index_t *, cdi_gdt));
	DECLARE("CDI_IDT",
		offsetof(cpu_desc_index_t *, cdi_idt));
	DECLARE("CPU_PROCESSOR",
		offsetof(cpu_data_t *, cpu_processor));
	DECLARE("CPU_INT_STATE",
		offsetof(cpu_data_t *, cpu_int_state));
	DECLARE("CPU_INT_EVENT_TIME",
		offsetof(cpu_data_t *, cpu_int_event_time));

#ifdef __i386__
	DECLARE("CPU_HI_ISS",
		offsetof(cpu_data_t *, cpu_hi_iss));
#endif
	DECLARE("CPU_TASK_CR3",
		offsetof(cpu_data_t *, cpu_task_cr3));
	DECLARE("CPU_ACTIVE_CR3",
		offsetof(cpu_data_t *, cpu_active_cr3));
	DECLARE("CPU_KERNEL_CR3",
		offsetof(cpu_data_t *, cpu_kernel_cr3));
#ifdef __x86_64__
	DECLARE("CPU_TLB_INVALID",
		offsetof(cpu_data_t *, cpu_tlb_invalid));
#endif

	DECLARE("CPU_IS64BIT",
		offsetof(cpu_data_t *, cpu_is64bit));
	DECLARE("CPU_TASK_MAP",
		offsetof(cpu_data_t *, cpu_task_map));
	DECLARE("TASK_MAP_32BIT",	TASK_MAP_32BIT);
	DECLARE("TASK_MAP_64BIT",	TASK_MAP_64BIT);
#ifdef __i386__
	DECLARE("TASK_MAP_64BIT_SHARED", TASK_MAP_64BIT_SHARED);
#endif
	DECLARE("CPU_UBER_USER_GS_BASE",
		offsetof(cpu_data_t *, cpu_uber.cu_user_gs_base));
	DECLARE("CPU_UBER_ISF",
		offsetof(cpu_data_t *, cpu_uber.cu_isf));
	DECLARE("CPU_UBER_TMP",
		offsetof(cpu_data_t *, cpu_uber.cu_tmp));
	DECLARE("CPU_UBER_ARG_STORE",
		offsetof(cpu_data_t *, cpu_uber_arg_store));
	DECLARE("CPU_UBER_ARG_STORE_VALID",
		offsetof(cpu_data_t *, cpu_uber_arg_store_valid));

	DECLARE("CPU_NANOTIME",
		offsetof(cpu_data_t *, cpu_nanotime));

	DECLARE("CPU_DR7",
		offsetof(cpu_data_t *, cpu_dr7));

	DECLARE("hwIntCnt",	offsetof(cpu_data_t *, cpu_hwIntCnt));

	DECLARE("enaExpTrace",	enaExpTrace);
	DECLARE("enaExpTraceb",	enaExpTraceb);
	DECLARE("enaUsrFCall",	enaUsrFCall);
	DECLARE("enaUsrFCallb",	enaUsrFCallb);
	DECLARE("enaUsrPhyMp",	enaUsrPhyMp);
	DECLARE("enaUsrPhyMpb",	enaUsrPhyMpb);
	DECLARE("enaDiagSCs",	enaDiagSCs);
	DECLARE("enaDiagSCsb",	enaDiagSCsb);
	DECLARE("enaDiagEM",	enaDiagEM);
	DECLARE("enaDiagEMb",	enaDiagEMb);
	DECLARE("enaNotifyEM",	enaNotifyEM);
	DECLARE("enaNotifyEMb",	enaNotifyEMb);
	DECLARE("dgLock",	offsetof(struct diagWork *, dgLock));
	DECLARE("dgFlags",	offsetof(struct diagWork *, dgFlags));
	DECLARE("dgMisc1",	offsetof(struct diagWork *, dgMisc1));
	DECLARE("dgMisc2",	offsetof(struct diagWork *, dgMisc2));
	DECLARE("dgMisc3",	offsetof(struct diagWork *, dgMisc3));
	DECLARE("dgMisc4",	offsetof(struct diagWork *, dgMisc4));
	DECLARE("dgMisc5",	offsetof(struct diagWork *, dgMisc5));

	DECLARE("INTEL_PTE_KERNEL",	INTEL_PTE_VALID|INTEL_PTE_WRITE);
	DECLARE("PDESHIFT",	PDESHIFT);
	DECLARE("PDESIZE",	PDESIZE);
	DECLARE("PTESIZE",	PTESIZE);
#ifdef __i386__
	DECLARE("PTDPTDI",	PTDPTDI);
	DECLARE("APTDPTDI",	APTDPTDI);
	DECLARE("HIGH_MEM_BASE",	HIGH_MEM_BASE);
	DECLARE("HIGH_IDT_BASE",	pmap_index_to_virt(HIGH_FIXED_IDT));
#endif

	DECLARE("KERNELBASEPDE",
		(LINEAR_KERNEL_ADDRESS >> PDESHIFT) *
		sizeof(pt_entry_t));

	DECLARE("TSS_ESP0",	offsetof(struct i386_tss *, esp0));
	DECLARE("TSS_SS0",	offsetof(struct i386_tss *, ss0));
	DECLARE("TSS_LDT",	offsetof(struct i386_tss *, ldt));
	DECLARE("TSS_PDBR",	offsetof(struct i386_tss *, cr3));
	DECLARE("TSS_LINK",	offsetof(struct i386_tss *, back_link));

	DECLARE("K_TASK_GATE",	ACC_P|ACC_PL_K|ACC_TASK_GATE);
	DECLARE("K_TRAP_GATE",	ACC_P|ACC_PL_K|ACC_TRAP_GATE);
	DECLARE("U_TRAP_GATE",	ACC_P|ACC_PL_U|ACC_TRAP_GATE);
	DECLARE("K_INTR_GATE",	ACC_P|ACC_PL_K|ACC_INTR_GATE);
	DECLARE("U_INTR_GATE",	ACC_P|ACC_PL_U|ACC_INTR_GATE);
	DECLARE("K_TSS",	ACC_P|ACC_PL_K|ACC_TSS);

	/*
	 * usimple_lock fields
	 */
	DECLARE("USL_INTERLOCK",	offsetof(usimple_lock_t, interlock));

	DECLARE("INTSTACK_SIZE",	INTSTACK_SIZE);
	DECLARE("KADDR",	offsetof(struct boot_args *, kaddr));
	DECLARE("KSIZE",	offsetof(struct boot_args *, ksize));
	DECLARE("MEMORYMAP",	offsetof(struct boot_args *, MemoryMap));
	DECLARE("DEVICETREEP",	offsetof(struct boot_args *, deviceTreeP));

	DECLARE("RNT_TSC_BASE",
		offsetof(rtc_nanotime_t *, tsc_base));
	DECLARE("RNT_NS_BASE",
		offsetof(rtc_nanotime_t *, ns_base));
	DECLARE("RNT_SCALE",
		offsetof(rtc_nanotime_t *, scale));
	DECLARE("RNT_SHIFT",
		offsetof(rtc_nanotime_t *, shift));
	DECLARE("RNT_GENERATION",
		offsetof(rtc_nanotime_t *, generation));

	/* values from kern/timer.h */
#ifdef __LP64__
	DECLARE("TIMER_ALL",	offsetof(struct timer *, all_bits));
#else
	DECLARE("TIMER_LOW",	offsetof(struct timer *, low_bits));
	DECLARE("TIMER_HIGH",	offsetof(struct timer *, high_bits));
	DECLARE("TIMER_HIGHCHK",	offsetof(struct timer *, high_bits_check));
#endif
#if !STAT_TIME
	DECLARE("TIMER_TSTAMP",
		offsetof(struct timer *, tstamp));

	DECLARE("THREAD_TIMER",
		offsetof(struct processor *, processor_data.thread_timer));
#endif
	DECLARE("KERNEL_TIMER",
		offsetof(struct processor *, processor_data.kernel_timer));
	DECLARE("SYSTEM_TIMER",
		offsetof(struct thread *, system_timer));
	DECLARE("USER_TIMER",
		offsetof(struct thread *, user_timer));
	DECLARE("SYSTEM_STATE",
		offsetof(struct processor *, processor_data.system_state));
	DECLARE("USER_STATE",
		offsetof(struct processor *, processor_data.user_state));
	DECLARE("IDLE_STATE",
		offsetof(struct processor *, processor_data.idle_state));
	DECLARE("CURRENT_STATE",
		offsetof(struct processor *, processor_data.current_state));

	DECLARE("OnProc", OnProc);


#if	CONFIG_DTRACE
	DECLARE("LS_LCK_MTX_LOCK_ACQUIRE", LS_LCK_MTX_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE", LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_UNLOCK_RELEASE", LS_LCK_MTX_UNLOCK_RELEASE);
	DECLARE("LS_LCK_MTX_TRY_LOCK_ACQUIRE", LS_LCK_MTX_TRY_LOCK_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_ACQUIRE", LS_LCK_RW_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_RW_DONE_RELEASE", LS_LCK_RW_DONE_RELEASE);
	DECLARE("LS_LCK_MTX_EXT_LOCK_ACQUIRE", LS_LCK_MTX_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE", LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_EXT_UNLOCK_RELEASE", LS_LCK_MTX_EXT_UNLOCK_RELEASE);
	DECLARE("LS_LCK_RW_LOCK_EXCL_ACQUIRE", LS_LCK_RW_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE", LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE);
	DECLARE("LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE", LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE", LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_MTX_LOCK_SPIN_ACQUIRE", LS_LCK_MTX_LOCK_SPIN_ACQUIRE);
#endif

	return (0);
}