/*
 * Copyright (c) 2000-2012 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach_ldebug.h>

/*
 * Pass field offsets to assembly code.
 */
#include <kern/ast.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <kern/locks.h>
#include <kern/host.h>
#include <kern/misc_protos.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <vm/vm_map.h>
#include <i386/pmap.h>
#include <i386/Diagnostics.h>
#include <i386/mp_desc.h>
#include <i386/seg.h>
#include <i386/thread.h>
#include <i386/cpu_data.h>
#include <i386/tss.h>
#include <i386/cpu_capabilities.h>
#include <i386/cpuid.h>
#include <i386/pmCPU.h>
#include <mach/i386/vm_param.h>
#include <mach/i386/thread_status.h>
#include <machine/commpage.h>
#include <pexpert/i386/boot.h>

#undef offsetof
#include <stddef.h>

#if CONFIG_DTRACE
#define NEED_DTRACE_DEFS
#include <../bsd/sys/lockstat.h>
#endif

/*
 * genassym.c is used to produce an
 * assembly file which, intermingled with otherwise useless assembly code,
 * has all the necessary definitions emitted. This assembly file is
 * then postprocessed with sed to extract only these definitions
 * and thus the final assym.s is created.
 *
 * This convoluted scheme is necessary since the structure alignment
 * and packing may differ between the host machine and the target,
 * so we are forced to use the cross compiler to generate the values,
 * but we cannot run anything on the target machine.
 */

#define DECLARE(SYM,VAL) \
	__asm("#DEFINITION##define " SYM "\t%0" : : "n" ((u_int)(VAL)))

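/*
 * Illustrative sketch of the mechanism described above (not the exact
 * compiler output or build rule).  A declaration such as
 *
 *	DECLARE("AST_URGENT", AST_URGENT);
 *
 * expands to an __asm() statement whose only purpose is to make the
 * compiler print a line resembling
 *
 *	#DEFINITION##define AST_URGENT	$<value>
 *
 * into the generated assembly, with <value> filled in through the "n"
 * (immediate integer) operand constraint.  The sed postprocessing step
 * then keeps only the #DEFINITION# lines, strips the marker and any
 * immediate-operand prefix (e.g. '$'), and leaves plain
 *
 *	#define AST_URGENT	<value>
 *
 * lines in the generated file that the assembly sources include.
 */
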
int	main(
	int	argc,
	char	** argv);

int
main(
	int	argc,
	char	**argv)
{

	DECLARE("AST_URGENT",		AST_URGENT);
	DECLARE("AST_BSD",		AST_BSD);

	DECLARE("MAX_CPUS",		MAX_CPUS);

	/* Simple Lock structure */
	DECLARE("SLOCK_ILK",	offsetof(usimple_lock_data_t, interlock));
#if	MACH_LDEBUG
	DECLARE("SLOCK_TYPE",	offsetof(usimple_lock_data_t, lock_type));
	DECLARE("SLOCK_PC",	offsetof(usimple_lock_data_t, debug.lock_pc));
	DECLARE("SLOCK_THREAD",	offsetof(usimple_lock_data_t, debug.lock_thread));
	DECLARE("SLOCK_DURATIONH", offsetof(usimple_lock_data_t, debug.duration[0]));
	DECLARE("SLOCK_DURATIONL", offsetof(usimple_lock_data_t, debug.duration[1]));
	DECLARE("USLOCK_TAG",	USLOCK_TAG);
#endif	/* MACH_LDEBUG */

	/* Mutex structure */
	DECLARE("MUTEX_OWNER",	offsetof(lck_mtx_t, lck_mtx_owner));
	DECLARE("MUTEX_PTR",	offsetof(lck_mtx_t, lck_mtx_ptr));
	DECLARE("MUTEX_STATE",	offsetof(lck_mtx_t, lck_mtx_state));
	DECLARE("MUTEX_IND",	LCK_MTX_TAG_INDIRECT);
	DECLARE("MUTEX_PTR",	offsetof(lck_mtx_t, lck_mtx_ptr));
	DECLARE("MUTEX_ASSERT_OWNED",	LCK_MTX_ASSERT_OWNED);
	DECLARE("MUTEX_ASSERT_NOTOWNED", LCK_MTX_ASSERT_NOTOWNED);
	DECLARE("GRP_MTX_STAT_UTIL",	offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_util_cnt));
	DECLARE("GRP_MTX_STAT_MISS",	offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_miss_cnt));
	DECLARE("GRP_MTX_STAT_WAIT",	offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_wait_cnt));

	/* x86 only */
	DECLARE("MUTEX_DESTROYED",	LCK_MTX_TAG_DESTROYED);

	/* Per-mutex statistic element */
	DECLARE("MTX_ACQ_TSC",	offsetof(lck_mtx_ext_t, lck_mtx_stat));

	/* Mutex group statistics elements */
	DECLARE("MUTEX_GRP",	offsetof(lck_mtx_ext_t, lck_mtx_grp));

	/*
	 * Note that GRP_MTX_STAT_DIRECT_WAIT aliases lck_grp_mtx_held_cnt,
	 * so the use of this field is somewhat at variance with its name.
	 */
	DECLARE("GRP_MTX_STAT_DIRECT_WAIT", offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_cnt));

	DECLARE("GRP_MTX_STAT_HELD_MAX", offsetof(lck_grp_t, lck_grp_stat.lck_grp_mtx_stat.lck_grp_mtx_held_max));
	/* Reader writer lock types */
	DECLARE("RW_SHARED",	LCK_RW_TYPE_SHARED);
	DECLARE("RW_EXCL",	LCK_RW_TYPE_EXCLUSIVE);

	DECLARE("TH_RECOVER",		offsetof(struct thread, recover));
	DECLARE("TH_CONTINUATION",	offsetof(struct thread, continuation));
	DECLARE("TH_KERNEL_STACK",	offsetof(struct thread, kernel_stack));
	DECLARE("TH_MUTEX_COUNT",	offsetof(struct thread, mutex_count));
	DECLARE("TH_WAS_PROMOTED_ON_WAKEUP", offsetof(struct thread, was_promoted_on_wakeup));
	DECLARE("TH_IOTIER_OVERRIDE",	offsetof(struct thread, iotier_override));

	DECLARE("TH_SYSCALLS_MACH",	offsetof(struct thread, syscalls_mach));
	DECLARE("TH_SYSCALLS_UNIX",	offsetof(struct thread, syscalls_unix));

	DECLARE("TASK_VTIMERS",		offsetof(struct task, vtimers));

	/* These fields are being added on demand */
	DECLARE("TH_TASK",	offsetof(struct thread, task));
	DECLARE("TH_AST",	offsetof(struct thread, ast));
	DECLARE("TH_MAP",	offsetof(struct thread, map));
	DECLARE("TH_SPF",	offsetof(struct thread, machine.specFlags));
	DECLARE("TH_PCB_ISS",	offsetof(struct thread, machine.iss));
	DECLARE("TH_PCB_IDS",	offsetof(struct thread, machine.ids));
	DECLARE("TH_PCB_FPS",	offsetof(struct thread, machine.ifps));
#if NCOPY_WINDOWS > 0
	DECLARE("TH_COPYIO_STATE", offsetof(struct thread, machine.copyio_state));
	DECLARE("WINDOWS_CLEAN", WINDOWS_CLEAN);
#endif
	DECLARE("TH_RWLOCK_COUNT",	offsetof(struct thread, rwlock_count));

	DECLARE("MAP_PMAP",	offsetof(struct _vm_map, pmap));

#define IEL_SIZE	(sizeof(struct i386_exception_link *))
	DECLARE("IKS_SIZE",	sizeof(struct x86_kernel_state));

	/*
	 * KSS_* are offsets from the top of the kernel stack (cpu_kernel_stack)
	 */
	DECLARE("KSS_RBX",	offsetof(struct x86_kernel_state, k_rbx));
	DECLARE("KSS_RSP",	offsetof(struct x86_kernel_state, k_rsp));
	DECLARE("KSS_RBP",	offsetof(struct x86_kernel_state, k_rbp));
	DECLARE("KSS_R12",	offsetof(struct x86_kernel_state, k_r12));
	DECLARE("KSS_R13",	offsetof(struct x86_kernel_state, k_r13));
	DECLARE("KSS_R14",	offsetof(struct x86_kernel_state, k_r14));
	DECLARE("KSS_R15",	offsetof(struct x86_kernel_state, k_r15));
	DECLARE("KSS_RIP",	offsetof(struct x86_kernel_state, k_rip));
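	/*
	 * Illustrative sketch only (an assumed consumer, not the actual
	 * context-switch source): assembly that saves or restores the
	 * callee-saved kernel state can address it with these KSS_*
	 * offsets relative to a pointer to the x86_kernel_state area at
	 * the stack top, e.g.
	 *
	 *	movq	%rbx, KSS_RBX(%r11)
	 *	movq	%rbp, KSS_RBP(%r11)
	 *	movq	%rsp, KSS_RSP(%r11)
	 *	...
	 *	movq	KSS_RIP(%r11), %rax
	 *
	 * where %r11 is a stand-in for whatever register holds that pointer.
	 */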

	DECLARE("DS_DR0",	offsetof(struct x86_debug_state32, dr0));
	DECLARE("DS_DR1",	offsetof(struct x86_debug_state32, dr1));
	DECLARE("DS_DR2",	offsetof(struct x86_debug_state32, dr2));
	DECLARE("DS_DR3",	offsetof(struct x86_debug_state32, dr3));
	DECLARE("DS_DR4",	offsetof(struct x86_debug_state32, dr4));
	DECLARE("DS_DR5",	offsetof(struct x86_debug_state32, dr5));
	DECLARE("DS_DR6",	offsetof(struct x86_debug_state32, dr6));
	DECLARE("DS_DR7",	offsetof(struct x86_debug_state32, dr7));

	DECLARE("DS64_DR0",	offsetof(struct x86_debug_state64, dr0));
	DECLARE("DS64_DR1",	offsetof(struct x86_debug_state64, dr1));
	DECLARE("DS64_DR2",	offsetof(struct x86_debug_state64, dr2));
	DECLARE("DS64_DR3",	offsetof(struct x86_debug_state64, dr3));
	DECLARE("DS64_DR4",	offsetof(struct x86_debug_state64, dr4));
	DECLARE("DS64_DR5",	offsetof(struct x86_debug_state64, dr5));
	DECLARE("DS64_DR6",	offsetof(struct x86_debug_state64, dr6));
	DECLARE("DS64_DR7",	offsetof(struct x86_debug_state64, dr7));

	DECLARE("FP_VALID",	offsetof(struct x86_fx_thread_state, fp_valid));

	DECLARE("SS_FLAVOR",	offsetof(x86_saved_state_t, flavor));
	DECLARE("SS_32",	x86_SAVED_STATE32);
	DECLARE("SS_64",	x86_SAVED_STATE64);

#define R_(x)	offsetof(x86_saved_state_t, ss_32.x)
	DECLARE("R32_CS",	R_(cs));
	DECLARE("R32_SS",	R_(ss));
	DECLARE("R32_DS",	R_(ds));
	DECLARE("R32_ES",	R_(es));
	DECLARE("R32_FS",	R_(fs));
	DECLARE("R32_GS",	R_(gs));
	DECLARE("R32_UESP",	R_(uesp));
	DECLARE("R32_EBP",	R_(ebp));
	DECLARE("R32_EAX",	R_(eax));
	DECLARE("R32_EBX",	R_(ebx));
	DECLARE("R32_ECX",	R_(ecx));
	DECLARE("R32_EDX",	R_(edx));
	DECLARE("R32_ESI",	R_(esi));
	DECLARE("R32_EDI",	R_(edi));
	DECLARE("R32_TRAPNO",	R_(trapno));
	DECLARE("R32_ERR",	R_(err));
	DECLARE("R32_EFLAGS",	R_(efl));
	DECLARE("R32_EIP",	R_(eip));
	DECLARE("R32_CR2",	R_(cr2));
	DECLARE("ISS32_SIZE",	sizeof (x86_saved_state32_t));

#define R64_(x)	offsetof(x86_saved_state_t, ss_64.x)
	DECLARE("R64_FS",	R64_(fs));
	DECLARE("R64_GS",	R64_(gs));
	DECLARE("R64_R8",	R64_(r8));
	DECLARE("R64_R9",	R64_(r9));
	DECLARE("R64_R10",	R64_(r10));
	DECLARE("R64_R11",	R64_(r11));
	DECLARE("R64_R12",	R64_(r12));
	DECLARE("R64_R13",	R64_(r13));
	DECLARE("R64_R14",	R64_(r14));
	DECLARE("R64_R15",	R64_(r15));
	DECLARE("R64_RBP",	R64_(rbp));
	DECLARE("R64_RAX",	R64_(rax));
	DECLARE("R64_RBX",	R64_(rbx));
	DECLARE("R64_RCX",	R64_(rcx));
	DECLARE("R64_RDX",	R64_(rdx));
	DECLARE("R64_RSI",	R64_(rsi));
	DECLARE("R64_RDI",	R64_(rdi));
	DECLARE("R64_CS",	R64_(isf.cs));
	DECLARE("R64_SS",	R64_(isf.ss));
	DECLARE("R64_RSP",	R64_(isf.rsp));
	DECLARE("R64_TRAPNO",	R64_(isf.trapno));
	DECLARE("R64_TRAPFN",	R64_(isf.trapfn));
	DECLARE("R64_ERR",	R64_(isf.err));
	DECLARE("R64_RFLAGS",	R64_(isf.rflags));
	DECLARE("R64_RIP",	R64_(isf.rip));
	DECLARE("R64_CR2",	R64_(cr2));
	DECLARE("ISS64_OFFSET",	R64_(isf));
	DECLARE("ISS64_SIZE",	sizeof (x86_saved_state64_t));

#define ISF64_(x)	offsetof(x86_64_intr_stack_frame_t, x)
	DECLARE("ISF64_TRAPNO",	ISF64_(trapno));
	DECLARE("ISF64_TRAPFN",	ISF64_(trapfn));
	DECLARE("ISF64_ERR",	ISF64_(err));
	DECLARE("ISF64_RIP",	ISF64_(rip));
	DECLARE("ISF64_CS",	ISF64_(cs));
	DECLARE("ISF64_RFLAGS",	ISF64_(rflags));
	DECLARE("ISF64_RSP",	ISF64_(rsp));
	DECLARE("ISF64_SS",	ISF64_(ss));
	DECLARE("ISF64_SIZE",	sizeof(x86_64_intr_stack_frame_t));

	DECLARE("NBPG",			I386_PGBYTES);
	DECLARE("PAGE_SIZE",		I386_PGBYTES);
	DECLARE("PAGE_MASK",		I386_PGBYTES-1);
	DECLARE("PAGE_SHIFT",		12);
	DECLARE("NKPT",			NKPT);
	DECLARE("VM_MIN_ADDRESS",	VM_MIN_ADDRESS);
	DECLARE("VM_MAX_ADDRESS",	VM_MAX_ADDRESS);
	DECLARE("KERNELBASE",		VM_MIN_KERNEL_ADDRESS);
	DECLARE("LINEAR_KERNELBASE",	LINEAR_KERNEL_ADDRESS);
	DECLARE("KERNEL_STACK_SIZE",	KERNEL_STACK_SIZE);

	DECLARE("ASM_COMM_PAGE32_BASE_ADDRESS",	 _COMM_PAGE32_BASE_ADDRESS);
	DECLARE("ASM_COMM_PAGE32_START_ADDRESS", _COMM_PAGE32_START_ADDRESS);
	DECLARE("ASM_COMM_PAGE_SCHED_GEN",	 _COMM_PAGE_SCHED_GEN);

	DECLARE("PDESHIFT",	PDESHIFT);
	DECLARE("PTEMASK",	PTEMASK);
	DECLARE("PTEINDX",	PTEINDX);
	DECLARE("INTEL_PTE_PFN",	INTEL_PTE_PFN);
	DECLARE("INTEL_PTE_VALID",	INTEL_PTE_VALID);
	DECLARE("INTEL_PTE_WRITE",	INTEL_PTE_WRITE);
	DECLARE("INTEL_PTE_PS",		INTEL_PTE_PS);
	DECLARE("INTEL_PTE_USER",	INTEL_PTE_USER);
	DECLARE("INTEL_PTE_INVALID",	INTEL_PTE_INVALID);
	DECLARE("NPGPTD",	NPGPTD);
	DECLARE("KERNEL_PML4_INDEX",	KERNEL_PML4_INDEX);
	DECLARE("IDTSZ",	IDTSZ);
	DECLARE("GDTSZ",	GDTSZ);
	DECLARE("LDTSZ",	LDTSZ);

	DECLARE("KERNEL_DS",	KERNEL_DS);
	DECLARE("USER_CS",	USER_CS);
	DECLARE("USER_DS",	USER_DS);
	DECLARE("KERNEL32_CS",	KERNEL32_CS);
	DECLARE("KERNEL64_CS",	KERNEL64_CS);
	DECLARE("USER64_CS",	USER64_CS);
	DECLARE("KERNEL_TSS",	KERNEL_TSS);
	DECLARE("KERNEL_LDT",	KERNEL_LDT);
	DECLARE("SYSENTER_CS",	SYSENTER_CS);
	DECLARE("SYSENTER_TF_CS", SYSENTER_TF_CS);
	DECLARE("SYSENTER_DS",	SYSENTER_DS);
	DECLARE("SYSCALL_CS",	SYSCALL_CS);

	DECLARE("CPU_THIS",
		offsetof(cpu_data_t, cpu_this));
	DECLARE("CPU_ACTIVE_THREAD",
		offsetof(cpu_data_t, cpu_active_thread));
	DECLARE("CPU_ACTIVE_STACK",
		offsetof(cpu_data_t, cpu_active_stack));
	DECLARE("CPU_KERNEL_STACK",
		offsetof(cpu_data_t, cpu_kernel_stack));
	DECLARE("CPU_INT_STACK_TOP",
		offsetof(cpu_data_t, cpu_int_stack_top));
#if	MACH_RT
	DECLARE("CPU_PREEMPTION_LEVEL",
		offsetof(cpu_data_t, cpu_preemption_level));
#endif	/* MACH_RT */
	DECLARE("CPU_HIBERNATE",
		offsetof(cpu_data_t, cpu_hibernate));
	DECLARE("CPU_INTERRUPT_LEVEL",
		offsetof(cpu_data_t, cpu_interrupt_level));
	DECLARE("CPU_NESTED_ISTACK",
		offsetof(cpu_data_t, cpu_nested_istack));
	DECLARE("CPU_NUMBER_GS",
		offsetof(cpu_data_t, cpu_number));
	DECLARE("CPU_RUNNING",
		offsetof(cpu_data_t, cpu_running));
	DECLARE("CPU_PENDING_AST",
		offsetof(cpu_data_t, cpu_pending_ast));
	DECLARE("CPU_DESC_TABLEP",
		offsetof(cpu_data_t, cpu_desc_tablep));
	DECLARE("CPU_DESC_INDEX",
		offsetof(cpu_data_t, cpu_desc_index));
	DECLARE("CDI_GDT",
		offsetof(cpu_desc_index_t, cdi_gdt));
	DECLARE("CDI_IDT",
		offsetof(cpu_desc_index_t, cdi_idt));
	DECLARE("CPU_PROCESSOR",
		offsetof(cpu_data_t, cpu_processor));
	DECLARE("CPU_INT_STATE",
		offsetof(cpu_data_t, cpu_int_state));
	DECLARE("CPU_INT_EVENT_TIME",
		offsetof(cpu_data_t, cpu_int_event_time));

	DECLARE("CPU_TASK_CR3",
		offsetof(cpu_data_t, cpu_task_cr3));
	DECLARE("CPU_ACTIVE_CR3",
		offsetof(cpu_data_t, cpu_active_cr3));
	DECLARE("CPU_KERNEL_CR3",
		offsetof(cpu_data_t, cpu_kernel_cr3));
	DECLARE("CPU_TLB_INVALID",
		offsetof(cpu_data_t, cpu_tlb_invalid));

	DECLARE("CPU_TASK_MAP",
		offsetof(cpu_data_t, cpu_task_map));
	DECLARE("TASK_MAP_32BIT",	TASK_MAP_32BIT);
	DECLARE("TASK_MAP_64BIT",	TASK_MAP_64BIT);
	DECLARE("CPU_UBER_USER_GS_BASE",
		offsetof(cpu_data_t, cpu_uber.cu_user_gs_base));
	DECLARE("CPU_UBER_ISF",
		offsetof(cpu_data_t, cpu_uber.cu_isf));
	DECLARE("CPU_UBER_TMP",
		offsetof(cpu_data_t, cpu_uber.cu_tmp));

	DECLARE("CPU_NANOTIME",
		offsetof(cpu_data_t, cpu_nanotime));

	DECLARE("CPU_DR7",
		offsetof(cpu_data_t, cpu_dr7));

	DECLARE("hwIntCnt",	offsetof(cpu_data_t, cpu_hwIntCnt));
	DECLARE("CPU_ACTIVE_PCID",
		offsetof(cpu_data_t, cpu_active_pcid));
	DECLARE("CPU_PCID_COHERENTP",
		offsetof(cpu_data_t, cpu_pmap_pcid_coherentp));
	DECLARE("CPU_PCID_COHERENTP_KERNEL",
		offsetof(cpu_data_t, cpu_pmap_pcid_coherentp_kernel));
	DECLARE("CPU_PMAP_PCID_ENABLED",
		offsetof(cpu_data_t, cpu_pmap_pcid_enabled));

#ifdef	PCID_STATS
	DECLARE("CPU_PMAP_USER_RETS",
		offsetof(cpu_data_t, cpu_pmap_user_rets));
	DECLARE("CPU_PMAP_PCID_PRESERVES",
		offsetof(cpu_data_t, cpu_pmap_pcid_preserves));
	DECLARE("CPU_PMAP_PCID_FLUSHES",
		offsetof(cpu_data_t, cpu_pmap_pcid_flushes));
#endif
	DECLARE("CPU_TLB_INVALID",
		offsetof(cpu_data_t, cpu_tlb_invalid));
	DECLARE("CPU_TLB_INVALID_LOCAL",
		offsetof(cpu_data_t, cpu_tlb_invalid_local));
	DECLARE("CPU_TLB_INVALID_GLOBAL",
		offsetof(cpu_data_t, cpu_tlb_invalid_global));
	DECLARE("enaExpTrace",	enaExpTrace);
	DECLARE("enaUsrFCall",	enaUsrFCall);
	DECLARE("enaUsrPhyMp",	enaUsrPhyMp);
	DECLARE("enaDiagSCs",	enaDiagSCs);
	DECLARE("enaDiagEM",	enaDiagEM);
	DECLARE("enaNotifyEM",	enaNotifyEM);
	DECLARE("dgLock",	offsetof(struct diagWork, dgLock));
	DECLARE("dgFlags",	offsetof(struct diagWork, dgFlags));
	DECLARE("dgMisc1",	offsetof(struct diagWork, dgMisc1));
	DECLARE("dgMisc2",	offsetof(struct diagWork, dgMisc2));
	DECLARE("dgMisc3",	offsetof(struct diagWork, dgMisc3));
	DECLARE("dgMisc4",	offsetof(struct diagWork, dgMisc4));
	DECLARE("dgMisc5",	offsetof(struct diagWork, dgMisc5));

	DECLARE("INTEL_PTE_KERNEL",	INTEL_PTE_VALID|INTEL_PTE_WRITE);
	DECLARE("PDESHIFT",	PDESHIFT);
	DECLARE("PDESIZE",	PDESIZE);
	DECLARE("PTESIZE",	PTESIZE);

	DECLARE("KERNELBASEPDE",
		(LINEAR_KERNEL_ADDRESS >> PDESHIFT) *
		sizeof(pt_entry_t));

	DECLARE("TSS_ESP0",	offsetof(struct i386_tss, esp0));
	DECLARE("TSS_SS0",	offsetof(struct i386_tss, ss0));
	DECLARE("TSS_LDT",	offsetof(struct i386_tss, ldt));
	DECLARE("TSS_PDBR",	offsetof(struct i386_tss, cr3));
	DECLARE("TSS_LINK",	offsetof(struct i386_tss, back_link));

	DECLARE("K_TASK_GATE",	ACC_P|ACC_PL_K|ACC_TASK_GATE);
	DECLARE("K_TRAP_GATE",	ACC_P|ACC_PL_K|ACC_TRAP_GATE);
	DECLARE("U_TRAP_GATE",	ACC_P|ACC_PL_U|ACC_TRAP_GATE);
	DECLARE("K_INTR_GATE",	ACC_P|ACC_PL_K|ACC_INTR_GATE);
	DECLARE("U_INTR_GATE",	ACC_P|ACC_PL_U|ACC_INTR_GATE);
	DECLARE("K_TSS",	ACC_P|ACC_PL_K|ACC_TSS);

	/*
	 * usimple_lock fields
	 */
	DECLARE("USL_INTERLOCK",	offsetof(usimple_lock_data_t, interlock));

	DECLARE("INTSTACK_SIZE",	INTSTACK_SIZE);
	DECLARE("KADDR",	offsetof(struct boot_args, kaddr));
	DECLARE("KSIZE",	offsetof(struct boot_args, ksize));
	DECLARE("MEMORYMAP",	offsetof(struct boot_args, MemoryMap));
	DECLARE("DEVICETREEP",	offsetof(struct boot_args, deviceTreeP));

	DECLARE("RNT_TSC_BASE",
		offsetof(pal_rtc_nanotime_t, tsc_base));
	DECLARE("RNT_NS_BASE",
		offsetof(pal_rtc_nanotime_t, ns_base));
	DECLARE("RNT_SCALE",
		offsetof(pal_rtc_nanotime_t, scale));
	DECLARE("RNT_SHIFT",
		offsetof(pal_rtc_nanotime_t, shift));
	DECLARE("RNT_GENERATION",
		offsetof(pal_rtc_nanotime_t, generation));

	/* values from kern/timer.h */
#ifdef __LP64__
	DECLARE("TIMER_ALL",	offsetof(struct timer, all_bits));
#else
	DECLARE("TIMER_LOW",	 offsetof(struct timer, low_bits));
	DECLARE("TIMER_HIGH",	 offsetof(struct timer, high_bits));
	DECLARE("TIMER_HIGHCHK", offsetof(struct timer, high_bits_check));
#endif
	DECLARE("TIMER_TSTAMP",
		offsetof(struct timer, tstamp));

	DECLARE("THREAD_TIMER",
		offsetof(struct processor, processor_data.thread_timer));
	DECLARE("KERNEL_TIMER",
		offsetof(struct processor, processor_data.kernel_timer));
	DECLARE("SYSTEM_TIMER",
		offsetof(struct thread, system_timer));
	DECLARE("USER_TIMER",
		offsetof(struct thread, user_timer));
	DECLARE("SYSTEM_STATE",
		offsetof(struct processor, processor_data.system_state));
	DECLARE("USER_STATE",
		offsetof(struct processor, processor_data.user_state));
	DECLARE("IDLE_STATE",
		offsetof(struct processor, processor_data.idle_state));
	DECLARE("CURRENT_STATE",
		offsetof(struct processor, processor_data.current_state));

	DECLARE("OnProc", OnProc);


#if	CONFIG_DTRACE
	DECLARE("LS_LCK_MTX_LOCK_ACQUIRE", LS_LCK_MTX_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE", LS_LCK_MTX_TRY_SPIN_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_UNLOCK_RELEASE", LS_LCK_MTX_UNLOCK_RELEASE);
	DECLARE("LS_LCK_MTX_TRY_LOCK_ACQUIRE", LS_LCK_MTX_TRY_LOCK_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_ACQUIRE", LS_LCK_RW_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_RW_DONE_RELEASE", LS_LCK_RW_DONE_RELEASE);
	DECLARE("LS_LCK_MTX_EXT_LOCK_ACQUIRE", LS_LCK_MTX_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE", LS_LCK_MTX_TRY_EXT_LOCK_ACQUIRE);
	DECLARE("LS_LCK_MTX_EXT_UNLOCK_RELEASE", LS_LCK_MTX_EXT_UNLOCK_RELEASE);
	DECLARE("LS_LCK_RW_LOCK_EXCL_ACQUIRE", LS_LCK_RW_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE", LS_LCK_RW_LOCK_SHARED_TO_EXCL_UPGRADE);
	DECLARE("LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE", LS_LCK_RW_TRY_LOCK_EXCL_ACQUIRE);
	DECLARE("LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE", LS_LCK_RW_TRY_LOCK_SHARED_ACQUIRE);
	DECLARE("LS_LCK_MTX_LOCK_SPIN_ACQUIRE", LS_LCK_MTX_LOCK_SPIN_ACQUIRE);
#endif

	return (0);
}