/* osfmk/i386/mp.h (apple/xnu, xnu-1228.15.4) */
/*
 * Copyright (c) 2000-2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#ifdef KERNEL_PRIVATE

#ifndef _I386_MP_H_
#define _I386_MP_H_

#ifndef DEBUG
#include <debug.h>
#endif
//#define MP_DEBUG 1

#include <i386/apic.h>
#include <i386/mp_events.h>

#define MAX_CPUS        32              /* (8*sizeof(long)) */

#ifndef ASSEMBLER
#include <stdint.h>
#include <sys/cdefs.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/i386/thread_status.h>
#include <kern/lock.h>

__BEGIN_DECLS

extern kern_return_t intel_startCPU(int slot_num);
extern kern_return_t intel_startCPU_fast(int slot_num);
extern void i386_init_slave(void);
extern void i386_init_slave_fast(void);
extern void smp_init(void);

extern void cpu_interrupt(int cpu);
__END_DECLS

extern unsigned int real_ncpus;         /* real number of cpus */
extern unsigned int max_ncpus;          /* max number of cpus */
decl_simple_lock_data(extern, kdb_lock) /* kdb lock */

__BEGIN_DECLS

extern void console_init(void);
extern void *console_cpu_alloc(boolean_t boot_cpu);
extern void console_cpu_free(void *console_buf);

extern int kdb_cpu;             /* current cpu running kdb */
extern int kdb_debug;
extern int kdb_active[];

extern volatile boolean_t mp_kdp_trap;
extern volatile boolean_t force_immediate_debugger_NMI;
extern volatile boolean_t pmap_tlb_flush_timeout;

extern void mp_kdp_enter(void);
extern void mp_kdp_exit(void);

#if MACH_KDB
extern void mp_kdb_exit(void);
#endif

/*
 * All cpu rendezvous:
 */
extern void mp_rendezvous(
                void (*setup_func)(void *),
                void (*action_func)(void *),
                void (*teardown_func)(void *),
                void *arg);
extern void mp_rendezvous_no_intrs(
                void (*action_func)(void *),
                void *arg);
extern void mp_rendezvous_break_lock(void);

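/*
 * Usage sketch (illustrative; the action routine and its argument are
 * hypothetical): run a routine on every cpu with interrupts disabled,
 * e.g. to change state that each cpu must observe consistently.
 *
 *      static void
 *      example_rendezvous_action(void *arg)
 *      {
 *              unsigned int *generation = (unsigned int *) arg;
 *
 *              (void) *generation;     // per-cpu work goes here
 *      }
 *
 *      unsigned int gen = 1;
 *      mp_rendezvous_no_intrs(example_rendezvous_action, (void *) &gen);
 *
 * mp_rendezvous() additionally takes setup and teardown functions (either
 * may be NULL) that bracket the per-cpu action.
 */
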
/*
 * All cpu broadcast.
 * Called from thread context, this blocks until all active cpus have
 * run action_func:
 */
extern void mp_broadcast(
                void (*action_func)(void *),
                void *arg);

typedef uint32_t cpu_t;
typedef uint32_t cpumask_t;
static inline cpumask_t
cpu_to_cpumask(cpu_t cpu)
{
        return (cpu < 32) ? (1 << cpu) : 0;
}
#define CPUMASK_ALL     0xffffffff
#define CPUMASK_SELF    cpu_to_cpumask(cpu_number())
#define CPUMASK_OTHERS  (CPUMASK_ALL & ~CPUMASK_SELF)
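
/*
 * For example, cpu_to_cpumask(0) == 0x1 and cpu_to_cpumask(2) == 0x4,
 * so on cpu 0 CPUMASK_SELF == 0x1 and CPUMASK_OTHERS == 0xfffffffe;
 * cpu numbers of 32 or more map to an empty mask.
 */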

/*
 * Invoke a function (possibly NULL) on a set of cpus specified by a mask.
 * The mask may include the local cpu.
 * If the mode is:
 *      - ASYNC: other cpus make their calls in parallel.
 *      - SYNC:  the calls are performed serially in logical cpu order.
 * This call returns when the function has been run on all specified cpus.
 * The return value is the number of cpus on which the call was made.
 * The action function is called with interrupts disabled.
 */
extern cpu_t mp_cpus_call(
                cpumask_t       cpus,
                mp_sync_t       mode,
                void            (*action_func)(void *),
                void            *arg);

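/*
 * Usage sketch (illustrative; the action routine is hypothetical and the
 * ASYNC enumerator is assumed to be one of the mp_sync_t values from
 * <i386/mp_events.h>):
 *
 *      static void
 *      example_call_action(void *arg)
 *      {
 *              // runs on each targeted cpu with interrupts disabled
 *              (void) arg;
 *      }
 *
 *      cpu_t ncalled;
 *
 *      ncalled = mp_cpus_call(CPUMASK_OTHERS, ASYNC,
 *                             example_call_action, NULL);
 */
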
/*
 * Power-management-specific SPI to:
 * - register a callout function, and
 * - request the callout (if registered) on a given cpu.
 */
extern void PM_interrupt_register(void (*fn)(void));
extern void cpu_PM_interrupt(int cpu);

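/*
 * Usage sketch (illustrative; the callout name is hypothetical): register
 * the callout once, then request it on a specific cpu as needed.
 *
 *      static void
 *      example_pm_callout(void)
 *      {
 *              // power-management work on the interrupted cpu
 *      }
 *
 *      PM_interrupt_register(example_pm_callout);
 *      ...
 *      cpu_PM_interrupt(1);    // request the callout on cpu 1
 */
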
__END_DECLS

#if MP_DEBUG
typedef struct {
        uint64_t        time;
        int             cpu;
        mp_event_t      event;
} cpu_signal_event_t;

#define LOG_NENTRIES    100
typedef struct {
        uint64_t                count[MP_LAST];
        int                     next_entry;
        cpu_signal_event_t      entry[LOG_NENTRIES];
} cpu_signal_event_log_t;

extern cpu_signal_event_log_t   *cpu_signal[];
extern cpu_signal_event_log_t   *cpu_handle[];

#define DBGLOG(log,_cpu,_event) {                                       \
        boolean_t               spl = ml_set_interrupts_enabled(FALSE); \
        cpu_signal_event_log_t  *logp = log[cpu_number()];              \
        int                     next = logp->next_entry;                \
        cpu_signal_event_t      *eventp = &logp->entry[next];           \
                                                                        \
        logp->count[_event]++;                                          \
                                                                        \
        eventp->time = rdtsc64();                                       \
        eventp->cpu = _cpu;                                             \
        eventp->event = _event;                                         \
        if (next == (LOG_NENTRIES - 1))                                 \
                logp->next_entry = 0;                                   \
        else                                                            \
                logp->next_entry++;                                     \
                                                                        \
        (void) ml_set_interrupts_enabled(spl);                          \
}

#define DBGLOG_CPU_INIT(cpu) {                                          \
        cpu_signal_event_log_t  **sig_logpp = &cpu_signal[cpu];         \
        cpu_signal_event_log_t  **hdl_logpp = &cpu_handle[cpu];         \
                                                                        \
        if (*sig_logpp == NULL &&                                       \
            kmem_alloc(kernel_map,                                      \
                       (vm_offset_t *) sig_logpp,                       \
                       sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS) \
                panic("DBGLOG_CPU_INIT cpu_signal allocation failed\n");\
        bzero(*sig_logpp, sizeof(cpu_signal_event_log_t));              \
        if (*hdl_logpp == NULL &&                                       \
            kmem_alloc(kernel_map,                                      \
                       (vm_offset_t *) hdl_logpp,                       \
                       sizeof(cpu_signal_event_log_t)) != KERN_SUCCESS) \
                panic("DBGLOG_CPU_INIT cpu_handle allocation failed\n");\
        bzero(*hdl_logpp, sizeof(cpu_signal_event_log_t));              \
}
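
/*
 * Usage sketch (illustrative; MP_TLB_FLUSH is assumed to be one of the
 * mp_event_t values from <i386/mp_events.h>, and target_cpu/source_cpu
 * are hypothetical variables): each cpu initializes its logs once, then
 * senders and handlers record events into the per-cpu ring buffers:
 *
 *      DBGLOG_CPU_INIT(cpu_number());
 *      DBGLOG(cpu_signal, target_cpu, MP_TLB_FLUSH);   // when signalling
 *      DBGLOG(cpu_handle, source_cpu, MP_TLB_FLUSH);   // when handling
 */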
#else   /* MP_DEBUG */
#define DBGLOG(log,_cpu,_event)
#define DBGLOG_CPU_INIT(cpu)
#endif  /* MP_DEBUG */

#endif  /* ASSEMBLER */

#define i_bit(bit, word)        ((long)(*(word)) & ((long)1 << (bit)))

/*
 * Device driver synchronization.
 *
 * at386_io_lock(op) and at386_io_unlock() are called by device drivers
 * when accessing H/W. The underlying processing is machine dependent,
 * but the op argument to at386_io_lock is generic.
 */

#define MP_DEV_OP_MAX   4
#define MP_DEV_WAIT     MP_DEV_OP_MAX   /* Wait for the lock */

/*
 * If the caller specifies an op value other than MP_DEV_WAIT,
 * at386_io_lock must return true if the lock was acquired and
 * false otherwise; see the usage sketch after the op definitions.
 */

#define MP_DEV_OP_START 0       /* If lock busy, register a pending start op */
#define MP_DEV_OP_INTR  1       /* If lock busy, register a pending intr */
#define MP_DEV_OP_TIMEO 2       /* If lock busy, register a pending timeout */
#define MP_DEV_OP_CALLB 3       /* If lock busy, register a pending callback */

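/*
 * Usage sketch (illustrative only: this header does not declare the
 * at386_io_lock/at386_io_unlock prototypes, so the boolean return shown
 * here is an assumption based on the comments above):
 *
 *      if (at386_io_lock(MP_DEV_OP_START)) {
 *              // lock obtained: touch the hardware
 *              at386_io_unlock();
 *      } else {
 *              // lock busy: a pending start op has been registered
 *      }
 *
 *      at386_io_lock(MP_DEV_WAIT);     // or block until the lock is held
 *      at386_io_unlock();
 */
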
#if MACH_RT
#define _DISABLE_PREEMPTION                                     \
        incl    %gs:CPU_PREEMPTION_LEVEL

#define _ENABLE_PREEMPTION                                      \
        decl    %gs:CPU_PREEMPTION_LEVEL                ;       \
        jne     9f                                      ;       \
        pushl   %eax                                    ;       \
        pushl   %ecx                                    ;       \
        pushl   %edx                                    ;       \
        call    EXT(kernel_preempt_check)               ;       \
        popl    %edx                                    ;       \
        popl    %ecx                                    ;       \
        popl    %eax                                    ;       \
9:

#define _ENABLE_PREEMPTION_NO_CHECK                             \
        decl    %gs:CPU_PREEMPTION_LEVEL

#if MACH_ASSERT
#define DISABLE_PREEMPTION                                      \
        pushl   %eax;                                           \
        pushl   %ecx;                                           \
        pushl   %edx;                                           \
        call    EXT(_disable_preemption);                       \
        popl    %edx;                                           \
        popl    %ecx;                                           \
        popl    %eax
#define ENABLE_PREEMPTION                                       \
        pushl   %eax;                                           \
        pushl   %ecx;                                           \
        pushl   %edx;                                           \
        call    EXT(_enable_preemption);                        \
        popl    %edx;                                           \
        popl    %ecx;                                           \
        popl    %eax
#define ENABLE_PREEMPTION_NO_CHECK                              \
        pushl   %eax;                                           \
        pushl   %ecx;                                           \
        pushl   %edx;                                           \
        call    EXT(_enable_preemption_no_check);               \
        popl    %edx;                                           \
        popl    %ecx;                                           \
        popl    %eax
#define MP_DISABLE_PREEMPTION                                   \
        pushl   %eax;                                           \
        pushl   %ecx;                                           \
        pushl   %edx;                                           \
        call    EXT(_mp_disable_preemption);                    \
        popl    %edx;                                           \
        popl    %ecx;                                           \
        popl    %eax
#define MP_ENABLE_PREEMPTION                                    \
        pushl   %eax;                                           \
        pushl   %ecx;                                           \
        pushl   %edx;                                           \
        call    EXT(_mp_enable_preemption);                     \
        popl    %edx;                                           \
        popl    %ecx;                                           \
        popl    %eax
#define MP_ENABLE_PREEMPTION_NO_CHECK                           \
        pushl   %eax;                                           \
        pushl   %ecx;                                           \
        pushl   %edx;                                           \
        call    EXT(_mp_enable_preemption_no_check);            \
        popl    %edx;                                           \
        popl    %ecx;                                           \
        popl    %eax
#else   /* MACH_ASSERT */
#define DISABLE_PREEMPTION              _DISABLE_PREEMPTION
#define ENABLE_PREEMPTION               _ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK      _ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION           _DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION            _ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK   _ENABLE_PREEMPTION_NO_CHECK
#endif  /* MACH_ASSERT */

#else   /* MACH_RT */
#define DISABLE_PREEMPTION
#define ENABLE_PREEMPTION
#define ENABLE_PREEMPTION_NO_CHECK
#define MP_DISABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION
#define MP_ENABLE_PREEMPTION_NO_CHECK
#endif  /* MACH_RT */

#endif  /* _I386_MP_H_ */

#endif  /* KERNEL_PRIVATE */