/*
 * Copyright (c) 2000-2012 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

/*
 * kern/ast.h: Definitions for Asynchronous System Traps.
 */

#ifndef _KERN_AST_H_
#define _KERN_AST_H_


#include <kern/assert.h>
#include <kern/macro_help.h>
#include <kern/spl.h>

/*
 * A processor detects an AST when it is about to return from an
 * interrupt context, and calls ast_taken_kernel or ast_taken_user
 * depending on whether it was returning from userspace or kernelspace.
 *
 * Machine-dependent code is responsible for maintaining
 * a set of reasons for an AST.
 */
typedef uint32_t ast_t;

/*
 * When returning from interrupt/trap context to kernel mode,
 * if AST_URGENT is set, then ast_taken_kernel is called, for
 * instance to effect preemption of a kernel thread by a realtime
 * thread.
 *
 * This is also done when re-enabling preemption or re-enabling
 * interrupts, since an AST may have been set while preemption
 * was disabled, and it should take effect as soon as possible.
 *
 * When returning from interrupt/trap/syscall context to user
 * mode, any and all ASTs that are pending should be handled by
 * calling ast_taken_user.
 *
 * If a thread context switches, only ASTs not in AST_PER_THREAD
 * remain active. The per-thread ASTs are stored in the thread_t
 * and re-enabled when the thread context switches back.
 *
 * Typically the preemption ASTs are set as a result of threads
 * becoming runnable, threads changing priority, or quantum
 * expiration. If a thread becomes runnable and is chosen
 * to run on another processor, cause_ast_check() may be called
 * to IPI that processor and request csw_check() be run there.
 */

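/*
 * Illustrative sketch only, not part of this interface: machine-dependent
 * trap-return code typically follows a pattern like the one below. The
 * "returning_to_user" predicate is hypothetical, and the exact ordering and
 * interrupt/preemption handling vary per architecture.
 *
 *	if (returning_to_user) {
 *		if (*ast_pending() != AST_NONE)
 *			ast_taken_user();
 *	} else {
 *		if (*ast_pending() & AST_URGENT)
 *			ast_taken_kernel();
 *	}
 */
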
/*
 * Bits for reasons
 * TODO: Split the context switch and return-to-user AST namespaces
 * NOTE: Some of these are exported as the 'reason' code in scheduler tracepoints
 */
#define AST_PREEMPT             0x01
#define AST_QUANTUM             0x02
#define AST_URGENT              0x04
#define AST_HANDOFF             0x08
#define AST_YIELD               0x10
#define AST_APC                 0x20            /* migration APC hook */
#define AST_LEDGER              0x40
#define AST_BSD                 0x80
#define AST_KPERF               0x100           /* kernel profiling */
#define AST_MACF                0x200           /* MACF user ret pending */
/* 0x400, 0x800 unused */
#define AST_GUARD               0x1000
#define AST_TELEMETRY_USER      0x2000          /* telemetry sample requested on interrupt from userspace */
#define AST_TELEMETRY_KERNEL    0x4000          /* telemetry sample requested on interrupt from kernel */
#define AST_TELEMETRY_PMI       0x8000          /* telemetry sample requested on PMI */
#define AST_SFI                 0x10000         /* evaluate if SFI wait is needed before return to userspace */
#define AST_DTRACE              0x20000
#define AST_TELEMETRY_IO        0x40000         /* telemetry sample requested for I/O */
#define AST_KEVENT              0x80000
#define AST_REBALANCE           0x100000        /* thread context switched due to rebalancing */
#define AST_UNQUIESCE           0x200000        /* catch unquiesced processor before returning to userspace */

#define AST_NONE                0x00
#define AST_ALL                 (~AST_NONE)

#define AST_SCHEDULING          (AST_PREEMPTION | AST_YIELD | AST_HANDOFF)
#define AST_PREEMPTION          (AST_PREEMPT | AST_QUANTUM | AST_URGENT)

#define AST_TELEMETRY_ALL       (AST_TELEMETRY_USER | AST_TELEMETRY_KERNEL | \
                                 AST_TELEMETRY_PMI | AST_TELEMETRY_IO)

/* Per-thread ASTs follow the thread at context-switch time. */
#define AST_PER_THREAD          (AST_APC | AST_BSD | AST_MACF | AST_LEDGER | AST_GUARD | AST_TELEMETRY_ALL | AST_KEVENT)

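/*
 * Illustrative sketch only: at context-switch time the per-thread subset of
 * the pending reasons conceptually moves with the outgoing thread and is
 * restored when a thread's context is re-established. The variable names
 * below are hypothetical; the real logic lives in the scheduler/AST code.
 *
 *	ast_t pending = *ast_pending();
 *	thread_ast_set(old_thread, pending & AST_PER_THREAD);
 *	ast_off(pending & AST_PER_THREAD);
 *	...
 *	ast_context(new_thread);
 */
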
/* Handle AST_URGENT detected while in the kernel */
extern void ast_taken_kernel(void);

/* Handle an AST flag set while returning to user mode (may continue via thread_exception_return) */
extern void ast_taken_user(void);

/* Check for pending ASTs */
extern void ast_check(processor_t processor);

/* Pending AST mask for the current processor */
extern ast_t *ast_pending(void);

/* Set AST flags on current processor */
extern void ast_on(ast_t reasons);

/* Clear AST flags on current processor */
extern void ast_off(ast_t reasons);

/* Consume specified AST flags from current processor */
extern ast_t ast_consume(ast_t reasons);

/* Read specified AST flags from current processor */
extern ast_t ast_peek(ast_t reasons);

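/*
 * Illustrative sketch only: a return-to-user handler might atomically claim
 * specific reasons before acting on them, for example
 *
 *	if (ast_consume(AST_BSD) & AST_BSD)
 *		bsd_ast(current_thread());
 *
 * whereas ast_peek() reads the flags without clearing them.
 */
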
/* Re-set current processor's per-thread AST flags to those set on thread */
extern void ast_context(thread_t thread);

/* Propagate ASTs set on a thread to the current processor */
extern void ast_propagate(thread_t thread);

/*
 * Set an AST on a thread with thread_ast_set.
 *
 * You can then propagate it to the current processor with ast_propagate(),
 * or tell another processor to act on it with cause_ast_check().
 *
 * See act_set_ast() for an example.
 */
#define thread_ast_set(act, reason)     (hw_atomic_or_noret(&(act)->ast, (reason)))
#define thread_ast_clear(act, reason)   (hw_atomic_and_noret(&(act)->ast, ~(reason)))

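/*
 * Illustrative sketch only, loosely modeled on act_set_ast(): set the reason
 * on the thread, then either fold it into the current processor's pending
 * set or poke the processor the thread is running on. Locking and the
 * "thread is currently running on that processor" test are paraphrased here.
 *
 *	thread_ast_set(thread, AST_BSD);
 *	if (thread == current_thread()) {
 *		ast_propagate(thread);
 *	} else {
 *		processor_t processor = thread->last_processor;
 *		if (processor != PROCESSOR_NULL &&
 *		    processor->active_thread == thread)
 *			cause_ast_check(processor);
 *	}
 */
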
#ifdef MACH_BSD

extern void act_set_astbsd(thread_t);
extern void bsd_ast(thread_t);

#endif /* MACH_BSD */

#ifdef CONFIG_DTRACE
extern void ast_dtrace_on(void);
extern void dtrace_ast(void);
#endif /* CONFIG_DTRACE */

extern void kevent_ast(thread_t thread, uint16_t bits);
extern void act_set_astkevent(thread_t thread, uint16_t bits);

#endif /* _KERN_AST_H_ */