/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/ast.h>
#include <kern/locks.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_pageout.h>

#if defined(__x86_64__) && CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

#include <kern/hv_support.h>

int hv_support_available = 0;

/* callbacks for tasks/threads with associated hv objects */
hv_callbacks_t hv_callbacks = {
	.dispatch = NULL,       /* thread is being dispatched for execution */
	.preempt = NULL,        /* thread is being preempted */
	.suspend = NULL,        /* system is being suspended */
	.thread_destroy = NULL, /* thread is being destroyed */
	.task_destroy = NULL,   /* task is being destroyed */
	.volatile_state = NULL, /* thread state is becoming volatile */
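	/* members not named here (e.g. memory_pressure) are implicitly NULL */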
};

/* trap tables for hv_*_trap syscalls */
static hv_trap_table_t hv_trap_table[] = {
	[HV_TASK_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	},
	[HV_THREAD_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	}
};

static int hv_callbacks_enabled = 0;
static lck_grp_t *hv_support_lck_grp = NULL;
static lck_mtx_t *hv_support_lck_mtx = NULL;

/* hv_support boot initialization */
void
hv_support_init(void)
{
#if defined(__x86_64__) && CONFIG_VMX
	hv_support_available = vmx_hv_support();
#endif

	hv_support_lck_grp = lck_grp_alloc_init("hv_support", LCK_GRP_ATTR_NULL);
	assert(hv_support_lck_grp);

	hv_support_lck_mtx = lck_mtx_alloc_init(hv_support_lck_grp, LCK_ATTR_NULL);
	assert(hv_support_lck_mtx);
}

/* returns true if hv_support is available on this machine */
int
hv_get_support(void)
{
	return hv_support_available;
}

/* associate an hv object with the current task */
void
hv_set_task_target(void *target)
{
	current_task()->hv_task_target = target;
}

/* associate an hv object with the current thread */
void
hv_set_thread_target(void *target)
{
	current_thread()->hv_thread_target = target;
}

/* get hv object associated with the current task */
void*
hv_get_task_target(void)
{
	return current_task()->hv_task_target;
}

/* get hv object associated with the current thread */
void*
hv_get_thread_target(void)
{
	return current_thread()->hv_thread_target;
}

/* test if a given thread state may be volatile between dispatch
 * and preemption */
int
hv_get_volatile_state(hv_volatile_state_t state)
{
	int is_volatile = 0;

#if (defined(__x86_64__))
	if (state == HV_DEBUG_STATE) {
		is_volatile = (current_thread()->machine.ids != NULL);
	}
#endif

	return is_volatile;
}

/* register a list of trap handlers for the hv_*_trap syscalls */
kern_return_t
hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
    unsigned trap_count)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(hv_support_lck_mtx);
	if (trap_table->trap_count == 0) {
		trap_table->traps = traps;
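		/* publish the trap array before the count becomes visible:
		 * HV_TRAP_DISPATCH reads trap_count and then traps without
		 * taking hv_support_lck_mtx */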
		OSMemoryBarrier();
		trap_table->trap_count = trap_count;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}

/* release hv_*_trap traps */
void
hv_release_traps(hv_trap_type_t trap_type)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];

	lck_mtx_lock(hv_support_lck_mtx);
	trap_table->trap_count = 0;
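	/* zero the count before clearing the pointer so a concurrent
	 * dispatch fails the bounds check instead of dereferencing a
	 * NULL trap table */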
	OSMemoryBarrier();
	trap_table->traps = NULL;
	lck_mtx_unlock(hv_support_lck_mtx);
}

/* register callbacks for task/thread events on tasks/threads that have
 * associated hv objects */
kern_return_t
hv_set_callbacks(hv_callbacks_t callbacks)
{
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(hv_support_lck_mtx);
	if (hv_callbacks_enabled == 0) {
		hv_callbacks = callbacks;
		hv_callbacks_enabled = 1;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}

/* release callbacks for task/thread events */
void
hv_release_callbacks(void)
{
	lck_mtx_lock(hv_support_lck_mtx);
	hv_callbacks = (hv_callbacks_t) {
		.dispatch = NULL,
		.preempt = NULL,
		.suspend = NULL,
		.thread_destroy = NULL,
		.task_destroy = NULL,
		.volatile_state = NULL,
		.memory_pressure = NULL
	};

	hv_callbacks_enabled = 0;
	lck_mtx_unlock(hv_support_lck_mtx);
}

/* system suspend notification */
void
hv_suspend(void)
{
	if (hv_callbacks_enabled) {
		hv_callbacks.suspend();
	}
}

/* dispatch hv_task_trap/hv_thread_trap syscalls to the registered trap
 * handlers; fail for an invalid index or if no handlers are registered.
 * The trap handler itself is responsible for validating its target. */
#define HV_TRAP_DISPATCH(type, index, target, argument) \
	((__probable(index < hv_trap_table[type].trap_count)) ? \
	    hv_trap_table[type].traps[index](target, argument) \
	    : KERN_INVALID_ARGUMENT)

kern_return_t
hv_task_trap(uint64_t index, uint64_t arg)
{
	return HV_TRAP_DISPATCH(HV_TASK_TRAP, index, hv_get_task_target(), arg);
}

kern_return_t
hv_thread_trap(uint64_t index, uint64_t arg)
{
	return HV_TRAP_DISPATCH(HV_THREAD_TRAP, index, hv_get_thread_target(), arg);
}

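/* returns whether an AST_APC or AST_BSD AST is pending on the current CPU */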
boolean_t
hv_ast_pending(void)
{
	return current_cpu_datap()->cpu_pending_ast & (AST_APC | AST_BSD);
}