/*
 * Source: apple/xnu — osfmk/kern/hv_support_kext.c (blob ca9054202bfd)
 */
1 /*
2 * Copyright (c) 2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kern/ast.h>
30 #include <kern/locks.h>
31 #include <kern/task.h>
32 #include <kern/thread.h>
33 #include <libkern/OSAtomic.h>
34 #include <vm/vm_pageout.h>
35 #include <mach/sdt.h>
36
37 #if defined(__x86_64__) && CONFIG_VMX
38 #include <i386/vmx/vmx_cpu.h>
39 #endif
40
41 #include <kern/hv_support.h>
42
/* non-zero when hardware virtualization support was detected at boot
 * (set once by hv_support_init, reported by hv_get_support) */
int hv_support_available = 0;

/* kill switch for hv support.
 * NOTE(review): not read anywhere in this file — presumably consulted by
 * platform hv code elsewhere; confirm before relying on its semantics. */
int hv_disable = 0;

/* callbacks for tasks/threads with associated hv objects; installed by
 * hv_set_callbacks, cleared by hv_release_callbacks */
hv_callbacks_t hv_callbacks = {
	.dispatch = NULL, /* thread is being dispatched for execution */
	.preempt = NULL, /* thread is being preempted */
	.suspend = NULL, /* system is being suspended */
	.thread_destroy = NULL, /* thread is being destroyed */
	.task_destroy = NULL, /* task is being destroyed */
	.volatile_state = NULL, /* thread state is becoming volatile */
};
56
/* trap tables for hv_*_trap syscalls; a table is considered live only
 * while trap_count is non-zero (see hv_set_traps / hv_release_traps,
 * read lock-free by HV_TRAP_DISPATCH) */
static hv_trap_table_t hv_trap_table[] = {
	[HV_TASK_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	},
	[HV_THREAD_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	}
};

/* non-zero while a client owns hv_callbacks; guarded by hv_support_lck_mtx */
static int hv_callbacks_enabled = 0;
/* mutex serializing trap-table and callback registration/release */
static LCK_GRP_DECLARE(hv_support_lck_grp, "hv_support");
static LCK_MTX_DECLARE(hv_support_lck_mtx, &hv_support_lck_grp);
72
/* hv_support boot initialization: probe the hardware for virtualization
 * support; on configurations without a probe (non-x86 or !CONFIG_VMX)
 * hv_support_available keeps its initial value of 0 */
void
hv_support_init(void)
{
#if defined(__x86_64__) && CONFIG_VMX
	hv_support_available = vmx_hv_support();
#endif
}
81
82 /* returns true if hv_support is available on this machine */
83 int
84 hv_get_support(void)
85 {
86 return hv_support_available;
87 }
88
89 /* associate an hv object with the current task */
90 void
91 hv_set_task_target(void *target)
92 {
93 current_task()->hv_task_target = target;
94 }
95
96 /* associate an hv object with the current thread */
97 void
98 hv_set_thread_target(void *target)
99 {
100 current_thread()->hv_thread_target = target;
101 }
102
103 /* get hv object associated with the current task */
104 void*
105 hv_get_task_target(void)
106 {
107 return current_task()->hv_task_target;
108 }
109
110 /* get hv object associated with the current thread */
111 void*
112 hv_get_thread_target(void)
113 {
114 return current_thread()->hv_thread_target;
115 }
116
117 /* test if a given thread state may be volatile between dispatch
118 * and preemption */
119 int
120 hv_get_volatile_state(hv_volatile_state_t state)
121 {
122 int is_volatile = 0;
123
124 #if (defined(__x86_64__))
125 if (state == HV_DEBUG_STATE) {
126 is_volatile = (current_thread()->machine.ids != NULL);
127 }
128 #endif
129
130 return is_volatile;
131 }
132
133 /* register a list of trap handlers for the hv_*_trap syscalls */
134 kern_return_t
135 hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
136 unsigned trap_count)
137 {
138 hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
139 kern_return_t kr = KERN_FAILURE;
140
141 lck_mtx_lock(&hv_support_lck_mtx);
142 if (trap_table->trap_count == 0) {
143 trap_table->traps = traps;
144 OSMemoryBarrier();
145 trap_table->trap_count = trap_count;
146 kr = KERN_SUCCESS;
147 }
148 lck_mtx_unlock(&hv_support_lck_mtx);
149
150 return kr;
151 }
152
153 /* release hv_*_trap traps */
154 void
155 hv_release_traps(hv_trap_type_t trap_type)
156 {
157 hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
158
159 lck_mtx_lock(&hv_support_lck_mtx);
160 trap_table->trap_count = 0;
161 OSMemoryBarrier();
162 trap_table->traps = NULL;
163 lck_mtx_unlock(&hv_support_lck_mtx);
164 }
165
166 /* register callbacks for certain task/thread events for tasks/threads with
167 * associated hv objects */
168 kern_return_t
169 hv_set_callbacks(hv_callbacks_t callbacks)
170 {
171 kern_return_t kr = KERN_FAILURE;
172
173 lck_mtx_lock(&hv_support_lck_mtx);
174 if (hv_callbacks_enabled == 0) {
175 hv_callbacks = callbacks;
176 hv_callbacks_enabled = 1;
177 kr = KERN_SUCCESS;
178 }
179 lck_mtx_unlock(&hv_support_lck_mtx);
180
181 return kr;
182 }
183
184 /* release callbacks for task/thread events */
185 void
186 hv_release_callbacks(void)
187 {
188 lck_mtx_lock(&hv_support_lck_mtx);
189 hv_callbacks = (hv_callbacks_t) {
190 .dispatch = NULL,
191 .preempt = NULL,
192 .suspend = NULL,
193 .thread_destroy = NULL,
194 .task_destroy = NULL,
195 .volatile_state = NULL
196 };
197
198 hv_callbacks_enabled = 0;
199 lck_mtx_unlock(&hv_support_lck_mtx);
200 }
201
/* system suspend notification: forward to the registered suspend callback,
 * if any.
 * NOTE(review): hv_callbacks_enabled is read without hv_support_lck_mtx;
 * a concurrent hv_release_callbacks() could NULL the callback between the
 * check and the call — presumably suspend cannot race with callback
 * release in practice, but confirm against the callers. */
void
hv_suspend(void)
{
	if (hv_callbacks_enabled) {
		hv_callbacks.suspend();
	}
}
210
/* dispatch hv_task_trap/hv_thread_trap syscalls to trap handlers,
 * fail for invalid index or absence of trap handlers, trap handler is
 * responsible for validating targets.
 * Lock-free read path: an unregistered table has trap_count == 0, so the
 * bounds check also rejects dispatch while no handlers are installed
 * (ordering provided by the OSMemoryBarrier in hv_set_traps/
 * hv_release_traps). */
#define HV_TRAP_DISPATCH(type, index, target, argument) \
	((__probable(index < hv_trap_table[type].trap_count)) ? \
	    hv_trap_table[type].traps[index](target, argument) \
	    : KERN_INVALID_ARGUMENT)

/* hv_task_trap syscall: run task-level handler `index` against the hv
 * object of the current task (may be NULL; handler validates) */
kern_return_t
hv_task_trap(uint64_t index, uint64_t arg)
{
	return HV_TRAP_DISPATCH(HV_TASK_TRAP, index, hv_get_task_target(), arg);
}

/* hv_thread_trap syscall: run thread-level handler `index` against the hv
 * object of the current thread (may be NULL; handler validates) */
kern_return_t
hv_thread_trap(uint64_t index, uint64_t arg)
{
	return HV_TRAP_DISPATCH(HV_THREAD_TRAP, index, hv_get_thread_target(), arg);
}
230
231 boolean_t
232 hv_ast_pending(void)
233 {
234 return current_cpu_datap()->cpu_pending_ast != 0;
235 }
236
/* port notification entry point; hv port notifications are not supported
 * in this configuration, so any call is a fatal error */
void __attribute__((__noreturn__))
hv_port_notify(mach_msg_header_t *msg __unused)
{
	panic("%s: not supported in this configuration", __func__);
}
242
/* emit a DTrace probe on guest vCPU entry (vcpu_regs semantics are
 * defined by the probe consumer, not here) */
void
hv_trace_guest_enter(uint32_t vcpu_id, uint64_t *vcpu_regs)
{
	DTRACE_HV2(guest__enter, uint32_t, vcpu_id, uint64_t *, vcpu_regs);
}
248
/* emit a DTrace probe on guest vCPU exit (mirrors hv_trace_guest_enter) */
void
hv_trace_guest_exit(uint32_t vcpu_id, uint64_t *vcpu_regs)
{
	DTRACE_HV2(guest__exit, uint32_t, vcpu_id, uint64_t *, vcpu_regs);
}