[apple/xnu.git] / osfmk / kern / hv_support_kext.c (xnu-7195.101.1)

/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/ast.h>
#include <kern/locks.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_pageout.h>
#include <mach/sdt.h>
#include <sys/kdebug.h>

#if defined(__x86_64__) && CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

#include <kern/hv_support.h>

int hv_support_available = 0;

int hv_disable = 0;

/* callbacks for tasks/threads with associated hv objects */
hv_callbacks_t hv_callbacks = {
	.dispatch = NULL,        /* thread is being dispatched for execution */
	.preempt = NULL,         /* thread is being preempted */
	.suspend = NULL,         /* system is being suspended */
	.thread_destroy = NULL,  /* thread is being destroyed */
	.task_destroy = NULL,    /* task is being destroyed */
	.volatile_state = NULL,  /* thread state is becoming volatile */
	.resume = NULL,          /* system is being resumed */
	.memory_pressure = NULL, /* (unused) */
};

/* trap tables for hv_*_trap syscalls */
static hv_trap_table_t hv_trap_table[] = {
	[HV_TASK_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	},
	[HV_THREAD_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	}
};

static int hv_callbacks_enabled = 0;
static LCK_GRP_DECLARE(hv_support_lck_grp, "hv_support");
static LCK_MTX_DECLARE(hv_support_lck_mtx, &hv_support_lck_grp);

/* hv_support boot initialization */
void
hv_support_init(void)
{
#if defined(__x86_64__) && CONFIG_VMX
	hv_support_available = vmx_hv_support();
#endif
}

/* returns true if hv_support is available on this machine */
int
hv_get_support(void)
{
	return hv_support_available;
}

/* associate an hv object with the current task */
void
hv_set_task_target(void *target)
{
	current_task()->hv_task_target = target;
}

/* associate an hv object with the current thread */
void
hv_set_thread_target(void *target)
{
	current_thread()->hv_thread_target = target;
}

/* get hv object associated with the current task */
void*
hv_get_task_target(void)
{
	return current_task()->hv_task_target;
}

/* get hv object associated with the current thread */
void*
hv_get_thread_target(void)
{
	return current_thread()->hv_thread_target;
}

/* test if a given thread state may be volatile between dispatch
 * and preemption */
int
hv_get_volatile_state(hv_volatile_state_t state)
{
	int is_volatile = 0;

#if (defined(__x86_64__))
	if (state == HV_DEBUG_STATE) {
		is_volatile = (current_thread()->machine.ids != NULL);
	}
#endif

	return is_volatile;
}

/* register a list of trap handlers for the hv_*_trap syscalls */
kern_return_t
hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
    unsigned trap_count)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(&hv_support_lck_mtx);
	if (trap_table->trap_count == 0) {
		trap_table->traps = traps;
		OSMemoryBarrier();
		trap_table->trap_count = trap_count;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(&hv_support_lck_mtx);

	return kr;
}

/* release hv_*_trap traps */
void
hv_release_traps(hv_trap_type_t trap_type)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];

	lck_mtx_lock(&hv_support_lck_mtx);
	trap_table->trap_count = 0;
	OSMemoryBarrier();
	trap_table->traps = NULL;
	lck_mtx_unlock(&hv_support_lck_mtx);
}
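
/*
 * Usage sketch (illustrative only, not part of xnu): a hypervisor kext
 * would typically publish its handlers once with hv_set_traps() and tear
 * them down with hv_release_traps() on unload. The handler and table names
 * below are hypothetical; the sketch assumes hv_trap_t is a pointer to a
 * function of the form kern_return_t (*)(void *target, uint64_t arg),
 * consistent with the HV_TRAP_DISPATCH() expansion later in this file.
 *
 *	static kern_return_t
 *	example_task_trap(void *target, uint64_t arg)
 *	{
 *		// target is the hv object set via hv_set_task_target()
 *		return (target != NULL) ? KERN_SUCCESS : KERN_INVALID_ARGUMENT;
 *	}
 *
 *	static const hv_trap_t example_task_traps[] = {
 *		example_task_trap,	// reachable as hv_task_trap(0, arg)
 *	};
 *
 *	// kext start: returns KERN_FAILURE if a table is already registered
 *	kern_return_t kr = hv_set_traps(HV_TASK_TRAP, example_task_traps,
 *	    sizeof(example_task_traps) / sizeof(example_task_traps[0]));
 *
 *	// kext stop:
 *	hv_release_traps(HV_TASK_TRAP);
 */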

/* register callbacks for certain task/thread events for tasks/threads with
 * associated hv objects */
kern_return_t
hv_set_callbacks(hv_callbacks_t callbacks)
{
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(&hv_support_lck_mtx);
	if (hv_callbacks_enabled == 0) {
		hv_callbacks = callbacks;
		hv_callbacks_enabled = 1;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(&hv_support_lck_mtx);

	return kr;
}

/* release callbacks for task/thread events */
void
hv_release_callbacks(void)
{
	lck_mtx_lock(&hv_support_lck_mtx);
	hv_callbacks = (hv_callbacks_t) {
		.dispatch = NULL,
		.preempt = NULL,
		.suspend = NULL,
		.thread_destroy = NULL,
		.task_destroy = NULL,
		.volatile_state = NULL,
		.resume = NULL,
	};

	hv_callbacks_enabled = 0;
	lck_mtx_unlock(&hv_support_lck_mtx);
}
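
/*
 * Usage sketch (illustrative only, not part of xnu): callbacks are
 * installed once and released on unload. Note that hv_resume() below
 * checks .resume for NULL before calling it, while hv_suspend() invokes
 * .suspend unconditionally once callbacks are enabled, so .suspend should
 * always be provided. The handler name is hypothetical; .suspend and
 * .resume are called with no arguments, as in hv_suspend()/hv_resume()
 * below.
 *
 *	static void
 *	example_hv_suspend(void)
 *	{
 *		// quiesce vCPUs before the system sleeps
 *	}
 *
 *	kern_return_t kr = hv_set_callbacks((hv_callbacks_t) {
 *		.suspend = example_hv_suspend,
 *	});
 *	...
 *	hv_release_callbacks();
 */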

/* system suspend notification */
void
hv_suspend(void)
{
	if (hv_callbacks_enabled) {
		hv_callbacks.suspend();
	}
}

/* system resume notification */
void
hv_resume(void)
{
	if (hv_callbacks_enabled && hv_callbacks.resume) {
		hv_callbacks.resume();
	}
}

/* dispatch hv_task_trap/hv_thread_trap syscalls to trap handlers,
 * fail for invalid index or absence of trap handlers, trap handler is
 * responsible for validating targets */
#define HV_TRAP_DISPATCH(type, index, target, argument) \
	((__probable(index < hv_trap_table[type].trap_count)) ? \
	hv_trap_table[type].traps[index](target, argument) \
	: KERN_INVALID_ARGUMENT)

kern_return_t
hv_task_trap(uint64_t index, uint64_t arg)
{
	return HV_TRAP_DISPATCH(HV_TASK_TRAP, index, hv_get_task_target(), arg);
}

kern_return_t
hv_thread_trap(uint64_t index, uint64_t arg)
{
	return HV_TRAP_DISPATCH(HV_THREAD_TRAP, index, hv_get_thread_target(), arg);
}
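
/*
 * Example (illustrative): with a task trap table registered via
 * hv_set_traps(HV_TASK_TRAP, ...), a call such as hv_task_trap(0, arg)
 * expands through HV_TRAP_DISPATCH() to
 *
 *	hv_trap_table[HV_TASK_TRAP].traps[0](hv_get_task_target(), arg)
 *
 * and returns KERN_INVALID_ARGUMENT when index >= trap_count, which also
 * covers the case where no table is registered (trap_count == 0).
 */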

boolean_t
hv_ast_pending(void)
{
	return current_cpu_datap()->cpu_pending_ast != 0;
}

void __attribute__((__noreturn__))
hv_port_notify(mach_msg_header_t *msg __unused)
{
	panic("%s: not supported in this configuration", __func__);
}

void
hv_trace_guest_enter(uint32_t vcpu_id, uint64_t *vcpu_regs)
{
	DTRACE_HV2(guest__enter, uint32_t, vcpu_id, uint64_t *, vcpu_regs);

	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ENTER) | DBG_FUNC_START, vcpu_id);
}

void
hv_trace_guest_exit(uint32_t vcpu_id, uint64_t *vcpu_regs, uint32_t reason)
{
	/* close the guest enter interval opened in hv_trace_guest_enter() */
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ENTER) | DBG_FUNC_END, vcpu_id,
	    reason);

	DTRACE_HV2(guest__exit, uint32_t, vcpu_id, uint64_t *, vcpu_regs);
}

void
hv_trace_guest_error(uint32_t vcpu_id, uint64_t *vcpu_regs, uint32_t failure,
    uint32_t error)
{
	/*
	 * An error indicates that the guest enter failed so there will be no
	 * guest exit. Close the guest enter interval.
	 */
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ENTER) | DBG_FUNC_END, vcpu_id,
	    -1, failure, error);
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ERROR), vcpu_id, failure, error);

	DTRACE_HV3(guest__error, uint32_t, vcpu_id, uint64_t *, vcpu_regs, uint32_t, failure);
}