/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/locks.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <libkern/OSAtomic.h>
#include <vm/vm_pageout.h>

#include <sys/kdebug.h>

#if defined(__x86_64__) && CONFIG_VMX
#include <i386/vmx/vmx_cpu.h>
#endif

#include <kern/hv_support.h>

int hv_support_available = 0;

/* callbacks for tasks/threads with associated hv objects */
hv_callbacks_t hv_callbacks = {
	.dispatch = NULL,        /* thread is being dispatched for execution */
	.preempt = NULL,         /* thread is being preempted */
	.suspend = NULL,         /* system is being suspended */
	.thread_destroy = NULL,  /* thread is being destroyed */
	.task_destroy = NULL,    /* task is being destroyed */
	.volatile_state = NULL,  /* thread state is becoming volatile */
	.resume = NULL,          /* system is being resumed */
	.memory_pressure = NULL, /* (unused) */
};

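/*
 * Minimal sketch (illustration only, not code from this file): for a
 * thread that carries an hv object, the scheduler side is expected to
 * invoke the matching callback with that object as its argument. Assuming
 * a thread whose hv_thread_target is set, a preemption notification would
 * look roughly like:
 *
 *	if (hv_callbacks_enabled && thread->hv_thread_target != NULL &&
 *	    hv_callbacks.preempt != NULL) {
 *		hv_callbacks.preempt(thread->hv_thread_target);
 *	}
 */
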
/* trap tables for hv_*_trap syscalls */
static hv_trap_table_t hv_trap_table[] = {
	[HV_TASK_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	},
	[HV_THREAD_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	}
};

static int hv_callbacks_enabled = 0;
static LCK_GRP_DECLARE(hv_support_lck_grp, "hv_support");
static LCK_MTX_DECLARE(hv_support_lck_mtx, &hv_support_lck_grp);

/* hv_support boot initialization */
void
hv_support_init(void)
{
#if defined(__x86_64__) && CONFIG_VMX
	hv_support_available = vmx_hv_support();
#endif
}

/* returns true if hv_support is available on this machine */
int
hv_get_support(void)
{
	return hv_support_available;
}

/* associate an hv object with the current task */
void
hv_set_task_target(void *target)
{
	current_task()->hv_task_target = target;
}

/* associate an hv object with the current thread */
void
hv_set_thread_target(void *target)
{
	current_thread()->hv_thread_target = target;
}

/* get hv object associated with the current task */
void*
hv_get_task_target(void)
{
	return current_task()->hv_task_target;
}

/* get hv object associated with the current thread */
void*
hv_get_thread_target(void)
{
	return current_thread()->hv_thread_target;
}

/* test if a given thread state may be volatile between dispatch
 * and preemption */
int
hv_get_volatile_state(hv_volatile_state_t state)
{
	int is_volatile = 0;

#if (defined(__x86_64__))
	if (state == HV_DEBUG_STATE) {
		is_volatile = (current_thread()->machine.ids != NULL);
	}
#endif

	return is_volatile;
}

/* register a list of trap handlers for the hv_*_trap syscalls */
kern_return_t
hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
    unsigned trap_count)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(&hv_support_lck_mtx);
	if (trap_table->trap_count == 0) {
		trap_table->traps = traps;
		/* publish the table before making it reachable via trap_count */
		OSMemoryBarrier();
		trap_table->trap_count = trap_count;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(&hv_support_lck_mtx);

	return kr;
}

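/*
 * Minimal usage sketch (hypothetical handler names, not part of this
 * file): a hypervisor client registers its handlers once; the position of
 * a handler in the array is the trap index passed to hv_*_trap:
 *
 *	static const hv_trap_t my_thread_traps[] = {
 *		my_vcpu_run,     // trap index 0
 *		my_vcpu_getregs, // trap index 1
 *	};
 *
 *	kern_return_t kr = hv_set_traps(HV_THREAD_TRAP, my_thread_traps,
 *	    sizeof(my_thread_traps) / sizeof(my_thread_traps[0]));
 *
 * The call fails with KERN_FAILURE if a table is already registered for
 * that trap type.
 */
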
/* release hv_*_trap traps */
void
hv_release_traps(hv_trap_type_t trap_type)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];

	lck_mtx_lock(&hv_support_lck_mtx);
	trap_table->trap_count = 0;
	/* stop dispatch via trap_count before clearing the table itself */
	OSMemoryBarrier();
	trap_table->traps = NULL;
	lck_mtx_unlock(&hv_support_lck_mtx);
}

/* register callbacks for certain task/thread events for tasks/threads with
 * associated hv objects */
kern_return_t
hv_set_callbacks(hv_callbacks_t callbacks)
{
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(&hv_support_lck_mtx);
	if (hv_callbacks_enabled == 0) {
		hv_callbacks = callbacks;
		hv_callbacks_enabled = 1;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(&hv_support_lck_mtx);

	return kr;
}

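/*
 * Minimal usage sketch (hypothetical callback names): a client installs
 * all of its event callbacks in one shot; members it leaves unset stay
 * NULL. A second registration fails until hv_release_callbacks() runs:
 *
 *	hv_callbacks_t cb = {
 *		.thread_destroy = my_thread_destroy,
 *		.task_destroy = my_task_destroy,
 *	};
 *	if (hv_set_callbacks(cb) != KERN_SUCCESS) {
 *		// another client already owns the callbacks
 *	}
 */
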
/* release callbacks for task/thread events */
void
hv_release_callbacks(void)
{
	lck_mtx_lock(&hv_support_lck_mtx);
	hv_callbacks = (hv_callbacks_t) {
		.dispatch = NULL,
		.preempt = NULL,
		.suspend = NULL,
		.thread_destroy = NULL,
		.task_destroy = NULL,
		.volatile_state = NULL,
		.memory_pressure = NULL
	};

	hv_callbacks_enabled = 0;
	lck_mtx_unlock(&hv_support_lck_mtx);
}

/* system suspend notification */
void
hv_suspend(void)
{
	if (hv_callbacks_enabled) {
		hv_callbacks.suspend();
	}
}

/* system resume notification */
void
hv_resume(void)
{
	if (hv_callbacks_enabled && hv_callbacks.resume) {
		hv_callbacks.resume();
	}
}

/* dispatch hv_task_trap/hv_thread_trap syscalls to trap handlers,
 * fail for invalid index or absence of trap handlers, trap handler is
 * responsible for validating targets */
#define HV_TRAP_DISPATCH(type, index, target, argument) \
	((__probable(index < hv_trap_table[type].trap_count)) ? \
	    hv_trap_table[type].traps[index](target, argument) \
	    : KERN_INVALID_ARGUMENT)

kern_return_t
hv_task_trap(uint64_t index, uint64_t arg)
{
	return HV_TRAP_DISPATCH(HV_TASK_TRAP, index, hv_get_task_target(), arg);
}

kern_return_t
hv_thread_trap(uint64_t index, uint64_t arg)
{
	return HV_TRAP_DISPATCH(HV_THREAD_TRAP, index, hv_get_thread_target(), arg);
}

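/*
 * For exposition: a call such as hv_thread_trap(1, arg) expands to a
 * bounds check against the registered trap_count followed by an indirect
 * call with the caller's hv object, roughly:
 *
 *	(1 < hv_trap_table[HV_THREAD_TRAP].trap_count)
 *	    ? hv_trap_table[HV_THREAD_TRAP].traps[1](hv_get_thread_target(), arg)
 *	    : KERN_INVALID_ARGUMENT;
 *
 * Per the comment above, the handler itself must validate the target,
 * which may be NULL if no hv object was ever associated with the thread.
 */
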
boolean_t
hv_ast_pending(void)
{
	return current_cpu_datap()->cpu_pending_ast != 0;
}

void __attribute__((__noreturn__))
hv_port_notify(mach_msg_header_t *msg __unused)
{
	panic("%s: not supported in this configuration", __func__);
}

void
hv_trace_guest_enter(uint32_t vcpu_id, uint64_t *vcpu_regs)
{
	DTRACE_HV2(guest__enter, uint32_t, vcpu_id, uint64_t *, vcpu_regs);

	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ENTER) | DBG_FUNC_START, vcpu_id);
}

void
hv_trace_guest_exit(uint32_t vcpu_id, uint64_t *vcpu_regs, uint32_t reason)
{
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ENTER) | DBG_FUNC_END, vcpu_id,
	    reason);

	DTRACE_HV2(guest__exit, uint32_t, vcpu_id, uint64_t *, vcpu_regs);
}

void
hv_trace_guest_error(uint32_t vcpu_id, uint64_t *vcpu_regs, uint32_t failure,
    uint32_t error)
{
	/*
	 * An error indicates that the guest enter failed so there will be no
	 * guest exit. Close the guest enter interval.
	 */
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ENTER) | DBG_FUNC_END, vcpu_id,
	    0xffffffff);
	KDBG(MACHDBG_CODE(DBG_MACH_HV, HV_GUEST_ERROR), vcpu_id, failure, error);

	DTRACE_HV3(guest__error, uint32_t, vcpu_id, uint64_t *, vcpu_regs, uint32_t, failure);
}