/*
 * Copyright (c) 2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 | ||
29 | #include <kern/locks.h> | |
30 | #include <kern/task.h> | |
31 | #include <kern/thread.h> | |
32 | #include <libkern/OSAtomic.h> | |
33 | #include <vm/vm_pageout.h> | |
34 | ||
35 | #if defined(__x86_64__) && CONFIG_VMX | |
36 | #include <i386/vmx/vmx_cpu.h> | |
37 | #endif | |
38 | ||
39 | #include <kern/hv_support.h> | |
40 | ||
/* nonzero when hardware virtualization support was detected at boot
   (set from vmx_hv_support() in hv_support_init() on x86_64+VMX builds) */
int hv_support_available = 0;

/* callbacks for tasks/threads with associated hv objects; populated as a
   whole via hv_set_callbacks() and cleared via hv_release_callbacks(),
   both under hv_support_lck_mtx */
hv_callbacks_t hv_callbacks = {
	.dispatch = NULL,	/* thread is being dispatched for execution */
	.preempt = NULL,	/* thread is being preempted */
	.suspend = NULL,	/* system is being suspended */
	.thread_destroy = NULL,	/* thread is being destroyed */
	.task_destroy = NULL,	/* task is being destroyed */
	.volatile_state = NULL,	/* thread state is becoming volatile */
	.memory_pressure = NULL	/* memory pressure notification */
};

/* trap tables for hv_*_trap syscalls, one entry per hv_trap_type_t.
   traps/trap_count are published and torn down around OSMemoryBarrier()
   in hv_set_traps()/hv_release_traps() so HV_TRAP_DISPATCH can read them
   without taking hv_support_lck_mtx */
static hv_trap_table_t hv_trap_table[] = {
	[HV_TASK_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	},
	[HV_THREAD_TRAP] = {
		.traps = NULL,
		.trap_count = 0
	}
};

static int hv_callbacks_enabled = 0;		/* set once hv_set_callbacks() has run */
static int hv_mp_notify_enabled = 0;		/* pressure-monitor thread is running */
static int hv_mp_notify_destroy = 0;		/* asks the monitor thread to exit */
static lck_grp_t *hv_support_lck_grp = NULL;
static lck_mtx_t *hv_support_lck_mtx = NULL;	/* guards all hv_support state above */
static thread_t hv_mp_notify_thread = THREAD_NULL;
static void hv_mp_notify(void);			/* forward decl: monitor thread body */

74 | /* hv_support boot initialization */ | |
75 | void | |
76 | hv_support_init(void) { | |
77 | #if defined(__x86_64__) && CONFIG_VMX | |
78 | hv_support_available = vmx_hv_support(); | |
79 | #endif | |
80 | ||
81 | hv_support_lck_grp = lck_grp_alloc_init("hv_support", LCK_GRP_ATTR_NULL); | |
82 | assert(hv_support_lck_grp); | |
83 | ||
84 | hv_support_lck_mtx = lck_mtx_alloc_init(hv_support_lck_grp, LCK_ATTR_NULL); | |
85 | assert(hv_support_lck_mtx); | |
86 | } | |
87 | ||
88 | /* returns true if hv_support is available on this machine */ | |
89 | int | |
90 | hv_get_support(void) { | |
91 | return hv_support_available; | |
92 | } | |
93 | ||
94 | /* associate an hv object with the current task */ | |
95 | void | |
96 | hv_set_task_target(void *target) { | |
97 | current_task()->hv_task_target = target; | |
98 | } | |
99 | ||
100 | /* associate an hv object with the current thread */ | |
101 | void | |
102 | hv_set_thread_target(void *target) { | |
103 | current_thread()->hv_thread_target = target; | |
104 | } | |
105 | ||
106 | /* get hv object associated with the current task */ | |
107 | void* | |
108 | hv_get_task_target(void) { | |
109 | return current_task()->hv_task_target; | |
110 | } | |
111 | ||
112 | /* get hv object associated with the current thread */ | |
113 | void* | |
114 | hv_get_thread_target(void) { | |
115 | return current_thread()->hv_thread_target; | |
116 | } | |
117 | ||
118 | /* test if a given thread state may be volatile between dispatch | |
119 | and preemption */ | |
120 | int | |
121 | hv_get_volatile_state(hv_volatile_state_t state) { | |
122 | int is_volatile = 0; | |
123 | ||
124 | #if (defined(__x86_64__)) | |
125 | if (state == HV_DEBUG_STATE) { | |
126 | is_volatile = (current_thread()->machine.ids != NULL); | |
127 | } | |
128 | #endif | |
129 | ||
130 | return is_volatile; | |
131 | } | |
132 | ||
/* memory pressure monitor thread: loops forever delivering pressure
   notifications to the registered callback until hv_release_mp_notify()
   requests shutdown */
static void
hv_mp_notify(void) {
	while (1) {
		/* block until the VM layer reports memory pressure */
		mach_vm_pressure_monitor(TRUE, 0, NULL, NULL);

		lck_mtx_lock(hv_support_lck_mtx);
		if (hv_mp_notify_destroy == 1) {
			/* hv_release_mp_notify() asked us to exit; clear both
			   flags under the lock before leaving the loop */
			hv_mp_notify_destroy = 0;
			hv_mp_notify_enabled = 0;
			lck_mtx_unlock(hv_support_lck_mtx);
			break;
		} else {
			/* deliver under the lock so hv_set_callbacks() cannot
			   swap the callback table mid-call.
			   NOTE(review): hv_release_callbacks() NULLs
			   memory_pressure without stopping this thread —
			   confirm callers invoke hv_release_mp_notify() first,
			   otherwise this can call through NULL */
			hv_callbacks.memory_pressure();
		}
		lck_mtx_unlock(hv_support_lck_mtx);
	}

	/* self-terminate: drop this thread's reference (presumably the one
	   returned by kernel_thread_start() in hv_set_mp_notify() — confirm) */
	thread_deallocate(current_thread());
}

/* subscribe to memory pressure notifications: spawns hv_mp_notify() as a
   kernel thread on first call; idempotent while the thread is running.
   Returns KERN_FAILURE if no callbacks are registered, otherwise the
   result of kernel_thread_start() */
kern_return_t
hv_set_mp_notify(void) {
	kern_return_t kr;

	lck_mtx_lock(hv_support_lck_mtx);
	/* callbacks must be registered before subscribing */
	if (hv_callbacks_enabled == 0) {
		lck_mtx_unlock(hv_support_lck_mtx);
		return KERN_FAILURE;
	}

	/* already subscribed: cancel any pending destroy request and succeed */
	if (hv_mp_notify_enabled == 1) {
		hv_mp_notify_destroy = 0;
		lck_mtx_unlock(hv_support_lck_mtx);
		return KERN_SUCCESS;
	}

	kr = kernel_thread_start((thread_continue_t) &hv_mp_notify, NULL,
		&hv_mp_notify_thread);

	/* mark enabled only once the thread actually started */
	if (kr == KERN_SUCCESS) {
		hv_mp_notify_enabled = 1;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}

182 | /* unsubscribe from memory pressure notifications */ | |
183 | void | |
184 | hv_release_mp_notify(void) { | |
185 | lck_mtx_lock(hv_support_lck_mtx); | |
186 | if (hv_mp_notify_enabled == 1) { | |
187 | hv_mp_notify_destroy = 1; | |
188 | } | |
189 | lck_mtx_unlock(hv_support_lck_mtx); | |
190 | } | |
191 | ||
/* register a list of trap handlers for the hv_*_trap syscalls; fails if a
   list is already installed for this trap type.
   NOTE(review): trap_type is used to index hv_trap_table without a range
   check — callers must pass a valid hv_trap_type_t */
kern_return_t
hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
	unsigned trap_count)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(hv_support_lck_mtx);
	if (trap_table->trap_count == 0) {
		trap_table->traps = traps;
		/* publish the table pointer before the count so a concurrent
		   HV_TRAP_DISPATCH (which reads count first, lock-free) never
		   sees a nonzero count with a stale traps pointer */
		OSMemoryBarrier();
		trap_table->trap_count = trap_count;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}

/* release hv_*_trap traps for the given trap type */
void
hv_release_traps(hv_trap_type_t trap_type) {
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];

	lck_mtx_lock(hv_support_lck_mtx);
	/* zero the count before clearing the pointer (mirror of the publish
	   order in hv_set_traps) so lock-free dispatchers fail the bounds
	   check instead of chasing a NULL table */
	trap_table->trap_count = 0;
	OSMemoryBarrier();
	trap_table->traps = NULL;
	lck_mtx_unlock(hv_support_lck_mtx);
}

224 | /* register callbacks for certain task/thread events for tasks/threads with | |
225 | associated hv objects */ | |
226 | kern_return_t | |
227 | hv_set_callbacks(hv_callbacks_t callbacks) { | |
228 | kern_return_t kr = KERN_FAILURE; | |
229 | ||
230 | lck_mtx_lock(hv_support_lck_mtx); | |
231 | if (hv_callbacks_enabled == 0) { | |
232 | hv_callbacks = callbacks; | |
233 | hv_callbacks_enabled = 1; | |
234 | kr = KERN_SUCCESS; | |
235 | } | |
236 | lck_mtx_unlock(hv_support_lck_mtx); | |
237 | ||
238 | return kr; | |
239 | } | |
240 | ||
241 | /* release callbacks for task/thread events */ | |
242 | void | |
243 | hv_release_callbacks(void) { | |
244 | lck_mtx_lock(hv_support_lck_mtx); | |
245 | hv_callbacks = (hv_callbacks_t) { | |
246 | .dispatch = NULL, | |
247 | .preempt = NULL, | |
04b8595b | 248 | .suspend = NULL, |
fe8ab488 A |
249 | .thread_destroy = NULL, |
250 | .task_destroy = NULL, | |
251 | .volatile_state = NULL, | |
252 | .memory_pressure = NULL | |
253 | }; | |
254 | ||
255 | hv_callbacks_enabled = 0; | |
256 | lck_mtx_unlock(hv_support_lck_mtx); | |
257 | } | |
258 | ||
/* system suspend notification: forwards to the registered suspend callback.
   NOTE(review): hv_callbacks_enabled is read without hv_support_lck_mtx;
   if hv_release_callbacks() can run concurrently with the suspend path,
   .suspend may be NULL by the time it is called — confirm the suspend
   path is serialized against callback release */
void
hv_suspend(void) {
	if (hv_callbacks_enabled) {
		hv_callbacks.suspend();
	}
}

/* dispatch hv_task_trap/hv_thread_trap syscalls to trap handlers,
   fail for invalid index or absence of trap handlers, trap handler is
   responsible for validating targets.
   A zero trap_count (no handlers installed) always fails the bounds check.
   NOTE: index is evaluated twice — pass only side-effect-free expressions */
#define HV_TRAP_DISPATCH(type, index, target, argument)\
	((__probable(index < hv_trap_table[type].trap_count)) ? \
		hv_trap_table[type].traps[index](target, argument) \
		: KERN_INVALID_ARGUMENT)

/* hv_task_trap syscall entry: target is the calling task's hv object */
kern_return_t hv_task_trap(uint64_t index, uint64_t arg) {
	return HV_TRAP_DISPATCH(HV_TASK_TRAP, index, hv_get_task_target(), arg);
}

/* hv_thread_trap syscall entry: target is the calling thread's hv object */
kern_return_t hv_thread_trap(uint64_t index, uint64_t arg) {
	return HV_TRAP_DISPATCH(HV_THREAD_TRAP, index, hv_get_thread_target(), arg);
}