/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * ucode.c
 *
 * Microcode updater interface sysctl
 */

#include <kern/locks.h>
#include <i386/ucode.h>
#include <sys/errno.h>
#include <i386/proc_reg.h>
#include <i386/cpuid.h>
#include <vm/vm_kern.h>
#include <i386/cpu_data.h> // mp_*_preemption
#include <i386/mp.h> // mp_cpus_call
#include <i386/commpage/commpage.h>
#include <i386/fpu.h>
#include <machine/cpu_number.h> // cpu_number
#include <pexpert/pexpert.h> // boot-args

#define IA32_BIOS_UPDT_TRIG (0x79) /* microcode update trigger MSR */

struct intel_ucupdate *global_update = NULL;

/* Execute the actual update! */
static void
update_microcode(void)
{
	/* SDM Example 9-8 code shows that we load the
	 * address of the UpdateData within the microcode blob,
	 * not the address of the header.
	 */
	wrmsr64(IA32_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)&global_update->data);
}
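
/*
 * Per the Intel SDM, writing the linear address of the update data to
 * IA32_BIOS_UPDT_TRIG (MSR 79H) causes the processor to load and verify
 * the update. The load only affects the logical CPU that executes the
 * WRMSR, which is why xcpu_update() below fans the call out to every CPU.
 */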
61 | ||
62 | /* locks */ | |
63 | static lck_grp_attr_t *ucode_slock_grp_attr = NULL; | |
64 | static lck_grp_t *ucode_slock_grp = NULL; | |
65 | static lck_attr_t *ucode_slock_attr = NULL; | |
66 | static lck_spin_t *ucode_slock = NULL; | |
67 | ||
static kern_return_t
register_locks(void)
{
	/* already allocated? */
	if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock) {
		return KERN_SUCCESS;
	}

	/* allocate lock group attribute and group */
	if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init())) {
		goto nomem_out;
	}

	if (!(ucode_slock_grp = lck_grp_alloc_init("ucode_lock", ucode_slock_grp_attr))) {
		goto nomem_out;
	}

	/* Allocate lock attribute */
	if (!(ucode_slock_attr = lck_attr_alloc_init())) {
		goto nomem_out;
	}

	/* Allocate the spin lock */
	/* We keep one global spin-lock. We could have one per update
	 * request... but seriously, why would you update microcode like that?
	 */
	if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr))) {
		goto nomem_out;
	}

	return KERN_SUCCESS;

nomem_out:
	/* clean up */
	if (ucode_slock) {
		lck_spin_free(ucode_slock, ucode_slock_grp);
	}
	if (ucode_slock_attr) {
		lck_attr_free(ucode_slock_attr);
	}
	if (ucode_slock_grp) {
		lck_grp_free(ucode_slock_grp);
	}
	if (ucode_slock_grp_attr) {
		lck_grp_attr_free(ucode_slock_grp_attr);
	}

	return KERN_NO_SPACE;
}
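
/*
 * A spin lock (not a mutex) guards the update because cpu_apply_microcode()
 * also runs in the interrupt context set up by mp_cpus_call() in
 * xcpu_update(), where blocking is not permitted.
 */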
117 | ||
118 | /* Copy in an update */ | |
119 | static int | |
120 | copyin_update(uint64_t inaddr) | |
121 | { | |
122 | struct intel_ucupdate update_header; | |
123 | struct intel_ucupdate *update; | |
124 | vm_size_t size; | |
125 | kern_return_t ret; | |
126 | int error; | |
127 | ||
128 | /* Copy in enough header to peek at the size */ | |
129 | error = copyin((user_addr_t)inaddr, (void *)&update_header, sizeof(update_header)); | |
0a7de745 | 130 | if (error) { |
6d2010ae | 131 | return error; |
0a7de745 | 132 | } |
6d2010ae A |
133 | |
134 | /* Get the actual, alleged size */ | |
135 | size = update_header.total_size; | |
136 | ||
137 | /* huge bogus piece of data that somehow made it through? */ | |
0a7de745 | 138 | if (size >= 1024 * 1024) { |
6d2010ae | 139 | return ENOMEM; |
0a7de745 | 140 | } |
6d2010ae A |
141 | |
142 | /* Old microcodes? */ | |
0a7de745 | 143 | if (size == 0) { |
6d2010ae | 144 | size = 2048; /* default update size; see SDM */ |
0a7de745 | 145 | } |
6d2010ae A |
146 | /* |
147 | * create the buffer for the update | |
148 | * It need only be aligned to 16-bytes, according to the SDM. | |
149 | * This also wires it down | |
150 | */ | |
3e170ce0 | 151 | ret = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&update, size, VM_KERN_MEMORY_OSFMK); |
0a7de745 | 152 | if (ret != KERN_SUCCESS) { |
6d2010ae | 153 | return ENOMEM; |
0a7de745 | 154 | } |
6d2010ae A |
155 | |
156 | /* Copy it in */ | |
157 | error = copyin((user_addr_t)inaddr, (void*)update, size); | |
158 | if (error) { | |
159 | kmem_free(kernel_map, (vm_offset_t)update, size); | |
160 | return error; | |
161 | } | |
162 | ||
163 | global_update = update; | |
164 | return 0; | |
165 | } | |
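
/*
 * For reference: struct intel_ucupdate (i386/ucode.h) mirrors the update
 * header format in the Intel SDM -- header version, update revision, date,
 * processor signature, checksum, loader revision, processor flags, data
 * size, and total size, followed by the update data itself. A total_size
 * of zero denotes the original fixed-size format (48-byte header plus
 * 2000 bytes of data), which is why copyin_update() falls back to 2048.
 */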
166 | ||
0a7de745 A |
167 | static void |
168 | cpu_apply_microcode(void) | |
169 | { | |
170 | /* grab the lock */ | |
171 | lck_spin_lock(ucode_slock); | |
172 | ||
173 | /* execute the update */ | |
174 | update_microcode(); | |
175 | ||
176 | /* release the lock */ | |
177 | lck_spin_unlock(ucode_slock); | |
178 | } | |
179 | ||
180 | static void | |
181 | cpu_update(__unused void *arg) | |
182 | { | |
183 | cpu_apply_microcode(); | |
184 | ||
185 | cpuid_do_was(); | |
186 | } | |
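
/*
 * cpuid_do_was() appears to re-apply CPU workarounds ("was") whose
 * applicability can depend on the microcode revision now running; note
 * that the wake path below invokes it even when no update is staged.
 */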
187 | ||
6d2010ae A |
188 | /* |
189 | * This is called once by every CPU on a wake from sleep/hibernate | |
190 | * and is meant to re-apply a microcode update that got lost | |
191 | * by sleeping. | |
192 | */ | |
193 | void | |
eb6b6ca3 | 194 | ucode_update_wake_and_apply_cpu_was() |
6d2010ae A |
195 | { |
196 | if (global_update) { | |
197 | kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number()); | |
0a7de745 | 198 | cpu_update(NULL); |
6d2010ae | 199 | } else { |
eb6b6ca3 A |
200 | cpuid_do_was(); |
201 | #if DEBUG | |
6d2010ae A |
202 | kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number()); |
203 | #endif | |
204 | } | |
205 | } | |
206 | ||
d26ffc64 A |
207 | static void |
208 | ucode_cpuid_set_info(void) | |
209 | { | |
210 | uint64_t saved_xcr0, dest_xcr0; | |
211 | int need_xcr0_restore = 0; | |
212 | boolean_t intrs_enabled = ml_set_interrupts_enabled(FALSE); | |
213 | ||
214 | /* | |
215 | * Before we cache the CPUID information, we must configure XCR0 with the maximal set of | |
216 | * features to ensure the save area returned in the xsave leaf is correctly-sized. | |
217 | * | |
218 | * Since we are guaranteed that init_fpu() has already happened, we can use state | |
219 | * variables set there that were already predicated on the presence of explicit | |
220 | * boot-args enables/disables. | |
221 | */ | |
222 | ||
223 | if (fpu_capability == AVX512 || fpu_capability == AVX) { | |
224 | saved_xcr0 = xgetbv(XCR0); | |
225 | dest_xcr0 = (fpu_capability == AVX512) ? AVX512_XMASK : AVX_XMASK; | |
226 | assert((get_cr4() & CR4_OSXSAVE) != 0); | |
227 | if (saved_xcr0 != dest_xcr0) { | |
228 | need_xcr0_restore = 1; | |
229 | xsetbv(dest_xcr0 >> 32, dest_xcr0 & 0xFFFFFFFFUL); | |
230 | } | |
231 | } | |
232 | ||
233 | cpuid_set_info(); | |
234 | ||
235 | if (need_xcr0_restore) { | |
236 | xsetbv(saved_xcr0 >> 32, saved_xcr0 & 0xFFFFFFFFUL); | |
237 | } | |
238 | ||
239 | ml_set_interrupts_enabled(intrs_enabled); | |
240 | } | |
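
/*
 * Background: CPUID.(EAX=0DH, ECX=0):EBX reports the XSAVE area size for
 * only those features currently enabled in XCR0, so cpuid_set_info() must
 * run with the maximal feature set enabled or it would cache an undersized
 * save area.
 */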
241 | ||
6d2010ae A |
242 | /* Farm an update out to all CPUs */ |
243 | static void | |
244 | xcpu_update(void) | |
245 | { | |
0a7de745 | 246 | cpumask_t dest_cpumask; |
6d2010ae | 247 | |
0a7de745 A |
248 | if (register_locks() != KERN_SUCCESS) { |
249 | return; | |
250 | } | |
39236c6e | 251 | |
0a7de745 A |
252 | mp_disable_preemption(); |
253 | dest_cpumask = CPUMASK_OTHERS; | |
254 | cpu_apply_microcode(); | |
39236c6e | 255 | /* Update the cpuid info */ |
d26ffc64 | 256 | ucode_cpuid_set_info(); |
0a7de745 A |
257 | mp_enable_preemption(); |
258 | ||
259 | /* Get all other CPUs to perform the update */ | |
260 | /* | |
261 | * Calling mp_cpus_call with the ASYNC flag ensures that the | |
262 | * IPI dispatch occurs in parallel, but that we will not | |
263 | * proceed until all targeted CPUs complete the microcode | |
264 | * update. | |
265 | */ | |
266 | mp_cpus_call(dest_cpumask, ASYNC, cpu_update, NULL); | |
267 | ||
268 | /* Update the commpage only after we update all CPUs' microcode */ | |
269 | commpage_post_ucode_update(); | |
6d2010ae A |
270 | } |
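
/*
 * Ordering note: the calling CPU updates itself and refreshes the cached
 * CPUID info while preemption is disabled, so the info is read on the CPU
 * that just received the update; CPUMASK_OTHERS is snapshotted in the same
 * region so the target set matches that CPU.
 */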
271 | ||
272 | /* | |
273 | * sysctl function | |
274 | * | |
275 | */ | |
276 | int | |
277 | ucode_interface(uint64_t addr) | |
278 | { | |
279 | int error; | |
0a7de745 | 280 | char arg[16]; |
39236c6e | 281 | |
0a7de745 | 282 | if (PE_parse_boot_argn("-x", arg, sizeof(arg))) { |
39236c6e A |
283 | printf("ucode: no updates in safe mode\n"); |
284 | return EPERM; | |
285 | } | |
6d2010ae A |
286 | |
287 | #if !DEBUG | |
288 | /* | |
289 | * Userland may only call this once per boot. Anything else | |
290 | * would not make sense (all updates are cumulative), and also | |
291 | * leak memory, because we don't free previous updates. | |
292 | */ | |
0a7de745 | 293 | if (global_update) { |
6d2010ae | 294 | return EPERM; |
0a7de745 | 295 | } |
6d2010ae A |
296 | #endif |
297 | ||
298 | /* Get the whole microcode */ | |
299 | error = copyin_update(addr); | |
300 | ||
0a7de745 | 301 | if (error) { |
6d2010ae | 302 | return error; |
0a7de745 | 303 | } |
6d2010ae A |
304 | |
305 | /* Farm out the updates */ | |
306 | xcpu_update(); | |
307 | ||
308 | return 0; | |
309 | } |
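
/*
 * A minimal sketch of the userland side, for illustration. It assumes that
 * the machdep.cpu.ucupdate sysctl (wired up in bsd/dev/i386/sysctl.c) is
 * what feeds ucode_interface() the user address of the blob:
 *
 *	#include <stdint.h>
 *	#include <sys/sysctl.h>
 *
 *	// Hand the kernel the user address of a complete microcode blob.
 *	// Requires root; outside DEBUG kernels it succeeds at most once
 *	// per boot (EPERM afterwards).
 *	int
 *	apply_microcode_update(const void *blob)
 *	{
 *		uint64_t addr = (uint64_t)(uintptr_t)blob;
 *		return sysctlbyname("machdep.cpu.ucupdate",
 *		    NULL, NULL, &addr, sizeof(addr));
 *	}
 */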