]>
Commit | Line | Data |
---|---|---|
5ba3f43e | 1 | /* |
0a7de745 | 2 | * Copyright (c) 2017-2019 Apple Inc. All rights reserved. |
5ba3f43e A |
3 | * |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
6d2010ae A |
28 | /* |
29 | * ucode.c | |
30 | * | |
31 | * Microcode updater interface sysctl | |
32 | */ | |
33 | ||
34 | #include <kern/locks.h> | |
35 | #include <i386/ucode.h> | |
36 | #include <sys/errno.h> | |
37 | #include <i386/proc_reg.h> | |
38 | #include <i386/cpuid.h> | |
39 | #include <vm/vm_kern.h> | |
0a7de745 A |
40 | #include <i386/mp.h> // mp_cpus_call |
41 | #include <i386/commpage/commpage.h> | |
d26ffc64 | 42 | #include <i386/fpu.h> |
6d2010ae | 43 | #include <machine/cpu_number.h> // cpu_number |
39236c6e | 44 | #include <pexpert/pexpert.h> // boot-args |
6d2010ae A |
45 | |
#define IA32_BIOS_UPDT_TRIG (0x79) /* microcode update trigger MSR */

/*
 * The most recently copied-in microcode image (header + data), wired in
 * kernel memory.  Retained for the life of the boot so it can be
 * re-applied on wake from sleep/hibernate (see ucode_update_wake()).
 */
struct intel_ucupdate *global_update = NULL;
49 | ||
50 | /* Exceute the actual update! */ | |
51 | static void | |
52 | update_microcode(void) | |
53 | { | |
54 | /* SDM Example 9-8 code shows that we load the | |
55 | * address of the UpdateData within the microcode blob, | |
56 | * not the address of the header. | |
57 | */ | |
58 | wrmsr64(IA32_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)&global_update->data); | |
59 | } | |
60 | ||
/* locks */
static lck_grp_attr_t *ucode_slock_grp_attr = NULL; /* attribute for the lock group below */
static lck_grp_t *ucode_slock_grp = NULL;           /* lock group owning ucode_slock */
static lck_attr_t *ucode_slock_attr = NULL;         /* attribute for ucode_slock */
static lck_spin_t *ucode_slock = NULL;              /* serializes the update-trigger MSR write */
66 | ||
67 | static kern_return_t | |
68 | register_locks(void) | |
69 | { | |
70 | /* already allocated? */ | |
0a7de745 | 71 | if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock) { |
6d2010ae | 72 | return KERN_SUCCESS; |
0a7de745 | 73 | } |
6d2010ae A |
74 | |
75 | /* allocate lock group attribute and group */ | |
0a7de745 | 76 | if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init())) { |
6d2010ae | 77 | goto nomem_out; |
0a7de745 | 78 | } |
6d2010ae | 79 | |
0a7de745 | 80 | if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr))) { |
6d2010ae | 81 | goto nomem_out; |
0a7de745 | 82 | } |
6d2010ae A |
83 | |
84 | /* Allocate lock attribute */ | |
0a7de745 | 85 | if (!(ucode_slock_attr = lck_attr_alloc_init())) { |
6d2010ae | 86 | goto nomem_out; |
0a7de745 | 87 | } |
6d2010ae A |
88 | |
89 | /* Allocate the spin lock */ | |
90 | /* We keep one global spin-lock. We could have one per update | |
91 | * request... but srsly, why would you update microcode like that? | |
92 | */ | |
0a7de745 | 93 | if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr))) { |
6d2010ae | 94 | goto nomem_out; |
0a7de745 | 95 | } |
6d2010ae A |
96 | |
97 | return KERN_SUCCESS; | |
98 | ||
99 | nomem_out: | |
100 | /* clean up */ | |
0a7de745 | 101 | if (ucode_slock) { |
6d2010ae | 102 | lck_spin_free(ucode_slock, ucode_slock_grp); |
0a7de745 A |
103 | } |
104 | if (ucode_slock_attr) { | |
6d2010ae | 105 | lck_attr_free(ucode_slock_attr); |
0a7de745 A |
106 | } |
107 | if (ucode_slock_grp) { | |
6d2010ae | 108 | lck_grp_free(ucode_slock_grp); |
0a7de745 A |
109 | } |
110 | if (ucode_slock_grp_attr) { | |
6d2010ae | 111 | lck_grp_attr_free(ucode_slock_grp_attr); |
0a7de745 | 112 | } |
6d2010ae A |
113 | |
114 | return KERN_NO_SPACE; | |
115 | } | |
116 | ||
117 | /* Copy in an update */ | |
118 | static int | |
119 | copyin_update(uint64_t inaddr) | |
120 | { | |
121 | struct intel_ucupdate update_header; | |
122 | struct intel_ucupdate *update; | |
123 | vm_size_t size; | |
124 | kern_return_t ret; | |
125 | int error; | |
126 | ||
127 | /* Copy in enough header to peek at the size */ | |
128 | error = copyin((user_addr_t)inaddr, (void *)&update_header, sizeof(update_header)); | |
0a7de745 | 129 | if (error) { |
6d2010ae | 130 | return error; |
0a7de745 | 131 | } |
6d2010ae A |
132 | |
133 | /* Get the actual, alleged size */ | |
134 | size = update_header.total_size; | |
135 | ||
136 | /* huge bogus piece of data that somehow made it through? */ | |
0a7de745 | 137 | if (size >= 1024 * 1024) { |
6d2010ae | 138 | return ENOMEM; |
0a7de745 | 139 | } |
6d2010ae A |
140 | |
141 | /* Old microcodes? */ | |
0a7de745 | 142 | if (size == 0) { |
6d2010ae | 143 | size = 2048; /* default update size; see SDM */ |
0a7de745 | 144 | } |
6d2010ae A |
145 | /* |
146 | * create the buffer for the update | |
147 | * It need only be aligned to 16-bytes, according to the SDM. | |
148 | * This also wires it down | |
149 | */ | |
3e170ce0 | 150 | ret = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&update, size, VM_KERN_MEMORY_OSFMK); |
0a7de745 | 151 | if (ret != KERN_SUCCESS) { |
6d2010ae | 152 | return ENOMEM; |
0a7de745 | 153 | } |
6d2010ae A |
154 | |
155 | /* Copy it in */ | |
156 | error = copyin((user_addr_t)inaddr, (void*)update, size); | |
157 | if (error) { | |
158 | kmem_free(kernel_map, (vm_offset_t)update, size); | |
159 | return error; | |
160 | } | |
161 | ||
162 | global_update = update; | |
163 | return 0; | |
164 | } | |
165 | ||
0a7de745 A |
166 | static void |
167 | cpu_apply_microcode(void) | |
168 | { | |
169 | /* grab the lock */ | |
170 | lck_spin_lock(ucode_slock); | |
171 | ||
172 | /* execute the update */ | |
173 | update_microcode(); | |
174 | ||
175 | /* release the lock */ | |
176 | lck_spin_unlock(ucode_slock); | |
177 | } | |
178 | ||
179 | static void | |
180 | cpu_update(__unused void *arg) | |
181 | { | |
182 | cpu_apply_microcode(); | |
183 | ||
184 | cpuid_do_was(); | |
185 | } | |
186 | ||
6d2010ae A |
187 | /* |
188 | * This is called once by every CPU on a wake from sleep/hibernate | |
189 | * and is meant to re-apply a microcode update that got lost | |
190 | * by sleeping. | |
191 | */ | |
192 | void | |
193 | ucode_update_wake() | |
194 | { | |
195 | if (global_update) { | |
196 | kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number()); | |
0a7de745 | 197 | cpu_update(NULL); |
5ba3f43e | 198 | #if DEBUG |
6d2010ae A |
199 | } else { |
200 | kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number()); | |
201 | #endif | |
202 | } | |
203 | } | |
204 | ||
d26ffc64 A |
205 | static void |
206 | ucode_cpuid_set_info(void) | |
207 | { | |
208 | uint64_t saved_xcr0, dest_xcr0; | |
209 | int need_xcr0_restore = 0; | |
210 | boolean_t intrs_enabled = ml_set_interrupts_enabled(FALSE); | |
211 | ||
212 | /* | |
213 | * Before we cache the CPUID information, we must configure XCR0 with the maximal set of | |
214 | * features to ensure the save area returned in the xsave leaf is correctly-sized. | |
215 | * | |
216 | * Since we are guaranteed that init_fpu() has already happened, we can use state | |
217 | * variables set there that were already predicated on the presence of explicit | |
218 | * boot-args enables/disables. | |
219 | */ | |
220 | ||
221 | if (fpu_capability == AVX512 || fpu_capability == AVX) { | |
222 | saved_xcr0 = xgetbv(XCR0); | |
223 | dest_xcr0 = (fpu_capability == AVX512) ? AVX512_XMASK : AVX_XMASK; | |
224 | assert((get_cr4() & CR4_OSXSAVE) != 0); | |
225 | if (saved_xcr0 != dest_xcr0) { | |
226 | need_xcr0_restore = 1; | |
227 | xsetbv(dest_xcr0 >> 32, dest_xcr0 & 0xFFFFFFFFUL); | |
228 | } | |
229 | } | |
230 | ||
231 | cpuid_set_info(); | |
232 | ||
233 | if (need_xcr0_restore) { | |
234 | xsetbv(saved_xcr0 >> 32, saved_xcr0 & 0xFFFFFFFFUL); | |
235 | } | |
236 | ||
237 | ml_set_interrupts_enabled(intrs_enabled); | |
238 | } | |
239 | ||
6d2010ae A |
240 | /* Farm an update out to all CPUs */ |
241 | static void | |
242 | xcpu_update(void) | |
243 | { | |
0a7de745 | 244 | cpumask_t dest_cpumask; |
6d2010ae | 245 | |
0a7de745 A |
246 | if (register_locks() != KERN_SUCCESS) { |
247 | return; | |
248 | } | |
39236c6e | 249 | |
0a7de745 A |
250 | mp_disable_preemption(); |
251 | dest_cpumask = CPUMASK_OTHERS; | |
252 | cpu_apply_microcode(); | |
39236c6e | 253 | /* Update the cpuid info */ |
d26ffc64 | 254 | ucode_cpuid_set_info(); |
0a7de745 A |
255 | /* Now apply workarounds */ |
256 | cpuid_do_was(); | |
257 | mp_enable_preemption(); | |
258 | ||
259 | /* Get all other CPUs to perform the update */ | |
260 | /* | |
261 | * Calling mp_cpus_call with the ASYNC flag ensures that the | |
262 | * IPI dispatch occurs in parallel, but that we will not | |
263 | * proceed until all targeted CPUs complete the microcode | |
264 | * update. | |
265 | */ | |
266 | mp_cpus_call(dest_cpumask, ASYNC, cpu_update, NULL); | |
267 | ||
268 | /* Update the commpage only after we update all CPUs' microcode */ | |
269 | commpage_post_ucode_update(); | |
6d2010ae A |
270 | } |
271 | ||
272 | /* | |
273 | * sysctl function | |
274 | * | |
275 | */ | |
276 | int | |
277 | ucode_interface(uint64_t addr) | |
278 | { | |
279 | int error; | |
0a7de745 | 280 | char arg[16]; |
39236c6e | 281 | |
0a7de745 | 282 | if (PE_parse_boot_argn("-x", arg, sizeof(arg))) { |
39236c6e A |
283 | printf("ucode: no updates in safe mode\n"); |
284 | return EPERM; | |
285 | } | |
6d2010ae A |
286 | |
287 | #if !DEBUG | |
288 | /* | |
289 | * Userland may only call this once per boot. Anything else | |
290 | * would not make sense (all updates are cumulative), and also | |
291 | * leak memory, because we don't free previous updates. | |
292 | */ | |
0a7de745 | 293 | if (global_update) { |
6d2010ae | 294 | return EPERM; |
0a7de745 | 295 | } |
6d2010ae A |
296 | #endif |
297 | ||
298 | /* Get the whole microcode */ | |
299 | error = copyin_update(addr); | |
300 | ||
0a7de745 | 301 | if (error) { |
6d2010ae | 302 | return error; |
0a7de745 | 303 | } |
6d2010ae A |
304 | |
305 | /* Farm out the updates */ | |
306 | xcpu_update(); | |
307 | ||
308 | return 0; | |
309 | } |