/* osfmk/i386/ucode.c — apple/xnu (xnu-6153.61.1) */
1 /*
2 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * ucode.c
30 *
31 * Microcode updater interface sysctl
32 */
33
34 #include <kern/locks.h>
35 #include <i386/ucode.h>
36 #include <sys/errno.h>
37 #include <i386/proc_reg.h>
38 #include <i386/cpuid.h>
39 #include <vm/vm_kern.h>
40 #include <i386/mp.h> // mp_cpus_call
41 #include <i386/commpage/commpage.h>
42 #include <i386/fpu.h>
43 #include <machine/cpu_number.h> // cpu_number
44 #include <pexpert/pexpert.h> // boot-args
45
#define IA32_BIOS_UPDT_TRIG (0x79) /* microcode update trigger MSR */

/*
 * Wired kernel copy of the currently-loaded microcode update image,
 * or NULL if none has been loaded this boot.  Published by
 * copyin_update() and re-applied after wake by ucode_update_wake().
 */
struct intel_ucupdate *global_update = NULL;
49
50 /* Exceute the actual update! */
51 static void
52 update_microcode(void)
53 {
54 /* SDM Example 9-8 code shows that we load the
55 * address of the UpdateData within the microcode blob,
56 * not the address of the header.
57 */
58 wrmsr64(IA32_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)&global_update->data);
59 }
60
/* locks */
/*
 * Bookkeeping for the single global spin lock (lazily allocated by
 * register_locks()) that serializes the microcode-update MSR write.
 */
static lck_grp_attr_t *ucode_slock_grp_attr = NULL;
static lck_grp_t *ucode_slock_grp = NULL;
static lck_attr_t *ucode_slock_attr = NULL;
static lck_spin_t *ucode_slock = NULL;
66
67 static kern_return_t
68 register_locks(void)
69 {
70 /* already allocated? */
71 if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock) {
72 return KERN_SUCCESS;
73 }
74
75 /* allocate lock group attribute and group */
76 if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init())) {
77 goto nomem_out;
78 }
79
80 if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr))) {
81 goto nomem_out;
82 }
83
84 /* Allocate lock attribute */
85 if (!(ucode_slock_attr = lck_attr_alloc_init())) {
86 goto nomem_out;
87 }
88
89 /* Allocate the spin lock */
90 /* We keep one global spin-lock. We could have one per update
91 * request... but srsly, why would you update microcode like that?
92 */
93 if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr))) {
94 goto nomem_out;
95 }
96
97 return KERN_SUCCESS;
98
99 nomem_out:
100 /* clean up */
101 if (ucode_slock) {
102 lck_spin_free(ucode_slock, ucode_slock_grp);
103 }
104 if (ucode_slock_attr) {
105 lck_attr_free(ucode_slock_attr);
106 }
107 if (ucode_slock_grp) {
108 lck_grp_free(ucode_slock_grp);
109 }
110 if (ucode_slock_grp_attr) {
111 lck_grp_attr_free(ucode_slock_grp_attr);
112 }
113
114 return KERN_NO_SPACE;
115 }
116
117 /* Copy in an update */
118 static int
119 copyin_update(uint64_t inaddr)
120 {
121 struct intel_ucupdate update_header;
122 struct intel_ucupdate *update;
123 vm_size_t size;
124 kern_return_t ret;
125 int error;
126
127 /* Copy in enough header to peek at the size */
128 error = copyin((user_addr_t)inaddr, (void *)&update_header, sizeof(update_header));
129 if (error) {
130 return error;
131 }
132
133 /* Get the actual, alleged size */
134 size = update_header.total_size;
135
136 /* huge bogus piece of data that somehow made it through? */
137 if (size >= 1024 * 1024) {
138 return ENOMEM;
139 }
140
141 /* Old microcodes? */
142 if (size == 0) {
143 size = 2048; /* default update size; see SDM */
144 }
145 /*
146 * create the buffer for the update
147 * It need only be aligned to 16-bytes, according to the SDM.
148 * This also wires it down
149 */
150 ret = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&update, size, VM_KERN_MEMORY_OSFMK);
151 if (ret != KERN_SUCCESS) {
152 return ENOMEM;
153 }
154
155 /* Copy it in */
156 error = copyin((user_addr_t)inaddr, (void*)update, size);
157 if (error) {
158 kmem_free(kernel_map, (vm_offset_t)update, size);
159 return error;
160 }
161
162 global_update = update;
163 return 0;
164 }
165
/*
 * Apply the loaded update on the calling CPU, serialized by the global
 * spin lock so that only one CPU writes the update-trigger MSR at a
 * time.  Caller must have loaded global_update and called
 * register_locks() successfully.
 */
static void
cpu_apply_microcode(void)
{
	/* grab the lock */
	lck_spin_lock(ucode_slock);

	/* execute the update */
	update_microcode();

	/* release the lock */
	lck_spin_unlock(ucode_slock);
}
178
/*
 * Per-CPU update routine: apply the microcode image, then re-run the
 * CPU workarounds (cpuid_do_was) since a new microcode revision can
 * change which workarounds are needed.  Used both as the mp_cpus_call
 * target and directly on the calling CPU.
 */
static void
cpu_update(__unused void *arg)
{
	cpu_apply_microcode();

	cpuid_do_was();
}
186
187 /*
188 * This is called once by every CPU on a wake from sleep/hibernate
189 * and is meant to re-apply a microcode update that got lost
190 * by sleeping.
191 */
/*
 * This is called once by every CPU on a wake from sleep/hibernate
 * and is meant to re-apply a microcode update that got lost
 * by sleeping.
 */
void
ucode_update_wake()
{
	if (global_update) {
		kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number());
		cpu_update(NULL);
	/* NOTE: the else-branch exists only in DEBUG builds; the braces
	 * are balanced across the #if, so don't restructure casually. */
#if DEBUG
	} else {
		kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number());
#endif
	}
}
204
205 static void
206 ucode_cpuid_set_info(void)
207 {
208 uint64_t saved_xcr0, dest_xcr0;
209 int need_xcr0_restore = 0;
210 boolean_t intrs_enabled = ml_set_interrupts_enabled(FALSE);
211
212 /*
213 * Before we cache the CPUID information, we must configure XCR0 with the maximal set of
214 * features to ensure the save area returned in the xsave leaf is correctly-sized.
215 *
216 * Since we are guaranteed that init_fpu() has already happened, we can use state
217 * variables set there that were already predicated on the presence of explicit
218 * boot-args enables/disables.
219 */
220
221 if (fpu_capability == AVX512 || fpu_capability == AVX) {
222 saved_xcr0 = xgetbv(XCR0);
223 dest_xcr0 = (fpu_capability == AVX512) ? AVX512_XMASK : AVX_XMASK;
224 assert((get_cr4() & CR4_OSXSAVE) != 0);
225 if (saved_xcr0 != dest_xcr0) {
226 need_xcr0_restore = 1;
227 xsetbv(dest_xcr0 >> 32, dest_xcr0 & 0xFFFFFFFFUL);
228 }
229 }
230
231 cpuid_set_info();
232
233 if (need_xcr0_restore) {
234 xsetbv(saved_xcr0 >> 32, saved_xcr0 & 0xFFFFFFFFUL);
235 }
236
237 ml_set_interrupts_enabled(intrs_enabled);
238 }
239
240 /* Farm an update out to all CPUs */
/* Farm an update out to all CPUs */
static void
xcpu_update(void)
{
	cpumask_t dest_cpumask;

	if (register_locks() != KERN_SUCCESS) {
		return;
	}

	/* Snapshot the set of "other" CPUs and update this one, with
	 * preemption disabled so we cannot migrate mid-sequence. */
	mp_disable_preemption();
	dest_cpumask = CPUMASK_OTHERS;
	cpu_apply_microcode();
	/* Update the cpuid info */
	ucode_cpuid_set_info();
	mp_enable_preemption();

	/* Get all other CPUs to perform the update */
	/*
	 * Calling mp_cpus_call with the ASYNC flag ensures that the
	 * IPI dispatch occurs in parallel, but that we will not
	 * proceed until all targeted CPUs complete the microcode
	 * update.
	 */
	mp_cpus_call(dest_cpumask, ASYNC, cpu_update, NULL);

	/* Update the commpage only after we update all CPUs' microcode */
	commpage_post_ucode_update();
}
269
270 /*
271 * sysctl function
272 *
273 */
274 int
275 ucode_interface(uint64_t addr)
276 {
277 int error;
278 char arg[16];
279
280 if (PE_parse_boot_argn("-x", arg, sizeof(arg))) {
281 printf("ucode: no updates in safe mode\n");
282 return EPERM;
283 }
284
285 #if !DEBUG
286 /*
287 * Userland may only call this once per boot. Anything else
288 * would not make sense (all updates are cumulative), and also
289 * leak memory, because we don't free previous updates.
290 */
291 if (global_update) {
292 return EPERM;
293 }
294 #endif
295
296 /* Get the whole microcode */
297 error = copyin_update(addr);
298
299 if (error) {
300 return error;
301 }
302
303 /* Farm out the updates */
304 xcpu_update();
305
306 return 0;
307 }