]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/ucode.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / osfmk / i386 / ucode.c
1 /*
2 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * ucode.c
30 *
31 * Microcode updater interface sysctl
32 */
33
34 #include <kern/locks.h>
35 #include <i386/ucode.h>
36 #include <sys/errno.h>
37 #include <i386/proc_reg.h>
38 #include <i386/cpuid.h>
39 #include <vm/vm_kern.h>
40 #include <i386/cpu_data.h> // mp_*_preemption
41 #include <i386/mp.h> // mp_cpus_call
42 #include <i386/commpage/commpage.h>
43 #include <i386/fpu.h>
44 #include <machine/cpu_number.h> // cpu_number
45 #include <pexpert/pexpert.h> // boot-args
46
47 #define IA32_BIOS_UPDT_TRIG (0x79) /* microcode update trigger MSR */
48
/*
 * Most-recently copied-in microcode update blob, held in wired kernel
 * memory; NULL until userland loads one via ucode_interface().  Never
 * freed (updates are cumulative; see the note in ucode_interface()).
 */
struct intel_ucupdate *global_update = NULL;
50
51 /* Exceute the actual update! */
52 static void
53 update_microcode(void)
54 {
55 /* SDM Example 9-8 code shows that we load the
56 * address of the UpdateData within the microcode blob,
57 * not the address of the header.
58 */
59 wrmsr64(IA32_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)&global_update->data);
60 }
61
62 /* locks */
63 static LCK_GRP_DECLARE(ucode_slock_grp, "uccode_lock");
64 static LCK_SPIN_DECLARE(ucode_slock, &ucode_slock_grp);
65
66 /* Copy in an update */
67 static int
68 copyin_update(uint64_t inaddr)
69 {
70 struct intel_ucupdate update_header;
71 struct intel_ucupdate *update;
72 vm_size_t size;
73 kern_return_t ret;
74 int error;
75
76 /* Copy in enough header to peek at the size */
77 error = copyin((user_addr_t)inaddr, (void *)&update_header, sizeof(update_header));
78 if (error) {
79 return error;
80 }
81
82 /* Get the actual, alleged size */
83 size = update_header.total_size;
84
85 /* huge bogus piece of data that somehow made it through? */
86 if (size >= 1024 * 1024) {
87 return ENOMEM;
88 }
89
90 /* Old microcodes? */
91 if (size == 0) {
92 size = 2048; /* default update size; see SDM */
93 }
94 /*
95 * create the buffer for the update
96 * It need only be aligned to 16-bytes, according to the SDM.
97 * This also wires it down
98 */
99 ret = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&update, size, VM_KERN_MEMORY_OSFMK);
100 if (ret != KERN_SUCCESS) {
101 return ENOMEM;
102 }
103
104 /* Copy it in */
105 error = copyin((user_addr_t)inaddr, (void*)update, size);
106 if (error) {
107 kmem_free(kernel_map, (vm_offset_t)update, size);
108 return error;
109 }
110
111 global_update = update;
112 return 0;
113 }
114
115 static void
116 cpu_apply_microcode(void)
117 {
118 /* grab the lock */
119 lck_spin_lock(&ucode_slock);
120
121 /* execute the update */
122 update_microcode();
123
124 /* release the lock */
125 lck_spin_unlock(&ucode_slock);
126 }
127
128 static void
129 cpu_update(__unused void *arg)
130 {
131 cpu_apply_microcode();
132
133 cpuid_do_was();
134 }
135
136 /*
137 * This is called once by every CPU on a wake from sleep/hibernate
138 * and is meant to re-apply a microcode update that got lost
139 * by sleeping.
140 */
141 void
142 ucode_update_wake_and_apply_cpu_was()
143 {
144 if (global_update) {
145 kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number());
146 cpu_update(NULL);
147 } else {
148 cpuid_do_was();
149 #if DEBUG
150 kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number());
151 #endif
152 }
153 }
154
/*
 * Re-read and cache the CPUID information (cpuid_set_info()) with XCR0
 * temporarily widened to the maximal supported feature mask, so the xsave
 * save-area size reported by CPUID is correct.  Runs with interrupts
 * disabled for the whole sequence so the XCR0 change cannot be observed
 * by interrupted code.
 */
static void
ucode_cpuid_set_info(void)
{
	uint64_t saved_xcr0, dest_xcr0;
	int need_xcr0_restore = 0;
	boolean_t intrs_enabled = ml_set_interrupts_enabled(FALSE);

	/*
	 * Before we cache the CPUID information, we must configure XCR0 with the maximal set of
	 * features to ensure the save area returned in the xsave leaf is correctly-sized.
	 *
	 * Since we are guaranteed that init_fpu() has already happened, we can use state
	 * variables set there that were already predicated on the presence of explicit
	 * boot-args enables/disables.
	 */

	if (fpu_capability == AVX512 || fpu_capability == AVX) {
		/* saved_xcr0 is only read when need_xcr0_restore was set below */
		saved_xcr0 = xgetbv(XCR0);
		dest_xcr0 = (fpu_capability == AVX512) ? AVX512_XMASK : AVX_XMASK;
		assert((get_cr4() & CR4_OSXSAVE) != 0);
		if (saved_xcr0 != dest_xcr0) {
			need_xcr0_restore = 1;
			/* xsetbv takes the high and low halves separately */
			xsetbv(dest_xcr0 >> 32, dest_xcr0 & 0xFFFFFFFFUL);
		}
	}

	cpuid_set_info();

	/* Put XCR0 back exactly as we found it */
	if (need_xcr0_restore) {
		xsetbv(saved_xcr0 >> 32, saved_xcr0 & 0xFFFFFFFFUL);
	}

	ml_set_interrupts_enabled(intrs_enabled);
}
189
/* Farm an update out to all CPUs */
static void
xcpu_update(void)
{
	cpumask_t dest_cpumask;

	/*
	 * Preemption is disabled so the CPUMASK_OTHERS snapshot, the local
	 * microcode load, and the CPUID re-cache all happen on one CPU.
	 */
	mp_disable_preemption();
	dest_cpumask = CPUMASK_OTHERS;
	cpu_apply_microcode();
	/* Update the cpuid info */
	ucode_cpuid_set_info();
	mp_enable_preemption();

	/*
	 * NOTE(review): unlike the remote CPUs (which run cpu_update and
	 * therefore cpuid_do_was()), the calling CPU does not re-apply
	 * the CPUID workarounds here — confirm that is intended.
	 */

	/* Get all other CPUs to perform the update */
	/*
	 * Calling mp_cpus_call with the ASYNC flag ensures that the
	 * IPI dispatch occurs in parallel, but that we will not
	 * proceed until all targeted CPUs complete the microcode
	 * update.
	 */
	mp_cpus_call(dest_cpumask, ASYNC, cpu_update, NULL);

	/* Update the commpage only after we update all CPUs' microcode */
	commpage_post_ucode_update();
}
215
216 /*
217 * sysctl function
218 *
219 */
220 int
221 ucode_interface(uint64_t addr)
222 {
223 int error;
224 char arg[16];
225
226 if (PE_parse_boot_argn("-x", arg, sizeof(arg))) {
227 printf("ucode: no updates in safe mode\n");
228 return EPERM;
229 }
230
231 #if !DEBUG
232 /*
233 * Userland may only call this once per boot. Anything else
234 * would not make sense (all updates are cumulative), and also
235 * leak memory, because we don't free previous updates.
236 */
237 if (global_update) {
238 return EPERM;
239 }
240 #endif
241
242 /* Get the whole microcode */
243 error = copyin_update(addr);
244
245 if (error) {
246 return error;
247 }
248
249 /* Farm out the updates */
250 xcpu_update();
251
252 return 0;
253 }