/*
 * Copyright (c) 2017-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 *  ucode.c
 *
 *  Microcode updater interface sysctl
 */

#include <kern/locks.h>
#include <i386/ucode.h>
#include <sys/errno.h>
#include <i386/proc_reg.h>
#include <i386/cpuid.h>
#include <vm/vm_kern.h>
#include <i386/cpu_data.h> // mp_*_preemption
#include <i386/mp.h> // mp_cpus_call
#include <i386/commpage/commpage.h>
#include <i386/fpu.h> // fpu_capability, xgetbv/xsetbv
#include <machine/cpu_number.h> // cpu_number
#include <pexpert/pexpert.h> // boot-args

#define IA32_BIOS_UPDT_TRIG (0x79) /* microcode update trigger MSR */

struct intel_ucupdate *global_update = NULL;
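
/*
 * Note: struct intel_ucupdate, declared in <i386/ucode.h>, mirrors the
 * SDM's 48-byte microcode update header (header version, update revision,
 * date, processor signature, checksum, loader revision, processor flags,
 * data_size, total_size, reserved) followed by the first word of the
 * update data; only total_size and data are used in this file.
 */
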
/* Execute the actual update! */
static void
update_microcode(void)
{
	/* SDM Example 9-8 code shows that we load the
	 * address of the UpdateData within the microcode blob,
	 * not the address of the header.
	 */
	wrmsr64(IA32_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)&global_update->data);
}
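
/*
 * Editor's illustrative sketch, not part of the original file: per the
 * SDM's update signature/verification procedure, the revision that is
 * actually loaded can be confirmed by clearing IA32_BIOS_SIGN_ID
 * (MSR 0x8B), executing CPUID(1), and reading the MSR back; the revision
 * is returned in the upper 32 bits. rdmsr64()/wrmsr64() are from
 * <i386/proc_reg.h>, do_cpuid() from <i386/cpuid.h>.
 */
#define IA32_BIOS_SIGN_ID (0x8b) /* microcode update signature MSR */

static __unused uint32_t
ucode_loaded_revision(void)
{
	uint32_t reg[4];

	wrmsr64(IA32_BIOS_SIGN_ID, 0); /* clear the signature MSR */
	do_cpuid(1, reg);              /* CPUID(1) latches the current revision */
	return (uint32_t)(rdmsr64(IA32_BIOS_SIGN_ID) >> 32);
}
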
static lck_grp_attr_t *ucode_slock_grp_attr = NULL;
static lck_grp_t *ucode_slock_grp = NULL;
static lck_attr_t *ucode_slock_attr = NULL;
static lck_spin_t *ucode_slock = NULL;

static kern_return_t
register_locks(void)
{
	/* already allocated? */
	if (ucode_slock_grp_attr &&
	    ucode_slock_grp &&
	    ucode_slock_attr &&
	    ucode_slock) {
		return KERN_SUCCESS;
	}

	/* allocate lock group attribute and group */
	if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init())) {
		goto nomem_out;
	}

	if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr))) {
		goto nomem_out;
	}

	/* Allocate lock attribute */
	if (!(ucode_slock_attr = lck_attr_alloc_init())) {
		goto nomem_out;
	}

	/* Allocate the spin lock */
	/* We keep one global spin-lock. We could have one per update
	 * request... but srsly, why would you update microcode like that?
	 */
	if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr))) {
		goto nomem_out;
	}

	return KERN_SUCCESS;

nomem_out:
	/* clean up allocations on error */
	if (ucode_slock) {
		lck_spin_free(ucode_slock, ucode_slock_grp);
	}
	if (ucode_slock_attr) {
		lck_attr_free(ucode_slock_attr);
	}
	if (ucode_slock_grp) {
		lck_grp_free(ucode_slock_grp);
	}
	if (ucode_slock_grp_attr) {
		lck_grp_attr_free(ucode_slock_grp_attr);
	}

	return KERN_NO_SPACE;
}

/* Copy in an update */
static int
copyin_update(uint64_t inaddr)
{
	struct intel_ucupdate update_header;
	struct intel_ucupdate *update;
	vm_size_t size;
	kern_return_t ret;
	int error;

	/* Copy in enough header to peek at the size */
	error = copyin((user_addr_t)inaddr, (void *)&update_header, sizeof(update_header));
	if (error) {
		return error;
	}

	/* Get the actual, alleged size */
	size = update_header.total_size;

	/* huge bogus piece of data that somehow made it through? */
	if (size >= 1024 * 1024) {
		return ENOMEM;
	}

	/* Old microcodes? */
	if (size == 0) {
		size = 2048; /* default update size; see SDM */
	}

	/*
	 * create the buffer for the update
	 * It need only be aligned to 16-bytes, according to the SDM.
	 * This also wires it down
	 */
	ret = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&update, size, VM_KERN_MEMORY_OSFMK);
	if (ret != KERN_SUCCESS) {
		return ENOMEM;
	}

	/* Copy it in */
	error = copyin((user_addr_t)inaddr, (void *)update, size);
	if (error) {
		kmem_free(kernel_map, (vm_offset_t)update, size);
		return error;
	}

	global_update = update;
	return 0;
}
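
/*
 * Editor's illustrative sketch, not part of the original file: the SDM
 * specifies that all 32-bit dwords of an update (header plus data, i.e.
 * total_size bytes, a multiple of 1024) must sum to zero, the header's
 * checksum field having been chosen to make it so. A stricter version of
 * copyin_update() could reject corrupt blobs before they reach the MSR:
 */
static __unused boolean_t
ucode_checksum_ok(const uint32_t *blob, uint32_t total_size)
{
	uint32_t sum = 0;
	uint32_t i;

	for (i = 0; i < total_size / sizeof(uint32_t); i++) {
		sum += blob[i];
	}
	return sum == 0;
}
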
static void
cpu_apply_microcode(void)
{
	/* grab the lock */
	lck_spin_lock(ucode_slock);

	/* execute the update */
	update_microcode();

	/* release the lock */
	lck_spin_unlock(ucode_slock);
}

static void
cpu_update(__unused void *arg)
{
	cpu_apply_microcode();

	/* apply any CPUID-derived workarounds for the (possibly new) microcode */
	cpuid_do_was();
}

/*
 * This is called once by every CPU on a wake from sleep/hibernate
 * and is meant to re-apply a microcode update that got lost
 * by sleeping.
 */
void
ucode_update_wake_and_apply_cpu_was(void)
{
	if (global_update) {
		kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number());
		cpu_update(NULL);
	} else {
		kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number());
		/* still apply workarounds, even without a pending update */
		cpuid_do_was();
	}
}

static void
ucode_cpuid_set_info(void)
{
	uint64_t saved_xcr0, dest_xcr0;
	int need_xcr0_restore = 0;
	boolean_t intrs_enabled = ml_set_interrupts_enabled(FALSE);

	/*
	 * Before we cache the CPUID information, we must configure XCR0 with the maximal set of
	 * features to ensure the save area returned in the xsave leaf is correctly-sized.
	 *
	 * Since we are guaranteed that init_fpu() has already happened, we can use state
	 * variables set there that were already predicated on the presence of explicit
	 * boot-args enables/disables.
	 */

	if (fpu_capability == AVX512 || fpu_capability == AVX) {
		saved_xcr0 = xgetbv(XCR0);
		dest_xcr0 = (fpu_capability == AVX512) ? AVX512_XMASK : AVX_XMASK;
		assert((get_cr4() & CR4_OSXSAVE) != 0);
		if (saved_xcr0 != dest_xcr0) {
			need_xcr0_restore = 1;
			xsetbv(dest_xcr0 >> 32, dest_xcr0 & 0xFFFFFFFFUL);
		}
	}

	/* re-cache the CPUID info now that the new microcode is live */
	cpuid_set_info();

	if (need_xcr0_restore) {
		xsetbv(saved_xcr0 >> 32, saved_xcr0 & 0xFFFFFFFFUL);
	}

	ml_set_interrupts_enabled(intrs_enabled);
}
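
/*
 * Editor's note: the XCR0 widening above matters because CPUID leaf 0xD
 * (EAX=0DH, ECX=0) reports, in EBX, the XSAVE area size required by the
 * feature set currently enabled in XCR0, so caching CPUID output with a
 * narrowed XCR0 would undersize the reported save area.
 */
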
/* Farm an update out to all CPUs */
static void
xcpu_update(void)
{
	cpumask_t dest_cpumask;

	if (register_locks() != KERN_SUCCESS) {
		return;
	}

	mp_disable_preemption();
	dest_cpumask = CPUMASK_OTHERS;
	cpu_apply_microcode();
	/* Update the cpuid info */
	ucode_cpuid_set_info();
	mp_enable_preemption();

	/* Get all other CPUs to perform the update */
	/*
	 * Calling mp_cpus_call with the ASYNC flag ensures that the
	 * IPI dispatch occurs in parallel, but that we will not
	 * proceed until all targeted CPUs complete the microcode
	 * update.
	 */
	mp_cpus_call(dest_cpumask, ASYNC, cpu_update, NULL);

	/* Update the commpage only after we update all CPUs' microcode */
	commpage_post_ucode_update();
}

int
ucode_interface(uint64_t addr)
{
	int error;
	char arg[16];

	if (PE_parse_boot_argn("-x", arg, sizeof(arg))) {
		printf("ucode: no updates in safe mode\n");
		return EPERM;
	}

	/*
	 * Userland may only call this once per boot. Anything else
	 * would not make sense (all updates are cumulative), and also
	 * leak memory, because we don't free previous updates.
	 */
	if (global_update) {
		return EPERM;
	}

	/* Get the whole microcode */
	error = copyin_update(addr);
	if (error) {
		return error;
	}

	/* Farm out the updates */
	xcpu_update();
	return 0;
}
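
/*
 * Editor's note (an assumption about the surrounding tree, not part of
 * this file): ucode_interface() is expected to be reached from a machdep
 * sysctl handler in bsd/dev/i386/sysctl.c, with userland passing the
 * user-space address of a complete update blob, roughly:
 *
 *	uint64_t addr = (uint64_t)(uintptr_t)blob;
 *	sysctlbyname("machdep.cpu.ucupdate", NULL, NULL, &addr, sizeof(addr));
 */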