/*
 * ucode.c
 *
 * Microcode updater interface sysctl
 */

#include <kern/locks.h>
#include <i386/ucode.h>
#include <sys/errno.h>
#include <i386/proc_reg.h>
#include <i386/cpuid.h>
#include <vm/vm_kern.h>
#include <i386/mp.h>			// mp_broadcast
#include <machine/cpu_number.h>	// cpu_number
#include <pexpert/pexpert.h>		// boot-args

#define IA32_BIOS_UPDT_TRIG (0x79) /* microcode update trigger MSR */

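/*
 * Layout note: struct intel_ucupdate (defined in <i386/ucode.h>) follows the
 * Intel SDM microcode update header format: header_version, update_revision,
 * date, processor_signature, checksum, loader_revision, processor_flags,
 * data_size, total_size, three reserved dwords, and finally `data', the
 * first dword of the update payload that update_microcode() hands to the
 * WRMSR below.
 */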
struct intel_ucupdate *global_update = NULL;

/* Execute the actual update! */
static void
update_microcode(void)
{
	/* SDM Example 9-8 shows that we load the
	 * address of the UpdateData within the microcode blob,
	 * not the address of the header.
	 */
	wrmsr64(IA32_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)&global_update->data);
}

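/*
 * Illustrative sketch (compiled out, not used by this file): per the SDM,
 * the revision actually loaded can be read back after the WRMSR above by
 * clearing IA32_BIOS_SIGN_ID (MSR 0x8B), executing CPUID leaf 1, and reading
 * the upper 32 bits of the MSR. Assumes rdmsr64()/wrmsr64() from
 * <i386/proc_reg.h> and do_cpuid() from <i386/cpuid.h>.
 */
#if 0
#define IA32_BIOS_SIGN_ID (0x8b) /* microcode update signature MSR */

static uint32_t
current_ucode_revision(void)
{
	uint32_t reg[4];

	wrmsr64(IA32_BIOS_SIGN_ID, 0);		/* clear the signature MSR */
	do_cpuid(1, reg);			/* CPUID(1) latches the revision */

	return (uint32_t)(rdmsr64(IA32_BIOS_SIGN_ID) >> 32);
}
#endif
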
/* locks */
static lck_grp_attr_t *ucode_slock_grp_attr = NULL;
static lck_grp_t *ucode_slock_grp = NULL;
static lck_attr_t *ucode_slock_attr = NULL;
static lck_spin_t *ucode_slock = NULL;

static kern_return_t
register_locks(void)
{
	/* already allocated? */
	if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock)
		return KERN_SUCCESS;

	/* allocate lock group attribute and group */
	if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init()))
		goto nomem_out;

	lck_grp_attr_setstat(ucode_slock_grp_attr);

	if (!(ucode_slock_grp = lck_grp_alloc_init("uccode_lock", ucode_slock_grp_attr)))
		goto nomem_out;

	/* Allocate lock attribute */
	if (!(ucode_slock_attr = lck_attr_alloc_init()))
		goto nomem_out;

	/* Allocate the spin lock */
	/* We keep one global spin-lock. We could have one per update
	 * request... but srsly, why would you update microcode like that?
	 */
	if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr)))
		goto nomem_out;

	return KERN_SUCCESS;

nomem_out:
	/* clean up */
	if (ucode_slock)
		lck_spin_free(ucode_slock, ucode_slock_grp);
	if (ucode_slock_attr)
		lck_attr_free(ucode_slock_attr);
	if (ucode_slock_grp)
		lck_grp_free(ucode_slock_grp);
	if (ucode_slock_grp_attr)
		lck_grp_attr_free(ucode_slock_grp_attr);

	return KERN_NO_SPACE;
}

/* Copy in an update */
static int
copyin_update(uint64_t inaddr)
{
	struct intel_ucupdate update_header;
	struct intel_ucupdate *update;
	vm_size_t size;
	kern_return_t ret;
	int error;

	/* Copy in enough of the header to peek at the size */
	error = copyin((user_addr_t)inaddr, (void *)&update_header, sizeof(update_header));
	if (error)
		return error;

	/* Get the actual, alleged size */
	size = update_header.total_size;

	/* huge bogus piece of data that somehow made it through? */
	if (size >= 1024 * 1024)
		return ENOMEM;

	/* Old microcode? A total_size of zero means the legacy fixed-size
	 * format: 2000 bytes of data plus the 48-byte header (see SDM).
	 */
	if (size == 0)
		size = 2048; /* default update size; see SDM */

	/*
	 * Create the buffer for the update.
	 * It need only be aligned to 16 bytes, according to the SDM.
	 * This also wires it down.
	 */
	ret = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&update, size, VM_KERN_MEMORY_OSFMK);
	if (ret != KERN_SUCCESS)
		return ENOMEM;

	/* Copy it in */
	error = copyin((user_addr_t)inaddr, (void *)update, size);
	if (error) {
		kmem_free(kernel_map, (vm_offset_t)update, size);
		return error;
	}

	global_update = update;
	return 0;
}

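/*
 * Illustrative sketch (compiled out, not used by this file): copyin_update()
 * does not validate the blob beyond a size sanity check. Per the Intel SDM,
 * summing every 32-bit word of the update (header plus data, i.e.
 * total_size / 4 dwords) modulo 2^32 must yield zero; a check along those
 * lines could look like this (the function name is illustrative):
 */
#if 0
static boolean_t
ucode_checksum_ok(const struct intel_ucupdate *update, vm_size_t size)
{
	const uint32_t *word = (const uint32_t *)update;
	uint32_t sum = 0;
	vm_size_t i;

	for (i = 0; i < size / sizeof(uint32_t); i++)
		sum += word[i];

	return (sum == 0);
}
#endif
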
/*
 * This is called once by every CPU on a wake from sleep/hibernate
 * and is meant to re-apply a microcode update that got lost
 * by sleeping.
 */
void
ucode_update_wake()
{
	if (global_update) {
		kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number());
		update_microcode();
#ifdef DEBUG
	} else {
		kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number());
#endif
	}
}

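/*
 * Note on the locking below: the single global spin lock serializes the
 * WRMSR across CPUs so that only one logical processor performs the
 * microcode load at a time (hyper-threaded siblings share microcode state,
 * so concurrent loads on one core are best avoided).
 */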
static void
cpu_update(__unused void *arg)
{
	/* grab the lock */
	lck_spin_lock(ucode_slock);

	/* execute the update */
	update_microcode();

	/* release the lock */
	lck_spin_unlock(ucode_slock);
}

/* Farm an update out to all CPUs */
static void
xcpu_update(void)
{
	if (register_locks() != KERN_SUCCESS)
		return;

	/* Get all CPUs to perform the update */
	mp_broadcast(cpu_update, NULL);

	/* Update the cpuid info */
	cpuid_set_info();
}

/*
 * sysctl function
 */
int
ucode_interface(uint64_t addr)
{
	int error;
	char arg[16];

	if (PE_parse_boot_argn("-x", arg, sizeof (arg))) {
		printf("ucode: no updates in safe mode\n");
		return EPERM;
	}

#if !DEBUG
	/*
	 * Userland may only call this once per boot. Anything else
	 * would not make sense (all updates are cumulative), and it
	 * would also leak memory, because we don't free previous updates.
	 */
	if (global_update)
		return EPERM;
#endif

	/* Get the whole microcode */
	error = copyin_update(addr);

	if (error)
		return error;

	/* Farm out the updates */
	xcpu_update();

	return 0;
}
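
/*
 * Usage note: the kernel entry point above is reached from userland via a
 * machdep sysctl (expected to be machdep.cpu.ucupdate, wired up outside this
 * file in bsd/dev/i386/sysctl.c), which passes in the user-space address of
 * a complete microcode update blob, roughly:
 *
 *	uint64_t addr = (uint64_t)(uintptr_t)blob;   // update file read into memory
 *	sysctlbyname("machdep.cpu.ucupdate", NULL, NULL, &addr, sizeof(addr));
 *
 * On non-DEBUG kernels the call is accepted only once per boot, per the
 * check in ucode_interface().
 */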