[apple/xnu.git] / osfmk / i386 / ucode.c (xnu-2050.18.24)
/*
 * ucode.c
 *
 * Microcode updater interface sysctl
 */

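/*
 * Overview (added for orientation; derived from the code below):
 * ucode_interface() is reached from the sysctl handler with the user-space
 * address of a microcode blob.  copyin_update() copies the blob into wired
 * kernel memory, then xcpu_update() has every CPU apply it via
 * mp_broadcast(cpu_update) -> update_microcode().  ucode_update_wake()
 * re-applies the cached blob after a wake from sleep/hibernate.
 */
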
#include <kern/locks.h>
#include <i386/ucode.h>
#include <sys/errno.h>
#include <i386/proc_reg.h>
#include <i386/cpuid.h>
#include <vm/vm_kern.h>
#include <i386/mp.h>			// mp_broadcast
#include <machine/cpu_number.h>		// cpu_number

#define IA32_BIOS_UPDT_TRIG (0x79)	/* microcode update trigger MSR */

struct intel_ucupdate *global_update = NULL;

/* Execute the actual update! */
static void
update_microcode(void)
{
	/* SDM Example 9-8 code shows that we load the
	 * address of the UpdateData within the microcode blob,
	 * not the address of the header.
	 */
	wrmsr64(IA32_BIOS_UPDT_TRIG, (uint64_t)(uintptr_t)&global_update->data);
}
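
/*
 * Note (sketch, not part of the original file): per the SDM, the revision
 * that actually took effect can be read back from IA32_BIOS_SIGN_ID
 * (MSR 0x8B): clear it, execute CPUID with EAX = 1, then read the update
 * revision from the MSR's high 32 bits, roughly:
 *
 *	wrmsr64(0x8B, 0);
 *	... execute CPUID leaf 1 ...
 *	uint32_t rev = (uint32_t)(rdmsr64(0x8B) >> 32);
 *
 * cpuid_set_info() in cpu_update() below refreshes the kernel's cached
 * CPUID state after the update.
 */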

/* locks */
static lck_grp_attr_t *ucode_slock_grp_attr = NULL;
static lck_grp_t *ucode_slock_grp = NULL;
static lck_attr_t *ucode_slock_attr = NULL;
static lck_spin_t *ucode_slock = NULL;

static kern_return_t
register_locks(void)
{
	/* already allocated? */
	if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr && ucode_slock)
		return KERN_SUCCESS;

	/* allocate lock group attribute and group */
	if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init()))
		goto nomem_out;

	lck_grp_attr_setstat(ucode_slock_grp_attr);

	if (!(ucode_slock_grp = lck_grp_alloc_init("ucode_lock", ucode_slock_grp_attr)))
		goto nomem_out;

	/* Allocate lock attribute */
	if (!(ucode_slock_attr = lck_attr_alloc_init()))
		goto nomem_out;

	/* Allocate the spin lock */
	/* We keep one global spin-lock. We could have one per update
	 * request... but seriously, why would you update microcode like that?
	 */
	if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp, ucode_slock_attr)))
		goto nomem_out;

	return KERN_SUCCESS;

nomem_out:
	/* clean up */
	if (ucode_slock)
		lck_spin_free(ucode_slock, ucode_slock_grp);
	if (ucode_slock_attr)
		lck_attr_free(ucode_slock_attr);
	if (ucode_slock_grp)
		lck_grp_free(ucode_slock_grp);
	if (ucode_slock_grp_attr)
		lck_grp_attr_free(ucode_slock_grp_attr);

	return KERN_NO_SPACE;
}

/* Copy in an update */
static int
copyin_update(uint64_t inaddr)
{
	struct intel_ucupdate update_header;
	struct intel_ucupdate *update;
	vm_size_t size;
	kern_return_t ret;
	int error;

	/* Copy in enough of the header to peek at the size */
	error = copyin((user_addr_t)inaddr, (void *)&update_header, sizeof(update_header));
	if (error)
		return error;

	/* Get the actual, alleged size */
	size = update_header.total_size;

	/* Huge bogus piece of data that somehow made it through? */
	if (size >= 1024 * 1024)
		return ENOMEM;

	/* Old-format microcode with no size field? */
	if (size == 0)
		size = 2048; /* default update size; see SDM */

	/*
	 * Create the buffer for the update.
	 * It need only be aligned to a 16-byte boundary, according to the SDM.
	 * This also wires it down.
	 */
	ret = kmem_alloc_kobject(kernel_map, (vm_offset_t *)&update, size);
	if (ret != KERN_SUCCESS)
		return ENOMEM;

	/* Copy the whole update in */
	error = copyin((user_addr_t)inaddr, (void *)update, size);
	if (error) {
		kmem_free(kernel_map, (vm_offset_t)update, size);
		return error;
	}

	global_update = update;
	return 0;
}
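
/*
 * Note (sketch, not part of the original file): per the SDM, a microcode
 * update blob starts with a 48-byte header that struct intel_ucupdate
 * (i386/ucode.h) mirrors, roughly:
 *
 *	header version, update revision, date, processor signature,
 *	checksum, loader revision, processor flags,
 *	data_size, total_size, three reserved words,
 *	followed by the update data itself.
 *
 * That layout is why copyin_update() reads total_size from the header and
 * why update_microcode() passes &global_update->data (the first word after
 * the header) to IA32_BIOS_UPDT_TRIG rather than the header address.
 */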

/*
 * This is called once by every CPU on a wake from sleep/hibernate
 * and is meant to re-apply a microcode update that got lost
 * by sleeping.
 */
void
ucode_update_wake(void)
{
	if (global_update) {
		kprintf("ucode: Re-applying update after wake (CPU #%d)\n", cpu_number());
		update_microcode();
#ifdef DEBUG
	} else {
		kprintf("ucode: No update to apply (CPU #%d)\n", cpu_number());
#endif
	}
}

static void
cpu_update(__unused void *arg)
{
	/* grab the lock */
	lck_spin_lock(ucode_slock);

	/* execute the update */
	update_microcode();

	/* if CPU #0, update global CPU information */
	if (!cpu_number())
		cpuid_set_info();

	/* release the lock */
	lck_spin_unlock(ucode_slock);
}

/* Farm an update out to all CPUs */
static void
xcpu_update(void)
{
	if (register_locks() != KERN_SUCCESS)
		return;

	/* Get all CPUs to perform the update */
	mp_broadcast(cpu_update, NULL);
}

/*
 * sysctl entry point: takes the user-space address of a microcode update blob
 */
int
ucode_interface(uint64_t addr)
{
	int error;

#if !DEBUG
	/*
	 * Userland may only call this once per boot. Anything else
	 * would not make sense (all updates are cumulative), and it
	 * would also leak memory, because we don't free previous updates.
	 */
	if (global_update)
		return EPERM;
#endif

	/* Get the whole microcode blob */
	error = copyin_update(addr);

	if (error)
		return error;

	/* Farm out the updates */
	xcpu_update();

	return 0;
}
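
/*
 * Usage sketch (assumption, not part of this file): userland hands the
 * address of a microcode blob to the kernel through a machdep sysctl whose
 * handler calls ucode_interface().  In xnu-2050 the OID is believed to be
 * machdep.cpu.ucupdate, so the call would look roughly like:
 *
 *	uint64_t addr = (uint64_t)(uintptr_t)blob;
 *	sysctlbyname("machdep.cpu.ucupdate", NULL, NULL, &addr, sizeof(addr));
 */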