/* osfmk/i386/user_ldt.c (from xnu-6153.61.1) */
/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 * User LDT management.
 * Each task may have its own LDT.
 */

#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_kern.h>

#include <i386/machdep_call.h>
#include <i386/user_ldt.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/seg.h>
#include <i386/thread.h>

#include <IOKit/IOBSD.h>        /* for IOTaskHasEntitlement */
#include <sys/csr.h>            /* for csr_check */

#include <sys/errno.h>

static void user_ldt_set_action(void *);
static int i386_set_ldt_impl(uint32_t *retval, uint64_t start_sel, uint64_t descs,
    uint64_t num_sels);
static int i386_get_ldt_impl(uint32_t *retval, uint64_t start_sel, uint64_t descs,
    uint64_t num_sels);

#define LDT_IN_64BITPROC_ENTITLEMENT "com.apple.security.ldt-in-64bit-process"

/*
 * Add the descriptors to the LDT, starting with
 * the descriptor for 'start_sel'.
 */

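/*
 * For reference, descriptors passed in here use the standard IA-32 layout of
 * struct real_descriptor (see osfmk/i386/seg.h).  A flat 4 GiB ring-3 writable
 * data segment, for example, would be encoded as limit_low = 0xffff,
 * limit_high = 0xf, base_low/base_med/base_high = 0, access = 0xf2
 * (ACC_P | ACC_PL_U | ACC_DATA_W), and granularity = SZ_G | SZ_32.  The
 * selector user code loads for LDT slot n is (n << 3) | 0x7 (TI = 1, RPL = 3).
 */
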
static int
i386_set_ldt_impl(
        uint32_t *retval,
        uint64_t start_sel,
        uint64_t descs, /* in */
        uint64_t num_sels)
{
        user_ldt_t new_ldt, old_ldt;
        struct real_descriptor *dp;
        unsigned int i;
        unsigned int min_selector = LDTSZ_MIN; /* do not allow the system selectors to be changed */
        task_t task = current_task();
        unsigned int ldt_count;
        kern_return_t err;

        if (start_sel != LDT_AUTO_ALLOC
            && (start_sel != 0 || num_sels != 0)
            && (start_sel < min_selector || start_sel >= LDTSZ || num_sels > LDTSZ)) {
                return EINVAL;
        }
        if (start_sel != LDT_AUTO_ALLOC && start_sel + num_sels > LDTSZ) {
                return EINVAL;
        }

        task_lock(task);

        old_ldt = task->i386_ldt;

        if (start_sel == LDT_AUTO_ALLOC) {
                if (old_ldt) {
                        unsigned int null_count;
                        struct real_descriptor null_ldt;

                        bzero(&null_ldt, sizeof(null_ldt));

                        /*
                         * Look for null selectors among the already-allocated
                         * entries.
                         */
                        null_count = 0;
                        i = 0;
                        while (i < old_ldt->count) {
                                if (!memcmp(&old_ldt->ldt[i++], &null_ldt, sizeof(null_ldt))) {
                                        null_count++;
                                        if (null_count == num_sels) {
                                                break; /* break out of while loop */
                                        }
                                } else {
                                        null_count = 0;
                                }
                        }

                        /*
                         * If we broke out of the while loop, i points to the selector
                         * after num_sels null selectors. Otherwise it points to the end
                         * of the old LDT, and null_count is the number of null selectors
                         * at the end.
                         *
                         * Either way, there are null_count null selectors just prior to
                         * the i-indexed selector, and either null_count >= num_sels,
                         * or we're at the end, so we can extend.
                         */
                        start_sel = old_ldt->start + i - null_count;
                } else {
                        start_sel = LDTSZ_MIN;
                }

                if (start_sel + num_sels > LDTSZ) {
                        task_unlock(task);
                        return ENOMEM;
                }
        }

        if (start_sel == 0 && num_sels == 0) {
                new_ldt = NULL;
        } else {
                /*
                 * Allocate new LDT
                 */

                unsigned int begin_sel = (unsigned int)start_sel;
                unsigned int end_sel = (unsigned int)begin_sel +
                    (unsigned int)num_sels;

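                /*
                 * If the task already has an LDT, the replacement must span
                 * the union of the existing range and the requested range,
                 * since previously installed descriptors are preserved.
                 */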
                if (old_ldt != NULL) {
                        if (old_ldt->start < begin_sel) {
                                begin_sel = old_ldt->start;
                        }
                        if (old_ldt->start + old_ldt->count > end_sel) {
                                end_sel = old_ldt->start + old_ldt->count;
                        }
                }

                ldt_count = end_sel - begin_sel;
                /* XXX allocation under task lock */
                new_ldt = (user_ldt_t)kalloc(sizeof(struct user_ldt) + (ldt_count * sizeof(struct real_descriptor)));
                if (new_ldt == NULL) {
                        task_unlock(task);
                        return ENOMEM;
                }

                new_ldt->start = begin_sel;
                new_ldt->count = ldt_count;

                /*
                 * Have new LDT. If there was an old LDT, copy descriptors
                 * from old to new.
                 */
                if (old_ldt) {
                        bcopy(&old_ldt->ldt[0],
                            &new_ldt->ldt[old_ldt->start - begin_sel],
                            old_ldt->count * sizeof(struct real_descriptor));

                        /*
                         * If the old and new LDTs are non-overlapping, fill the
                         * gap between them with null selectors.
                         */

                        if (old_ldt->start + old_ldt->count < start_sel) {
                                bzero(&new_ldt->ldt[old_ldt->count],
                                    (start_sel - (old_ldt->start + old_ldt->count)) * sizeof(struct real_descriptor));
                        } else if (old_ldt->start > start_sel + num_sels) {
                                bzero(&new_ldt->ldt[num_sels],
                                    (old_ldt->start - (start_sel + num_sels)) * sizeof(struct real_descriptor));
                        }
                }

                /*
                 * Install new descriptors.
                 */
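                /*
                 * A zero 'descs' pointer clears the requested range to null
                 * descriptors instead of copying descriptors in from user space.
                 */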
                if (descs != 0) {
                        /* XXX copyin under task lock */
                        err = copyin(descs, (char *)&new_ldt->ldt[start_sel - begin_sel],
                            num_sels * sizeof(struct real_descriptor));
                        if (err != 0) {
                                task_unlock(task);
                                user_ldt_free(new_ldt);
                                return err;
                        }
                } else {
                        bzero(&new_ldt->ldt[start_sel - begin_sel], num_sels * sizeof(struct real_descriptor));
                }
                /*
                 * Validate descriptors.
                 * Only allow descriptors with user privileges.
                 */
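                /*
                 * For example, access = 0xf2 (ACC_P | ACC_PL_U | ACC_DATA_W)
                 * is accepted below; anything with a kernel DPL or a system
                 * descriptor type falls through to the default case and is
                 * rejected with EACCES.
                 */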
                for (i = 0, dp = (struct real_descriptor *) &new_ldt->ldt[start_sel - begin_sel];
                    i < num_sels;
                    i++, dp++) {
                        switch (dp->access & ~ACC_A) {
                        case 0:
                        case ACC_P:
                                /* valid empty descriptor, clear Present preemptively */
                                dp->access &= (~ACC_P & 0xff);
                                break;
                        case ACC_P | ACC_PL_U | ACC_DATA:
                        case ACC_P | ACC_PL_U | ACC_DATA_W:
                        case ACC_P | ACC_PL_U | ACC_DATA_E:
                        case ACC_P | ACC_PL_U | ACC_DATA_EW:
                        case ACC_P | ACC_PL_U | ACC_CODE:
                        case ACC_P | ACC_PL_U | ACC_CODE_R:
                        case ACC_P | ACC_PL_U | ACC_CODE_C:
                        case ACC_P | ACC_PL_U | ACC_CODE_CR:
                                break;
                        default:
                                task_unlock(task);
                                user_ldt_free(new_ldt);
                                return EACCES;
                        }
                        /* Reject attempts to create segments with 64-bit granules */
                        /* Note this restriction is still correct, even when
                         * executing as a 64-bit process (we want to maintain a single
                         * 64-bit selector (located in the GDT)).
                         */
                        if (dp->granularity & SZ_64) {
                                task_unlock(task);
                                user_ldt_free(new_ldt);
                                return EACCES;
                        }
                }
        }

        task->i386_ldt = new_ldt; /* new LDT for task */

        /*
         * Switch to new LDT. We need to do this on all CPUs, since
         * another thread in this same task may be currently running,
         * and we need to make sure the new LDT is in place
         * throughout the task before returning to the user.
         */
        mp_broadcast(user_ldt_set_action, task);

        task_unlock(task);

        /* free old LDT. We can't do this until after we've
         * rendezvoused with all CPUs, in case another thread
         * in this task was in the process of context switching.
         */
        if (old_ldt) {
                user_ldt_free(old_ldt);
        }

        *retval = (uint32_t)start_sel;

        return 0;
}

static int
i386_get_ldt_impl(
        uint32_t *retval,
        uint64_t start_sel,
        uint64_t descs, /* out */
        uint64_t num_sels)
{
        user_ldt_t user_ldt;
        task_t task = current_task();
        unsigned int ldt_count;
        kern_return_t err;

        if (start_sel >= LDTSZ || num_sels > LDTSZ) {
                return EINVAL;
        }
        if (start_sel + num_sels > LDTSZ) {
                return EINVAL;
        }
        if (descs == 0) {
                return EINVAL;
        }

        task_lock(task);

        user_ldt = task->i386_ldt;
        err = 0;

        /*
         * copy out the descriptors
         */

        if (user_ldt != 0) {
                ldt_count = user_ldt->start + user_ldt->count;
        } else {
                ldt_count = LDTSZ_MIN;
        }

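        /*
         * ldt_count is one past the highest valid selector and is also
         * reported back to the caller through *retval.  The copyout below
         * reads the live per-CPU LDT (current_ldt()), which holds this
         * task's descriptors because the calling thread is running here.
         */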
        if (start_sel < ldt_count) {
                unsigned int copy_sels = (unsigned int)num_sels;

                if (start_sel + num_sels > ldt_count) {
                        copy_sels = ldt_count - (unsigned int)start_sel;
                }

                err = copyout((char *)(current_ldt() + start_sel),
                    descs, copy_sels * sizeof(struct real_descriptor));
        }

        task_unlock(task);

        *retval = ldt_count;

        return err;
}

void
user_ldt_free(
        user_ldt_t user_ldt)
{
        kfree(user_ldt, sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor)));
}

user_ldt_t
user_ldt_copy(
        user_ldt_t user_ldt)
{
        if (user_ldt != NULL) {
                size_t size = sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor));
                user_ldt_t new_ldt = (user_ldt_t)kalloc(size);
                if (new_ldt != NULL) {
                        bcopy(user_ldt, new_ldt, size);
                }
                return new_ldt;
        }

        return 0;
}

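/*
 * Invoked on every CPU via mp_broadcast() from i386_set_ldt_impl(); only a
 * CPU currently running a thread of the target task reloads its LDT here.
 */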
void
user_ldt_set_action(
        void *arg)
{
        task_t arg_task = (task_t)arg;

        if (arg_task == current_task()) {
                user_ldt_set(current_thread());
        }
}

/*
 * Set the LDT for the given thread on the current CPU. Should be invoked
 * with interrupts disabled.
 */
void
user_ldt_set(
        thread_t thread)
{
        task_t task = thread->task;
        user_ldt_t user_ldt;

        user_ldt = task->i386_ldt;

        if (user_ldt != 0) {
                struct real_descriptor *ldtp = current_ldt();

                if (user_ldt->start > LDTSZ_MIN) {
                        bzero(&ldtp[LDTSZ_MIN],
                            sizeof(struct real_descriptor) * (user_ldt->start - LDTSZ_MIN));
                }

                bcopy(user_ldt->ldt, &ldtp[user_ldt->start],
                    sizeof(struct real_descriptor) * (user_ldt->count));

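                /*
                 * The LDT limit must cover descriptors 0 .. start + count - 1,
                 * at 8 bytes each, so the byte limit is (start + count) * 8 - 1;
                 * e.g. start = 10, count = 2 yields a limit of 95 (0x5f).
                 */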
                gdt_desc_p(USER_LDT)->limit_low = (uint16_t)((sizeof(struct real_descriptor) * (user_ldt->start + user_ldt->count)) - 1);

                ml_cpu_set_ldt(USER_LDT);
        } else {
                ml_cpu_set_ldt(KERNEL_LDT);
        }
}

/* For 32-bit processes, called via machdep_syscall() */
int
i386_set_ldt(
        uint32_t *retval,
        uint32_t start_sel,
        uint32_t descs, /* in */
        uint32_t num_sels)
{
        return i386_set_ldt_impl(retval, (uint64_t)start_sel, (uint64_t)descs,
            (uint64_t)num_sels);
}
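/*
 * Illustrative only: a minimal sketch of how a 32-bit process might reach
 * this entry point, assuming the user-level i386_set_ldt() wrapper and
 * union ldt_entry historically exposed through <architecture/i386/table.h>
 * (those user-space declarations are assumptions, not part of this file):
 *
 *      union ldt_entry desc;
 *      // fill in a flat ring-3 writable data segment, as in the
 *      // encoding example near the top of this file
 *      int slot = i386_set_ldt(LDT_AUTO_ALLOC, &desc, 1);
 *      if (slot >= 0) {
 *              // selector for the newly allocated slot: TI = 1, RPL = 3
 *              unsigned short sel = (unsigned short)((slot << 3) | 0x7);
 *      }
 */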

/* For 64-bit processes, called via machdep_syscall64() */
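/*
 * LDT access from a 64-bit process is gated: it is allowed only when SIP
 * permits untrusted kexts (csr_check(CSR_ALLOW_UNTRUSTED_KEXTS) returns 0)
 * or when the task holds the com.apple.security.ldt-in-64bit-process
 * entitlement; otherwise i386_set_ldt64() and i386_get_ldt64() fail with EPERM.
 */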
int
i386_set_ldt64(
        uint32_t *retval,
        uint64_t start_sel,
        uint64_t descs, /* in */
        uint64_t num_sels)
{
        if (csr_check(CSR_ALLOW_UNTRUSTED_KEXTS) != 0 &&
            !IOTaskHasEntitlement(current_task(), LDT_IN_64BITPROC_ENTITLEMENT)) {
                return EPERM;
        }

        return i386_set_ldt_impl(retval, start_sel, descs, num_sels);
}

/* For 32-bit processes, called via machdep_syscall() */
int
i386_get_ldt(
        uint32_t *retval,
        uint32_t start_sel,
        uint32_t descs, /* out */
        uint32_t num_sels)
{
        return i386_get_ldt_impl(retval, (uint64_t)start_sel, (uint64_t)descs,
            (uint64_t)num_sels);
}

/* For 64-bit processes, called via machdep_syscall64() */
int
i386_get_ldt64(
        uint32_t *retval,
        uint64_t start_sel,
        uint64_t descs, /* out */
        uint64_t num_sels)
{
        if (csr_check(CSR_ALLOW_UNTRUSTED_KEXTS) != 0 &&
            !IOTaskHasEntitlement(current_task(), LDT_IN_64BITPROC_ENTITLEMENT)) {
                return EPERM;
        }

        return i386_get_ldt_impl(retval, start_sel, descs, num_sels);
}