/* osfmk/i386/user_ldt.c — from Apple XNU (xnu-2422.1.72) */
1 /*
2 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 /*
58 */
59
60 /*
61 * User LDT management.
62 * Each task may have its own LDT.
63 */
64
65 #include <kern/kalloc.h>
66 #include <kern/thread.h>
67 #include <kern/misc_protos.h>
68
69 #include <vm/vm_kern.h>
70
71 #include <i386/machdep_call.h>
72 #include <i386/user_ldt.h>
73 #include <i386/mp.h>
74 #include <i386/machine_routines.h>
75 #include <i386/proc_reg.h>
76 #include <i386/mp_desc.h>
77 #include <i386/seg.h>
78 #include <i386/thread.h>
79
80 #include <sys/errno.h>
81
82 static void user_ldt_set_action(void *);
83
84 /*
85 * Add the descriptors to the LDT, starting with
86 * the descriptor for 'first_selector'.
87 */
88
/*
 * i386_set_ldt: install, replace, or clear descriptors in the calling
 * task's per-task LDT, growing the LDT allocation as needed.
 *
 *   retval    - out: first selector actually assigned (useful with
 *               LDT_AUTO_ALLOC).
 *   start_sel - first selector to set, LDT_AUTO_ALLOC to have the kernel
 *               pick a free run, or 0 (with num_sels == 0) to delete the
 *               task's LDT entirely.
 *   descs     - in: user-space address of the descriptors to install;
 *               0 means fill the range with null descriptors.
 *               (NOTE(review): the original comment said "out", but this
 *               address is only read via copyin below.)
 *   num_sels  - number of consecutive selectors to set.
 *
 * Returns 0 on success, or EINVAL / ENOMEM / EACCES / a copyin error.
 */
int
i386_set_ldt(
	uint32_t	*retval,
	uint32_t	start_sel,
	uint32_t	descs,		/* in: user address of descriptors */
	uint32_t	num_sels)
{
	user_ldt_t	new_ldt, old_ldt;
	struct real_descriptor *dp;
	unsigned int	i;
	unsigned int	min_selector = LDTSZ_MIN;	/* do not allow the system selectors to be changed */
	task_t		task = current_task();
	unsigned int	ldt_count;
	kern_return_t	err;

	/*
	 * Validate the requested range: unless auto-allocating (or deleting
	 * with the special 0/0 pair), start_sel must lie within
	 * [LDTSZ_MIN, LDTSZ) and the range must not run past LDTSZ.
	 */
	if (start_sel != LDT_AUTO_ALLOC
	    && (start_sel != 0 || num_sels != 0)
	    && (start_sel < min_selector || start_sel >= LDTSZ))
	    return EINVAL;
	if (start_sel != LDT_AUTO_ALLOC
	    && (uint64_t)start_sel + (uint64_t)num_sels > LDTSZ)	/* cast to uint64_t to detect wrap-around */
	    return EINVAL;

	task_lock(task);

	old_ldt = task->i386_ldt;

	if (start_sel == LDT_AUTO_ALLOC) {
	    if (old_ldt) {
		unsigned int null_count;
		struct real_descriptor null_ldt;

		bzero(&null_ldt, sizeof(null_ldt));

		/*
		 * Look for a run of num_sels null selectors among the
		 * already-allocated entries, so the new descriptors can
		 * reuse dead space instead of growing the LDT.
		 */
		null_count = 0;
		i = 0;
		while (i < old_ldt->count)
		{
		    if (!memcmp(&old_ldt->ldt[i++], &null_ldt, sizeof(null_ldt))) {
			null_count++;
			if (null_count == num_sels)
			    break; /* break out of while loop */
		    } else {
			null_count = 0;
		    }
		}

		/*
		 * If we broke out of the while loop, i points to the selector
		 * after num_sels null selectors. Otherwise it points to the end
		 * of the old LDTs, and null_count is the number of null selectors
		 * at the end.
		 *
		 * Either way, there are null_count null selectors just prior to
		 * the i-indexed selector, and either null_count >= num_sels,
		 * or we're at the end, so we can extend.
		 */
		start_sel = old_ldt->start + i - null_count;
	    } else {
		start_sel = LDTSZ_MIN;
	    }

	    /* The auto-chosen range may still run off the end of the LDT. */
	    if ((uint64_t)start_sel + (uint64_t)num_sels > LDTSZ) {
		task_unlock(task);
		return ENOMEM;
	    }
	}

	if (start_sel == 0 && num_sels == 0) {
	    /* (0, 0) means "delete the task's LDT". */
	    new_ldt = NULL;
	} else {
	    /*
	     * Allocate new LDT covering the union of the old LDT's range
	     * and the newly requested range.
	     */

	    unsigned int begin_sel = start_sel;
	    unsigned int end_sel = begin_sel + num_sels;

	    if (old_ldt != NULL) {
		if (old_ldt->start < begin_sel)
		    begin_sel = old_ldt->start;
		if (old_ldt->start + old_ldt->count > end_sel)
		    end_sel = old_ldt->start + old_ldt->count;
	    }

	    ldt_count = end_sel - begin_sel;
	    /* XXX allocation under task lock */
	    new_ldt = (user_ldt_t)kalloc(sizeof(struct user_ldt) + (ldt_count * sizeof(struct real_descriptor)));
	    if (new_ldt == NULL) {
		task_unlock(task);
		return ENOMEM;
	    }

	    new_ldt->start = begin_sel;
	    new_ldt->count = ldt_count;

	    /*
	     * Have new LDT.  If there was an old LDT, copy descriptors
	     * from old to new.
	     */
	    if (old_ldt) {
		bcopy(&old_ldt->ldt[0],
		      &new_ldt->ldt[old_ldt->start - begin_sel],
		      old_ldt->count * sizeof(struct real_descriptor));

		/*
		 * If the old and new LDTs are non-overlapping, fill the
		 * center in with null selectors.
		 */

		if (old_ldt->start + old_ldt->count < start_sel)
		    bzero(&new_ldt->ldt[old_ldt->count],
			  (start_sel - (old_ldt->start + old_ldt->count)) * sizeof(struct real_descriptor));
		else if (old_ldt->start > start_sel + num_sels)
		    bzero(&new_ldt->ldt[num_sels],
			  (old_ldt->start - (start_sel + num_sels)) * sizeof(struct real_descriptor));
	    }

	    /*
	     * Install new descriptors: copy them in from user space, or
	     * zero-fill the range when no source address was supplied.
	     */
	    if (descs != 0) {
		/* XXX copyin under task lock */
		err = copyin(descs, (char *)&new_ldt->ldt[start_sel - begin_sel],
			     num_sels * sizeof(struct real_descriptor));
		if (err != 0)
		{
		    task_unlock(task);
		    user_ldt_free(new_ldt);
		    return err;
		}
	    } else {
		bzero(&new_ldt->ldt[start_sel - begin_sel], num_sels * sizeof(struct real_descriptor));
	    }

	    /*
	     * Validate descriptors.
	     * Only allow descriptors with user privileges (DPL 3); anything
	     * else could let user code escalate via a kernel-privileged
	     * segment.
	     */
	    for (i = 0, dp = (struct real_descriptor *) &new_ldt->ldt[start_sel - begin_sel];
		 i < num_sels;
		 i++, dp++)
	    {
		switch (dp->access & ~ACC_A) {
		case 0:
		case ACC_P:
		    /* valid empty descriptor, clear Present preemptively */
		    dp->access &= (~ACC_P & 0xff);
		    break;
		case ACC_P | ACC_PL_U | ACC_DATA:
		case ACC_P | ACC_PL_U | ACC_DATA_W:
		case ACC_P | ACC_PL_U | ACC_DATA_E:
		case ACC_P | ACC_PL_U | ACC_DATA_EW:
		case ACC_P | ACC_PL_U | ACC_CODE:
		case ACC_P | ACC_PL_U | ACC_CODE_R:
		case ACC_P | ACC_PL_U | ACC_CODE_C:
		case ACC_P | ACC_PL_U | ACC_CODE_CR:
		    break;
		default:
		    task_unlock(task);
		    user_ldt_free(new_ldt);
		    return EACCES;
		}
	    }
	}

	task->i386_ldt = new_ldt;	/* new LDT for task */

	/*
	 * Switch to new LDT.  We need to do this on all CPUs, since
	 * another thread in this same task may be currently running,
	 * and we need to make sure the new LDT is in place
	 * throughout the task before returning to the user.
	 */
	mp_broadcast(user_ldt_set_action, task);

	task_unlock(task);

	/* free old LDT.  We can't do this until after we've
	 * rendezvoused with all CPUs, in case another thread
	 * in this task was in the process of context switching.
	 */
	if (old_ldt)
	    user_ldt_free(old_ldt);

	*retval = start_sel;

	return 0;
}
282
283 int
284 i386_get_ldt(
285 uint32_t *retval,
286 uint32_t start_sel,
287 uint32_t descs, /* out */
288 uint32_t num_sels)
289 {
290 user_ldt_t user_ldt;
291 task_t task = current_task();
292 unsigned int ldt_count;
293 kern_return_t err;
294
295 if (start_sel >= 8192)
296 return EINVAL;
297 if ((uint64_t)start_sel + (uint64_t)num_sels > 8192)
298 return EINVAL;
299 if (descs == 0)
300 return EINVAL;
301
302 task_lock(task);
303
304 user_ldt = task->i386_ldt;
305 err = 0;
306
307 /*
308 * copy out the descriptors
309 */
310
311 if (user_ldt != 0)
312 ldt_count = user_ldt->start + user_ldt->count;
313 else
314 ldt_count = LDTSZ_MIN;
315
316
317 if (start_sel < ldt_count)
318 {
319 unsigned int copy_sels = num_sels;
320
321 if (start_sel + num_sels > ldt_count)
322 copy_sels = ldt_count - start_sel;
323
324 err = copyout((char *)(current_ldt() + start_sel),
325 descs, copy_sels * sizeof(struct real_descriptor));
326 }
327
328 task_unlock(task);
329
330 *retval = ldt_count;
331
332 return err;
333 }
334
335 void
336 user_ldt_free(
337 user_ldt_t user_ldt)
338 {
339 kfree(user_ldt, sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor)));
340 }
341
342 user_ldt_t
343 user_ldt_copy(
344 user_ldt_t user_ldt)
345 {
346 if (user_ldt != NULL) {
347 size_t size = sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor));
348 user_ldt_t new_ldt = (user_ldt_t)kalloc(size);
349 if (new_ldt != NULL)
350 bcopy(user_ldt, new_ldt, size);
351 return new_ldt;
352 }
353
354 return 0;
355 }
356
357 void
358 user_ldt_set_action(
359 void *arg)
360 {
361 task_t arg_task = (task_t)arg;
362
363 if (arg_task == current_task()) {
364 user_ldt_set(current_thread());
365 }
366 }
367
368 /*
369 * Set the LDT for the given thread on the current CPU. Should be invoked
370 * with interrupts disabled.
371 */
/*
 * Set the LDT for the given thread on the current CPU.  Should be invoked
 * with interrupts disabled.
 *
 * Copies the task's user LDT entries into the per-CPU LDT, updates the
 * GDT descriptor's limit to cover them, and loads the LDT register.  If
 * the task has no user LDT, the default kernel LDT is loaded instead.
 */
void
user_ldt_set(
	thread_t	thread)
{
	task_t		task = thread->task;
	user_ldt_t	user_ldt;

	user_ldt = task->i386_ldt;

	if (user_ldt != 0) {
	    struct real_descriptor *ldtp = (struct real_descriptor *)current_ldt();

	    /*
	     * Null out any stale entries between the system selectors and
	     * the start of this task's range, so descriptors left behind
	     * by a previous task cannot be used.
	     */
	    if (user_ldt->start > LDTSZ_MIN) {
		bzero(&ldtp[LDTSZ_MIN],
		      sizeof(struct real_descriptor) * (user_ldt->start - LDTSZ_MIN));
	    }

	    /* Install this task's descriptors into the per-CPU LDT. */
	    bcopy(user_ldt->ldt, &ldtp[user_ldt->start],
		  sizeof(struct real_descriptor) * (user_ldt->count));

	    /*
	     * Shrink/grow the LDT limit in the GDT to exactly cover the
	     * installed entries (limit is the last valid byte, hence -1).
	     */
	    gdt_desc_p(USER_LDT)->limit_low = (uint16_t)((sizeof(struct real_descriptor) * (user_ldt->start + user_ldt->count)) - 1);

	    ml_cpu_set_ldt(USER_LDT);
	} else {
	    ml_cpu_set_ldt(KERNEL_LDT);
	}
}