/*
 * Copyright (c) 2000-2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * User LDT management.
 * Each task may have its own LDT.
 */

#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_kern.h>

#include <i386/machdep_call.h>
#include <i386/user_ldt.h>
#include <i386/mp.h>
#include <i386/machine_routines.h>
#include <i386/proc_reg.h>
#include <i386/mp_desc.h>
#include <i386/seg.h>
#include <i386/thread.h>

#include <sys/errno.h>

static void user_ldt_set_action(void *);

/*
 * Add the descriptors to the task's LDT, starting with
 * the descriptor for 'start_sel'.  Passing LDT_AUTO_ALLOC
 * as start_sel asks the kernel to choose a free range.
 */

int
i386_set_ldt(
	uint32_t	*retval,
	uint32_t	start_sel,
	uint32_t	descs,		/* in: user address of the descriptor array */
	uint32_t	num_sels)
{
	user_ldt_t	new_ldt, old_ldt;
	struct real_descriptor *dp;
	unsigned int	i;
	unsigned int	min_selector = LDTSZ_MIN;	/* do not allow the system selectors to be changed */
	task_t		task = current_task();
	unsigned int	ldt_count;
	kern_return_t	err;

	if (start_sel != LDT_AUTO_ALLOC
	    && (start_sel != 0 || num_sels != 0)
	    && (start_sel < min_selector || start_sel >= LDTSZ))
		return EINVAL;
	if (start_sel != LDT_AUTO_ALLOC
	    && (uint64_t)start_sel + (uint64_t)num_sels > LDTSZ)	/* cast to uint64_t to detect wrap-around */
		return EINVAL;

	task_lock(task);

	old_ldt = task->i386_ldt;

	if (start_sel == LDT_AUTO_ALLOC) {
		if (old_ldt) {
			unsigned int null_count;
			struct real_descriptor null_ldt;

			bzero(&null_ldt, sizeof(null_ldt));

			/*
			 * Look for null selectors among the already-allocated
			 * entries.
			 */
			null_count = 0;
			i = 0;
			while (i < old_ldt->count)
			{
				if (!memcmp(&old_ldt->ldt[i++], &null_ldt, sizeof(null_ldt))) {
					null_count++;
					if (null_count == num_sels)
						break;	/* break out of while loop */
				} else {
					null_count = 0;
				}
			}

			/*
			 * If we broke out of the while loop, i points to the selector
			 * after num_sels null selectors.  Otherwise it points to the end
			 * of the old LDT, and null_count is the number of null selectors
			 * at the end.
			 *
			 * Either way, there are null_count null selectors just prior to
			 * the i-indexed selector, and either null_count >= num_sels,
			 * or we're at the end, so we can extend.
			 */
			start_sel = old_ldt->start + i - null_count;
		} else {
			start_sel = LDTSZ_MIN;
		}

		if ((uint64_t)start_sel + (uint64_t)num_sels > LDTSZ) {
			task_unlock(task);
			return ENOMEM;
		}
	}

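	/*
	 * start_sel == 0 and num_sels == 0 means "remove this task's
	 * LDT"; the task then falls back to the default kernel LDT.
	 */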
	if (start_sel == 0 && num_sels == 0) {
		new_ldt = NULL;
	} else {
		/*
		 * Allocate new LDT
		 */

		unsigned int begin_sel = start_sel;
		unsigned int end_sel = begin_sel + num_sels;

		if (old_ldt != NULL) {
			if (old_ldt->start < begin_sel)
				begin_sel = old_ldt->start;
			if (old_ldt->start + old_ldt->count > end_sel)
				end_sel = old_ldt->start + old_ldt->count;
		}

		ldt_count = end_sel - begin_sel;
		/* XXX allocation under task lock */
		new_ldt = (user_ldt_t)kalloc(sizeof(struct user_ldt) + (ldt_count * sizeof(struct real_descriptor)));
		if (new_ldt == NULL) {
			task_unlock(task);
			return ENOMEM;
		}

		new_ldt->start = begin_sel;
		new_ldt->count = ldt_count;

		/*
		 * Have new LDT.  If there was an old LDT, copy descriptors
		 * from old to new.
		 */
		if (old_ldt) {
			bcopy(&old_ldt->ldt[0],
			      &new_ldt->ldt[old_ldt->start - begin_sel],
			      old_ldt->count * sizeof(struct real_descriptor));

			/*
			 * If the old and new LDTs are non-overlapping, fill the
			 * center in with null selectors.
			 */

			if (old_ldt->start + old_ldt->count < start_sel)
				bzero(&new_ldt->ldt[old_ldt->count],
				      (start_sel - (old_ldt->start + old_ldt->count)) * sizeof(struct real_descriptor));
			else if (old_ldt->start > start_sel + num_sels)
				bzero(&new_ldt->ldt[num_sels],
				      (old_ldt->start - (start_sel + num_sels)) * sizeof(struct real_descriptor));
		}

		/*
		 * Install new descriptors.
		 */
		if (descs != 0) {
			/* XXX copyin under task lock */
			err = copyin(descs, (char *)&new_ldt->ldt[start_sel - begin_sel],
				     num_sels * sizeof(struct real_descriptor));
			if (err != 0)
			{
				task_unlock(task);
				user_ldt_free(new_ldt);
				return err;
			}
		} else {
			bzero(&new_ldt->ldt[start_sel - begin_sel], num_sels * sizeof(struct real_descriptor));
		}
		/*
		 * Validate descriptors.
		 * Only allow descriptors with user privileges.
		 */
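		/*
		 * ACC_A (the hardware-set "accessed" bit) is masked off
		 * below so it does not affect validation.
		 */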
		for (i = 0, dp = (struct real_descriptor *) &new_ldt->ldt[start_sel - begin_sel];
		     i < num_sels;
		     i++, dp++)
		{
			switch (dp->access & ~ACC_A) {
			case 0:
			case ACC_P:
				/* valid empty descriptor, clear Present preemptively */
				dp->access &= (~ACC_P & 0xff);
				break;
			case ACC_P | ACC_PL_U | ACC_DATA:
			case ACC_P | ACC_PL_U | ACC_DATA_W:
			case ACC_P | ACC_PL_U | ACC_DATA_E:
			case ACC_P | ACC_PL_U | ACC_DATA_EW:
			case ACC_P | ACC_PL_U | ACC_CODE:
			case ACC_P | ACC_PL_U | ACC_CODE_R:
			case ACC_P | ACC_PL_U | ACC_CODE_C:
			case ACC_P | ACC_PL_U | ACC_CODE_CR:
				break;
			default:
				task_unlock(task);
				user_ldt_free(new_ldt);
				return EACCES;
			}
			/* Reject attempts to create descriptors with the 64-bit (L) flag set */
			if (dp->granularity & SZ_64) {
				task_unlock(task);
				user_ldt_free(new_ldt);
				return EACCES;
			}
		}
	}

	task->i386_ldt = new_ldt;	/* new LDT for task */

	/*
	 * Switch to the new LDT.  We need to do this on all CPUs, since
	 * another thread in this same task may be currently running,
	 * and we need to make sure the new LDT is in place
	 * throughout the task before returning to the user.
	 */
	mp_broadcast(user_ldt_set_action, task);

	task_unlock(task);

	/*
	 * Free the old LDT.  We can't do this until after we've
	 * rendezvoused with all CPUs, in case another thread
	 * in this task was in the process of context switching.
	 */
	if (old_ldt)
		user_ldt_free(old_ldt);

	*retval = start_sel;

	return 0;
}

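/*
 * Copy descriptors from the task's LDT out to user space.  On return,
 * *retval holds one past the highest selector slot covered by the
 * task's LDT (LDTSZ_MIN if the task has none).
 */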
int
i386_get_ldt(
	uint32_t	*retval,
	uint32_t	start_sel,
	uint32_t	descs,		/* out: user address to receive the descriptors */
	uint32_t	num_sels)
{
	user_ldt_t	user_ldt;
	task_t		task = current_task();
	unsigned int	ldt_count;
	kern_return_t	err;

	if (start_sel >= LDTSZ)
		return EINVAL;
	if ((uint64_t)start_sel + (uint64_t)num_sels > LDTSZ)
		return EINVAL;
	if (descs == 0)
		return EINVAL;

	task_lock(task);

	user_ldt = task->i386_ldt;
	err = 0;

	/*
	 * Copy out the descriptors.
	 */

	if (user_ldt != 0)
		ldt_count = user_ldt->start + user_ldt->count;
	else
		ldt_count = LDTSZ_MIN;

	if (start_sel < ldt_count)
	{
		unsigned int copy_sels = num_sels;

		if (start_sel + num_sels > ldt_count)
			copy_sels = ldt_count - start_sel;

		err = copyout((char *)(current_ldt() + start_sel),
			      descs, copy_sels * sizeof(struct real_descriptor));
	}

	task_unlock(task);

	*retval = ldt_count;

	return err;
}

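/*
 * Release the memory backing a user LDT.
 */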
void
user_ldt_free(
	user_ldt_t	user_ldt)
{
	kfree(user_ldt, sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor)));
}

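/*
 * Allocate and return a copy of the given user LDT; returns NULL/0 if
 * user_ldt is NULL or the allocation fails.
 */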
user_ldt_t
user_ldt_copy(
	user_ldt_t	user_ldt)
{
	if (user_ldt != NULL) {
		size_t	size = sizeof(struct user_ldt) + (user_ldt->count * sizeof(struct real_descriptor));
		user_ldt_t new_ldt = (user_ldt_t)kalloc(size);
		if (new_ldt != NULL)
			bcopy(user_ldt, new_ldt, size);
		return new_ldt;
	}

	return 0;
}

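/*
 * mp_broadcast() action routine: runs on every CPU.  If this CPU is
 * currently running a thread of the affected task, reload its LDT so
 * the update takes effect immediately.
 */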
void
user_ldt_set_action(
	void *arg)
{
	task_t	arg_task = (task_t)arg;

	if (arg_task == current_task()) {
		user_ldt_set(current_thread());
	}
}

/*
 * Set the LDT for the given thread on the current CPU.  Should be
 * invoked with interrupts disabled.
 */
void
user_ldt_set(
	thread_t	thread)
{
	task_t		task = thread->task;
	user_ldt_t	user_ldt;

	user_ldt = task->i386_ldt;

	if (user_ldt != 0) {
		struct real_descriptor *ldtp = (struct real_descriptor *)current_ldt();

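		/*
		 * Clear any stale descriptors between the system selectors
		 * and the start of this task's range.
		 */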
		if (user_ldt->start > LDTSZ_MIN) {
			bzero(&ldtp[LDTSZ_MIN],
			      sizeof(struct real_descriptor) * (user_ldt->start - LDTSZ_MIN));
		}

		bcopy(user_ldt->ldt, &ldtp[user_ldt->start],
		      sizeof(struct real_descriptor) * (user_ldt->count));

		gdt_desc_p(USER_LDT)->limit_low = (uint16_t)((sizeof(struct real_descriptor) * (user_ldt->start + user_ldt->count)) - 1);

		ml_cpu_set_ldt(USER_LDT);
	} else {
		ml_cpu_set_ldt(KERNEL_LDT);
	}
}
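
/*
 * For reference only: a minimal user-space sketch of exercising this
 * interface via the libsystem wrappers declared in
 * <architecture/i386/table.h>.  The wrapper names are real, but treat
 * the details (LDT_AUTO_ALLOC visibility, return convention) as
 * assumptions to verify against the SDK headers:
 *
 *	#include <string.h>
 *	#include <architecture/i386/table.h>
 *
 *	#ifndef LDT_AUTO_ALLOC
 *	#define LDT_AUTO_ALLOC 0xffffffff   // assumed value, per osfmk/i386/user_ldt.h
 *	#endif
 *
 *	union ldt_entry entry;
 *	memset(&entry, 0, sizeof(entry));   // a null (empty) descriptor
 *	// Fill in a ring-3 code or data descriptor here; anything else
 *	// is rejected with EACCES by the validation loop above.
 *	int sel = i386_set_ldt(LDT_AUTO_ALLOC, &entry, 1);
 *	// On success, sel is the first selector index that was set.
 */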