/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 * User LDT management.
 * Each thread in a task may have its own LDT.
 */

#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_kern.h>

#include <i386/seg.h>
#include <i386/thread.h>
#include <i386/user_ldt.h>

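/*
 * Note on the user_ldt structure (defined in <i386/user_ldt.h>): as the
 * code below uses it, the layout is assumed to be a descriptor describing
 * the table itself, followed by the variable-length table of descriptors,
 * roughly:
 *
 *	struct user_ldt {
 *		struct real_descriptor	desc;	-- describes the table;
 *						   desc.limit_low is the table
 *						   size in bytes minus one
 *		struct real_descriptor	ldt[1];	-- the descriptors, indexed by
 *						   sel_idx(selector)
 *	};
 *
 * which is why allocations and frees below are sized as
 * (desc.limit_low + 1) + sizeof(struct real_descriptor).
 */
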
char acc_type[8][3] = {
	/*	code	stack	data */
	{ 0, 0, 1 },	/* data */
	{ 0, 1, 1 },	/* data, writable */
	{ 0, 0, 1 },	/* data, expand-down */
	{ 0, 1, 1 },	/* data, writable, expand-down */
	{ 1, 0, 0 },	/* code */
	{ 1, 0, 1 },	/* code, readable */
	{ 1, 0, 0 },	/* code, conforming */
	{ 1, 0, 1 },	/* code, readable, conforming */
};
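
/*
 * selector_check() (below, currently compiled out under "#if 0") indexes
 * this table as acc_type[(access & 0xe) >> 1][type], i.e. by the descriptor
 * type bits of the access byte and the requested use (S_CODE, S_STACK or
 * S_DATA); each entry says whether a segment of that type may be used for
 * code, stack or data access respectively.
 */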

extern struct fake_descriptor ldt[];	/* for system call gate */

#if 0
/* Forward */

extern boolean_t	selector_check(
				thread_t	thread,
				int		sel,
				int		type);

boolean_t
selector_check(
	thread_t	thread,
	int		sel,
	int		type)
{
	struct user_ldt	*ldt;
	int		access;

	ldt = thread->top_act->mact.pcb->ims.ldt;
	if (ldt == 0) {
		switch (type) {
		case S_CODE:
			return sel == USER_CS;
		case S_STACK:
			return sel == USER_DS;
		case S_DATA:
			return sel == 0 ||
			       sel == USER_CS ||
			       sel == USER_DS;
		}
	}

	if (type != S_DATA && sel == 0)
		return FALSE;
	if ((sel & (SEL_LDTS|SEL_PL)) != (SEL_LDTS|SEL_PL_U)
	    || sel > ldt->desc.limit_low)
		return FALSE;

	access = ldt->ldt[sel_idx(sel)].access;

	if ((access & (ACC_P|ACC_PL|ACC_TYPE_USER))
	    != (ACC_P|ACC_PL_U|ACC_TYPE_USER))
		return FALSE;
	/* present, pl == pl.user, not system */

	return acc_type[(access & 0xe)>>1][type];
}

/*
 * Add the descriptors to the LDT, starting with
 * the descriptor for 'first_selector'.
 */

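/*
 * Sketch of the flow below: the descriptor list arrives from the MIG stub
 * as a vm_map_copy_t, so it is first copied out into the kernel map and
 * wired down; each descriptor is then validated; finally, under the pcb
 * lock, the thread's LDT is allocated (or grown) as needed and the new
 * descriptors are installed starting at index sel_idx(first_selector).
 */
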
kern_return_t
i386_set_ldt(
	thread_act_t		thr_act,
	int			first_selector,
	descriptor_list_t	desc_list,
	mach_msg_type_number_t	count)
{
	user_ldt_t	new_ldt, old_ldt, temp;
	struct real_descriptor	*dp;
	int		i;
	int		min_selector = 0;
	pcb_t		pcb;
	vm_size_t	ldt_size_needed;
	int		first_desc = sel_idx(first_selector);
	vm_map_copy_t	old_copy_object;
	thread_t	thread;

	if (first_desc < min_selector || first_desc > 8191)
		return KERN_INVALID_ARGUMENT;
	if (first_desc + count >= 8192)
		return KERN_INVALID_ARGUMENT;
	if (thr_act == THR_ACT_NULL)
		return KERN_INVALID_ARGUMENT;
	if ((thread = act_lock_thread(thr_act)) == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}
	if (thread == current_thread())
		min_selector = LDTSZ;
	act_unlock_thread(thr_act);

	/*
	 * We must copy out desc_list to the kernel map, and wire
	 * it down (we touch it while the PCB is locked).
	 *
	 * We make a copy of the copyin object, and clear
	 * out the old one, so that the MIG stub will have an
	 * empty (but valid) copyin object to discard.
	 */
	{
		kern_return_t	kr;
		vm_offset_t	dst_addr;

		old_copy_object = (vm_map_copy_t) desc_list;

		kr = vm_map_copyout(ipc_kernel_map, &dst_addr,
				    vm_map_copy_copy(old_copy_object));
		if (kr != KERN_SUCCESS)
			return kr;

		(void) vm_map_wire(ipc_kernel_map,
				trunc_page(dst_addr),
				round_page(dst_addr +
					count * sizeof(struct real_descriptor)),
				VM_PROT_READ|VM_PROT_WRITE, FALSE);
		desc_list = (descriptor_list_t) dst_addr;
	}

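	/*
	 * Validate each descriptor's access byte, ignoring the accessed bit.
	 * Empty slots, user-privilege code/data segments and user call gates
	 * are accepted as given; a present call gate without user privilege
	 * is taken as a request for the Mach system call gate and is replaced
	 * by ldt[sel_idx(USER_SCALL)].  Anything else rejects the whole list.
	 */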
	for (i = 0, dp = (struct real_descriptor *) desc_list;
	     i < count;
	     i++, dp++)
	{
		switch (dp->access & ~ACC_A) {
		case 0:
		case ACC_P:
			/* valid empty descriptor */
			break;
		case ACC_P | ACC_CALL_GATE:
			/* Mach kernel call */
			*dp = *(struct real_descriptor *)
				&ldt[sel_idx(USER_SCALL)];
			break;
		case ACC_P | ACC_PL_U | ACC_DATA:
		case ACC_P | ACC_PL_U | ACC_DATA_W:
		case ACC_P | ACC_PL_U | ACC_DATA_E:
		case ACC_P | ACC_PL_U | ACC_DATA_EW:
		case ACC_P | ACC_PL_U | ACC_CODE:
		case ACC_P | ACC_PL_U | ACC_CODE_R:
		case ACC_P | ACC_PL_U | ACC_CODE_C:
		case ACC_P | ACC_PL_U | ACC_CODE_CR:
		case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
		case ACC_P | ACC_PL_U | ACC_CALL_GATE:
			break;
		default:
			(void) vm_map_remove(ipc_kernel_map,
				(vm_offset_t) desc_list,
				count * sizeof(struct real_descriptor),
				VM_MAP_REMOVE_KUNWIRE);
			return KERN_INVALID_ARGUMENT;
		}
	}
	ldt_size_needed = sizeof(struct real_descriptor)
			* (first_desc + count);

	pcb = thr_act->mact.pcb;
	new_ldt = 0;
    Retry:
	simple_lock(&pcb->lock);
	old_ldt = pcb->ims.ldt;
	if (old_ldt == 0 ||
	    old_ldt->desc.limit_low + 1 < ldt_size_needed)
	{
		/*
		 * No old LDT, or not big enough
		 */
		if (new_ldt == 0) {
			simple_unlock(&pcb->lock);

			new_ldt = (user_ldt_t) kalloc(ldt_size_needed
					+ sizeof(struct real_descriptor));
			new_ldt->desc.limit_low = ldt_size_needed - 1;
			new_ldt->desc.limit_high = 0;
			new_ldt->desc.base_low =
				((vm_offset_t)&new_ldt->ldt[0]) & 0xffff;
			new_ldt->desc.base_med =
				(((vm_offset_t)&new_ldt->ldt[0]) >> 16)
					& 0xff;
			new_ldt->desc.base_high =
				((vm_offset_t)&new_ldt->ldt[0]) >> 24;
			new_ldt->desc.access = ACC_P | ACC_LDT;
			new_ldt->desc.granularity = 0;

			goto Retry;
		}

		/*
		 * Have new LDT.  If there was an old LDT, copy descriptors
		 * from old to new.  Otherwise copy the default LDT.
		 */
		if (old_ldt) {
			bcopy((char *)&old_ldt->ldt[0],
			      (char *)&new_ldt->ldt[0],
			      old_ldt->desc.limit_low + 1);
		}
		else if (thr_act == current_act()) {
			struct real_descriptor template = {0, 0, 0, ACC_P, 0, 0, 0};

			for (dp = &new_ldt->ldt[0], i = 0; i < first_desc; i++, dp++) {
				if (i < LDTSZ)
					*dp = *(struct real_descriptor *) &ldt[i];
				else
					*dp = template;
			}
		}

		temp = old_ldt;
		old_ldt = new_ldt;	/* use new LDT from now on */
		new_ldt = temp;		/* discard old LDT */

		pcb->ims.ldt = old_ldt;	/* new LDT for thread */
	}

	/*
	 * Install new descriptors.
	 */
	bcopy((char *)desc_list,
	      (char *)&old_ldt->ldt[first_desc],
	      count * sizeof(struct real_descriptor));

	simple_unlock(&pcb->lock);

	if (new_ldt)
		kfree((vm_offset_t)new_ldt,
		      new_ldt->desc.limit_low + 1 + sizeof(struct real_descriptor));

	/*
	 * Free the descriptor list.
	 */
	(void) vm_map_remove(ipc_kernel_map, (vm_offset_t) desc_list,
		count * sizeof(struct real_descriptor),
		VM_MAP_REMOVE_KUNWIRE);
	return KERN_SUCCESS;
}

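/*
 * Return copies of the descriptors in the thread's LDT, starting with the
 * descriptor for 'first_selector'.  At most 'selector_count' descriptors
 * are returned through *desc_list (as a vm_map_copy_t when out-of-line
 * memory was needed), and *count is set to the number actually returned
 * (0 if the thread has no LDT).
 */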
kern_return_t
i386_get_ldt(
	thread_act_t		thr_act,
	int			first_selector,
	int			selector_count,		/* number wanted */
	descriptor_list_t	*desc_list,		/* in/out */
	mach_msg_type_number_t	*count)			/* in/out */
{
	struct user_ldt	*user_ldt;
	pcb_t		pcb = thr_act->mact.pcb;
	int		first_desc = sel_idx(first_selector);
	unsigned int	ldt_count;
	vm_size_t	ldt_size;
	vm_size_t	size, size_needed;
	vm_offset_t	addr;
	thread_t	thread;

	if (thr_act == THR_ACT_NULL || (thread = thr_act->thread) == THREAD_NULL)
		return KERN_INVALID_ARGUMENT;

	if (first_desc < 0 || first_desc > 8191)
		return KERN_INVALID_ARGUMENT;
	if (first_desc + selector_count >= 8192)
		return KERN_INVALID_ARGUMENT;

	addr = 0;
	size = 0;

	for (;;) {
		simple_lock(&pcb->lock);
		user_ldt = pcb->ims.ldt;
		if (user_ldt == 0) {
			simple_unlock(&pcb->lock);
			if (addr)
				kmem_free(ipc_kernel_map, addr, size);
			*count = 0;
			return KERN_SUCCESS;
		}

		/*
		 * Find how many descriptors we should return.
		 */
		ldt_count = (user_ldt->desc.limit_low + 1) /
			    sizeof(struct real_descriptor);
		ldt_count -= first_desc;
		if (ldt_count > selector_count)
			ldt_count = selector_count;

		ldt_size = ldt_count * sizeof(struct real_descriptor);

		/*
		 * Do we have the memory we need?
		 */
		if (ldt_count <= *count)
			break;		/* fits in-line */

		size_needed = round_page(ldt_size);
		if (size_needed <= size)
			break;

		/*
		 * Unlock the pcb and allocate more memory
		 */
		simple_unlock(&pcb->lock);

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);

		size = size_needed;

		if (kmem_alloc(ipc_kernel_map, &addr, size)
		    != KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 * copy out the descriptors
	 */
	bcopy((char *)&user_ldt->ldt[first_desc],
	      (char *)addr,
	      ldt_size);
	*count = ldt_count;
	simple_unlock(&pcb->lock);

	if (addr) {
		vm_size_t	size_used, size_left;
		vm_map_copy_t	memory;

		/*
		 * Free any unused memory beyond the end of the last page used
		 */
		size_used = round_page(ldt_size);
		if (size_used != size)
			kmem_free(ipc_kernel_map,
				addr + size_used, size - size_used);

		/*
		 * Zero the remainder of the page being returned.
		 */
		size_left = size_used - ldt_size;
		if (size_left > 0)
			bzero((char *)addr + ldt_size, size_left);

		/*
		 * Unwire the memory and make it into copyin form.
		 */
		(void) vm_map_unwire(ipc_kernel_map, trunc_page(addr),
				round_page(addr + size_used), FALSE);
		(void) vm_map_copyin(ipc_kernel_map, addr, size_used,
				TRUE, &memory);
		*desc_list = (descriptor_list_t) memory;
	}

	return KERN_SUCCESS;
}

#endif
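
/*
 * Free a thread's user LDT.  The size handed to kfree() mirrors the
 * allocation made in i386_set_ldt() above: limit_low + 1 bytes of
 * descriptor table plus the real_descriptor that describes the table.
 */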
void
user_ldt_free(
	user_ldt_t	user_ldt)
{
	kfree((vm_offset_t)user_ldt,
	      user_ldt->desc.limit_low + 1 + sizeof(struct real_descriptor));
}