/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */

/*
 * User LDT management.
 * Each thread in a task may have its own LDT.
 */

#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>

#include <vm/vm_kern.h>

#include <i386/seg.h>
#include <i386/thread.h>
#include <i386/user_ldt.h>

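/*
 * Allowed uses of each descriptor type, indexed by bits 1-3 of the
 * access byte (the type field, ignoring the accessed bit).  Each row
 * says whether a segment of that type may be used as a code, stack,
 * or data segment; see selector_check below.
 */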
char acc_type[8][3] = {
	/* code stack data */
	{ 0, 0, 1 },	/* data */
	{ 0, 1, 1 },	/* data, writable */
	{ 0, 0, 1 },	/* data, expand-down */
	{ 0, 1, 1 },	/* data, writable, expand-down */
	{ 1, 0, 0 },	/* code */
	{ 1, 0, 1 },	/* code, readable */
	{ 1, 0, 0 },	/* code, conforming */
	{ 1, 0, 1 },	/* code, readable, conforming */
};

extern struct fake_descriptor ldt[];	/* for system call gate */

#if 0
/* Forward */

extern boolean_t selector_check(
	thread_t	thread,
	int		sel,
	int		type);

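/*
 * Check whether 'sel' may be used by 'thread' as the given kind of
 * segment (S_CODE, S_STACK, or S_DATA).  If the thread has no LDT,
 * only the standard user selectors are accepted; otherwise the
 * selector must name a present, user-privilege, non-system descriptor
 * within the LDT whose type permits the requested use.
 */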
boolean_t
selector_check(
	thread_t	thread,
	int		sel,
	int		type)
{
	struct user_ldt	*ldt;
	int		access;

	ldt = thread->top_act->mact.pcb->ims.ldt;
	if (ldt == 0) {
		switch (type) {
		case S_CODE:
			return sel == USER_CS;
		case S_STACK:
			return sel == USER_DS;
		case S_DATA:
			return sel == 0 ||
			       sel == USER_CS ||
			       sel == USER_DS;
		}
	}

	if (type != S_DATA && sel == 0)
		return FALSE;
	if ((sel & (SEL_LDTS|SEL_PL)) != (SEL_LDTS|SEL_PL_U)
	    || sel > ldt->desc.limit_low)
		return FALSE;

	access = ldt->ldt[sel_idx(sel)].access;

	if ((access & (ACC_P|ACC_PL|ACC_TYPE_USER))
	    != (ACC_P|ACC_PL_U|ACC_TYPE_USER))
		return FALSE;
	/* present, pl == pl.user, not system */

	return acc_type[(access & 0xe)>>1][type];
}

/*
 * Add the descriptors to the LDT, starting with
 * the descriptor for 'first_selector'.
 */

kern_return_t
i386_set_ldt(
	thread_act_t		thr_act,
	int			first_selector,
	descriptor_list_t	desc_list,
	mach_msg_type_number_t	count)
{
	user_ldt_t	new_ldt, old_ldt, temp;
	struct real_descriptor *dp;
	int		i;
	int		min_selector = 0;
	pcb_t		pcb;
	vm_size_t	ldt_size_needed;
	int		first_desc = sel_idx(first_selector);
	vm_map_copy_t	old_copy_object;
	thread_t	thread;

	if (first_desc < min_selector || first_desc > 8191)
		return KERN_INVALID_ARGUMENT;
	if (first_desc + count >= 8192)
		return KERN_INVALID_ARGUMENT;
	if (thr_act == THR_ACT_NULL)
		return KERN_INVALID_ARGUMENT;
	if ((thread = act_lock_thread(thr_act)) == THREAD_NULL) {
		act_unlock_thread(thr_act);
		return KERN_INVALID_ARGUMENT;
	}
	if (thread == current_thread())
		min_selector = LDTSZ;
	act_unlock_thread(thr_act);

	/*
	 * We must copy out desc_list to the kernel map, and wire
	 * it down (we touch it while the PCB is locked).
	 *
	 * We make a copy of the copyin object, and clear
	 * out the old one, so that the MIG stub will have an
	 * empty (but valid) copyin object to discard.
	 */
	{
		kern_return_t	kr;
		vm_offset_t	dst_addr;

		old_copy_object = (vm_map_copy_t) desc_list;

		kr = vm_map_copyout(ipc_kernel_map, &dst_addr,
				    vm_map_copy_copy(old_copy_object));
		if (kr != KERN_SUCCESS)
			return kr;

		(void) vm_map_wire(ipc_kernel_map,
			trunc_page(dst_addr),
			round_page(dst_addr +
				count * sizeof(struct real_descriptor)),
			VM_PROT_READ|VM_PROT_WRITE, FALSE);
		desc_list = (descriptor_list_t) dst_addr;
	}

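	/*
	 * Validate the new descriptors.  Only empty descriptors,
	 * user-privilege code and data segments, and call gates are
	 * accepted; a call gate is rewritten to point at the Mach
	 * system call gate.
	 */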
	for (i = 0, dp = (struct real_descriptor *) desc_list;
	     i < count;
	     i++, dp++)
	{
		switch (dp->access & ~ACC_A) {
		case 0:
		case ACC_P:
			/* valid empty descriptor */
			break;
		case ACC_P | ACC_CALL_GATE:
			/* Mach kernel call */
			*dp = *(struct real_descriptor *)
				&ldt[sel_idx(USER_SCALL)];
			break;
		case ACC_P | ACC_PL_U | ACC_DATA:
		case ACC_P | ACC_PL_U | ACC_DATA_W:
		case ACC_P | ACC_PL_U | ACC_DATA_E:
		case ACC_P | ACC_PL_U | ACC_DATA_EW:
		case ACC_P | ACC_PL_U | ACC_CODE:
		case ACC_P | ACC_PL_U | ACC_CODE_R:
		case ACC_P | ACC_PL_U | ACC_CODE_C:
		case ACC_P | ACC_PL_U | ACC_CODE_CR:
		case ACC_P | ACC_PL_U | ACC_CALL_GATE_16:
		case ACC_P | ACC_PL_U | ACC_CALL_GATE:
			break;
		default:
			(void) vm_map_remove(ipc_kernel_map,
				(vm_offset_t) desc_list,
				count * sizeof(struct real_descriptor),
				VM_MAP_REMOVE_KUNWIRE);
			return KERN_INVALID_ARGUMENT;
		}
	}
	ldt_size_needed = sizeof(struct real_descriptor)
			* (first_desc + count);

	pcb = thr_act->mact.pcb;
	new_ldt = 0;
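	/*
	 * If the existing LDT is missing or too small, a larger one is
	 * allocated below with the pcb lock dropped, and the size check
	 * is retried with the lock held.
	 */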
Retry:
	simple_lock(&pcb->lock);
	old_ldt = pcb->ims.ldt;
	if (old_ldt == 0 ||
	    old_ldt->desc.limit_low + 1 < ldt_size_needed)
	{
		/*
		 * No old LDT, or not big enough
		 */
		if (new_ldt == 0) {
			simple_unlock(&pcb->lock);

			new_ldt = (user_ldt_t) kalloc(ldt_size_needed
					+ sizeof(struct real_descriptor));
			if (new_ldt == 0) {
				/*
				 * Allocation failed; release the wired
				 * copy of desc_list before returning.
				 */
				(void) vm_map_remove(ipc_kernel_map,
					(vm_offset_t) desc_list,
					count * sizeof(struct real_descriptor),
					VM_MAP_REMOVE_KUNWIRE);
				return KERN_RESOURCE_SHORTAGE;
			}
			new_ldt->desc.limit_low = ldt_size_needed - 1;
			new_ldt->desc.limit_high = 0;
			new_ldt->desc.base_low =
				((vm_offset_t)&new_ldt->ldt[0]) & 0xffff;
			new_ldt->desc.base_med =
				(((vm_offset_t)&new_ldt->ldt[0]) >> 16)
				& 0xff;
			new_ldt->desc.base_high =
				((vm_offset_t)&new_ldt->ldt[0]) >> 24;
			new_ldt->desc.access = ACC_P | ACC_LDT;
			new_ldt->desc.granularity = 0;

			goto Retry;
		}

		/*
		 * Have new LDT.  If there was an old LDT, copy descriptors
		 * from old to new.  Otherwise copy the default LDT.
		 */
		if (old_ldt) {
			bcopy((char *)&old_ldt->ldt[0],
			      (char *)&new_ldt->ldt[0],
			      old_ldt->desc.limit_low + 1);
		}
		else if (thr_act == current_act()) {
			struct real_descriptor template = {0, 0, 0, ACC_P, 0, 0, 0};

			for (dp = &new_ldt->ldt[0], i = 0; i < first_desc; i++, dp++) {
				if (i < LDTSZ)
					*dp = *(struct real_descriptor *) &ldt[i];
				else
					*dp = template;
			}
		}

		temp = old_ldt;
		old_ldt = new_ldt;	/* use new LDT from now on */
		new_ldt = temp;		/* discard old LDT */

		pcb->ims.ldt = old_ldt;	/* new LDT for thread */
	}

	/*
	 * Install new descriptors.
	 */
	bcopy((char *)desc_list,
	      (char *)&old_ldt->ldt[first_desc],
	      count * sizeof(struct real_descriptor));

	simple_unlock(&pcb->lock);

	if (new_ldt)
		kfree((vm_offset_t)new_ldt,
		      new_ldt->desc.limit_low+1+sizeof(struct real_descriptor));

	/*
	 * Free the descriptor list.
	 */
	(void) vm_map_remove(ipc_kernel_map, (vm_offset_t) desc_list,
			     count * sizeof(struct real_descriptor),
			     VM_MAP_REMOVE_KUNWIRE);
	return KERN_SUCCESS;
}

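/*
 * Return descriptors from the thread's LDT, starting with the
 * descriptor for 'first_selector'.  At most 'selector_count'
 * descriptors are returned; '*count' is set to the number actually
 * returned (zero if the thread has no LDT).
 */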
kern_return_t
i386_get_ldt(
	thread_act_t		thr_act,
	int			first_selector,
	int			selector_count,	/* number wanted */
	descriptor_list_t	*desc_list,	/* in/out */
	mach_msg_type_number_t	*count)		/* in/out */
{
	struct user_ldt *user_ldt;
	pcb_t		pcb;
	int		first_desc = sel_idx(first_selector);
	unsigned int	ldt_count;
	vm_size_t	ldt_size;
	vm_size_t	size, size_needed;
	vm_offset_t	addr;
	thread_t	thread;

	if (thr_act == THR_ACT_NULL || (thread = thr_act->thread) == THREAD_NULL)
		return KERN_INVALID_ARGUMENT;

	pcb = thr_act->mact.pcb;

	if (first_desc < 0 || first_desc > 8191)
		return KERN_INVALID_ARGUMENT;
	if (first_desc + selector_count >= 8192)
		return KERN_INVALID_ARGUMENT;

	addr = 0;
	size = 0;

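	/*
	 * Size the result under the pcb lock.  If it does not fit in the
	 * caller's in-line buffer or in the memory allocated so far, drop
	 * the lock, allocate more, and try again.
	 */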
	for (;;) {
		simple_lock(&pcb->lock);
		user_ldt = pcb->ims.ldt;
		if (user_ldt == 0) {
			simple_unlock(&pcb->lock);
			if (addr)
				kmem_free(ipc_kernel_map, addr, size);
			*count = 0;
			return KERN_SUCCESS;
		}

		/*
		 * Find how many descriptors we should return.
		 */
		ldt_count = (user_ldt->desc.limit_low + 1) /
			    sizeof (struct real_descriptor);
		ldt_count -= first_desc;
		if (ldt_count > selector_count)
			ldt_count = selector_count;

		ldt_size = ldt_count * sizeof(struct real_descriptor);

		/*
		 * Do we have the memory we need?
		 */
		if (ldt_count <= *count)
			break;		/* fits in-line */

		size_needed = round_page(ldt_size);
		if (size_needed <= size)
			break;

		/*
		 * Unlock the pcb and allocate more memory
		 */
		simple_unlock(&pcb->lock);

		if (size != 0)
			kmem_free(ipc_kernel_map, addr, size);

		size = size_needed;

		if (kmem_alloc(ipc_kernel_map, &addr, size)
				!= KERN_SUCCESS)
			return KERN_RESOURCE_SHORTAGE;
	}

	/*
	 * copy out the descriptors
	 */
	bcopy((char *)&user_ldt->ldt[first_desc],
	      (char *)addr,
	      ldt_size);
	*count = ldt_count;
	simple_unlock(&pcb->lock);

	if (addr) {
		vm_size_t	size_used, size_left;
		vm_map_copy_t	memory;

		/*
		 * Free any unused memory beyond the end of the last page used
		 */
		size_used = round_page(ldt_size);
		if (size_used != size)
			kmem_free(ipc_kernel_map,
				  addr + size_used, size - size_used);

		/*
		 * Zero the remainder of the page being returned.
		 */
		size_left = size_used - ldt_size;
		if (size_left > 0)
			bzero((char *)addr + ldt_size, size_left);

		/*
		 * Unwire the memory and make it into copyin form.
		 */
		(void) vm_map_unwire(ipc_kernel_map, trunc_page(addr),
				     round_page(addr + size_used), FALSE);
		(void) vm_map_copyin(ipc_kernel_map, addr, size_used,
				     TRUE, &memory);
		*desc_list = (descriptor_list_t) memory;
	}

	return KERN_SUCCESS;
}

#endif
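
/*
 * Release the storage for a thread's user LDT.
 */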
void
user_ldt_free(
	user_ldt_t	user_ldt)
{
	kfree((vm_offset_t)user_ldt,
	      user_ldt->desc.limit_low+1+sizeof(struct real_descriptor));
}