]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * The contents of this file constitute Original Code as defined in and | |
7 | * are subject to the Apple Public Source License Version 1.1 (the | |
8 | * "License"). You may not use this file except in compliance with the | |
9 | * License. Please obtain a copy of the License at | |
10 | * http://www.apple.com/publicsource and read it before using this file. | |
11 | * | |
12 | * This Original Code and all software distributed under the License are | |
13 | * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
14 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
15 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
16 | * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the | |
17 | * License for the specific language governing rights and limitations | |
18 | * under the License. | |
19 | * | |
20 | * @APPLE_LICENSE_HEADER_END@ | |
21 | */ | |
22 | #ifdef MACH_BSD | |
23 | #include <mach_rt.h> | |
24 | #include <mach_debug.h> | |
25 | #include <mach_ldebug.h> | |
26 | ||
27 | #include <mach/kern_return.h> | |
28 | #include <mach/mach_traps.h> | |
29 | #include <mach/thread_status.h> | |
30 | #include <mach/vm_param.h> | |
31 | ||
32 | #include <kern/counters.h> | |
33 | #include <kern/cpu_data.h> | |
34 | #include <kern/mach_param.h> | |
35 | #include <kern/task.h> | |
36 | #include <kern/thread.h> | |
37 | #include <kern/sched_prim.h> | |
38 | #include <kern/misc_protos.h> | |
39 | #include <kern/assert.h> | |
40 | #include <kern/spl.h> | |
41 | #include <kern/syscall_sw.h> | |
42 | #include <ipc/ipc_port.h> | |
43 | #include <vm/vm_kern.h> | |
44 | #include <vm/pmap.h> | |
45 | ||
46 | #include <i386/cpu_data.h> | |
47 | #include <i386/cpu_number.h> | |
48 | #include <i386/thread.h> | |
49 | #include <i386/eflags.h> | |
50 | #include <i386/proc_reg.h> | |
51 | #include <i386/seg.h> | |
52 | #include <i386/tss.h> | |
53 | #include <i386/user_ldt.h> | |
54 | #include <i386/fpu.h> | |
55 | #include <i386/iopb_entries.h> | |
56 | #include <i386/machdep_call.h> | |
57 | #include <i386/misc_protos.h> | |
58 | #include <i386/cpu_data.h> | |
59 | #include <i386/cpu_number.h> | |
60 | #include <i386/mp_desc.h> | |
61 | #include <i386/vmparam.h> | |
62 | #include <sys/syscall.h> | |
63 | #include <sys/kdebug.h> | |
64 | #include <sys/ktrace.h> | |
65 | #include <../bsd/sys/sysent.h> | |
66 | ||
67 | extern struct proc *current_proc(void); | |
68 | ||
69 | kern_return_t | |
70 | thread_userstack( | |
71 | thread_t, | |
72 | int, | |
73 | thread_state_t, | |
74 | unsigned int, | |
75 | mach_vm_offset_t *, | |
76 | int * | |
77 | ); | |
78 | ||
79 | kern_return_t | |
80 | thread_entrypoint( | |
81 | thread_t, | |
82 | int, | |
83 | thread_state_t, | |
84 | unsigned int, | |
85 | mach_vm_offset_t * | |
86 | ); | |
87 | ||
88 | unsigned int get_msr_exportmask(void); | |
89 | ||
90 | unsigned int get_msr_nbits(void); | |
91 | ||
92 | unsigned int get_msr_rbits(void); | |
93 | ||
94 | kern_return_t | |
95 | thread_compose_cthread_desc(unsigned int addr, pcb_t pcb); | |
96 | ||
97 | void IOSleep(int); | |
98 | ||
99 | /* | |
100 | * thread_userstack: | |
101 | * | |
102 | * Return the user stack pointer from the machine | |
103 | * dependent thread state info. | |
104 | */ | |
105 | kern_return_t | |
106 | thread_userstack( | |
107 | __unused thread_t thread, | |
108 | int flavor, | |
109 | thread_state_t tstate, | |
110 | unsigned int count, | |
111 | user_addr_t *user_stack, | |
112 | int *customstack | |
113 | ) | |
114 | { | |
115 | struct i386_saved_state *state; | |
116 | i386_thread_state_t *state25; | |
117 | vm_offset_t uesp; | |
118 | ||
119 | if (customstack) | |
120 | *customstack = 0; | |
121 | ||
122 | switch (flavor) { | |
123 | case i386_THREAD_STATE: /* FIXME */ | |
124 | state25 = (i386_thread_state_t *) tstate; | |
125 | if (state25->esp) | |
126 | *user_stack = state25->esp; | |
127 | else | |
128 | *user_stack = USRSTACK; | |
129 | if (customstack && state25->esp) | |
130 | *customstack = 1; | |
131 | else | |
132 | *customstack = 0; | |
133 | break; | |
134 | ||
135 | case i386_NEW_THREAD_STATE: | |
136 | if (count < i386_NEW_THREAD_STATE_COUNT) | |
137 | return (KERN_INVALID_ARGUMENT); | |
138 | else { | |
139 | state = (struct i386_saved_state *) tstate; | |
140 | uesp = state->uesp; | |
141 | } | |
142 | ||
143 | /* If a valid user stack is specified, use it. */ | |
144 | if (uesp) | |
145 | *user_stack = uesp; | |
146 | else | |
147 | *user_stack = USRSTACK; | |
148 | if (customstack && uesp) | |
149 | *customstack = 1; | |
150 | else | |
151 | *customstack = 0; | |
152 | break; | |
153 | default : | |
154 | return (KERN_INVALID_ARGUMENT); | |
155 | } | |
156 | ||
157 | return (KERN_SUCCESS); | |
158 | } | |
159 | ||
160 | kern_return_t | |
161 | thread_entrypoint( | |
162 | __unused thread_t thread, | |
163 | int flavor, | |
164 | thread_state_t tstate, | |
165 | unsigned int count, | |
166 | mach_vm_offset_t *entry_point | |
167 | ) | |
168 | { | |
169 | struct i386_saved_state *state; | |
170 | i386_thread_state_t *state25; | |
171 | ||
172 | /* | |
173 | * Set a default. | |
174 | */ | |
175 | if (*entry_point == 0) | |
176 | *entry_point = VM_MIN_ADDRESS; | |
177 | ||
178 | switch (flavor) { | |
179 | case i386_THREAD_STATE: | |
180 | state25 = (i386_thread_state_t *) tstate; | |
181 | *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS; | |
182 | break; | |
183 | ||
184 | case i386_NEW_THREAD_STATE: | |
185 | if (count < i386_THREAD_STATE_COUNT) | |
186 | return (KERN_INVALID_ARGUMENT); | |
187 | else { | |
188 | state = (struct i386_saved_state *) tstate; | |
189 | ||
190 | /* | |
191 | * If a valid entry point is specified, use it. | |
192 | */ | |
193 | *entry_point = state->eip ? state->eip: VM_MIN_ADDRESS; | |
194 | } | |
195 | break; | |
196 | } | |
197 | ||
198 | return (KERN_SUCCESS); | |
199 | } | |
200 | ||
201 | struct i386_saved_state * | |
202 | get_user_regs(thread_t th) | |
203 | { | |
204 | if (th->machine.pcb) | |
205 | return(USER_REGS(th)); | |
206 | else { | |
207 | printf("[get_user_regs: thread does not have pcb]"); | |
208 | return NULL; | |
209 | } | |
210 | } | |
211 | ||
/*
 * Duplicate parent state in child
 * for U**X fork.
 *
 * Copies the parent thread's saved user register state into the
 * child, plus the parent's floating point state (when in use) and
 * any user-settable LDT entry.  Returns KERN_FAILURE if either
 * thread lacks a pcb, KERN_SUCCESS otherwise.
 */
kern_return_t
machine_thread_dup(
	thread_t	parent,
	thread_t	child
)
{
	struct i386_float_state		floatregs;

#ifdef	XXX
	/* Save the FPU state */
	if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->machine.pcb) {
		fp_state_save(parent);
	}
#endif

	if (child->machine.pcb == NULL || parent->machine.pcb == NULL)
		return (KERN_FAILURE);

	/* Copy over the i386_saved_state registers */
	child->machine.pcb->iss = parent->machine.pcb->iss;

	/* Check to see if parent is using floating point
	 * and if so, copy the registers to the child
	 * FIXME - make sure this works.
	 */

	if (parent->machine.pcb->ims.ifps) {
		/* NOTE(review): a fpu_get_state() failure is silently
		 * ignored here -- the child then simply starts with no
		 * FP state.  Presumably intentional; confirm. */
		if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS)
			fpu_set_state(child, &floatregs);
	}

	/* FIXME - should a user specified LDT, TSS and V86 info
	 * be duplicated as well?? - probably not.
	 */
	// duplicate any user LDT entry that was set; I think this is appropriate.
#ifdef	MACH_BSD
	if (parent->machine.pcb->uldt_selector!= 0) {
		child->machine.pcb->uldt_selector = parent->machine.pcb->uldt_selector;
		child->machine.pcb->uldt_desc = parent->machine.pcb->uldt_desc;
	}
#endif


	return (KERN_SUCCESS);
}
261 | ||
262 | /* | |
263 | * FIXME - thread_set_child | |
264 | */ | |
265 | ||
void thread_set_child(thread_t child, int pid);

/*
 * thread_set_child:
 *
 * Set up the child thread's user registers for its return from fork:
 * eax = pid, edx = 1 (distinguishes the child's return from the
 * parent's -- cf. thread_set_parent which stores 0), and clear the
 * carry flag to indicate success.
 */
void
thread_set_child(thread_t child, int pid)
{
	child->machine.pcb->iss.eax = pid;
	child->machine.pcb->iss.edx = 1;
	child->machine.pcb->iss.efl &= ~EFL_CF;
}
void thread_set_parent(thread_t parent, int pid);

/*
 * thread_set_parent:
 *
 * Set up the parent thread's user registers for its return from fork:
 * eax = pid (the child's), edx = 0 (parent side of the two-way
 * return), and clear the carry flag to indicate success.
 */
void
thread_set_parent(thread_t parent, int pid)
{
	parent->machine.pcb->iss.eax = pid;
	parent->machine.pcb->iss.edx = 0;
	parent->machine.pcb->iss.efl &= ~EFL_CF;
}
282 | ||
283 | ||
284 | ||
285 | /* | |
286 | * System Call handling code | |
287 | */ | |
288 | ||
289 | #define ERESTART -1 /* restart syscall */ | |
290 | #define EJUSTRETURN -2 /* don't modify regs, just return */ | |
291 | ||
292 | ||
293 | #define NO_FUNNEL 0 | |
294 | #define KERNEL_FUNNEL 1 | |
295 | ||
296 | extern funnel_t * kernel_flock; | |
297 | ||
298 | extern int set_bsduthreadargs (thread_t, struct i386_saved_state *, void *); | |
299 | extern void * get_bsduthreadarg(thread_t); | |
300 | extern int * get_bsduthreadrval(thread_t th); | |
301 | extern int * get_bsduthreadlowpridelay(thread_t th); | |
302 | ||
303 | extern long fuword(vm_offset_t); | |
304 | ||
305 | extern void unix_syscall(struct i386_saved_state *); | |
306 | extern void unix_syscall_return(int); | |
307 | ||
308 | /* following implemented in bsd/dev/i386/unix_signal.c */ | |
309 | int __pthread_cset(struct sysent *); | |
310 | ||
311 | void __pthread_creset(struct sysent *); | |
312 | ||
313 | ||
/*
 * unix_syscall_return:
 *
 * Completion path for a BSD system call (used when the call returns
 * via a continuation rather than falling out of unix_syscall):
 * writes the error / return values into the saved user registers,
 * emits the ktrace and kdebug records, releases the funnel if one is
 * held, applies any low-priority I/O throttle delay, and returns to
 * user mode.  Never returns to its caller.
 */
void
unix_syscall_return(int error)
{
	thread_t		thread;
	volatile int		*rval;
	struct i386_saved_state	*regs;
	struct proc		*p;
	unsigned short		code;
	vm_offset_t		params;
	struct sysent		*callp;
	volatile int		*lowpri_delay;

	thread = current_thread();
	rval = get_bsduthreadrval(thread);
	lowpri_delay = get_bsduthreadlowpridelay(thread);
	p = current_proc();

	regs = USER_REGS(thread);

	/* reconstruct code for tracing before blasting eax */
	code = regs->eax;
	/* User-stack arguments sit one int above the return address. */
	params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
	/* Out-of-range syscall numbers map to sysent[63] -- presumably
	 * the invalid/nosys entry; TODO confirm against sysent.h. */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp == sysent) {
		/* Indirect syscall (code 0): the real syscall number is
		 * the first argument on the user stack. */
		code = fuword(params);
	}

	if (error == ERESTART) {
		/* Back eip up 7 bytes -- presumably the length of the
		 * user-mode syscall trap sequence (lcall); TODO confirm --
		 * so the call is re-issued on return to user mode. */
		regs->eip -= 7;
	}
	else if (error != EJUSTRETURN) {
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			regs->eax = rval[0];
			regs->edx = rval[1];
			regs->efl &= ~EFL_CF;
		}
	}

	ktrsysret(p, code, error, rval[0], (callp->sy_funnel & FUNNEL_MASK));

	__pthread_creset(callp);

	if ((callp->sy_funnel & FUNNEL_MASK) != NO_FUNNEL)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	if (*lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(*lowpri_delay);
		*lowpri_delay = 0;
	}
	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		error, rval[0], rval[1], 0, 0);

	thread_exception_return();
	/* NOTREACHED */
}
379 | ||
380 | ||
/*
 * unix_syscall:
 *
 * Entry point for BSD system calls on i386 (called from the trap
 * handler with the saved user register state).  Decodes the syscall
 * number from eax, copies arguments in from the user stack, handles
 * pthread cancellation, takes the kernel funnel when the sysent
 * entry requires it, dispatches through sysent, writes the result
 * back into the saved registers, and returns to user mode.  Never
 * returns to its caller.
 */
void
unix_syscall(struct i386_saved_state *regs)
{
	thread_t		thread;
	void			*vt;
	unsigned short		code;
	struct sysent		*callp;
	int			nargs;
	int			error;
	int			*rval;
	int			funnel_type;
	vm_offset_t		params;
	struct proc		*p;
	volatile int		*lowpri_delay;

	thread = current_thread();
	p = current_proc();
	rval = get_bsduthreadrval(thread);
	lowpri_delay = get_bsduthreadlowpridelay(thread);

	thread->task->syscalls_unix++;		/* MP-safety ignored */

	//printf("[scall : eax %x]", regs->eax);
	code = regs->eax;
	/* User-stack arguments sit one int above the return address. */
	params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
	/* Out-of-range syscall numbers map to sysent[63] -- presumably
	 * the invalid/nosys entry; TODO confirm against sysent.h. */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp == sysent) {
		/* Indirect syscall (code 0): the real number is the first
		 * user-stack argument; step params past it and re-lookup. */
		code = fuword(params);
		params += sizeof (int);
		callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	}

	vt = get_bsduthreadarg(thread);

	/* Copy in the declared number of int-sized arguments; a bad user
	 * pointer fails the call with the copyin error. */
	if ((nargs = (callp->sy_narg * sizeof (int))) &&
	    (error = copyin((user_addr_t) params, (char *) vt, nargs)) != 0) {
		regs->eax = error;
		regs->efl |= EFL_CF;
		thread_exception_return();
		/* NOTREACHED */
	}

	rval[0] = 0;
	rval[1] = regs->edx;	/* preserve edx for calls that only set rval[0] */

	if ((error = __pthread_cset(callp))) {
		/* cancelled system call; let it return with EINTR for handling */
		regs->eax = error;
		regs->efl |= EFL_CF;
		thread_exception_return();
		/* NOTREACHED */
	}

	funnel_type = (callp->sy_funnel & FUNNEL_MASK);
	if(funnel_type == KERNEL_FUNNEL)
		(void) thread_funnel_set(kernel_flock, TRUE);

	(void) set_bsduthreadargs(thread, regs, NULL);

	if (callp->sy_narg > 8)
		panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

	ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);

	{
		/* Trace at most the first four arguments. */
		int *ip = (int *)vt;
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
			*ip, *(ip+1), *(ip+2), *(ip+3), 0);
	}

	error = (*(callp->sy_call))((void *) p, (void *) vt, &rval[0]);

#if 0
	/* May be needed with vfork changes */
	regs = USER_REGS(thread);
#endif
	if (error == ERESTART) {
		/* Back eip up 7 bytes -- presumably the length of the
		 * user-mode syscall trap sequence (lcall); TODO confirm --
		 * so the call is re-issued on return to user mode. */
		regs->eip -= 7;
	}
	else if (error != EJUSTRETURN) {
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			regs->eax = rval[0];
			regs->edx = rval[1];
			regs->efl &= ~EFL_CF;
		}
	}

	ktrsysret(p, code, error, rval[0], funnel_type);

	__pthread_creset(callp);

	if(funnel_type != NO_FUNNEL)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	if (*lowpri_delay) {
		/*
		 * task is marked as a low priority I/O type
		 * and the I/O we issued while in this system call
		 * collided with normal I/O operations... we'll
		 * delay in order to mitigate the impact of this
		 * task on the normal operation of the system
		 */
		IOSleep(*lowpri_delay);
		*lowpri_delay = 0;
	}
	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		error, rval[0], rval[1], 0, 0);

	thread_exception_return();
	/* NOTREACHED */
}
495 | ||
496 | ||
/*
 * machdep_syscall:
 *
 * Dispatch a machine-dependent system call: validate the trap number
 * in eax against machdep_call_table, copy in up to four int arguments
 * from the user stack, invoke the table routine, store its result in
 * eax, drop the funnel if one was acquired, and return to user mode.
 * Never returns to its caller.
 */
void
machdep_syscall( struct i386_saved_state *regs)
{
	int				trapno, nargs;
	machdep_call_t			*entry;

	trapno = regs->eax;
	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid(NULL);

		thread_exception_return();
		/* NOTREACHED */
	}

	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs > 0) {
		int	args[nargs];	/* VLA sized by the (trusted) table entry */

		/* Arguments sit one int above the return address on the
		 * user stack. */
		if (copyin((user_addr_t) regs->uesp + sizeof (int),
			    (char *) args,
			    nargs * sizeof (int))) {

			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}

		switch (nargs) {
		case 1:
			regs->eax = (*entry->routine.args_1)(args[0]);
			break;
		case 2:
			regs->eax = (*entry->routine.args_2)(args[0],args[1]);
			break;
		case 3:
			regs->eax = (*entry->routine.args_3)(args[0],args[1],args[2]);
			break;
		case 4:
			regs->eax = (*entry->routine.args_4)(args[0],args[1],args[2],args[3]);
			break;
		default:
			panic("machdep_syscall(): too many args");
		}
	}
	else
		regs->eax = (*entry->routine.args_0)();

	/* Release the funnel if this thread picked one up during the call. */
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	thread_exception_return();
	/* NOTREACHED */
}
553 | ||
554 | ||
/*
 * thread_compose_cthread_desc:
 *
 * Build a user-mode read/write data segment descriptor based at
 * `addr`, record it in the pcb, and install it in the LDT at the
 * USER_CTHREAD slot.  Preemption is disabled around the LDT write
 * so we update the descriptor table of the CPU we stay on.
 */
kern_return_t
thread_compose_cthread_desc(unsigned int addr, pcb_t pcb)
{
	struct real_descriptor desc;

	mp_disable_preemption();

	desc.limit_low = 1;
	desc.limit_high = 0;
	desc.base_low = addr & 0xffff;
	desc.base_med = (addr >> 16) & 0xff;
	desc.base_high = (addr >> 24) & 0xff;
	desc.access = ACC_P|ACC_PL_U|ACC_DATA_W;	/* present, user, data r/w */
	desc.granularity = SZ_32|SZ_G;			/* 32-bit, page granular */
	pcb->cthread_desc = desc;
	*ldt_desc_p(USER_CTHREAD) = desc;

	mp_enable_preemption();

	return(KERN_SUCCESS);
}
576 | ||
577 | kern_return_t | |
578 | thread_set_cthread_self(uint32_t self) | |
579 | { | |
580 | current_thread()->machine.pcb->cthread_self = self; | |
581 | ||
582 | return (KERN_SUCCESS); | |
583 | } | |
584 | ||
585 | kern_return_t | |
586 | thread_get_cthread_self(void) | |
587 | { | |
588 | return ((kern_return_t)current_thread()->machine.pcb->cthread_self); | |
589 | } | |
590 | ||
/*
 * thread_fast_set_cthread_self:
 *
 * Install `self` as the base of an LDT data segment (via
 * thread_compose_cthread_desc) and also stash it in the pcb for the
 * older thread_get_cthread_self interface.  Returns the USER_CTHREAD
 * selector for user code to load into a segment register.
 */
kern_return_t
thread_fast_set_cthread_self(uint32_t self)
{
	pcb_t pcb;
	pcb = (pcb_t)current_thread()->machine.pcb;
	thread_compose_cthread_desc(self, pcb);
	pcb->cthread_self = self; /* preserve old func too */
	return (USER_CTHREAD);
}
600 | ||
/*
 * thread_set_user_ldt routine is the interface for the user level
 * settable ldt entry feature. allowing a user to create arbitrary
 * ldt entries seems to be too large of a security hole, so instead
 * this mechanism is in place to allow user level processes to have
 * an ldt entry that can be used in conjunction with the FS register.
 *
 * Swapping occurs inside the pcb.c file along with initialization
 * when a thread is created. The basic functioning theory is that the
 * pcb->uldt_selector variable will contain either 0 meaning the
 * process has not set up any entry, or the selector to be used in
 * the FS register. pcb->uldt_desc contains the actual descriptor the
 * user has set up stored in machine usable ldt format.
 *
 * Currently one entry is shared by all threads (USER_SETTABLE), but
 * this could be changed in the future by changing how this routine
 * allocates the selector. There seems to be no real reason at this
 * time to have this added feature, but in the future it might be
 * needed.
 *
 * address is the linear address of the start of the data area size
 * is the size in bytes of the area flags should always be set to 0
 * for now. in the future it could be used to set R/W permissions or
 * other functions. Currently the segment is created as a data segment
 * up to 1 megabyte in size with full read/write permissions only.
 *
 * this call returns the segment selector or -1 if any error occurs
 */
kern_return_t
thread_set_user_ldt(uint32_t address, uint32_t size, uint32_t flags)
{
	pcb_t pcb;
	struct fake_descriptor temp;
	int mycpu;

	if (flags != 0)
		return -1;		// flags not supported
	if (size > 0xFFFFF)
		return -1;		// size too big, 1 meg is the limit

	/* Preemption stays off until the LDT of the current CPU has been
	 * updated, so we don't migrate mid-update. */
	mp_disable_preemption();
	mycpu = cpu_number();

	// create a "fake" descriptor so we can use fix_desc()
	// to build a real one...
	// 32 bit default operation size
	// standard read/write perms for a data segment
	pcb = (pcb_t)current_thread()->machine.pcb;
	temp.offset = address;
	temp.lim_or_seg = size;
	temp.size_or_wdct = SZ_32;
	temp.access = ACC_P|ACC_PL_U|ACC_DATA_W;

	// turn this into a real descriptor
	fix_desc(&temp,1);

	// set up our data in the pcb
	pcb->uldt_desc = *(struct real_descriptor*)&temp;
	pcb->uldt_selector = USER_SETTABLE;	// set the selector value

	// now set it up in the current table...
	*ldt_desc_p(USER_SETTABLE) = *(struct real_descriptor*)&temp;

	mp_enable_preemption();

	return USER_SETTABLE;
}
668 | void | |
669 | mach25_syscall(struct i386_saved_state *regs) | |
670 | { | |
671 | printf("*** Atttempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n", | |
672 | regs->eip, regs->eax, -regs->eax); | |
673 | panic("FIXME!"); | |
674 | } | |
675 | #endif /* MACH_BSD */ | |
676 | ||
677 | ||
/* This routine is called from assembly before each and every mach trap.
 * It counts the trap and emits the kdebug DBG_FUNC_START event carrying
 * up to the first three trap arguments.  call_number appears to arrive
 * pre-shifted (<<4) -- it is indexed as call_number>>4 here and in
 * mach_call_munger; TODO confirm against the trap dispatch assembly.
 * Returns call_number unchanged so the assembly caller can keep using it.
 */

extern unsigned int mach_call_start(unsigned int, unsigned int *);

__private_extern__
unsigned int
mach_call_start(unsigned int call_number, unsigned int *args)
{
	int i, argc;
	unsigned int kdarg[3];

	current_thread()->task->syscalls_mach++;	/* MP-safety ignored */

	/* Always prepare to trace mach system calls */

	kdarg[0]=0;
	kdarg[1]=0;
	kdarg[2]=0;

	argc = mach_trap_table[call_number>>4].mach_trap_arg_count;

	if (argc > 3)
		argc = 3;	/* the kdebug record carries at most 3 args */

	for (i=0; i < argc; i++)
		kdarg[i] = (int)*(args + i);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number>>4)) | DBG_FUNC_START,
		kdarg[0], kdarg[1], kdarg[2], 0, 0);

	return call_number; /* pass this back thru */
}
711 | ||
/* This routine is called from assembly after each mach system call
 * completes; it emits the matching kdebug DBG_FUNC_END event carrying
 * the trap's return value, then hands retval back unchanged.
 */

extern unsigned int mach_call_end(unsigned int, unsigned int);

__private_extern__
unsigned int
mach_call_end(unsigned int call_number, unsigned int retval)
{
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number>>4)) | DBG_FUNC_END,
		retval, 0, 0, 0, 0);
	return retval;  /* pass this back thru */
}
725 | ||
726 | typedef kern_return_t (*mach_call_t)(void *); | |
727 | ||
728 | extern __attribute__((regparm(1))) kern_return_t | |
729 | mach_call_munger(unsigned int call_number, | |
730 | unsigned int arg1, | |
731 | unsigned int arg2, | |
732 | unsigned int arg3, | |
733 | unsigned int arg4, | |
734 | unsigned int arg5, | |
735 | unsigned int arg6, | |
736 | unsigned int arg7, | |
737 | unsigned int arg8, | |
738 | unsigned int arg9 | |
739 | ); | |
740 | ||
/* Argument block handed (by reference) to a mach trap handler.
 * Slots beyond the trap's declared argument count are left zeroed
 * by the dispatcher. */
struct mach_call_args {
	unsigned int arg1;
	unsigned int arg2;
	unsigned int arg3;
	unsigned int arg4;
	unsigned int arg5;
	unsigned int arg6;
	unsigned int arg7;
	unsigned int arg8;
	unsigned int arg9;
};
/*
 * mach_call_munger:
 *
 * Common C entry for mach traps: recovers the trap index from the
 * (apparently <<4 pre-shifted -- see mach_call_start; TODO confirm)
 * call number, gathers exactly the declared number of arguments into
 * a mach_call_args block, brackets the handler call with kdebug
 * START/END events, and returns the handler's result.
 */
__private_extern__
__attribute__((regparm(1))) kern_return_t
mach_call_munger(unsigned int call_number,
	unsigned int arg1,
	unsigned int arg2,
	unsigned int arg3,
	unsigned int arg4,
	unsigned int arg5,
	unsigned int arg6,
	unsigned int arg7,
	unsigned int arg8,
	unsigned int arg9
)
{
	int argc;
	mach_call_t mach_call;
	kern_return_t retval;
	struct mach_call_args args = { 0, 0, 0, 0, 0, 0, 0, 0, 0 };

	current_thread()->task->syscalls_mach++;	/* MP-safety ignored */
	call_number >>= 4;

	argc = mach_trap_table[call_number].mach_trap_arg_count;
	/* Each case deliberately falls through so that selecting argc
	 * copies args argc..1. */
	switch (argc) {
	case 9: args.arg9 = arg9;	/* FALLTHROUGH */
	case 8: args.arg8 = arg8;	/* FALLTHROUGH */
	case 7: args.arg7 = arg7;	/* FALLTHROUGH */
	case 6: args.arg6 = arg6;	/* FALLTHROUGH */
	case 5: args.arg5 = arg5;	/* FALLTHROUGH */
	case 4: args.arg4 = arg4;	/* FALLTHROUGH */
	case 3: args.arg3 = arg3;	/* FALLTHROUGH */
	case 2: args.arg2 = arg2;	/* FALLTHROUGH */
	case 1: args.arg1 = arg1;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC, (call_number)) | DBG_FUNC_START,
		args.arg1, args.arg2, args.arg3, 0, 0);

	mach_call = (mach_call_t)mach_trap_table[call_number].mach_trap_function;
	retval = mach_call(&args);

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_EXCP_SC,(call_number)) | DBG_FUNC_END,
		retval, 0, 0, 0, 0);

	return retval;
}
798 | ||
799 | /* | |
800 | * thread_setuserstack: | |
801 | * | |
802 | * Sets the user stack pointer into the machine | |
803 | * dependent thread state info. | |
804 | */ | |
805 | void | |
806 | thread_setuserstack( | |
807 | thread_t thread, | |
808 | mach_vm_address_t user_stack) | |
809 | { | |
810 | struct i386_saved_state *ss = get_user_regs(thread); | |
811 | ||
812 | ss->uesp = CAST_DOWN(unsigned int,user_stack); | |
813 | } | |
814 | ||
815 | /* | |
816 | * thread_adjuserstack: | |
817 | * | |
818 | * Returns the adjusted user stack pointer from the machine | |
819 | * dependent thread state info. Used for small (<2G) deltas. | |
820 | */ | |
821 | uint64_t | |
822 | thread_adjuserstack( | |
823 | thread_t thread, | |
824 | int adjust) | |
825 | { | |
826 | struct i386_saved_state *ss = get_user_regs(thread); | |
827 | ||
828 | ss->uesp += adjust; | |
829 | return CAST_USER_ADDR_T(ss->uesp); | |
830 | } | |
831 | ||
832 | /* | |
833 | * thread_setentrypoint: | |
834 | * | |
835 | * Sets the user PC into the machine | |
836 | * dependent thread state info. | |
837 | */ | |
838 | void | |
839 | thread_setentrypoint( | |
840 | thread_t thread, | |
841 | mach_vm_address_t entry) | |
842 | { | |
843 | struct i386_saved_state *ss = get_user_regs(thread); | |
844 | ||
845 | ss->eip = CAST_DOWN(unsigned int,entry); | |
846 | } | |
847 |