]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/bsd_i386.c
xnu-344.23.tar.gz
[apple/xnu.git] / osfmk / i386 / bsd_i386.c
1 /*
2 * Copyright (c) 2000-2002 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 #ifdef MACH_BSD
23 #include <cpus.h>
24 #include <mach_rt.h>
25 #include <mach_debug.h>
26 #include <mach_ldebug.h>
27
28 #include <mach/kern_return.h>
29 #include <mach/thread_status.h>
30 #include <mach/vm_param.h>
31
32 #include <kern/counters.h>
33 #include <kern/cpu_data.h>
34 #include <kern/mach_param.h>
35 #include <kern/task.h>
36 #include <kern/thread.h>
37 #include <kern/thread_swap.h>
38 #include <kern/sched_prim.h>
39 #include <kern/misc_protos.h>
40 #include <kern/assert.h>
41 #include <kern/spl.h>
42 #include <ipc/ipc_port.h>
43 #include <vm/vm_kern.h>
44 #include <vm/pmap.h>
45
46 #include <i386/thread.h>
47 #include <i386/eflags.h>
48 #include <i386/proc_reg.h>
49 #include <i386/seg.h>
50 #include <i386/tss.h>
51 #include <i386/user_ldt.h>
52 #include <i386/fpu.h>
53 #include <i386/iopb_entries.h>
54 #include <i386/machdep_call.h>
55
56 #include <sys/syscall.h>
57 #include <sys/ktrace.h>
struct proc;	/* opaque BSD process structure (bsd/sys/proc.h) */

/*
 * Forward declarations for the machine-dependent BSD glue
 * routines defined in this file.
 */

/* Extract the user stack pointer from machine-dependent thread state. */
kern_return_t
thread_userstack(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	vm_offset_t *,
	int *
);

/* Extract the user entry point (EIP) from machine-dependent thread state. */
kern_return_t
thread_entrypoint(
	thread_t,
	int,
	thread_state_t,
	unsigned int,
	vm_offset_t *
);

/* Return the saved user register area for an activation. */
struct i386_saved_state *
get_user_regs(
	thread_act_t);

/* Copy parent register/FPU state into a forked child. */
void
act_thread_dup(
	thread_act_t,
	thread_act_t
);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);
95 /*
96 * thread_userstack:
97 *
98 * Return the user stack pointer from the machine
99 * dependent thread state info.
100 */
101 kern_return_t
102 thread_userstack(
103 thread_t thread,
104 int flavor,
105 thread_state_t tstate,
106 unsigned int count,
107 vm_offset_t *user_stack,
108 int *customstack
109 )
110 {
111 struct i386_saved_state *state;
112 i386_thread_state_t *state25;
113 vm_offset_t uesp;
114
115 if (customstack)
116 *customstack = 0;
117
118 switch (flavor) {
119 case i386_THREAD_STATE: /* FIXME */
120 state25 = (i386_thread_state_t *) tstate;
121 if (state25->esp)
122 *user_stack = state25->esp;
123 if (customstack && state25->esp)
124 *customstack = 1;
125 else
126 *customstack = 0;
127 break;
128
129 case i386_NEW_THREAD_STATE:
130 if (count < i386_NEW_THREAD_STATE_COUNT)
131 return (KERN_INVALID_ARGUMENT);
132 else {
133 state = (struct i386_saved_state *) tstate;
134 uesp = state->uesp;
135 }
136
137 /* If a valid user stack is specified, use it. */
138 if (uesp)
139 *user_stack = uesp;
140 if (customstack && uesp)
141 *customstack = 1;
142 else
143 *customstack = 0;
144 break;
145 default :
146 return (KERN_INVALID_ARGUMENT);
147 }
148
149 return (KERN_SUCCESS);
150 }
151
152 kern_return_t
153 thread_entrypoint(
154 thread_t thread,
155 int flavor,
156 thread_state_t tstate,
157 unsigned int count,
158 vm_offset_t *entry_point
159 )
160 {
161 struct i386_saved_state *state;
162 i386_thread_state_t *state25;
163
164 /*
165 * Set a default.
166 */
167 if (*entry_point == 0)
168 *entry_point = VM_MIN_ADDRESS;
169
170 switch (flavor) {
171 case i386_THREAD_STATE:
172 state25 = (i386_thread_state_t *) tstate;
173 *entry_point = state25->eip ? state25->eip: VM_MIN_ADDRESS;
174 break;
175
176 case i386_NEW_THREAD_STATE:
177 if (count < i386_THREAD_STATE_COUNT)
178 return (KERN_INVALID_ARGUMENT);
179 else {
180 state = (struct i386_saved_state *) tstate;
181
182 /*
183 * If a valid entry point is specified, use it.
184 */
185 *entry_point = state->eip ? state->eip: VM_MIN_ADDRESS;
186 }
187 break;
188 }
189
190 return (KERN_SUCCESS);
191 }
192
193 struct i386_saved_state *
194 get_user_regs(thread_act_t th)
195 {
196 if (th->mact.pcb)
197 return(USER_REGS(th));
198 else {
199 printf("[get_user_regs: thread does not have pcb]");
200 return NULL;
201 }
202 }
203
204 /*
205 * Duplicate parent state in child
206 * for U**X fork.
207 */
208 void
209 act_thread_dup(
210 thread_act_t parent,
211 thread_act_t child
212 )
213 {
214 struct i386_saved_state *parent_state, *child_state;
215 struct i386_machine_state *ims;
216 struct i386_float_state floatregs;
217
218 #ifdef XXX
219 /* Save the FPU state */
220 if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->mact.pcb) {
221 fp_state_save(parent);
222 }
223 #endif
224
225 if (child->mact.pcb == NULL
226 || parent->mact.pcb == NULL) {
227 panic("[thread_dup, child (%x) or parent (%x) is NULL!]",
228 child->mact.pcb, parent->mact.pcb);
229 return;
230 }
231
232 /* Copy over the i386_saved_state registers */
233 child->mact.pcb->iss = parent->mact.pcb->iss;
234
235 /* Check to see if parent is using floating point
236 * and if so, copy the registers to the child
237 * FIXME - make sure this works.
238 */
239
240 if (parent->mact.pcb->ims.ifps) {
241 if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS)
242 fpu_set_state(child, &floatregs);
243 }
244
245 /* FIXME - should a user specified LDT, TSS and V86 info
246 * be duplicated as well?? - probably not.
247 */
248 }
249
250 /*
251 * FIXME - thread_set_child
252 */
253
254 void thread_set_child(thread_act_t child, int pid);
255 void
256 thread_set_child(thread_act_t child, int pid)
257 {
258 child->mact.pcb->iss.eax = pid;
259 child->mact.pcb->iss.edx = 1;
260 child->mact.pcb->iss.efl &= ~EFL_CF;
261 }
262 void thread_set_parent(thread_act_t parent, int pid);
263 void
264 thread_set_parent(thread_act_t parent, int pid)
265 {
266 parent->mact.pcb->iss.eax = pid;
267 parent->mact.pcb->iss.edx = 0;
268 parent->mact.pcb->iss.efl &= ~EFL_CF;
269 }
270
271
272
273 /*
274 * Move pages from one kernel virtual address to another.
275 * Both addresses are assumed to reside in the Sysmap,
276 * and size must be a multiple of the page size.
277 */
278 void
279 pagemove(
280 register caddr_t from,
281 register caddr_t to,
282 int size)
283 {
284 pmap_movepage((unsigned long)from, (unsigned long)to, (vm_size_t)size);
285 }
286
/*
 * System Call handling code
 */

/* In-kernel sentinel errnos used by the syscall return path. */
#define	ERESTART	-1		/* restart syscall */
#define	EJUSTRETURN	-2		/* don't modify regs, just return */

struct sysent {		/* system call table */
	unsigned short		sy_narg;	/* number of args */
	char			sy_parallel;	/* can execute in parallel */
	char			sy_funnel;	/* funnel type */
	unsigned long		(*sy_call)(void *, void *, int *);	/* implementing function */
};

/* sy_funnel values: which funnel (if any) must be held for the call. */
#define NO_FUNNEL 0
#define KERNEL_FUNNEL 1
#define NETWORK_FUNNEL 2

extern funnel_t * kernel_flock;
extern funnel_t * network_flock;

/* The BSD system call table, defined in bsd/kern/init_sysent.c. */
extern struct sysent sysent[];

/* Per-uthread syscall argument/return-value accessors (BSD side). */
int set_bsduthreadargs (thread_act_t, struct i386_saved_state *, void *);

void * get_bsduthreadarg(thread_act_t);

void unix_syscall(struct i386_saved_state *);
315
/*
 * unix_syscall_return:
 *
 * Complete a BSD system call on behalf of a thread that blocked
 * mid-syscall and is now resuming: propagate error/return values
 * into the saved user register frame, emit trace records, drop any
 * funnel held, and return to user mode.  Does not return to the
 * caller.
 */
void
unix_syscall_return(int error)
{
	thread_act_t thread;
	volatile int *rval;
	struct i386_saved_state *regs;
	struct proc *p;
	struct proc *current_proc();
	unsigned short code;
	vm_offset_t params;
	struct sysent *callp;
	extern int nsysent;

	thread = current_act();
	rval = (int *)get_bsduthreadrval(thread);
	p = current_proc();

	regs = USER_REGS(thread);

	/* reconstruct code for tracing before blasting eax */
	code = regs->eax;
	/* User arguments start one int above the saved user stack pointer
	 * (skipping the return address pushed by the call gate). */
	params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
	/* NOTE(review): out-of-range codes map to sysent[63] — presumably
	 * the invalid/nosys entry; confirm against init_sysent.c. */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp == sysent) {
		/* Indirect syscall (code 0): real code is the first user arg. */
		code = fuword(params);
	}

	if (error == ERESTART) {
		/* Back EIP up so the syscall trap sequence is re-executed;
		 * 7 bytes presumably matches the user-mode far-call gate
		 * instruction — TODO confirm against the commpage stub. */
		regs->eip -= 7;
	}
	else if (error != EJUSTRETURN) {
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			regs->eax = rval[0];
			regs->edx = rval[1];
			regs->efl &= ~EFL_CF;
		}
	}

	/* ktrace syscall-return record, then kdebug trace point. */
	ktrsysret(p, code, error, rval[0], callp->sy_funnel);

	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		error, rval[0], rval[1], 0, 0);

	/* Drop whichever funnel the syscall was run under. */
	if (callp->sy_funnel != NO_FUNNEL) {
		assert(thread_funnel_get() == THR_FUNNEL_NULL);
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);
	}

	thread_exception_return();
	/* NOTREACHED */
}
370
371
/*
 * unix_syscall:
 *
 * Common BSD system call dispatcher, entered from the trap handler
 * with the saved user register frame.  Decodes the syscall number
 * from eax, copies in the user arguments, takes the appropriate
 * funnel, invokes the handler, stores the result back into the
 * register frame, and returns to user mode.  Does not return to
 * the caller.
 */
void
unix_syscall(struct i386_saved_state *regs)
{
	thread_act_t thread;
	void *vt;
	unsigned short code;
	struct sysent *callp;
	int nargs, error;
	volatile int *rval;
	int funnel_type;
	vm_offset_t params;
	extern int nsysent;
	struct proc *p;
	struct proc *current_proc();

	thread = current_act();
	p = current_proc();
	rval = (int *)get_bsduthreadrval(thread);

	//printf("[scall : eax %x]", regs->eax);
	code = regs->eax;
	/* User arguments start one int above the saved user stack pointer
	 * (skipping the return address pushed by the call gate). */
	params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
	/* NOTE(review): out-of-range codes map to sysent[63] — presumably
	 * the invalid/nosys entry; confirm against init_sysent.c. */
	callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	if (callp == sysent) {
		/* Indirect syscall (code 0): the real code is the first
		 * user argument; shift params past it and re-dispatch. */
		code = fuword(params);
		params += sizeof (int);
		callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
	}

	/* Per-uthread argument buffer for this activation. */
	vt = get_bsduthreadarg(thread);

	/* Copy user arguments in; on fault, fail the syscall with the
	 * copyin error and return straight to user mode. */
	if ((nargs = (callp->sy_narg * sizeof (int))) &&
			(error = copyin((char *) params, (char *)vt , nargs)) != 0) {
		regs->eax = error;
		regs->efl |= EFL_CF;
		thread_exception_return();
		/* NOTREACHED */
	}

	rval[0] = 0;
	rval[1] = regs->edx;	/* preserve edx for callers that expect it */

	/* Acquire the funnel the handler requires, if any. */
	funnel_type = callp->sy_funnel;
	if(funnel_type == KERNEL_FUNNEL)
		(void) thread_funnel_set(kernel_flock, TRUE);
	else if (funnel_type == NETWORK_FUNNEL)
		(void) thread_funnel_set(network_flock, TRUE);

	set_bsduthreadargs(thread, regs, NULL);

	if (callp->sy_narg > 8)
		panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

	/* ktrace syscall-entry record, then kdebug trace point. */
	ktrsyscall(p, code, callp->sy_narg, vt, funnel_type);

	{
		int *ip = (int *)vt;
		KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_START,
			*ip, *(ip+1), *(ip+2), *(ip+3), 0);
	}

	/* Dispatch to the syscall implementation. */
	error = (*(callp->sy_call))(p, (void *) vt, rval);

#if 0
	/* May be needed with vfork changes */
	regs = USER_REGS(thread);
#endif
	if (error == ERESTART) {
		/* Back EIP up so the syscall trap sequence is re-executed;
		 * 7 bytes presumably matches the user-mode far-call gate
		 * instruction — TODO confirm against the commpage stub. */
		regs->eip -= 7;
	}
	else if (error != EJUSTRETURN) {
		if (error) {
			regs->eax = error;
			regs->efl |= EFL_CF;	/* carry bit */
		} else { /* (not error) */
			regs->eax = rval[0];
			regs->edx = rval[1];
			regs->efl &= ~EFL_CF;
		}
	}

	ktrsysret(p, code, error, rval[0], funnel_type);

	KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_EXCP_SC, code) | DBG_FUNC_END,
		error, rval[0], rval[1], 0, 0);

	/* Drop the funnel taken above before returning to user mode. */
	if(funnel_type != NO_FUNNEL)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	thread_exception_return();
	/* NOTREACHED */
}
464
465
/*
 * machdep_syscall:
 *
 * Dispatch a machine-dependent system call.  The call number is in
 * eax and indexes machdep_call_table; arguments are copied in from
 * the user stack and pushed onto the kernel stack by hand (inline
 * asm) before calling the handler.  Does not return to the caller.
 */
void
machdep_syscall( struct i386_saved_state *regs)
{
	int trapno, nargs;
	machdep_call_t *entry;
	thread_t thread;
	struct proc *p;
	struct proc *current_proc();

	trapno = regs->eax;
	/* Reject out-of-range call numbers. */
	if (trapno < 0 || trapno >= machdep_call_count) {
		regs->eax = (unsigned int)kern_invalid();

		thread_exception_return();
		/* NOTREACHED */
	}

	entry = &machdep_call_table[trapno];
	nargs = entry->nargs;

	if (nargs > 0) {
		int args[nargs];	/* VLA sized by the table entry */

		/* Arguments live just above the saved user stack pointer
		 * (past the return address pushed by the call gate). */
		if (copyin((char *) regs->uesp + sizeof (int),
			(char *) args,
			nargs * sizeof (int))) {

			regs->eax = KERN_INVALID_ADDRESS;

			thread_exception_return();
			/* NOTREACHED */
		}

		/*
		 * Push args[nargs-1]..args[0] onto the stack (C calling
		 * convention order), call the handler, and capture eax
		 * as the return value.  Multi-line string literal is
		 * pre-ANSI GCC asm style — left byte-for-byte alone.
		 */
		asm volatile("
			1:
			mov (%2),%%eax;
			pushl %%eax;
			sub $4,%2;
			dec %1;
			jne 1b;
			mov %3,%%eax;
			call *%%eax;
			mov %%eax,%0"

			: "=r" (regs->eax)
			: "r" (nargs),
			"r" (&args[nargs - 1]),
			"g" (entry->routine)
			: "ax", "cx", "dx", "sp");
	}
	else
		regs->eax = (unsigned int)(*entry->routine)();

	/* Handlers may have taken a funnel; drop it before returning. */
	if (current_thread()->funnel_lock)
		(void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

	thread_exception_return();
	/* NOTREACHED */
}
525
526
527 kern_return_t
528 thread_set_cthread_self(int self)
529 {
530 current_act()->mact.pcb->cthread_self = (unsigned int)self;
531
532 return (KERN_SUCCESS);
533 }
534
535 kern_return_t
536 thread_get_cthread_self(void)
537 {
538 return ((kern_return_t)current_act()->mact.pcb->cthread_self);
539 }
540
541 void
542 mach25_syscall(struct i386_saved_state *regs)
543 {
544 printf("*** Atttempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n",
545 regs->eip, regs->eax, -regs->eax);
546 panic("FIXME!");
547 }
548
549 #endif /* MACH_BSD */
550
551 #undef current_thread
552 thread_t
553 current_thread(void)
554 {
555 return(current_thread_fast());
556 }