/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <cpus.h>
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>
#include <mach/rpc.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/iopb_entries.h>
#include <i386/machdep_call.h>

#define USRSTACK 0xc0000000

kern_return_t
thread_userstack(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    vm_offset_t *,
    int *
);

kern_return_t
thread_entrypoint(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    vm_offset_t *
);

struct i386_saved_state *
get_user_regs(
    thread_act_t);

void
act_thread_dup(
    thread_act_t,
    thread_act_t
);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    vm_offset_t *user_stack,
    int *customstack
)
{
    struct i386_saved_state *state;
    i386_thread_state_t *state25;
    vm_offset_t uesp;

    /*
     * Set a default.
     */
    if (*user_stack == 0)
        *user_stack = USRSTACK;

    if (customstack)
        *customstack = 0;

    switch (flavor) {
    case i386_THREAD_STATE:    /* FIXME */
        state25 = (i386_thread_state_t *) tstate;
        *user_stack = state25->esp ? state25->esp : USRSTACK;
        if (customstack)
            *customstack = state25->esp ? 1 : 0;
        break;

    case i386_NEW_THREAD_STATE:
        if (count < i386_NEW_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);
        else {
            state = (struct i386_saved_state *) tstate;
            uesp = state->uesp;
        }

        /*
         * If a valid user stack is specified, use it.
         */
        *user_stack = uesp ? uesp : USRSTACK;
        if (customstack)
            *customstack = uesp ? 1 : 0;
        break;

    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}

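/*
 * thread_entrypoint:
 *
 * Return the user entry point (initial EIP) from the machine
 * dependent thread state info, defaulting to VM_MIN_ADDRESS.
 */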
kern_return_t
thread_entrypoint(
    thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    vm_offset_t *entry_point
)
{
    struct i386_saved_state *state;
    i386_thread_state_t *state25;

    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case i386_THREAD_STATE:
        state25 = (i386_thread_state_t *) tstate;
        *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
        break;

    case i386_NEW_THREAD_STATE:
        if (count < i386_NEW_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);
        else {
            state = (struct i386_saved_state *) tstate;

            /*
             * If a valid entry point is specified, use it.
             */
            *entry_point = state->eip ? state->eip : VM_MIN_ADDRESS;
        }
        break;
    }

    return (KERN_SUCCESS);
}

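/*
 * get_user_regs:
 *
 * Return the saved user-mode register state for an activation,
 * or NULL if it has no pcb.
 */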
struct i386_saved_state *
get_user_regs(thread_act_t th)
{
    if (th->mact.pcb)
        return (USER_REGS(th));
    else {
        printf("[get_user_regs: thread does not have pcb]");
        return NULL;
    }
}

/*
 * Duplicate parent state in child
 * for U**X fork.
 */
void
act_thread_dup(
    thread_act_t parent,
    thread_act_t child
)
{
    struct i386_saved_state *parent_state, *child_state;
    struct i386_machine_state *ims;
    struct i386_float_state floatregs;

#ifdef XXX
    /* Save the FPU state */
    if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->mact.pcb) {
        fp_state_save(parent);
    }
#endif

    if (child->mact.pcb == NULL || parent->mact.pcb == NULL) {
        panic("[act_thread_dup: child (%x) or parent (%x) pcb is NULL!]",
            child->mact.pcb, parent->mact.pcb);
        return;
    }

    /* Copy over the i386_saved_state registers */
    child->mact.pcb->iss = parent->mact.pcb->iss;

    /* Check to see if parent is using floating point
     * and if so, copy the registers to the child
     * FIXME - make sure this works.
     */

    if (parent->mact.pcb->ims.ifps) {
        if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS)
            fpu_set_state(child, &floatregs);
    }

    /* FIXME - should a user specified LDT, TSS and V86 info
     * be duplicated as well?? - probably not.
     */
}

/*
 * FIXME - thread_set_child
 */

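/*
 * Set up the fork() return values in the saved user registers:
 * eax holds the pid being returned and the carry flag is cleared
 * to indicate success.  edx distinguishes the two returns (1 in
 * the child, 0 in the parent); the user-mode fork stub presumably
 * uses it to decide whether to return 0 to its caller.
 */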
void thread_set_child(thread_act_t child, int pid);
void
thread_set_child(thread_act_t child, int pid)
{
    child->mact.pcb->iss.eax = pid;
    child->mact.pcb->iss.edx = 1;
    child->mact.pcb->iss.efl &= ~EFL_CF;
}

void thread_set_parent(thread_act_t parent, int pid);
void
thread_set_parent(thread_act_t parent, int pid)
{
    parent->mact.pcb->iss.eax = pid;
    parent->mact.pcb->iss.edx = 0;
    parent->mact.pcb->iss.efl &= ~EFL_CF;
}

/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of the page size.
 */
void
pagemove(
    register caddr_t from,
    register caddr_t to,
    int size)
{
    pmap_movepage((unsigned long)from, (unsigned long)to, (vm_size_t)size);
}

/*
 * System Call handling code
 */

#define ERESTART    -1  /* restart syscall */
#define EJUSTRETURN -2  /* don't modify regs, just return */

struct sysent {         /* system call table */
    unsigned short sy_narg;     /* number of args */
    char           sy_parallel; /* can execute in parallel */
    char           sy_funnel;   /* funnel type */
    unsigned long  (*sy_call)(void *, void *, int *); /* implementing function */
};

#define KERNEL_FUNNEL  1
#define NETWORK_FUNNEL 2

extern funnel_t *kernel_flock;
extern funnel_t *network_flock;

extern struct sysent sysent[];

int set_bsduthreadargs(thread_act_t, struct i386_saved_state *, void *);

void *get_bsduthreadarg(thread_act_t);

void unix_syscall(struct i386_saved_state *);

/* USED ONLY FROM VFORK/EXIT */
void
unix_syscall_return(int error)
{
    thread_act_t thread;
    volatile int *rval;
    struct i386_saved_state *regs;

    thread = current_act();
    rval = (int *)get_bsduthreadrval(thread);

    regs = USER_REGS(thread);

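    /*
     * ERESTART: back the saved EIP up over the system call trap
     * (presumably the 7-byte lcall sequence issued by the user-mode
     * stub) so the call is re-executed on return to user space.
     */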
    if (error == ERESTART) {
        regs->eip -= 7;
    }
    else if (error != EJUSTRETURN) {
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else {                    /* (not error) */
            regs->eax = rval[0];
            regs->edx = rval[1];
            regs->efl &= ~EFL_CF;
        }
    }

    (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}

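/*
 * unix_syscall:
 *
 * Dispatch a BSD system call.  The system call number arrives in
 * eax and the arguments sit on the user stack just above the return
 * address.  An out-of-range number is routed to sysent[63]; number 0
 * is treated as an indirect call, with the real number fetched from
 * the first argument word.
 */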
void
unix_syscall(struct i386_saved_state *regs)
{
    thread_act_t thread;
    void *p, *vt;
    unsigned short code;
    struct sysent *callp;
    int nargs, error;
    volatile int *rval;
    int funnel_type;
    vm_offset_t params;
    extern int nsysent;

    thread = current_act();
    p = current_proc();
    rval = (int *)get_bsduthreadrval(thread);

    //printf("[scall : eax %x]", regs->eax);
    code = regs->eax;
    params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    if (callp == sysent) {
        code = fuword(params);
        params += sizeof (int);
        callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    }

    vt = get_bsduthreadarg(thread);

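    /*
     * Copy the argument words in from the user stack; if the copyin
     * faults, hand the error back to the user with the carry flag set.
     */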
    if ((nargs = (callp->sy_narg * sizeof (int))) &&
            (error = copyin((char *) params, (char *) vt, nargs)) != 0) {
        regs->eax = error;
        regs->efl |= EFL_CF;
        thread_exception_return();
        /* NOTREACHED */
    }

    rval[0] = 0;
    rval[1] = regs->edx;

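    /*
     * Take the appropriate funnel before calling the handler; BSD
     * code runs single-threaded under either the kernel funnel or
     * the network funnel.
     */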
    if (callp->sy_funnel == NETWORK_FUNNEL) {
        (void) thread_funnel_set(network_flock, TRUE);
    }
    else {
        (void) thread_funnel_set(kernel_flock, TRUE);
    }

    set_bsduthreadargs(thread, regs, NULL);

    if (callp->sy_narg > 8)
        panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

    error = (*(callp->sy_call))(p, (void *) vt, rval);

#if 0
    /* May be needed with vfork changes */
    regs = USER_REGS(thread);
#endif
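    /* As in unix_syscall_return: back EIP up so the call restarts. */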
    if (error == ERESTART) {
        regs->eip -= 7;
    }
    else if (error != EJUSTRETURN) {
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else {                    /* (not error) */
            regs->eax = rval[0];
            regs->edx = rval[1];
            regs->efl &= ~EFL_CF;
        }
    }

    (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}

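/*
 * machdep_syscall:
 *
 * Dispatch a machine-dependent system call.  The call number arrives
 * in eax and indexes machdep_call_table; any arguments are copied in
 * from the user stack and pushed onto the kernel stack before the
 * handler routine is invoked.
 */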
void
machdep_syscall(struct i386_saved_state *regs)
{
    int trapno, nargs;
    machdep_call_t *entry;
    thread_t thread;

    trapno = regs->eax;
    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int)kern_invalid();

        thread_exception_return();
        /* NOTREACHED */
    }

    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs > 0) {
        int args[nargs];

        if (copyin((char *) regs->uesp + sizeof (int),
                (char *) args,
                nargs * sizeof (int))) {

            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }

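        /*
         * Push the copied arguments onto the kernel stack right to
         * left and call the handler routine; its return value, left
         * in %eax, becomes the value returned to the user.
         */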
        asm volatile(
            "1:\n"
            "\tmov (%2),%%eax\n"
            "\tpushl %%eax\n"
            "\tsub $4,%2\n"
            "\tdec %1\n"
            "\tjne 1b\n"
            "\tmov %3,%%eax\n"
            "\tcall *%%eax\n"
            "\tmov %%eax,%0"

            : "=r" (regs->eax)
            : "r" (nargs),
              "r" (&args[nargs - 1]),
              "g" (entry->routine)
            : "ax", "cx", "dx", "sp");
    }
    else
        regs->eax = (unsigned int)(*entry->routine)();

    (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}

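/*
 * thread_set_cthread_self / thread_get_cthread_self:
 *
 * Store and return the user-level thread package's "self" value kept
 * in the pcb; presumably used by the cthreads/pthread library to
 * locate its per-thread data.
 */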
kern_return_t
thread_set_cthread_self(int self)
{
    current_act()->mact.pcb->cthread_self = (unsigned int)self;

    return (KERN_SUCCESS);
}

kern_return_t
thread_get_cthread_self(void)
{
    return ((kern_return_t)current_act()->mact.pcb->cthread_self);
}

void
mach25_syscall(struct i386_saved_state *regs)
{
    printf("*** Attempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n",
        regs->eip, regs->eax, -regs->eax);
    panic("FIXME!");
}

#endif /* MACH_BSD */

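/*
 * current_thread is defined as a macro elsewhere in the kernel;
 * undefine it here so a real function with external linkage
 * (wrapping current_thread_fast()) is also exported.
 */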
#undef current_thread
thread_act_t
current_thread(void)
{
    return (current_thread_fast());
}