/* osfmk/i386/bsd_i386.c (xnu-123.5) */

/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
#ifdef MACH_BSD
#include <cpus.h>
#include <mach_rt.h>
#include <mach_debug.h>
#include <mach_ldebug.h>

#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <mach/vm_param.h>
#include <mach/rpc.h>

#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/mach_param.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/assert.h>
#include <kern/spl.h>
#include <ipc/ipc_port.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <i386/thread.h>
#include <i386/eflags.h>
#include <i386/proc_reg.h>
#include <i386/seg.h>
#include <i386/tss.h>
#include <i386/user_ldt.h>
#include <i386/fpu.h>
#include <i386/iopb_entries.h>
#include <i386/machdep_call.h>

#define USRSTACK    0xc0000000

kern_return_t
thread_userstack(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    vm_offset_t *
);

kern_return_t
thread_entrypoint(
    thread_t,
    int,
    thread_state_t,
    unsigned int,
    vm_offset_t *
);

struct i386_saved_state *
get_user_regs(
    thread_act_t);

void
act_thread_dup(
    thread_act_t,
    thread_act_t
);

unsigned int get_msr_exportmask(void);

unsigned int get_msr_nbits(void);

unsigned int get_msr_rbits(void);

/*
 * thread_userstack:
 *
 * Return the user stack pointer from the machine
 * dependent thread state info.
 */
kern_return_t
thread_userstack(
    thread_t        thread,
    int             flavor,
    thread_state_t  tstate,
    unsigned int    count,
    vm_offset_t     *user_stack
)
{
    struct i386_saved_state *state;
    i386_thread_state_t     *state25;
    vm_offset_t             uesp;

    /*
     * Set a default.
     */
    if (*user_stack == 0)
        *user_stack = USRSTACK;

    switch (flavor) {
    case i386_THREAD_STATE: /* FIXME */
        state25 = (i386_thread_state_t *) tstate;
        *user_stack = state25->esp ? state25->esp : USRSTACK;
        break;

    case i386_NEW_THREAD_STATE:
        if (count < i386_NEW_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);
        else {
            state = (struct i386_saved_state *) tstate;
            uesp = state->uesp;
        }

        /*
         * If a valid user stack is specified, use it.
         */
        *user_stack = uesp ? uesp : USRSTACK;
        break;
    default:
        return (KERN_INVALID_ARGUMENT);
    }

    return (KERN_SUCCESS);
}
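
/*
 * Illustrative sketch (not part of the original source): how a caller such
 * as the BSD exec/thread-create path might use thread_userstack() to derive
 * an initial user stack pointer from a register-state flavor.  The local
 * variables are hypothetical; note that the thread argument is not consulted
 * by the implementation above.
 *
 *      struct i386_saved_state regs;
 *      vm_offset_t             stack = 0;
 *
 *      bzero((char *) &regs, sizeof (regs));
 *      regs.uesp = 0xbffff000;         // candidate user stack pointer
 *      if (thread_userstack(thread, i386_NEW_THREAD_STATE,
 *              (thread_state_t) &regs, i386_NEW_THREAD_STATE_COUNT,
 *              &stack) == KERN_SUCCESS) {
 *              // stack == regs.uesp here, or USRSTACK if uesp had been 0
 *      }
 */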

kern_return_t
thread_entrypoint(
    thread_t        thread,
    int             flavor,
    thread_state_t  tstate,
    unsigned int    count,
    vm_offset_t     *entry_point
)
{
    struct i386_saved_state *state;
    i386_thread_state_t     *state25;

    /*
     * Set a default.
     */
    if (*entry_point == 0)
        *entry_point = VM_MIN_ADDRESS;

    switch (flavor) {
    case i386_THREAD_STATE:
        state25 = (i386_thread_state_t *) tstate;
        *entry_point = state25->eip ? state25->eip : VM_MIN_ADDRESS;
        break;

    case i386_NEW_THREAD_STATE:
        if (count < i386_THREAD_STATE_COUNT)
            return (KERN_INVALID_ARGUMENT);
        else {
            state = (struct i386_saved_state *) tstate;

            /*
             * If a valid entry point is specified, use it.
             */
            *entry_point = state->eip ? state->eip : VM_MIN_ADDRESS;
        }
        break;
    }

    return (KERN_SUCCESS);
}

struct i386_saved_state *
get_user_regs(thread_act_t th)
{
    if (th->mact.pcb)
        return (USER_REGS(th));
    else {
        printf("[get_user_regs: thread does not have pcb]");
        return NULL;
    }
}

/*
 * Duplicate parent state in child
 * for U**X fork.
 */
void
act_thread_dup(
    thread_act_t    parent,
    thread_act_t    child
)
{
    struct i386_saved_state     *parent_state, *child_state;
    struct i386_machine_state   *ims;
    struct i386_float_state     floatregs;

#ifdef XXX
    /* Save the FPU state */
    if ((pcb_t)(per_proc_info[cpu_number()].fpu_pcb) == parent->mact.pcb) {
        fp_state_save(parent);
    }
#endif

    if (child->mact.pcb == NULL
        || parent->mact.pcb == NULL) {
        panic("[thread_dup, child (%x) or parent (%x) is NULL!]",
            child->mact.pcb, parent->mact.pcb);
        return;
    }

    /* Copy over the i386_saved_state registers */
    child->mact.pcb->iss = parent->mact.pcb->iss;

    /*
     * Check to see if parent is using floating point
     * and if so, copy the registers to the child.
     * FIXME - make sure this works.
     */
    if (parent->mact.pcb->ims.ifps) {
        if (fpu_get_state(parent, &floatregs) == KERN_SUCCESS)
            fpu_set_state(child, &floatregs);
    }

    /*
     * FIXME - should a user specified LDT, TSS and V86 info
     * be duplicated as well?? - probably not.
     */
}

/*
 * FIXME - thread_set_child
 */
void thread_set_child(thread_act_t child, int pid);

void
thread_set_child(thread_act_t child, int pid)
{
    child->mact.pcb->iss.eax = pid;
    child->mact.pcb->iss.edx = 1;
    child->mact.pcb->iss.efl &= ~EFL_CF;
}
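
/*
 * Illustrative note (not part of the original source): thread_set_child()
 * arranges the child side of fork()'s two-way return.  The child resumes
 * with eax = pid, edx = 1 and the carry flag cleared; by the usual BSD/i386
 * convention the user-mode fork stub then does roughly the following
 * (hypothetical pseudocode, register meanings taken from the code above):
 *
 *      if (carry flag set)
 *              return -1, with eax as errno;
 *      if (edx != 0)
 *              return 0;       // this is the child
 *      return eax;             // parent: child's pid
 */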


/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap,
 * and size must be a multiple of the page size.
 */
void
pagemove(
    register caddr_t    from,
    register caddr_t    to,
    int                 size)
{
    pmap_movepage((unsigned long) from, (unsigned long) to, (vm_size_t) size);
}
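
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller relocating one page of kernel virtual memory.  Both addresses must
 * already be mapped in the kernel map and size must be a multiple of
 * PAGE_SIZE, per the comment above.
 *
 *      caddr_t old_va = ...;   // existing kernel VA
 *      caddr_t new_va = ...;   // destination kernel VA
 *
 *      pagemove(old_va, new_va, PAGE_SIZE);
 */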

/*
 * System Call handling code
 */

#define ERESTART    -1      /* restart syscall */
#define EJUSTRETURN -2      /* don't modify regs, just return */

struct sysent {                 /* system call table */
    unsigned short  sy_narg;    /* number of args */
    char            sy_parallel;/* can execute in parallel */
    char            sy_funnel;  /* funnel type */
    unsigned long   (*sy_call)(void *, void *, int *);  /* implementing function */
};
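
/*
 * Illustrative sketch (not part of the original source): what one entry of
 * the BSD system call table might look like for this sysent layout.  The
 * handler name and argument count are hypothetical; the real table is the
 * extern sysent[] declared below and is generated elsewhere in the BSD tree.
 *
 *      unsigned long example_read(void *p, void *args, int *retval);
 *
 *      struct sysent example_entry = {
 *          3,                  // sy_narg: fd, buf, nbyte
 *          0,                  // sy_parallel
 *          KERNEL_FUNNEL,      // sy_funnel
 *          example_read        // sy_call
 *      };
 */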

#define KERNEL_FUNNEL   1
#define NETWORK_FUNNEL  2

extern funnel_t *kernel_flock;
extern funnel_t *network_flock;

extern struct sysent sysent[];

void *get_bsdtask_info(
    task_t);

int set_bsduthreadargs(thread_act_t, struct i386_saved_state *, void *);

void *get_bsduthreadarg(thread_act_t);

void unix_syscall(struct i386_saved_state *);

void
unix_syscall_return(int error)
{
    panic("unix_syscall_return not implemented yet!!");
}


void
unix_syscall(struct i386_saved_state *regs)
{
    thread_act_t    thread;
    void            *p, *vt;
    unsigned short  code;
    struct sysent   *callp;
    int             nargs, error;
    int             *rval;
    int             funnel_type;
    vm_offset_t     params;
    extern int      nsysent;

    thread = current_act();
    p = get_bsdtask_info(current_task());
    rval = (int *) get_bsduthreadrval(thread);

    //printf("[scall : eax %x]", regs->eax);
    /* Syscall number arrives in eax; arguments sit on the user stack
     * just above the return address. */
    code = regs->eax;
    params = (vm_offset_t) ((caddr_t)regs->uesp + sizeof (int));
    callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    if (callp == sysent) {
        /* Code 0 is the indirect syscall: the real syscall number is
         * the first user argument. */
        code = fuword(params);
        params += sizeof (int);
        callp = (code >= nsysent) ? &sysent[63] : &sysent[code];
    }

    vt = get_bsduthreadarg(thread);

    if ((nargs = (callp->sy_narg * sizeof (int))) &&
        (error = copyin((char *) params, (char *) vt, nargs)) != 0) {
        regs->eax = error;
        regs->efl |= EFL_CF;
        thread_exception_return();
        /* NOTREACHED */
    }

    rval[0] = 0;
    rval[1] = regs->edx;

    if (callp->sy_funnel == NETWORK_FUNNEL) {
        (void) thread_funnel_set(network_flock, TRUE);
    }
    else {
        (void) thread_funnel_set(kernel_flock, TRUE);
    }
    set_bsduthreadargs(thread, regs, NULL);

    if (callp->sy_narg > 8)
        panic("unix_syscall max arg count exceeded (%d)", callp->sy_narg);

    error = (*(callp->sy_call))(p, (void *) vt, rval);

    if (error == ERESTART) {
        /* Back eip up so the user-mode trap instruction is re-executed. */
        regs->eip -= 7;
    }
    else if (error != EJUSTRETURN) {
        if (error) {
            regs->eax = error;
            regs->efl |= EFL_CF;    /* carry bit */
        } else {    /* (not error) */
            regs->eax = rval[0];
            regs->edx = rval[1];
            regs->efl &= ~EFL_CF;
        }
    }

    (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
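
/*
 * Illustrative note (not part of the original source): the user-visible
 * return convention implemented above.  On error the kernel sets the carry
 * flag and puts the error number in eax; on success it clears carry and
 * returns up to two result words in eax/edx.  A hypothetical user-mode
 * syscall stub would therefore do roughly:
 *
 *      // after the trap instruction returns to user mode
 *      if (carry flag set) {
 *              errno = eax;
 *              return -1;
 *      }
 *      return eax;             // edx carries a second result word if any
 */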


void
machdep_syscall(struct i386_saved_state *regs)
{
    int             trapno, nargs;
    machdep_call_t  *entry;
    thread_t        thread;

    trapno = regs->eax;
    if (trapno < 0 || trapno >= machdep_call_count) {
        regs->eax = (unsigned int) kern_invalid();

        thread_exception_return();
        /* NOTREACHED */
    }

    entry = &machdep_call_table[trapno];
    nargs = entry->nargs;

    if (nargs > 0) {
        int args[nargs];

        /* Arguments live on the user stack just above the return address. */
        if (copyin((char *) regs->uesp + sizeof (int),
                   (char *) args,
                   nargs * sizeof (int))) {

            regs->eax = KERN_INVALID_ADDRESS;

            thread_exception_return();
            /* NOTREACHED */
        }

        /*
         * Push the copied-in arguments in reverse order and call the
         * handler; its return value is passed back to user mode in
         * regs->eax.
         */
        asm volatile(
            "1:                 \n\t"
            "mov (%2),%%eax;    \n\t"
            "pushl %%eax;       \n\t"
            "sub $4,%2;         \n\t"
            "dec %1;            \n\t"
            "jne 1b;            \n\t"
            "mov %3,%%eax;      \n\t"
            "call *%%eax;       \n\t"
            "mov %%eax,%0"
            : "=r" (regs->eax)
            : "r" (nargs),
              "r" (&args[nargs - 1]),
              "g" (entry->routine)
            : "ax", "cx", "dx", "sp");
    }
    else
        regs->eax = (unsigned int) (*entry->routine)();

    (void) thread_funnel_set(current_thread()->funnel_lock, FALSE);

    thread_exception_return();
    /* NOTREACHED */
}
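
/*
 * Illustrative sketch (not part of the original source): what an entry in
 * machdep_call_table might look like for this dispatcher.  Only the fields
 * actually used above (routine, nargs) are shown; the initializer style and
 * the example handler are hypothetical - see i386/machdep_call.h for the
 * real table definition.
 *
 *      kern_return_t example_mdep_call(int arg0);
 *
 *      machdep_call_t example_entry = {
 *          .routine = (void *) example_mdep_call,
 *          .nargs   = 1
 *      };
 */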


kern_return_t
thread_set_cthread_self(int self)
{
    current_act()->mact.pcb->cthread_self = (unsigned int) self;

    return (KERN_SUCCESS);
}

kern_return_t
thread_get_cthread_self(void)
{
    return ((kern_return_t) current_act()->mact.pcb->cthread_self);
}
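
/*
 * Illustrative note (not part of the original source): cthread_self is a
 * per-thread word kept in the pcb.  A user-level threads library can stash
 * a pointer to its own thread structure and read it back later, presumably
 * via trap stubs that reach the machdep_syscall() dispatcher above.
 * Hypothetical user-level usage:
 *
 *      struct cthread *self = ...;
 *      thread_set_cthread_self((int) self);
 *      ...
 *      self = (struct cthread *) thread_get_cthread_self();
 */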

void
mach25_syscall(struct i386_saved_state *regs)
{
    printf("*** Attempt to execute a Mach 2.5 system call at EIP=%x EAX=%x(%d)\n",
        regs->eip, regs->eax, -regs->eax);
    panic("FIXME!");
}

#endif  /* MACH_BSD */

#undef current_thread
thread_act_t
current_thread(void)
{
    return (current_thread_fast());
}