/*
 * osfmk/i386/start.s — Apple xnu-517.12.7
 * (retrieved from the git.saurik.com mirror of apple/xnu.git)
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
#include <platforms.h>
#include <cpus.h>
#include <mach_kdb.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <assym.s>

#if	NCPUS > 1

/*
 * CX(addr,reg): index a per-CPU array of 4-byte entries —
 * effective address is addr + reg*4, reg holding the CPU number.
 */
#define	CX(addr,reg)	addr(,reg,4)

#else

/*
 * Uniprocessor build: CPU_NUMBER expands to nothing (CPU number is
 * always 0) and CX degenerates to a plain reference to addr.
 */
#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr

#endif	/* NCPUS > 1 */

#include <i386/mp.h>
73
/*
 * GAS won't handle an intersegment jump with a relocatable offset.
 * LJMP hand-assembles a far jump: 0xea is the ljmp opcode, followed
 * by a 32-bit offset and a 16-bit segment selector.
 */
#define	LJMP(segment,address)	\
	.byte	0xea		;\
	.long	address		;\
	.word	segment



/*
 * Conversion offsets between the kernel's virtual addresses and the
 * physical/linear addresses used before paging is enabled.
 */
#define	KVTOPHYS	(-KERNELBASE)
#define	KVTOLINEAR	LINEAR_KERNELBASE


/* PA(addr): kernel virtual -> physical; VA(addr): physical -> virtual */
#define	PA(addr)	(addr)+KVTOPHYS
#define	VA(addr)	(addr)-KVTOPHYS
90
	.data
	.align	2

/*
 * _kick_buffer_: reserved scratch area in the data segment, seeded
 * with two longwords (1, 3) and followed by 16836 reserved bytes.
 * NOTE(review): the values 1/3 and the 16836 byte count are magic;
 * their meaning is not evident from this file — confirm against the
 * consumer of this symbol before changing.
 */
	.globl	EXT(_kick_buffer_)
EXT(_kick_buffer_):
	.long	1
	.long	3
	.set	.,.+16836		/* reserve 16836 bytes */
/*
 * Interrupt and bootup stack for initial processor.
 */
	.align	ALIGN
	.globl	EXT(intstack)
EXT(intstack):
	.set	., .+INTSTACK_SIZE	/* reserve the boot interrupt stack */
	.globl	EXT(eintstack)
EXT(eintstack:)
	/* NOTE(review): colon is inside the macro parens above, unlike
	 * EXT(intstack): — the expansion still yields a valid label,
	 * but the form is inconsistent; confirm before normalizing. */

#if	NCPUS == 1
	.globl	EXT(int_stack_high)	/* all interrupt stacks */
EXT(int_stack_high):			/* must lie below this */
	.long	EXT(eintstack)		/* address */

	.globl	EXT(int_stack_top)	/* top of interrupt stack */
EXT(int_stack_top):
	.long	EXT(eintstack)
#endif
117
#if	MACH_KDB
/*
 * Kernel debugger stack for each processor.
 * One INTSTACK_SIZE region per CPU, carved out contiguously.
 */
	.align	ALIGN
	.globl	EXT(db_stack_store)
EXT(db_stack_store):
	.set	., .+(INTSTACK_SIZE*NCPUS)

/*
 * Stack for last-ditch debugger task for each processor.
 */
	.align	ALIGN
	.globl	EXT(db_task_stack_store)
EXT(db_task_stack_store):
	.set	., .+(INTSTACK_SIZE*NCPUS)
#endif	/* MACH_KDB */

/*
 * per-processor kernel debugger stacks
 * (reserved unconditionally, unlike the MACH_KDB stacks above)
 */
	.align	ALIGN
	.globl	EXT(kgdb_stack_store)
EXT(kgdb_stack_store):
	.set	., .+(INTSTACK_SIZE*NCPUS)
144
/*
 * Pointers to GDT and IDT.  These contain linear addresses.
 * Each is a pseudo-descriptor for lgdt/lidt: a 16-bit limit
 * (8 bytes per entry, minus one) followed by a 32-bit linear base.
 */
	.align	ALIGN
	.globl	EXT(gdtptr)
LEXT(gdtptr)
	.word	Times(8,GDTSZ)-1	/* GDT limit in bytes */
	.long	EXT(gdt)+KVTOLINEAR	/* linear base of GDT */

	.align	ALIGN
	.globl	EXT(idtptr)
LEXT(idtptr)
	.word	Times(8,IDTSZ)-1	/* IDT limit in bytes */
	.long	EXT(idt)+KVTOLINEAR	/* linear base of IDT */
159
#if	NCPUS > 1
	.data
/*
 * start_lock is very special.  We initialize the
 * lock at allocation time rather than at run-time.
 * Although start_lock should be an instance of a
 * hw_lock, we hand-code all manipulation of the lock
 * because the hw_lock code may require function calls;
 * and we'd rather not introduce another dependency on
 * a working stack at this point.
 */
	.globl	EXT(start_lock)
EXT(start_lock):
	.long	0		/* synchronizes processor startup */

	.globl	EXT(master_is_up)
EXT(master_is_up):
	.long	0		/* 1 when OK for other processors */
				/* to start */
	.globl	EXT(mp_boot_pde)
EXT(mp_boot_pde):
	.long	0		/* phys addr of bootstrap page directory */
				/* used by slave CPUs to enable paging */
#endif	/* NCPUS > 1 */
183
/*
 * All CPUs start here.
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 *
 * On entry %eax holds the kernbootstruct pointer (preserved in %ebx).
 * Symbols must be referenced through PA() until paging is enabled.
 */
	.text
	.align	ALIGN
	.globl	EXT(pstart)
	.globl	EXT(_start)
LEXT(_start)
LEXT(pstart)
	mov	%eax, %ebx	/* save pointer to kernbootstruct */
	mov	$0,%ax		/* fs must be zeroed; */
	mov	%ax,%fs		/* some bootstrappers don`t do this */
	mov	%ax,%gs

#if	NCPUS > 1
	/*
	 * Hand-coded test-and-set spin lock on start_lock: spin-read
	 * until it looks free, then attempt the atomic xchgl; retry
	 * on failure.  (Cannot call hw_lock code — no stack yet.)
	 */
	jmp	1f
0:	cmpl	$0,PA(EXT(start_lock))
	jne	0b
1:	movb	$1,%eax		/* NOTE(review): `movb` with 32-bit %eax is
				 * a suffix/operand mismatch; the assembler
				 * presumably accepts it — confirm how it is
				 * encoded before touching this line */
	xchgl	%eax,PA(EXT(start_lock))	/* locked */
	testl	%eax,%eax
	jnz	0b

	cmpl	$0,PA(EXT(master_is_up))	/* are we first? */
	jne	EXT(slave_start)		/* no -- system already up. */
	movl	$1,PA(EXT(master_is_up))	/* others become slaves */
#endif	/* NCPUS > 1 */

/*
 * Get startup parameters.
 * (asm_startup.h leaves the first available physical address in %ebx.)
 */

#include <i386/AT386/asm_startup.h>
221
/*
 * Build initial page table directory and page tables.
 * %ebx holds first available physical address.
 *
 * Layout: the page directory occupies the first page at %ebx; the
 * pages immediately after it become page tables that 1-1 map the
 * kernel and the page tables themselves, starting at KERNELBASE.
 */

	addl	$(NBPG-1),%ebx	/* round first avail physical addr */
	andl	$(-NBPG),%ebx	/* to machine page size */
	leal	-KVTOPHYS(%ebx),%eax	/* convert to virtual address */
	movl	%eax,PA(EXT(kpde))	/* save as kernel page table directory */
	movl	%ebx,%cr3	/* set physical address in CR3 now */

	movl	%ebx,%edi	/* clear page table directory */
	movl	$(PTES_PER_PAGE),%ecx	/* one page of ptes */
	xorl	%eax,%eax
	cld
	rep
	stosl			/* edi now points to next page */

/*
 * Use next few pages for page tables.
 */
	addl	$(KERNELBASEPDE),%ebx	/* point to pde for kernel base */
	movl	%edi,%esi	/* point to end of current pte page */

/*
 * Enter 1-1 mappings for kernel and for kernel page tables.
 * Loop invariant: %eax = next pte to store (phys page | pte bits),
 * %edi = next pte slot, %esi = end of current pte page, %ebx = next
 * pde slot.  A new pte page is started (and entered in the pde)
 * whenever %edi reaches %esi.
 */
	movl	$(INTEL_PTE_KERNEL),%eax	/* set up pte prototype */
0:
	cmpl	%esi,%edi	/* at end of pte page? */
	jb	1f		/* if so: */
	movl	%edi,%edx	/* get pte address (physical) */
	andl	$(-NBPG),%edx	/* mask out offset in page */
	orl	$(INTEL_PTE_KERNEL),%edx	/* add pte bits */
	movl	%edx,(%ebx)	/* set pde */
	addl	$4,%ebx		/* point to next pde */
	movl	%edi,%esi	/* point to */
	addl	$(NBPG),%esi	/* end of new pte page */
1:
	movl	%eax,(%edi)	/* set pte */
	addl	$4,%edi		/* advance to next pte */
	addl	$(NBPG),%eax	/* advance to next phys page */
	cmpl	%edi,%eax	/* have we mapped this pte page yet? */
	jb	0b		/* loop if not */

/*
 * Zero rest of last pte page.
 */
	xor	%eax,%eax	/* don`t map yet */
2:	cmpl	%esi,%edi	/* at end of pte page? */
	jae	3f
	movl	%eax,(%edi)	/* zero mapping */
	addl	$4,%edi
	jmp	2b
3:

#if	NCPUS > 1
/*
 * Grab (waste?) another page for a bootstrap page directory
 * for the other CPUs.  We don't want the running CPUs to see
 * addresses 0..3fffff mapped 1-1.
 */
	movl	%edi,PA(EXT(mp_boot_pde))	/* save its physical address */
	movl	$(PTES_PER_PAGE),%ecx	/* and clear it */
	rep
	stosl			/* (direction flag still clear from above) */
#endif	/* NCPUS > 1 */
	movl	%edi,PA(EXT(first_avail))	/* save first available phys addr */
290
/*
 * pmap_bootstrap will enter rest of mappings.
 */

/*
 * Fix initial descriptor tables: convert each table from the linker's
 * "fake" descriptor layout to the hardware format via fix_desc_common.
 * No stack exists yet, so the return address is passed in %ebx
 * (physical) and fix_desc_common is entered with a plain jmp.
 * Inputs per call: %esi = table phys addr, %ecx = entry count.
 */
	lea	PA(EXT(idt)),%esi	/* fix IDT */
	movl	$(IDTSZ),%ecx
	movl	$(PA(fix_idt_ret)),%ebx
	jmp	fix_desc_common		/* (cannot use stack) */
fix_idt_ret:

	lea	PA(EXT(gdt)),%esi	/* fix GDT */
	movl	$(GDTSZ),%ecx
	movl	$(PA(fix_gdt_ret)),%ebx
	jmp	fix_desc_common		/* (cannot use stack) */
fix_gdt_ret:

	lea	PA(EXT(ldt)),%esi	/* fix LDT */
	movl	$(LDTSZ),%ecx
	movl	$(PA(fix_ldt_ret)),%ebx
	jmp	fix_desc_common		/* (cannot use stack) */
fix_ldt_ret:
315
/*
 * Turn on paging.
 * Copy the pdes for the kernel's first two 4MB regions down to
 * directory slots 0 and 1, so virtual 0..7fffff is identity-mapped
 * while the switch to paged mode executes.
 */
	movl	%cr3,%eax	/* retrieve kernel PDE phys address */
	movl	KERNELBASEPDE(%eax),%ecx
	movl	%ecx,(%eax)	/* set it also as pte for location */
				/* 0..3fffff, so that the code */
				/* that enters paged mode is mapped */
				/* to identical addresses after */
				/* paged mode is enabled */

	addl	$4,%eax		/* 400000..7fffff */
	movl	KERNELBASEPDE(%eax),%ecx
	movl	%ecx,(%eax)

	movl	$ EXT(pag_start),%ebx	/* first paged code address */

	movl	%cr0,%eax
	orl	$(CR0_PG),%eax	/* set PG bit in CR0 */
	orl	$(CR0_WP),%eax	/* write-protect kernel pages from */
				/* supervisor-mode writes as well */
	movl	%eax,%cr0	/* to enable paging */

	jmp	*%ebx		/* flush prefetch queue */

/*
 * We are now paging, and can run with correct addresses.
 */
LEXT(pag_start)
	lgdt	EXT(gdtptr)	/* load GDT */
	lidt	EXT(idtptr)	/* load IDT */
	LJMP(KERNEL_CS,EXT(vstart))	/* switch to kernel code segment */
347
/*
 * Master is now running with correct addresses.
 * Load the kernel data segments, wire up the TSS/LDT, switch to the
 * bootup interrupt stack, and enter C at i386_init() — never returns.
 */
LEXT(vstart)
	mov	$(KERNEL_DS),%ax	/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss
	mov	%ax,EXT(ktss)+TSS_SS0	/* set kernel stack segment */
					/* for traps to kernel */
#if	MACH_KDB
	mov	%ax,EXT(dbtss)+TSS_SS0	/* likewise for debug task switch */
	mov	%cr3,%eax	/* get PDBR into debug TSS */
	mov	%eax,EXT(dbtss)+TSS_PDBR
	mov	$0,%eax
#endif

	movw	$(KERNEL_LDT),%ax	/* get LDT segment */
	lldt	%ax		/* load LDT */
#if	MACH_KDB
	mov	%ax,EXT(ktss)+TSS_LDT	/* store LDT in two TSS, as well... */
	mov	%ax,EXT(dbtss)+TSS_LDT	/* ...matters if we switch tasks */
#endif
	movw	$(KERNEL_TSS),%ax
	ltr	%ax		/* set up KTSS */

	mov	$ CPU_DATA,%ax	/* %gs addresses the per-CPU data segment */
	mov	%ax,%gs

	lea	EXT(eintstack),%esp	/* switch to the bootup stack */
	call	EXT(i386_init)	/* run C code */
	/*NOTREACHED*/
	hlt
381
#if	NCPUS > 1
/*
 * master_up is used by the master cpu to signify that it is done
 * with the interrupt stack, etc.  See the code in pstart and svstart
 * that this interlocks with.
 *
 * C-callable: void master_up(void).  Releases start_lock with an
 * atomic xchgl so a waiting slave may proceed.
 */
	.align	ALIGN
	.globl	EXT(master_up)
LEXT(master_up)
	pushl	%ebp		/* set up */
	movl	%esp,%ebp	/* stack frame */
	movl	$0,%ecx		/* unlock start_lock */
	xchgl	%ecx,EXT(start_lock)	/* since we are no longer using */
				/* bootstrap stack */
	leave			/* pop stack frame */
	ret
398
/*
 * We aren't the first.  Call slave_main to initialize the processor
 * and get Mach going on it.
 *
 * Entered from pstart (paging off, start_lock held).  Enables paging
 * through the bootstrap PDE set up by the master, since the real
 * kernel PDE no longer identity-maps low memory.
 */
	.align	ALIGN
	.globl	EXT(slave_start)
LEXT(slave_start)
	cli			/* disable interrupts, so we don`t */
				/* need IDT for a while */
	movl	EXT(kpde)+KVTOPHYS,%ebx	/* get PDE virtual address */
	addl	$(KVTOPHYS),%ebx	/* convert to physical address */

	movl	PA(EXT(mp_boot_pde)),%edx	/* point to the bootstrap PDE */
	movl	KERNELBASEPDE(%ebx),%eax
				/* point to pte for KERNELBASE */
	movl	%eax,KERNELBASEPDE(%edx)
				/* set in bootstrap PDE */
	movl	%eax,(%edx)	/* set it also as pte for location */
				/* 0..3fffff, so that the code */
				/* that enters paged mode is mapped */
				/* to identical addresses after */
				/* paged mode is enabled */
	movl	%edx,%cr3	/* use bootstrap PDE to enable paging */

	movl	$ EXT(spag_start),%edx	/* first paged code address */

	movl	%cr0,%eax
	orl	$(CR0_PG),%eax	/* set PG bit in CR0 */
	orl	$(CR0_WP),%eax
	movl	%eax,%cr0	/* to enable paging */

	jmp	*%edx		/* flush prefetch queue. */
431
/*
 * We are now paging, and can run with correct addresses.
 * (%ebx still holds the real kernel PDE physical address, which
 * svstart loads into %cr3 once it no longer needs the low mapping.)
 */
LEXT(spag_start)

	lgdt	EXT(gdtptr)	/* load GDT */
	lidt	EXT(idtptr)	/* load IDT */
	LJMP(KERNEL_CS,EXT(svstart))	/* switch to kernel code segment */
440
/*
 * Slave is now running with correct addresses.
 * Switch to the real kernel PDE and this CPU's interrupt stack,
 * release start_lock, install per-CPU descriptor tables, and enter
 * C at slave_main() — never returns.
 */
LEXT(svstart)
	mov	$(KERNEL_DS),%ax	/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss

	movl	%ebx,%cr3	/* switch to the real kernel PDE */

	CPU_NUMBER(%eax)
	movl	CX(EXT(interrupt_stack),%eax),%esp	/* get stack */
	addl	$(INTSTACK_SIZE),%esp	/* point to top */
	xorl	%ebp,%ebp	/* for completeness */

	movl	$0,%ecx		/* unlock start_lock */
	xchgl	%ecx,EXT(start_lock)	/* since we are no longer using */
				/* bootstrap stack */

/*
 * switch to the per-cpu descriptor tables
 */

	pushl	%eax		/* pass CPU number */
	call	EXT(mp_desc_init)	/* set up local table */
				/* pointer returned in %eax */
	subl	$4,%esp		/* get space to build pseudo-descriptors */

	CPU_NUMBER(%eax)
	movw	$(GDTSZ*8-1),0(%esp)	/* set GDT size in GDT descriptor */
	movl	CX(EXT(mp_gdt),%eax),%edx
	addl	$ KVTOLINEAR,%edx
	movl	%edx,2(%esp)	/* point to local GDT (linear address) */
	lgdt	0(%esp)		/* load new GDT */

	movw	$(IDTSZ*8-1),0(%esp)	/* set IDT size in IDT descriptor */
	movl	CX(EXT(mp_idt),%eax),%edx
	addl	$ KVTOLINEAR,%edx
	movl	%edx,2(%esp)	/* point to local IDT (linear address) */
	lidt	0(%esp)		/* load new IDT */

	movw	$(KERNEL_LDT),%ax	/* get LDT segment */
	lldt	%ax		/* load LDT */

	movw	$(KERNEL_TSS),%ax
	ltr	%ax		/* load new KTSS */

	mov	$ CPU_DATA,%ax	/* %gs addresses the per-CPU data segment */
	mov	%ax,%gs

	call	EXT(slave_main)	/* start MACH */
	/*NOTREACHED*/
	hlt
#endif	/* NCPUS > 1 */
496
/*
 * Convert a descriptor from fake to real format.
 *
 * Calls from assembly code:
 * %ebx = return address (physical) CANNOT USE STACK
 * %esi	= descriptor table address (physical)
 * %ecx = number of descriptors
 *
 * Calls from C:
 * 0(%esp) = return address
 * 4(%esp) = descriptor table address (physical)
 * 8(%esp) = number of descriptors
 *
 * Fake descriptor format:
 *	bytes 0..3		base 31..0
 *	bytes 4..5		limit 15..0
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			access byte 1
 *
 * Real descriptor format:
 *	bytes 0..1		limit 15..0
 *	bytes 2..3		base 15..0
 *	byte  4			base 23..16
 *	byte  5			access byte 1
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			base 31..24
 *
 * Fake gate format:
 *	bytes 0..3		offset
 *	bytes 4..5		selector
 *	byte  6			word count << 4 (to match fake descriptor)
 *	byte  7			access byte 1
 *
 * Real gate format:
 *	bytes 0..1		offset 15..0
 *	bytes 2..3		selector
 *	byte  4			word count
 *	byte  5			access byte 1
 *	bytes 6..7		offset 31..16
 */
	.globl	EXT(fix_desc)
LEXT(fix_desc)
	/* C-callable wrapper: void fix_desc(void *table, int count).
	 * Marshals the C arguments into the %esi/%ecx/%ebx convention
	 * expected by fix_desc_common, preserving %esi/%ebx. */
	pushl	%ebp		/* set up */
	movl	%esp,%ebp	/* stack frame */
	pushl	%esi		/* save registers */
	pushl	%ebx
	movl	B_ARG0,%esi	/* point to first descriptor */
	movl	B_ARG1,%ecx	/* get number of descriptors */
	lea	0f,%ebx		/* get return address */
	jmp	fix_desc_common	/* call internal routine */
0:	popl	%ebx		/* restore registers */
	popl	%esi
	leave			/* pop stack frame */
	ret			/* return */
551
/*
 * fix_desc_common: rewrite %ecx descriptors at %esi in place from the
 * fake to the real layout (see table above fix_desc), then jump to the
 * return address in %ebx.  Uses no stack — callable before one exists.
 * Clobbers %eax, %ecx, %edx, %esi, flags.
 */
fix_desc_common:
0:
	movw	6(%esi),%dx	/* get access byte */
	movb	%dh,%al
	andb	$0x14,%al	/* isolate S bit | high limit bit */
	cmpb	$0x04,%al	/* gate or descriptor? */
	je	1f

/* descriptor */
	movl	0(%esi),%eax	/* get base in eax */
	rol	$16,%eax	/* swap 15..0 with 31..16 */
				/* (15..0 in correct place) */
	movb	%al,%dl		/* combine bits 23..16 with ACC1 */
				/* in dh/dl */
	movb	%ah,7(%esi)	/* store bits 31..24 in correct place */
	movw	4(%esi),%ax	/* move limit bits 0..15 to word 0 */
	movl	%eax,0(%esi)	/* store (bytes 0..3 correct) */
	movw	%dx,4(%esi)	/* store bytes 4..5 */
	jmp	2f

/* gate */
1:
	movw	4(%esi),%ax	/* get selector */
	shrb	$4,%dl		/* shift word count to proper place */
	movw	%dx,4(%esi)	/* store word count / ACC1 */
	movw	2(%esi),%dx	/* get offset 16..31 */
	movw	%dx,6(%esi)	/* store in correct place */
	movw	%ax,2(%esi)	/* store selector in correct place */
2:
	addl	$8,%esi		/* bump to next descriptor */
	loop	0b		/* repeat */
	jmp	*%ebx		/* all done */
584
/*
 * put arg in kbd leds and spin a while
 * eats eax, ecx, edx
 *
 * Debug aid: writes the 8042 keyboard controller's "set LEDs" command
 * (0xED) followed by the LED bitmask, polling the status port around
 * each byte, then busy-waits so the LEDs stay visible.
 */
#define	K_RDWR		0x60		/* kbd data port */
#define	K_CMD_LEDS	0xed		/* "set LEDs" command byte */
#define	K_STATUS	0x64		/* kbd status port */
#define	K_IBUF_FULL	0x02		/* input (to kbd) buffer full */
#define	K_OBUF_FULL	0x01		/* output (from kbd) buffer full */

ENTRY(set_kbd_leds)
	mov	S_ARG0,%cl		/* save led value */

0:	inb	$(K_STATUS),%al		/* get kbd status */
	testb	$(K_IBUF_FULL),%al	/* input busy? */
	jne	0b			/* loop until not */

	mov	$(K_CMD_LEDS),%al	/* K_CMD_LEDS */
	outb	%al,$(K_RDWR)		/* to kbd */

0:	inb	$(K_STATUS),%al		/* get kbd status */
	testb	$(K_OBUF_FULL),%al	/* output present? */
	je	0b			/* loop if not */

	inb	$(K_RDWR),%al		/* read status (and discard) */
					/* (kbd ACK byte for the command) */

0:	inb	$(K_STATUS),%al		/* get kbd status */
	testb	$(K_IBUF_FULL),%al	/* input busy? */
	jne	0b			/* loop until not */

	mov	%cl,%al			/* move led value */
	outb	%al,$(K_RDWR)		/* to kbd */

	movl	$10000000,%ecx		/* spin */
0:	nop
	nop
	loop	0b			/* a while */

	ret