2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
34 * Mach Operating System
35 * Copyright (c) 1991,1990 Carnegie Mellon University
36 * All Rights Reserved.
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
48 * Carnegie Mellon requests users of this software to return to
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
61 #include <platforms.h>
65 #include <i386/proc_reg.h>
66 #include <i386/postcode.h>
69 #define CX(addr,reg) addr(,reg,4)
72 #include <i386/mp_slave_boot.h>
/*
 * Address-translation helper macros, used while the CPU is still running
 * on physical addresses (paging off).  PA() maps a kernel virtual address
 * to its physical address by adding KVTOPHYS (= -KERNELBASE); VA() is the
 * inverse.
 * NOTE(review): the body of the LJMP macro is missing from this copy of
 * the file (extraction dropped lines) -- confirm against the full source.
 */
75 * GAS won't handle an intersegment jump with a relocatable offset.
77 #define LJMP(segment,address) \
84 #define KVTOPHYS (-KERNELBASE)
85 #define KVTOLINEAR LINEAR_KERNELBASE
88 #define PA(addr) ((addr)+KVTOPHYS)
89 #define VA(addr) ((addr)-KVTOPHYS)
/*
 * Static storage reserved at assembly time with ".set ., .+size":
 *  - the interrupt/bootup stack for the initial processor (placed in the
 *    __HIB segment because, per the comment below, the hibernate restore
 *    code reuses this stack);
 *  - GDT/IDT pseudo-descriptors (16-bit limit words built with Times(8,N)-1);
 *  - under MACH_KDB, per-CPU kernel-debugger stacks (INTSTACK_SIZE*MAX_CPUS
 *    bytes each).
 * NOTE(review): several label lines (e.g. the intstack label itself) are
 * missing from this copy -- confirm against the full source.
 */
92 #if 0 /* Anyone need this? */
94 .globl EXT(_kick_buffer_)
101 * Interrupt and bootup stack for initial processor.
103 /* in the __HIB section since the hibernate restore code uses this stack. */
104 .section __HIB, __data
109 .globl EXT(gIOHibernateRestoreStack)
110 EXT(gIOHibernateRestoreStack):
/* reserve INTSTACK_SIZE bytes for the boot/interrupt stack */
112 .set ., .+INTSTACK_SIZE
114 .globl EXT(eintstack)
116 .globl EXT(gIOHibernateRestoreStackEnd)
117 EXT(gIOHibernateRestoreStackEnd):
120 * Pointers to GDT and IDT. These contain linear addresses.
/* limit field of the GDT pseudo-descriptor: 8 bytes per entry, minus 1 */
125 .word Times(8,GDTSZ)-1
/* limit field of the IDT pseudo-descriptor: 8 bytes per entry, minus 1 */
131 .word Times(8,IDTSZ)-1
134 /* back to the regular __DATA section. */
136 .section __DATA, __data
141 * Kernel debugger stack for each processor.
144 .globl EXT(db_stack_store)
146 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
149 * Stack for last-ditch debugger task for each processor.
152 .globl EXT(db_task_stack_store)
153 EXT(db_task_stack_store):
154 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
157 * per-processor kernel debugger stacks
160 .globl EXT(kgdb_stack_store)
161 EXT(kgdb_stack_store):
162 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
163 #endif /* MACH_KDB */
/*
 * Early-boot global variables.  start_lock is a hand-rolled spinlock
 * (manipulated with xchgl in pstart/svstart below, never through the
 * hw_lock API, because that code may need a working stack).  master_is_up
 * distinguishes the first CPU from the slaves.  The remaining .long cells
 * record physical addresses discovered/allocated during page-table setup.
 */
167 * start_lock is very special. We initialize the
168 * lock at allocation time rather than at run-time.
169 * Although start_lock should be an instance of a
170 * hw_lock, we hand-code all manipulation of the lock
171 * because the hw_lock code may require function calls;
172 * and we'd rather not introduce another dependency on
173 * a working stack at this point.
175 .globl EXT(start_lock)
177 .long 0 /* synchronizes processor startup */
179 .globl EXT(master_is_up)
181 .long 0 /* 1 when OK for other processors */
183 .globl EXT(mp_boot_pde)
187 _KERNend: .long 0 /* phys addr end of kernel (just after bss) */
188 physfree: .long 0 /* phys addr of next free page */
191 _IdlePTD: .long 0 /* phys addr of kernel PTD */
194 _IdlePDPT: .long 0 /* phys addr of kernel PDPT */
199 _KPTphys: .long 0 /* phys addr of kernel page tables */
/*
 * Page-table construction macros, used below while still un-paged:
 *  - ALLOCPAGES(npages): carve npages pages off the physfree bump pointer
 *    (and, per the $(npages)*PAGE_SIZE/4 count, apparently zero them --
 *    the clearing loop itself is missing from this copy);
 *  - fillkpt(base, prot): store %ecx consecutive PTEs starting at index
 *    %ebx of `base`, mapping ascending physical pages from %eax with
 *    protection `prot` | PTE_V;
 *  - fillkptphys(prot): identity-map %ecx pages starting at phys addr %eax
 *    into the kernel page tables (KPTphys).
 * NOTE(review): several macro continuation lines are missing here
 * (extraction dropped them) -- confirm register usage against the full file.
 */
202 /* Some handy macros */
204 #define ALLOCPAGES(npages) \
205 movl PA(physfree), %esi ; \
206 movl $((npages) * PAGE_SIZE), %eax ; \
208 movl %eax, PA(physfree) ; \
210 movl $((npages) * PAGE_SIZE / 4),%ecx ; \
218 * eax = page frame address
219 * ebx = index into page table
220 * ecx = how many pages to map
221 * base = base address of page dir/table
222 * prot = protection bits
224 #define fillkpt(base, prot) \
225 shll $(PTEINDX),%ebx ; \
227 orl $(PTE_V) ,%eax ; \
229 1: movl %eax,(%ebx) ; \
230 addl $(PAGE_SIZE),%eax ; /* increment physical address */ \
231 addl $(PTESIZE),%ebx ; /* next pte */ \
236 * eax = physical address
237 * ecx = how many pages to map
238 * prot = protection bits
240 #define fillkptphys(prot) \
242 shrl $(PAGE_SHIFT), %ebx ; \
243 fillkpt(PA(EXT(KPTphys)), prot)
/*
 * pstart -- common entry point for ALL CPUs, reached in protected mode
 * with paging off and flat 32-bit segments.  %eax arrives holding the
 * KERNBOOTSTRUCT pointer (saved into %ebx immediately).
 *
 * Flow visible below: grab start_lock; the first CPU through sets
 * master_is_up and continues as master, later CPUs branch to slave_start.
 * The master then records boot args, computes the kernel end address,
 * allocates and populates the boot page tables (identity map of the
 * kernel, page-directory self-map, a user-accessible commpage PDE, and
 * temporary PAE PDPT/PDE pages at 0x4000-0x8000 for slaves), converts the
 * boot-time "fake" IDT/GDT/LDT descriptors to real format via
 * fix_desc_common (jumped to, not called -- no stack yet), loads GDT/IDT,
 * enables paging via CR0, and long-jumps into vstart at kernel VAs.
 * NOTE(review): label lines (pstart:, fix_idt_ret: etc.) and the
 * mov-to-%cr3/%cr4 instructions are missing from this copy of the file --
 * confirm against the full source.
 */
247 * All CPUs start here.
250 * protected mode, no paging, flat 32-bit address space.
251 * (Code/data/stack segments have base == 0, limit == 4G)
259 mov %eax, %ebx /* save pointer to kernbootstruct */
261 POSTCODE(PSTART_ENTRY);
263 mov $0,%ax /* fs must be zeroed; */
264 mov %ax,%fs /* some bootstrappers don`t do this */
/* spin until start_lock is free, then take it with an atomic exchange */
268 0: cmpl $0,PA(EXT(start_lock))
271 xchgl %eax,PA(EXT(start_lock)) /* locked */
275 cmpl $0,PA(EXT(master_is_up)) /* are we first? */
276 jne EXT(slave_start) /* no -- system already up. */
277 movl $1,PA(EXT(master_is_up)) /* others become slaves */
282 * Get startup parameters.
285 movl %ebx,PA(EXT(boot_args_start)) /* Save KERNBOOTSTRUCT */
/* KERNend = load address + kernel size, from the bootstruct */
287 movl KADDR(%ebx), %eax
288 addl KSIZE(%ebx), %eax
291 movl %eax, PA(EXT(KERNend))
292 movl %eax, PA(physfree)
295 /* allocate kernel page table pages */
297 movl %esi,PA(EXT(KPTphys))
300 /* allocate Page Table Directory Page */
302 movl %esi,PA(EXT(IdlePDPT))
305 /* allocate kernel page directory page */
307 movl %esi,PA(EXT(IdlePTD))
309 /* map from zero to end of kernel */
/* %ecx = page count covering [0, physfree); identity-map it writable */
311 movl PA(physfree),%ecx
312 shrl $(PAGE_SHIFT),%ecx
313 fillkptphys( $(PTE_W) )
315 /* map page directory */
317 movl PA(EXT(IdlePDPT)), %eax
319 fillkptphys( $(PTE_W) )
321 movl PA(EXT(IdlePTD)),%eax
323 fillkptphys( $(PTE_W) )
325 /* install a pde for temp double map of bottom of VA */
326 movl PA(EXT(KPTphys)),%eax
329 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
331 /* install pde's for page tables */
332 movl PA(EXT(KPTphys)),%eax
335 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
337 /* install a pde recursively mapping page directory as a page table */
338 movl PA(EXT(IdlePTD)),%eax
341 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
/* point the PDPT entries at the page directory (PAE top level) */
344 movl PA(EXT(IdlePTD)), %eax
347 fillkpt(PA(EXT(IdlePDPT)), $0)
350 /* install a pde page for commpage use up in high memory */
352 movl PA(physfree),%eax /* grab next phys page */
354 addl $(PAGE_SIZE),%ebx
355 movl %ebx,PA(physfree) /* show next free phys pg */
356 movl $(COMM_PAGE_BASE_ADDR),%ebx
357 shrl $(PDESHIFT),%ebx /* index into pde page */
358 movl $(1), %ecx /* # pdes to store */
359 fillkpt(PA(EXT(IdlePTD)), $(PTE_W|PTE_U)) /* user has access! */
/* everything below physfree is now spoken for */
361 movl PA(physfree),%edi
362 movl %edi,PA(EXT(first_avail)) /* save first available phys addr */
366 * We steal 0x4000 for a temp pdpt and 0x5000-0x8000
367 * for temp pde pages in the PAE case. Once we are
368 * running at the proper virtual address we switch to
369 * the PDPT/PDE's the master is using */
371 /* clear pdpt page to be safe */
373 movl $(PAGE_SIZE),%ecx
379 /* build temp pdpt */
383 fillkpt($(0x4000), $0)
385 /* copy the NPGPTD pages of pdes */
386 movl PA(EXT(IdlePTD)),%eax
388 movl $((PTEMASK+1)*NPGPTD),%ecx
397 /* create temp pde for slaves to use
398 use unused lomem page and copy in IdlePTD */
399 movl PA(EXT(IdlePTD)),%eax
401 movl $(PTEMASK+1),%ecx
409 POSTCODE(PSTART_PAGE_TABLES);
412 * Fix initial descriptor tables.
/* each table is converted in place by fix_desc_common; the "return
   address" is passed in %ebx because there is no usable stack yet */
414 lea PA(EXT(idt)),%esi /* fix IDT */
416 movl $(PA(fix_idt_ret)),%ebx
417 jmp fix_desc_common /* (cannot use stack) */
420 lea PA(EXT(gdt)),%esi /* fix GDT */
422 movl $(PA(fix_gdt_ret)),%ebx
423 jmp fix_desc_common /* (cannot use stack) */
426 lea PA(EXT(ldt)),%esi /* fix LDT */
428 movl $(PA(fix_ldt_ret)),%ebx
429 jmp fix_desc_common /* (cannot use stack) */
436 lgdt PA(EXT(gdtptr)) /* load GDT */
437 lidt PA(EXT(idtptr)) /* load IDT */
439 POSTCODE(PSTART_BEFORE_PAGING);
/* load root of the page-table hierarchy (PDPT for PAE, PTD otherwise);
   NOTE(review): the actual mov to %cr3 is on lines missing here */
445 movl PA(EXT(IdlePDPT)), %eax
452 movl PA(EXT(IdlePTD)), %eax
457 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
458 movl %eax,%cr0 /* to enable paging */
460 LJMP(KERNEL_CS,EXT(vstart)) /* switch to kernel code segment */
/*
 * vstart -- master CPU, now executing at kernel virtual addresses with
 * paging on.  Loads the kernel data segment, initializes the kernel and
 * debug TSSs (stack segment, PDBR, LDT), loads LDT/TR/GS, switches %esp
 * onto the bootup interrupt stack, then calls i386_preinit and i386_init
 * (C code).  __start is aliased to the physical address of pstart for the
 * linker/loader.
 */
463 * Master is now running with correct addresses.
466 POSTCODE(VSTART_ENTRY) ;
468 mov $(KERNEL_DS),%ax /* set kernel data segment */
472 mov %ax,EXT(ktss)+TSS_SS0 /* set kernel stack segment */
473 /* for traps to kernel */
475 mov %ax,EXT(dbtss)+TSS_SS0 /* likewise for debug task switch */
476 mov %cr3,%eax /* get PDBR into debug TSS */
477 mov %eax,EXT(dbtss)+TSS_PDBR
481 movw $(KERNEL_LDT),%ax /* get LDT segment */
482 lldt %ax /* load LDT */
484 mov %ax,EXT(ktss)+TSS_LDT /* store LDT in two TSS, as well... */
485 mov %ax,EXT(dbtss)+TSS_LDT /* ...matters if we switch tasks */
487 movw $(KERNEL_TSS),%ax
488 ltr %ax /* set up KTSS */
490 mov $(CPU_DATA_GS),%ax
493 POSTCODE(VSTART_STACK_SWITCH);
495 lea EXT(eintstack),%esp /* switch to the bootup stack */
496 call EXT(i386_preinit)
498 POSTCODE(VSTART_EXIT);
500 call EXT(i386_init) /* run C code */
/* entry symbol for the loader: physical address of pstart */
506 .set __start, PA(EXT(pstart))
/*
 * master_up() -- called (from C, per the stack-frame prologue) once the
 * master CPU is finished with the bootstrap interrupt stack.  Releases
 * start_lock with an atomic exchange so that waiting slave CPUs may
 * proceed through pstart/slave_start.
 */
510 * master_up is used by the master cpu to signify that it is done
511 * with the interrupt stack, etc. See the code in pstart and svstart
512 * that this interlocks with.
515 .globl EXT(master_up)
517 pushl %ebp /* set up */
518 movl %esp,%ebp /* stack frame */
519 movl $0,%ecx /* unlock start_lock */
520 xchgl %ecx,EXT(start_lock) /* since we are no longer using */
521 /* bootstrap stack */
522 leave /* pop stack frame */
/*
 * slave_start -- entry for every CPU after the first (reached from the
 * master_is_up check in pstart).  Runs un-paged: disables interrupts,
 * loads the temporary page tables at 0x4000 that the master built,
 * enables paging via CR0, and jumps (through %edx, flushing the prefetch
 * queue) to spag_start, which reloads GDT/IDT and long-jumps to svstart
 * in the kernel code segment.
 * NOTE(review): the labels and the mov-to-%cr3 between these lines are
 * missing from this copy -- confirm against the full source.
 */
526 * We aren't the first. Call slave_main to initialize the processor
527 * and get Mach going on it.
530 .globl EXT(slave_start)
532 cli /* disable interrupts, so we don`t */
533 /* need IDT for a while */
535 POSTCODE(SLAVE_START_ENTRY);
539 movl $(EXT(spag_start)),%edx /* first paged code address */
549 movl $(0x4000),%eax /* tmp until we get mapped */
554 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
555 movl %eax,%cr0 /* to enable paging */
557 POSTCODE(SLAVE_START_EXIT);
558 /* indirect jump: lands in spag_start below once paging is live */
559 jmp *%edx /* flush prefetch queue */
562 * We are now paging, and can run with correct addresses.
566 lgdt PA(EXT(gdtptr)) /* load GDT */
567 lidt PA(EXT(idtptr)) /* load IDT */
569 LJMP(KERNEL_CS,EXT(svstart)) /* switch to kernel code segment */
/*
 * svstart -- slave CPU, now paged and in the kernel code segment.
 * Switches from the master's temporary page tables to the real
 * IdlePDPT/IdlePTD, loads kernel segments, points %esp at the MP boot
 * stack, installs this CPU's private GDT/IDT/LDT/TSS (located through
 * cpu_data_ptr[cpu_number]), switches to the per-cpu interrupt stack from
 * %gs-relative cpu data, releases start_lock, and calls i386_init_slave.
 * NOTE(review): the mov-to-%cr3 and some segment-register loads appear to
 * be on lines missing from this copy -- confirm against the full source.
 */
573 * Slave is now running with correct addresses.
577 POSTCODE(SVSTART_ENTRY);
580 movl PA(EXT(IdlePDPT)), %eax
583 movl PA(EXT(IdlePTD)), %eax
587 mov $(KERNEL_DS),%ax /* set kernel data segment */
593 * We're not quite through with the boot stack
594 * but we need to reset the stack pointer to the correct virtual
596 * And we need to offset above the address of pstart.
598 movl $(VA(MP_BOOTSTACK+MP_BOOT+4)), %esp
601 * Switch to the per-cpu descriptor tables
603 POSTCODE(SVSTART_DESC_INIT);
/* %ecx = this CPU's descriptor-table pointer, via cpu_data_ptr[] */
605 CPU_NUMBER_FROM_LAPIC(%eax)
606 movl CX(EXT(cpu_data_ptr),%eax),%ecx
607 movl CPU_DESC_TABLEP(%ecx), %ecx
/* build a GDT pseudo-descriptor in-place on the stack, then load it */
609 movw $(GDTSZ*8-1),0(%esp) /* set GDT size in GDT descriptor */
610 leal MP_GDT(%ecx),%edx
611 movl %edx,2(%esp) /* point to local GDT (linear addr) */
612 lgdt 0(%esp) /* load new GDT */
/* likewise for the per-cpu IDT */
614 movw $(IDTSZ*8-1),0(%esp) /* set IDT size in IDT descriptor */
615 leal MP_IDT(%ecx),%edx
616 movl %edx,2(%esp) /* point to local IDT (linear addr) */
617 lidt 0(%esp) /* load new IDT */
619 movw $(KERNEL_LDT),%ax /* get LDT segment */
620 lldt %ax /* load LDT */
622 movw $(KERNEL_TSS),%ax
623 ltr %ax /* load new KTSS */
625 mov $(CPU_DATA_GS),%ax
629 * Get stack top from pre-cpu data and switch
631 POSTCODE(SVSTART_STACK_SWITCH);
633 movl %gs:CPU_INT_STACK_TOP,%esp
634 xorl %ebp,%ebp /* for completeness */
636 movl $0,%eax /* unlock start_lock */
637 xchgl %eax,EXT(start_lock) /* since we are no longer using */
638 /* bootstrap stack */
639 POSTCODE(SVSTART_EXIT);
641 call EXT(i386_init_slave) /* start MACH */
/*
 * fix_desc / fix_desc_common -- convert descriptor-table entries from the
 * "fake" layout the kernel sources use (base first, then limit/access) to
 * the hardware descriptor layout.  fix_desc is a normal C-callable
 * wrapper (args on stack); fix_desc_common is the stackless core used
 * during pstart, with the continuation address passed in %ebx.
 * In-place conversion per 8-byte entry: %esi = current entry,
 * %ecx = entries remaining, %dx = access/limit word used for shuffling.
 * Gate descriptors (access-byte low nibble < 0x04, per the cmpb below)
 * are rearranged differently from segment descriptors.
 * NOTE(review): the fix_desc_common label line and a couple of branch
 * lines are missing from this copy -- confirm against the full source.
 */
646 * Convert a descriptor from fake to real format.
648 * Calls from assembly code:
649 * %ebx = return address (physical) CANNOT USE STACK
650 * %esi = descriptor table address (physical)
651 * %ecx = number of descriptors
654 * 0(%esp) = return address
655 * 4(%esp) = descriptor table address (physical)
656 * 8(%esp) = number of descriptors
658 * Fake descriptor format:
659 * bytes 0..3 base 31..0
660 * bytes 4..5 limit 15..0
661 * byte 6 access byte 2 | limit 19..16
662 * byte 7 access byte 1
664 * Real descriptor format:
665 * bytes 0..1 limit 15..0
666 * bytes 2..3 base 15..0
668 * byte 5 access byte 1
669 * byte 6 access byte 2 | limit 19..16
674 * bytes 4..5 selector
675 * byte 6 word count << 4 (to match fake descriptor)
676 * byte 7 access byte 1
679 * bytes 0..1 offset 15..0
680 * bytes 2..3 selector
682 * byte 5 access byte 1
683 * bytes 6..7 offset 31..16
687 pushl %ebp /* set up */
688 movl %esp,%ebp /* stack frame */
689 pushl %esi /* save registers */
691 movl B_ARG0,%esi /* point to first descriptor */
692 movl B_ARG1,%ecx /* get number of descriptors */
/* emulate the assembly-call protocol: continuation address in %ebx */
693 lea 0f,%ebx /* get return address */
694 jmp fix_desc_common /* call internal routine */
695 0: popl %ebx /* restore registers */
697 leave /* pop stack frame */
/* --- stackless conversion loop: one 8-byte descriptor per iteration --- */
702 movw 6(%esi),%dx /* get access byte */
705 cmpb $0x04,%al /* gate or descriptor? */
/* segment-descriptor path: swap base/limit words into hardware order */
709 movl 0(%esi),%eax /* get base in eax */
710 rol $16,%eax /* swap 15..0 with 31..16 */
711 /* (15..0 in correct place) */
712 movb %al,%dl /* combine bits 23..16 with ACC1 */
714 movb %ah,7(%esi) /* store bits 31..24 in correct place */
715 movw 4(%esi),%ax /* move limit bits 0..15 to word 0 */
716 movl %eax,0(%esi) /* store (bytes 0..3 correct) */
717 movw %dx,4(%esi) /* store bytes 4..5 */
/* gate-descriptor path: selector/offset/word-count shuffle */
722 movw 4(%esi),%ax /* get selector */
723 shrb $4,%dl /* shift word count to proper place */
724 movw %dx,4(%esi) /* store word count / ACC1 */
725 movw 2(%esi),%dx /* get offset 16..31 */
726 movw %dx,6(%esi) /* store in correct place */
727 movw %ax,2(%esi) /* store selector in correct place */
729 addl $8,%esi /* bump to next descriptor */
731 jmp *%ebx /* all done */
/*
 * Debug aid: write the caller's argument into the keyboard-controller
 * LEDs (command 0xed) and spin, so early-boot progress is visible on a
 * machine with no other output.  Each i8042 access follows the standard
 * handshake: poll K_STATUS until the input buffer is empty (K_IBUF_FULL
 * clear) before writing, and until K_OBUF_FULL is set before reading the
 * ack byte (which is read and discarded).
 * NOTE(review): the entry label and the spin-loop body before the final
 * `loop` are on lines missing from this copy -- confirm against the full
 * source.
 */
734 * put arg in kbd leds and spin a while
738 #define K_CMD_LEDS 0xed
739 #define K_STATUS 0x64
740 #define K_IBUF_FULL 0x02 /* input (to kbd) buffer full */
741 #define K_OBUF_FULL 0x01 /* output (from kbd) buffer full */
744 mov S_ARG0,%cl /* save led value */
746 0: inb $(K_STATUS),%al /* get kbd status */
747 testb $(K_IBUF_FULL),%al /* input busy? */
748 jne 0b /* loop until not */
750 mov $(K_CMD_LEDS),%al /* K_CMD_LEDS */
751 outb %al,$(K_RDWR) /* to kbd */
753 0: inb $(K_STATUS),%al /* get kbd status */
754 testb $(K_OBUF_FULL),%al /* output present? */
755 je 0b /* loop if not */
757 inb $(K_RDWR),%al /* read status (and discard) */
759 0: inb $(K_STATUS),%al /* get kbd status */
760 testb $(K_IBUF_FULL),%al /* input busy? */
761 jne 0b /* loop until not */
763 mov %cl,%al /* move led value */
764 outb %al,$(K_RDWR) /* to kbd */
766 movl $10000000,%ecx /* spin */
769 loop 0b /* a while */