 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 *
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
53 #include <platforms.h>
57 #include <i386/proc_reg.h>
58 #include <i386/postcode.h>
61 #define CX(addr,reg) addr(,reg,4)
64 #include <i386/mp_slave_boot.h>
67 * GAS won't handle an intersegment jump with a relocatable offset.
69 #define LJMP(segment,address) \
76 #define KVTOPHYS (-KERNELBASE)
77 #define KVTOLINEAR LINEAR_KERNELBASE
80 #define PA(addr) ((addr)+KVTOPHYS)
81 #define VA(addr) ((addr)-KVTOPHYS)
84 #if 0 /* Anyone need this? */
86 .globl EXT(_kick_buffer_)
93 * Interrupt and bootup stack for initial processor.
95 /* in the __HIB section since the hibernate restore code uses this stack. */
96 .section __HIB, __data
101 .globl EXT(gIOHibernateRestoreStack)
102 EXT(gIOHibernateRestoreStack):
104 .set ., .+INTSTACK_SIZE
106 .globl EXT(eintstack)
108 .globl EXT(gIOHibernateRestoreStackEnd)
109 EXT(gIOHibernateRestoreStackEnd):
112 * Pointers to GDT and IDT. These contain linear addresses.
117 .word Times(8,GDTSZ)-1
123 .word Times(8,IDTSZ)-1
126 /* back to the regular __DATA section. */
128 .section __DATA, __data
133 * Kernel debugger stack for each processor.
136 .globl EXT(db_stack_store)
138 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
141 * Stack for last-ditch debugger task for each processor.
144 .globl EXT(db_task_stack_store)
145 EXT(db_task_stack_store):
146 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
149 * per-processor kernel debugger stacks
152 .globl EXT(kgdb_stack_store)
153 EXT(kgdb_stack_store):
154 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
155 #endif /* MACH_KDB */
159 * start_lock is very special. We initialize the
160 * lock at allocation time rather than at run-time.
161 * Although start_lock should be an instance of a
162 * hw_lock, we hand-code all manipulation of the lock
163 * because the hw_lock code may require function calls;
164 * and we'd rather not introduce another dependency on
165 * a working stack at this point.
167 .globl EXT(start_lock)
169 .long 0 /* synchronizes processor startup */
171 .globl EXT(master_is_up)
173 .long 0 /* 1 when OK for other processors */
175 .globl EXT(mp_boot_pde)
179 _KERNend: .long 0 /* phys addr end of kernel (just after bss) */
180 physfree: .long 0 /* phys addr of next free page */
183 _IdlePTD: .long 0 /* phys addr of kernel PTD */
186 _IdlePDPT: .long 0 /* phys addr of kernel PDPT */
191 _KPTphys: .long 0 /* phys addr of kernel page tables */
194 /* Some handy macros */
196 #define ALLOCPAGES(npages) \
197 movl PA(physfree), %esi ; \
198 movl $((npages) * PAGE_SIZE), %eax ; \
200 movl %eax, PA(physfree) ; \
202 movl $((npages) * PAGE_SIZE / 4),%ecx ; \
210 * eax = page frame address
211 * ebx = index into page table
212 * ecx = how many pages to map
213 * base = base address of page dir/table
214 * prot = protection bits
216 #define fillkpt(base, prot) \
217 shll $(PTEINDX),%ebx ; \
219 orl $(PTE_V) ,%eax ; \
221 1: movl %eax,(%ebx) ; \
222 addl $(PAGE_SIZE),%eax ; /* increment physical address */ \
223 addl $(PTESIZE),%ebx ; /* next pte */ \
228 * eax = physical address
229 * ecx = how many pages to map
230 * prot = protection bits
232 #define fillkptphys(prot) \
234 shrl $(PAGE_SHIFT), %ebx ; \
235 fillkpt(PA(EXT(KPTphys)), prot)
239 * All CPUs start here.
242 * protected mode, no paging, flat 32-bit address space.
243 * (Code/data/stack segments have base == 0, limit == 4G)
251 mov %eax, %ebx /* save pointer to kernbootstruct */
253 POSTCODE(PSTART_ENTRY);
255 mov $0,%ax /* fs must be zeroed; */
256 mov %ax,%fs /* some bootstrappers don`t do this */
260 0: cmpl $0,PA(EXT(start_lock))
263 xchgl %eax,PA(EXT(start_lock)) /* locked */
267 cmpl $0,PA(EXT(master_is_up)) /* are we first? */
268 jne EXT(slave_start) /* no -- system already up. */
269 movl $1,PA(EXT(master_is_up)) /* others become slaves */
274 * Get startup parameters.
277 movl %ebx,PA(EXT(boot_args_start)) /* Save KERNBOOTSTRUCT */
279 movl KADDR(%ebx), %eax
280 addl KSIZE(%ebx), %eax
283 movl %eax, PA(EXT(KERNend))
284 movl %eax, PA(physfree)
287 /* allocate kernel page table pages */
289 movl %esi,PA(EXT(KPTphys))
292 /* allocate Page Table Directory Page */
294 movl %esi,PA(EXT(IdlePDPT))
297 /* allocate kernel page directory page */
299 movl %esi,PA(EXT(IdlePTD))
301 /* map from zero to end of kernel */
303 movl PA(physfree),%ecx
304 shrl $(PAGE_SHIFT),%ecx
305 fillkptphys( $(PTE_W) )
307 /* map page directory */
309 movl PA(EXT(IdlePDPT)), %eax
311 fillkptphys( $(PTE_W) )
313 movl PA(EXT(IdlePTD)),%eax
315 fillkptphys( $(PTE_W) )
317 /* install a pde for temp double map of bottom of VA */
318 movl PA(EXT(KPTphys)),%eax
321 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
323 /* install pde's for page tables */
324 movl PA(EXT(KPTphys)),%eax
327 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
329 /* install a pde recursively mapping page directory as a page table */
330 movl PA(EXT(IdlePTD)),%eax
333 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
336 movl PA(EXT(IdlePTD)), %eax
339 fillkpt(PA(EXT(IdlePDPT)), $0)
342 /* install a pde page for commpage use up in high memory */
344 movl PA(physfree),%eax /* grab next phys page */
346 addl $(PAGE_SIZE),%ebx
347 movl %ebx,PA(physfree) /* show next free phys pg */
348 movl $(COMM_PAGE_BASE_ADDR),%ebx
349 shrl $(PDESHIFT),%ebx /* index into pde page */
350 movl $(1), %ecx /* # pdes to store */
351 fillkpt(PA(EXT(IdlePTD)), $(PTE_W|PTE_U)) /* user has access! */
353 movl PA(physfree),%edi
354 movl %edi,PA(EXT(first_avail)) /* save first available phys addr */
358 * We steal 0x4000 for a temp pdpt and 0x5000-0x8000
359 * for temp pde pages in the PAE case. Once we are
360 * running at the proper virtual address we switch to
361 * the PDPT/PDE's the master is using */
363 /* clear pdpt page to be safe */
365 movl $(PAGE_SIZE),%ecx
371 /* build temp pdpt */
375 fillkpt($(0x4000), $0)
377 /* copy the NPGPTD pages of pdes */
378 movl PA(EXT(IdlePTD)),%eax
380 movl $((PTEMASK+1)*NPGPTD),%ecx
389 /* create temp pde for slaves to use
390 use unused lomem page and copy in IdlePTD */
391 movl PA(EXT(IdlePTD)),%eax
393 movl $(PTEMASK+1),%ecx
401 POSTCODE(PSTART_PAGE_TABLES);
404 * Fix initial descriptor tables.
406 lea PA(EXT(idt)),%esi /* fix IDT */
408 movl $(PA(fix_idt_ret)),%ebx
409 jmp fix_desc_common /* (cannot use stack) */
412 lea PA(EXT(gdt)),%esi /* fix GDT */
414 movl $(PA(fix_gdt_ret)),%ebx
415 jmp fix_desc_common /* (cannot use stack) */
418 lea PA(EXT(ldt)),%esi /* fix LDT */
420 movl $(PA(fix_ldt_ret)),%ebx
421 jmp fix_desc_common /* (cannot use stack) */
428 lgdt PA(EXT(gdtptr)) /* load GDT */
429 lidt PA(EXT(idtptr)) /* load IDT */
431 POSTCODE(PSTART_BEFORE_PAGING);
437 movl PA(EXT(IdlePDPT)), %eax
444 movl PA(EXT(IdlePTD)), %eax
449 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
450 movl %eax,%cr0 /* to enable paging */
452 LJMP(KERNEL_CS,EXT(vstart)) /* switch to kernel code segment */
455 * Master is now running with correct addresses.
458 POSTCODE(VSTART_ENTRY) ;
460 mov $(KERNEL_DS),%ax /* set kernel data segment */
464 mov %ax,EXT(ktss)+TSS_SS0 /* set kernel stack segment */
465 /* for traps to kernel */
467 mov %ax,EXT(dbtss)+TSS_SS0 /* likewise for debug task switch */
468 mov %cr3,%eax /* get PDBR into debug TSS */
469 mov %eax,EXT(dbtss)+TSS_PDBR
473 movw $(KERNEL_LDT),%ax /* get LDT segment */
474 lldt %ax /* load LDT */
476 mov %ax,EXT(ktss)+TSS_LDT /* store LDT in two TSS, as well... */
477 mov %ax,EXT(dbtss)+TSS_LDT /* ...matters if we switch tasks */
479 movw $(KERNEL_TSS),%ax
480 ltr %ax /* set up KTSS */
482 mov $(CPU_DATA_GS),%ax
485 POSTCODE(VSTART_STACK_SWITCH);
487 lea EXT(eintstack),%esp /* switch to the bootup stack */
488 call EXT(i386_preinit)
490 POSTCODE(VSTART_EXIT);
492 call EXT(i386_init) /* run C code */
498 .set __start, PA(EXT(pstart))
502 * master_up is used by the master cpu to signify that it is done
503 * with the interrupt stack, etc. See the code in pstart and svstart
504 * that this interlocks with.
507 .globl EXT(master_up)
509 pushl %ebp /* set up */
510 movl %esp,%ebp /* stack frame */
511 movl $0,%ecx /* unlock start_lock */
512 xchgl %ecx,EXT(start_lock) /* since we are no longer using */
513 /* bootstrap stack */
514 leave /* pop stack frame */
518 * We aren't the first. Call slave_main to initialize the processor
519 * and get Mach going on it.
522 .globl EXT(slave_start)
524 cli /* disable interrupts, so we don`t */
525 /* need IDT for a while */
527 POSTCODE(SLAVE_START_ENTRY);
531 movl $(EXT(spag_start)),%edx /* first paged code address */
541 movl $(0x4000),%eax /* tmp until we get mapped */
546 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
547 movl %eax,%cr0 /* to enable paging */
549 POSTCODE(SLAVE_START_EXIT);
551 jmp *%edx /* flush prefetch queue */
554 * We are now paging, and can run with correct addresses.
558 lgdt PA(EXT(gdtptr)) /* load GDT */
559 lidt PA(EXT(idtptr)) /* load IDT */
561 LJMP(KERNEL_CS,EXT(svstart)) /* switch to kernel code segment */
565 * Slave is now running with correct addresses.
569 POSTCODE(SVSTART_ENTRY);
572 movl PA(EXT(IdlePDPT)), %eax
575 movl PA(EXT(IdlePTD)), %eax
579 mov $(KERNEL_DS),%ax /* set kernel data segment */
585 * We're not quite through with the boot stack
586 * but we need to reset the stack pointer to the correct virtual
588 * And we need to offset above the address of pstart.
590 movl $(VA(MP_BOOTSTACK+MP_BOOT+4)), %esp
593 * Switch to the per-cpu descriptor tables
595 POSTCODE(SVSTART_DESC_INIT);
597 CPU_NUMBER_FROM_LAPIC(%eax)
598 movl CX(EXT(cpu_data_ptr),%eax),%ecx
599 movl CPU_DESC_TABLEP(%ecx), %ecx
601 movw $(GDTSZ*8-1),0(%esp) /* set GDT size in GDT descriptor */
602 leal MP_GDT(%ecx),%edx
603 movl %edx,2(%esp) /* point to local GDT (linear addr) */
604 lgdt 0(%esp) /* load new GDT */
606 movw $(IDTSZ*8-1),0(%esp) /* set IDT size in IDT descriptor */
607 leal MP_IDT(%ecx),%edx
608 movl %edx,2(%esp) /* point to local IDT (linear addr) */
609 lidt 0(%esp) /* load new IDT */
611 movw $(KERNEL_LDT),%ax /* get LDT segment */
612 lldt %ax /* load LDT */
614 movw $(KERNEL_TSS),%ax
615 ltr %ax /* load new KTSS */
617 mov $(CPU_DATA_GS),%ax
621 * Get stack top from pre-cpu data and switch
623 POSTCODE(SVSTART_STACK_SWITCH);
625 movl %gs:CPU_INT_STACK_TOP,%esp
626 xorl %ebp,%ebp /* for completeness */
628 movl $0,%eax /* unlock start_lock */
629 xchgl %eax,EXT(start_lock) /* since we are no longer using */
630 /* bootstrap stack */
631 POSTCODE(SVSTART_EXIT);
633 call EXT(i386_init_slave) /* start MACH */
638 * Convert a descriptor from fake to real format.
640 * Calls from assembly code:
641 * %ebx = return address (physical) CANNOT USE STACK
642 * %esi = descriptor table address (physical)
643 * %ecx = number of descriptors
646 * 0(%esp) = return address
647 * 4(%esp) = descriptor table address (physical)
648 * 8(%esp) = number of descriptors
650 * Fake descriptor format:
651 * bytes 0..3 base 31..0
652 * bytes 4..5 limit 15..0
653 * byte 6 access byte 2 | limit 19..16
654 * byte 7 access byte 1
656 * Real descriptor format:
657 * bytes 0..1 limit 15..0
658 * bytes 2..3 base 15..0
660 * byte 5 access byte 1
661 * byte 6 access byte 2 | limit 19..16
666 * bytes 4..5 selector
667 * byte 6 word count << 4 (to match fake descriptor)
668 * byte 7 access byte 1
671 * bytes 0..1 offset 15..0
672 * bytes 2..3 selector
674 * byte 5 access byte 1
675 * bytes 6..7 offset 31..16
679 pushl %ebp /* set up */
680 movl %esp,%ebp /* stack frame */
681 pushl %esi /* save registers */
683 movl B_ARG0,%esi /* point to first descriptor */
684 movl B_ARG1,%ecx /* get number of descriptors */
685 lea 0f,%ebx /* get return address */
686 jmp fix_desc_common /* call internal routine */
687 0: popl %ebx /* restore registers */
689 leave /* pop stack frame */
694 movw 6(%esi),%dx /* get access byte */
697 cmpb $0x04,%al /* gate or descriptor? */
701 movl 0(%esi),%eax /* get base in eax */
702 rol $16,%eax /* swap 15..0 with 31..16 */
703 /* (15..0 in correct place) */
704 movb %al,%dl /* combine bits 23..16 with ACC1 */
706 movb %ah,7(%esi) /* store bits 31..24 in correct place */
707 movw 4(%esi),%ax /* move limit bits 0..15 to word 0 */
708 movl %eax,0(%esi) /* store (bytes 0..3 correct) */
709 movw %dx,4(%esi) /* store bytes 4..5 */
714 movw 4(%esi),%ax /* get selector */
715 shrb $4,%dl /* shift word count to proper place */
716 movw %dx,4(%esi) /* store word count / ACC1 */
717 movw 2(%esi),%dx /* get offset 16..31 */
718 movw %dx,6(%esi) /* store in correct place */
719 movw %ax,2(%esi) /* store selector in correct place */
721 addl $8,%esi /* bump to next descriptor */
723 jmp *%ebx /* all done */
726 * put arg in kbd leds and spin a while
730 #define K_CMD_LEDS 0xed
731 #define K_STATUS 0x64
732 #define K_IBUF_FULL 0x02 /* input (to kbd) buffer full */
733 #define K_OBUF_FULL 0x01 /* output (from kbd) buffer full */
736 mov S_ARG0,%cl /* save led value */
738 0: inb $(K_STATUS),%al /* get kbd status */
739 testb $(K_IBUF_FULL),%al /* input busy? */
740 jne 0b /* loop until not */
742 mov $(K_CMD_LEDS),%al /* K_CMD_LEDS */
743 outb %al,$(K_RDWR) /* to kbd */
745 0: inb $(K_STATUS),%al /* get kbd status */
746 testb $(K_OBUF_FULL),%al /* output present? */
747 je 0b /* loop if not */
749 inb $(K_RDWR),%al /* read status (and discard) */
751 0: inb $(K_STATUS),%al /* get kbd status */
752 testb $(K_IBUF_FULL),%al /* input busy? */
753 jne 0b /* loop until not */
755 mov %cl,%al /* move led value */
756 outb %al,$(K_RDWR) /* to kbd */
758 movl $10000000,%ecx /* spin */
761 loop 0b /* a while */