/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <platforms.h>
#include <mach_kdb.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <assym.s>

#define CX(addr,reg)    addr(,reg,4)

#include <i386/mp.h>
#include <i386/mp_slave_boot.h>
#include <i386/cpuid.h>

/*
 * GAS won't handle an intersegment jump with a relocatable offset.
 */
#define LJMP(segment,address)   \
        .byte   0xea            ;\
        .long   address         ;\
        .word   segment
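
/*
 * For illustration, the bytes LJMP(segment, address) emits: with a
 * hypothetical LJMP(0x08, 0x00100000) the macro hand-assembles the
 * 7-byte far jump "ljmpl $0x08, $0x00100000" as
 *
 *        ea 00 00 10 00 08 00
 *
 * i.e. opcode 0xea, then the 32-bit offset and the 16-bit selector,
 * each little-endian, exactly what the .byte/.long/.word above lay down.
 */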


#define PA(addr)  (addr)
#define VA(addr)  (addr)

/*
 * Interrupt and bootup stack for initial processor.
 */

/* in the __HIB section since the hibernate restore code uses this stack. */
        .section __HIB, __data
        .align  12

        .globl  EXT(low_intstack)
EXT(low_intstack):
        .globl  EXT(gIOHibernateRestoreStack)
EXT(gIOHibernateRestoreStack):

        .set    ., .+INTSTACK_SIZE

        .globl  EXT(low_eintstack)
EXT(low_eintstack):
        .globl  EXT(gIOHibernateRestoreStackEnd)
EXT(gIOHibernateRestoreStackEnd):

/*
 * Pointers to GDT and IDT. These contain linear addresses.
 */
        .align  ALIGN
        .globl  EXT(gdtptr)
        /* align below properly */
        .word   0
LEXT(gdtptr)
        .word   Times(8,GDTSZ)-1
        .long   EXT(master_gdt)

        .align  ALIGN
        .globl  EXT(idtptr)
        /* align below properly */
        .word   0
LEXT(idtptr)
        .word   Times(8,IDTSZ)-1
        .long   EXT(master_idt)
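
/*
 * The operand of lgdt/lidt is a 6-byte pseudo-descriptor; the extra
 * .word 0 above keeps its 32-bit base field naturally aligned. As a
 * sketch only (no such struct exists in this file), the layout in C:
 *
 *        #include <stdint.h>
 *
 *        struct pseudo_descriptor {
 *                uint16_t limit;   // size of the table in bytes, minus 1
 *                uint32_t base;    // linear address of the table
 *        } __attribute__((packed));
 *
 * so gdtptr corresponds to { 8*GDTSZ - 1, master_gdt }.
 */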

/* back to the regular __DATA section. */

        .section __DATA, __data

/*
 * Stack for last-gasp double-fault handler.
 */
        .align  12
        .globl  EXT(df_task_stack)
EXT(df_task_stack):
        .set    ., .+INTSTACK_SIZE
        .globl  EXT(df_task_stack_end)
EXT(df_task_stack_end):


/*
 * Stack for machine-check handler.
 */
        .align  12
        .globl  EXT(mc_task_stack)
EXT(mc_task_stack):
        .set    ., .+INTSTACK_SIZE
        .globl  EXT(mc_task_stack_end)
EXT(mc_task_stack_end):


#if MACH_KDB
/*
 * Kernel debugger stack for each processor.
 */
        .align  12
        .globl  EXT(db_stack_store)
EXT(db_stack_store):
        .set    ., .+(INTSTACK_SIZE*MAX_CPUS)

/*
 * Stack for last-ditch debugger task for each processor.
 */
        .align  12
        .globl  EXT(db_task_stack_store)
EXT(db_task_stack_store):
        .set    ., .+(INTSTACK_SIZE*MAX_CPUS)

/*
 * per-processor kernel debugger stacks
 */
        .align  ALIGN
        .globl  EXT(kgdb_stack_store)
EXT(kgdb_stack_store):
        .set    ., .+(INTSTACK_SIZE*MAX_CPUS)
#endif  /* MACH_KDB */

        .data
physfree:
        .long   0       /* phys addr of next free page */

        .globl  EXT(IdlePTD)
EXT(IdlePTD):
        .long   0       /* phys addr of kernel PTD */
#ifdef PAE
        .globl  EXT(IdlePDPT)
EXT(IdlePDPT):
        .long   0       /* phys addr of kernel PDPT */
#endif
#ifdef X86_64
        .globl  EXT(IdlePML4)
EXT(IdlePML4):
        .long   0
        .globl  EXT(IdlePDPT64)
EXT(IdlePDPT64):
        .long   0
#endif

KPTphys:
        .long   0       /* phys addr of kernel page tables */

        .globl  EXT(KernelRelocOffset)
EXT(KernelRelocOffset):
        .long   0       /* Kernel relocation offset */


/* Some handy macros */

#define ALLOCPAGES(npages)                          \
        movl    PA(physfree), %esi               ;  \
        movl    $((npages) * PAGE_SIZE), %eax    ;  \
        addl    %esi, %eax                       ;  \
        movl    %eax, PA(physfree)               ;  \
        movl    %esi, %edi                       ;  \
        movl    $((npages) * PAGE_SIZE / 4),%ecx ;  \
        xorl    %eax,%eax                        ;  \
        cld                                      ;  \
        rep                                      ;  \
        stosl
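
/*
 * ALLOCPAGES is a bump allocator over physfree that zero-fills what it
 * hands out (the rep/stosl). A C sketch of the same logic, assuming
 * 4K pages and that physical memory is directly addressable here
 * (it is: paging is still off):
 *
 *        #include <stdint.h>
 *        #include <string.h>
 *
 *        #define PAGE_SIZE 4096                  // i386 page size
 *        extern uint32_t physfree;               // phys addr of next free page
 *
 *        static void *allocpages(unsigned npages)
 *        {
 *                void *page = (void *)(uintptr_t)physfree;  // like %esi
 *                physfree += npages * PAGE_SIZE;            // bump the cursor
 *                memset(page, 0, npages * PAGE_SIZE);       // zero, like rep stosl
 *                return page;
 *        }
 */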

/*
 * fillkpt
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 */
#define fillkpt(base, prot)               \
        shll    $(PTEINDX),%ebx        ;  \
        addl    base,%ebx              ;  \
        orl     $(PTE_V) ,%eax         ;  \
        orl     prot,%eax              ;  \
1:      movl    %eax,(%ebx)            ;  \
        addl    $(PAGE_SIZE),%eax      ;  /* increment physical address */ \
        addl    $(PTESIZE),%ebx        ;  /* next pte */ \
        loop    1b

/*
 * fillkptphys(prot)
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define fillkptphys(prot)                 \
        movl    %eax, %ebx             ;  \
        shrl    $(PAGE_SHIFT), %ebx    ;  \
        fillkpt(PA(KPTphys), prot)
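
/*
 * In C terms, fillkpt writes ecx consecutive entries into a page
 * table, mapping successive physical pages with the given protection.
 * A sketch, assuming non-PAE 32-bit entries (PTESIZE == 4) and the
 * standard hardware valid bit for PTE_V:
 *
 *        #include <stdint.h>
 *
 *        #define PAGE_SIZE 4096
 *        #define PTE_V     0x1       // present/valid (hardware P bit)
 *
 *        static void fillkpt(uint32_t *base, uint32_t prot,
 *                            uint32_t phys, unsigned idx, unsigned npages)
 *        {
 *                for (unsigned i = 0; i < npages; i++)
 *                        // each entry: frame address | valid | prot bits
 *                        base[idx + i] = (phys + i * PAGE_SIZE) | PTE_V | prot;
 *        }
 *
 * fillkptphys(prot) is the special case idx == phys >> PAGE_SHIFT,
 * i.e. an identity (virtual == physical) mapping into KPTphys.
 */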

/*
 * BSP CPU starts here.
 * eax points to kernbootstruct
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */
        .text
        .align  ALIGN
        .globl  EXT(_start)
        .globl  EXT(_pstart)
LEXT(_start)
LEXT(_pstart)
        mov     %ds, %bx
        mov     %bx, %es
        mov     %eax, %ebp              // Move kernbootstruct to ebp
        POSTCODE(_PSTART_ENTRY)
        movl    KADDR(%ebp), %ebx       // Load boot image phys addr
        movl    %ebx, %edx              // Set edx with boot load phys addr
        addl    KSIZE(%ebp), %edx       // Add boot image size
        addl    $(NBPG-1), %edx         // Round up to a page boundary
        andl    $(-NBPG), %edx          // Set edx to first free page
        movl    %edx, %esp              // Set temporary stack
        addl    $(NBPG), %esp           // add page size
        call    Ls1
Ls1:    popl    %esi                    // Get return address
        cmpl    $(PA(Ls1)), %esi        // Compare with static physical addr
        je      EXT(pstart)             // Branch if equal
        subl    $(PA(Ls1)), %esi        // Extract relocation offset
        movl    %esi, %esp              // Store relocation offset in esp
        leal    (PA(Lreloc_start))(%esp),%esi
                                        // Set esi to reloc_start boot phys addr
        movl    %edx, %edi              // Set edi to first free page
        movl    $(Lreloc_end-Lreloc_start), %ecx
                                        // Set ecx to copy code size
        cld                             // count up
        rep
        movsb                           // copy reloc copy code
        wbinvd                          // Write back and invalidate cache
        movl    %ebx, %esi              // Set esi to kernbootstruct kaddr
        movl    KADDR(%ebp), %edi       // Load boot image phys addr
        subl    %esp, %edi              // Adjust to static phys addr
        movl    KSIZE(%ebp), %ecx       // Set ecx to kernbootstruct ksize
        addl    $(NBPG-1), %ecx         // Round ksize up to a whole page
        andl    $(-NBPG), %ecx          // (page-aligned byte count)
        sarl    $2, %ecx                // Convert byte count to dword count
        movl    %esp, (PA(EXT(KernelRelocOffset)))(%esp)
                                        // Store relocation offset
        movl    %edi, KADDR(%ebp)       // Relocate kaddr in kernbootstruct
        subl    %esp, MEMORYMAP(%ebp)   // And relocate MemoryMap
        subl    %esp, DEVICETREEP(%ebp) // And relocate deviceTreeP
        subl    %esp, %ebp              // Set ebp with relocated phys addr
        jmp     *%edx                   // Branch to relocated copy code
Lreloc_start:
        POSTCODE(_PSTART_RELOC)
        rep
        movsl                           // Copy boot image to BASE_KERNEL_PADDR
        wbinvd                          // Write back and invalidate cache
        movl    $(PA(EXT(pstart))), %edx // Set branch target
        jmp     *%edx                   // Jump to pstart phys addr
Lreloc_end:
        /* NOTREACHED */
        hlt
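
/*
 * Two idioms above are worth spelling out. The call/pop pair loads the
 * run-time address of Ls1 into %esi; comparing it against the
 * link-time PA(Ls1) yields the relocation offset. And the addl/andl
 * pair rounds up to a page boundary, since -NBPG == ~(NBPG-1); in C,
 * assuming NBPG is a power of two:
 *
 *        // round x up to the next multiple of NBPG
 *        #define ROUND_PAGE(x)   (((x) + NBPG - 1) & ~(NBPG - 1))
 *
 * e.g. with NBPG == 4096, ROUND_PAGE(0x1001) == 0x2000 and
 * ROUND_PAGE(0x2000) == 0x2000.
 */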

        .text
        .globl __start
        .set __start, PA(EXT(_pstart))

/*
 * BSP CPU continues here after possible relocation.
 * ebp points to kernbootstruct
 */
        .align  ALIGN
        .globl  EXT(pstart)
LEXT(pstart)
        mov     %ebp, %ebx              /* get pointer to kernbootstruct */

        POSTCODE(PSTART_ENTRY)

        mov     $0,%ax                  /* fs must be zeroed; */
        mov     %ax,%fs                 /* some bootstrappers don't do this */
        mov     %ax,%gs

/*
 * Get startup parameters.
 */
        movl    KADDR(%ebx), %eax
        addl    KSIZE(%ebx), %eax
        addl    $(NBPG-1),%eax
        andl    $(-NBPG), %eax
        movl    %eax, PA(physfree)
        cld

/* allocate kernel page table pages */
        ALLOCPAGES(NKPT)
        movl    %esi,PA(KPTphys)

#ifdef X86_64
/* allocate PML4 page */
        ALLOCPAGES(1)
        movl    %esi,EXT(IdlePML4)
/* allocate new 3rd level directory page */
        ALLOCPAGES(1)
        movl    %esi,EXT(IdlePDPT64)
#endif

#ifdef PAE
/* allocate Page Table Directory Page */
        ALLOCPAGES(1)
        movl    %esi,PA(EXT(IdlePDPT))
#endif

/* allocate kernel page directory page */
        ALLOCPAGES(NPGPTD)
        movl    %esi,PA(EXT(IdlePTD))

/* map from zero to end of kernel */
        xorl    %eax,%eax
        movl    PA(physfree),%ecx
        shrl    $(PAGE_SHIFT),%ecx
        fillkptphys( $(PTE_W) )

/* map page directory */
#ifdef PAE
        movl    PA(EXT(IdlePDPT)), %eax
        movl    $1, %ecx
        fillkptphys( $(PTE_W) )

        movl    PA(EXT(IdlePDPT64)), %eax
        movl    $1, %ecx
        fillkptphys( $(PTE_W) )
#endif
        movl    PA(EXT(IdlePTD)),%eax
        movl    $(NPGPTD), %ecx
        fillkptphys( $(PTE_W) )

/* install a pde for temp double map of bottom of VA */
        movl    PA(KPTphys),%eax
        xorl    %ebx,%ebx
        movl    $(NKPT), %ecx
        fillkpt(PA(EXT(IdlePTD)), $(PTE_W))

/* install pde's for page tables */
        movl    PA(KPTphys),%eax
        movl    $(KPTDI),%ebx
        movl    $(NKPT),%ecx
        fillkpt(PA(EXT(IdlePTD)), $(PTE_W))

/* install a pde recursively mapping page directory as a page table */
        movl    PA(EXT(IdlePTD)),%eax
        movl    $(PTDPTDI),%ebx
        movl    $(NPGPTD),%ecx
        fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
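
/*
 * Pointing pde slot PTDPTDI back at the page directory makes the
 * directory double as a page table, so once paging is on every page
 * table appears in one contiguous virtual window. A sketch of the
 * resulting arithmetic, assuming the non-PAE layout (4K pages,
 * 10/10/12 address split); PTDPTDI is the same index used above:
 *
 *        #include <stdint.h>
 *
 *        // base of the window where the page tables become visible
 *        #define PTE_WINDOW      ((uintptr_t)PTDPTDI << 22)
 *
 *        // virtual address of the pte that maps va
 *        static uint32_t *pte_for(uintptr_t va)
 *        {
 *                return (uint32_t *)(PTE_WINDOW + (va >> 12) * 4);
 *        }
 *
 * In particular, the directory itself shows up at
 * PTE_WINDOW + (PTDPTDI << 12).
 */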

#ifdef PAE
        movl    PA(EXT(IdlePTD)), %eax
        xorl    %ebx, %ebx
        movl    $(NPGPTD), %ecx
        fillkpt(PA(EXT(IdlePDPT)), $0)
#endif

/* install a pde page for commpage use up in high memory */

        movl    PA(physfree),%eax       /* grab next phys page */
        movl    %eax,%ebx
        addl    $(PAGE_SIZE),%ebx
        movl    %ebx,PA(physfree)       /* show next free phys pg */
        movl    $(COMM_PAGE_BASE_ADDR),%ebx
        shrl    $(PDESHIFT),%ebx        /* index into pde page */
        movl    $(1), %ecx              /* # pdes to store */
        fillkpt(PA(EXT(IdlePTD)), $(PTE_W|PTE_U)) /* user has access! */
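
/*
 * PTE_U is what lets user mode read the commpage. For reference, the
 * low flag bits of an i386 pte/pde (hardware names P, R/W, U/S; the
 * macro values are assumed to match the hardware bits):
 *
 *	bit 0	PTE_V	present/valid
 *	bit 1	PTE_W	writable
 *	bit 2	PTE_U	user accessible (else supervisor-only)
 *
 * so $(PTE_W|PTE_U) yields a present, writable, user-visible mapping.
 */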

        movl    PA(physfree),%edi
        movl    %edi,PA(EXT(first_avail)) /* save first available phys addr */

#ifdef PAE
/*
 * We steal 0x4000 for a temp pdpt and 0x5000-0x8000
 * for temp pde pages in the PAE case. Once we are
 * running at the proper virtual address we switch to
 * the PDPT/PDE's the master is using.
 */

/* clear pdpt page to be safe */
        xorl    %eax, %eax
        movl    $(PAGE_SIZE),%ecx
        movl    $(0x4000),%edi
        cld
        rep
        stosb

/* build temp pdpt */
        movl    $(0x5000), %eax
        xorl    %ebx, %ebx
        movl    $(NPGPTD), %ecx
        fillkpt($(0x4000), $0)

/* copy the NPGPTD pages of pdes */
        movl    PA(EXT(IdlePTD)),%eax
        movl    $0x5000,%ebx
        movl    $((PTEMASK+1)*NPGPTD),%ecx
1:      movl    0(%eax),%edx
        movl    %edx,0(%ebx)
        movl    4(%eax),%edx
        movl    %edx,4(%ebx)
        addl    $(PTESIZE),%eax
        addl    $(PTESIZE),%ebx
        loop    1b
#else
/* create temp pde for slaves to use;
   use unused lomem page and copy in IdlePTD */
        movl    PA(EXT(IdlePTD)),%eax
        movl    $0x4000,%ebx
        movl    $(PTEMASK+1),%ecx
1:      movl    0(%eax),%edx
        movl    %edx,0(%ebx)
        addl    $(PTESIZE),%eax
        addl    $(PTESIZE),%ebx
        loop    1b
#endif

        POSTCODE(PSTART_PAGE_TABLES)

/*
 * Fix initial descriptor tables.
 */
        lea     PA(EXT(master_idt)),%esi /* fix IDT */
        movl    $(IDTSZ),%ecx
        movl    $(PA(fix_idt_ret)),%ebx
        jmp     fix_desc_common         /* (cannot use stack) */
fix_idt_ret:

        lea     PA(EXT(master_gdt)),%esi /* fix GDT */
        movl    $(GDTSZ),%ecx
        movl    $(PA(fix_gdt_ret)),%ebx
        jmp     fix_desc_common         /* (cannot use stack) */
fix_gdt_ret:

        lea     PA(EXT(master_ldt)),%esi /* fix LDT */
        movl    $(LDTSZ),%ecx
        movl    $(PA(fix_ldt_ret)),%ebx
        jmp     fix_desc_common         /* (cannot use stack) */
fix_ldt_ret:

        lgdt    PA(EXT(gdtptr))         /* load GDT */
        lidt    PA(EXT(idtptr))         /* load IDT */

        POSTCODE(PSTART_BEFORE_PAGING)

/*
 * Turn on paging.
 */
#ifdef PAE
        movl    PA(EXT(IdlePDPT)), %eax
        movl    %eax, %cr3

        movl    %cr4, %eax
        orl     $(CR4_PAE|CR4_PGE|CR4_MCE), %eax
        movl    %eax, %cr4

        movl    $0x80000001, %eax
        cpuid
        and     $(CPUID_EXTFEATURE_XD), %edx /* clear all but bit 20 */
        cmp     $0, %edx                /* skip setting NXE if bit 20 is clear */
        je      1f

        movl    $(MSR_IA32_EFER), %ecx  /* MSR number in ecx */
        rdmsr                           /* MSR value returned in edx:eax */
        orl     $(MSR_IA32_EFER_NXE), %eax /* Set NXE bit in low 32 bits */
        wrmsr                           /* Update Extended Feature Enable reg */
1:

#else
        movl    PA(EXT(IdlePTD)), %eax
        movl    %eax,%cr3
#endif
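
/*
 * The cpuid sequence above probes for the XD/NX bit (bit 20 of %edx
 * for leaf 0x80000001) before touching EFER, since setting EFER.NXE
 * on a CPU without XD would #GP on the wrmsr. A user-mode C sketch of
 * the same probe, for illustration only:
 *
 *        #include <stdint.h>
 *        #include <stdio.h>
 *
 *        int main(void)
 *        {
 *                uint32_t eax, ebx, ecx, edx;
 *                // leaf 0x80000001: extended feature flags
 *                __asm__ volatile ("cpuid"
 *                    : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
 *                    : "a" (0x80000001U), "c" (0U));
 *                printf("XD/NX %ssupported\n",
 *                    (edx & (1U << 20)) ? "" : "not ");
 *                return 0;
 *        }
 */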

        movl    %cr0,%eax
        orl     $(CR0_PG|CR0_WP|CR0_PE),%eax
        movl    %eax,%cr0               /* to enable paging */

        LJMP(KERNEL_CS,EXT(vstart))     /* switch to kernel code segment */

/*
 * BSP is now running with correct addresses.
 */
LEXT(vstart)
        POSTCODE(VSTART_ENTRY)

        mov     $(KERNEL_DS),%ax        /* set kernel data segment */
        mov     %ax,%ds
        mov     %ax,%es
        mov     %ax,%ss
        mov     %ax,EXT(master_ktss)+TSS_SS0 /* set kernel stack segment */
                                        /* for traps to kernel */

#if MACH_KDB
        mov     %ax,EXT(master_dbtss)+TSS_SS0 /* likewise for debug task switch */
        mov     %cr3,%eax               /* get PDBR into debug TSS */
        mov     %eax,EXT(master_dbtss)+TSS_PDBR
        mov     $0,%eax
#endif
        mov     %cr3,%eax               /* get PDBR into DF TSS */
        mov     %eax,EXT(master_dftss)+TSS_PDBR
        mov     %eax,EXT(master_mctss)+TSS_PDBR

        movw    $(KERNEL_LDT),%ax       /* get LDT segment */
        lldt    %ax                     /* load LDT */
#if MACH_KDB
        mov     %ax,EXT(master_ktss)+TSS_LDT /* store LDT in two TSS, as well... */
        mov     %ax,EXT(master_dbtss)+TSS_LDT /* ...matters if we switch tasks */
#endif
        movw    $(KERNEL_TSS),%ax
        ltr     %ax                     /* set up KTSS */

        mov     $(CPU_DATA_GS),%ax
        mov     %ax,%gs

        POSTCODE(VSTART_STACK_SWITCH)

        lea     EXT(low_eintstack),%esp /* switch to the bootup stack */
        pushl   %ebp                    /* push boot args addr */
        xorl    %ebp,%ebp               /* clear stack frame ptr */

        POSTCODE(VSTART_EXIT)

        call    EXT(i386_init)          /* run C code */
        /*NOTREACHED*/
        hlt


/*
 * AP (slave) CPUs enter here.
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */
        .align  ALIGN
        .globl  EXT(slave_pstart)
LEXT(slave_pstart)
        cli                             /* disable interrupts, so we don't */
                                        /* need IDT for a while */

        POSTCODE(SLAVE_PSTART_ENTRY)
/*
 * Turn on paging.
 */
#ifdef PAE
        movl    %cr4, %eax
        orl     $(CR4_PAE|CR4_PGE|CR4_MCE), %eax
        movl    %eax, %cr4

        movl    $(MSR_IA32_EFER), %ecx  /* MSR number in ecx */
        rdmsr                           /* MSR value returned in edx:eax */
        orl     $(MSR_IA32_EFER_NXE), %eax /* Set NXE bit in low 32 bits */
        wrmsr                           /* Update Extended Feature Enable reg */
#endif
        movl    $(0x4000),%eax          /* tmp until we get mapped */
        movl    %eax,%cr3

        movl    %cr0,%eax
        orl     $(CR0_PG|CR0_WP|CR0_PE),%eax
        movl    %eax,%cr0               /* to enable paging */

        POSTCODE(SLAVE_PSTART_EXIT)

        movl    $(EXT(spag_start)),%edx /* first paged code address */
        jmp     *%edx                   /* flush prefetch queue */

/*
 * We are now paging, and can run with correct addresses.
 */
LEXT(spag_start)

        lgdt    PA(EXT(gdtptr))         /* load GDT */
        lidt    PA(EXT(idtptr))         /* load IDT */

        LJMP(KERNEL_CS,EXT(slave_vstart)) /* switch to kernel code segment */


/*
 * Slave is now running with correct addresses.
 */
LEXT(slave_vstart)

        POSTCODE(SLAVE_VSTART_ENTRY)

#ifdef PAE
        movl    PA(EXT(IdlePDPT)), %eax
        movl    %eax, %cr3
#else
        movl    PA(EXT(IdlePTD)), %eax
        movl    %eax, %cr3
#endif

        mov     $(KERNEL_DS),%ax        /* set kernel data segment */
        mov     %ax,%ds
        mov     %ax,%es
        mov     %ax,%ss

/*
 * We're not quite through with the boot stack, but we need to reset
 * the stack pointer to the correct virtual address, offset above the
 * address of pstart.
 */
        movl    $(VA(MP_BOOTSTACK+MP_BOOT+4)), %esp

/*
 * Switch to the per-cpu descriptor tables
 */
        POSTCODE(SLAVE_VSTART_DESC_INIT)

        CPU_NUMBER_FROM_LAPIC(%eax)
        movl    CX(EXT(cpu_data_ptr),%eax),%ecx

        movw    $(GDTSZ*8-1),0(%esp)    /* set GDT size in GDT descriptor */
        movl    CPU_DESC_INDEX+CDI_GDT(%ecx),%edx
        movl    %edx,2(%esp)            /* point to local GDT (linear addr) */
        lgdt    0(%esp)                 /* load new GDT */

        movw    $(IDTSZ*8-1),0(%esp)    /* set IDT size in IDT descriptor */
        movl    CPU_DESC_INDEX+CDI_IDT(%ecx),%edx
        movl    %edx,2(%esp)            /* point to local IDT (linear addr) */
        lidt    0(%esp)                 /* load new IDT */

        movw    $(KERNEL_LDT),%ax       /* get LDT segment */
        lldt    %ax                     /* load LDT */

        movw    $(KERNEL_TSS),%ax
        ltr     %ax                     /* load new KTSS */

        mov     $(CPU_DATA_GS),%ax
        mov     %ax,%gs

/*
 * Get stack top from per-cpu data and switch
 */
        POSTCODE(SLAVE_VSTART_STACK_SWITCH)

        movl    %gs:CPU_INT_STACK_TOP,%esp
        xorl    %ebp,%ebp               /* for completeness */

        POSTCODE(SLAVE_VSTART_EXIT)

        call    EXT(i386_init_slave)    /* start MACH */
        /*NOTREACHED*/
        hlt

/*
 * Convert a descriptor from fake to real format.
 *
 * Calls from assembly code:
 *	%ebx = return address (physical) CANNOT USE STACK
 *	%esi = descriptor table address (physical)
 *	%ecx = number of descriptors
 *
 * Calls from C:
 *	0(%esp) = return address
 *	4(%esp) = descriptor table address (physical)
 *	8(%esp) = number of descriptors
 *
 * Fake descriptor format:
 *	bytes 0..3		base 31..0
 *	bytes 4..5		limit 15..0
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			access byte 1
 *
 * Real descriptor format:
 *	bytes 0..1		limit 15..0
 *	bytes 2..3		base 15..0
 *	byte  4			base 23..16
 *	byte  5			access byte 1
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			base 31..24
 *
 * Fake gate format:
 *	bytes 0..3		offset
 *	bytes 4..5		selector
 *	byte  6			word count << 4 (to match fake descriptor)
 *	byte  7			access byte 1
 *
 * Real gate format:
 *	bytes 0..1		offset 15..0
 *	bytes 2..3		selector
 *	byte  4			word count
 *	byte  5			access byte 1
 *	bytes 6..7		offset 31..16
 */
        .globl  EXT(fix_desc)
LEXT(fix_desc)
        pushl   %ebp                    /* set up */
        movl    %esp,%ebp               /* stack frame */
        pushl   %esi                    /* save registers */
        pushl   %ebx
        movl    B_ARG0,%esi             /* point to first descriptor */
        movl    B_ARG1,%ecx             /* get number of descriptors */
        lea     0f,%ebx                 /* get return address */
        jmp     fix_desc_common         /* call internal routine */
0:      popl    %ebx                    /* restore registers */
        popl    %esi
        leave                           /* pop stack frame */
        ret                             /* return */

fix_desc_common:
0:
        movw    6(%esi),%dx             /* get access byte */
        movb    %dh,%al
        andb    $0x14,%al
        cmpb    $0x04,%al               /* gate or descriptor? */
        je      1f

/* descriptor */
        movl    0(%esi),%eax            /* get base in eax */
        rol     $16,%eax                /* swap 15..0 with 31..16 */
                                        /* (15..0 in correct place) */
        movb    %al,%dl                 /* combine bits 23..16 with ACC1 */
                                        /* in dh/dl */
        movb    %ah,7(%esi)             /* store bits 31..24 in correct place */
        movw    4(%esi),%ax             /* move limit bits 0..15 to word 0 */
        movl    %eax,0(%esi)            /* store (bytes 0..3 correct) */
        movw    %dx,4(%esi)             /* store bytes 4..5 */
        jmp     2f

/* gate */
1:
        movw    4(%esi),%ax             /* get selector */
        shrb    $4,%dl                  /* shift word count to proper place */
        movw    %dx,4(%esi)             /* store word count / ACC1 */
        movw    2(%esi),%dx             /* get offset 16..31 */
        movw    %dx,6(%esi)             /* store in correct place */
        movw    %ax,2(%esi)             /* store selector in correct place */
2:
        addl    $8,%esi                 /* bump to next descriptor */
        loop    0b                      /* repeat */
        jmp     *%ebx                   /* all done */
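
/*
 * A C rendering of the descriptor branch of the shuffle above, as a
 * sketch only (the real routine must run without a stack). Byte
 * indices follow the fake/real layouts documented before fix_desc:
 *
 *        #include <stdint.h>
 *        #include <string.h>
 *
 *        static void fix_one_desc(uint8_t d[8])
 *        {
 *                uint8_t fake[8];
 *                memcpy(fake, d, 8);
 *                d[0] = fake[4];         // limit 7..0
 *                d[1] = fake[5];         // limit 15..8
 *                d[2] = fake[0];         // base 7..0
 *                d[3] = fake[1];         // base 15..8
 *                d[4] = fake[2];         // base 23..16
 *                d[5] = fake[7];         // access byte 1
 *                d[6] = fake[6];         // access byte 2 | limit 19..16
 *                d[7] = fake[3];         // base 31..24
 *        }
 *
 * Note byte 6 ends up unchanged, which is why the assembly never
 * rewrites it.
 */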

/*
 * put arg in kbd leds and spin a while
 * eats eax, ecx, edx
 */
#define K_RDWR          0x60
#define K_CMD_LEDS      0xed
#define K_STATUS        0x64
#define K_IBUF_FULL     0x02    /* input (to kbd) buffer full */
#define K_OBUF_FULL     0x01    /* output (from kbd) buffer full */

ENTRY(set_kbd_leds)
        mov     S_ARG0,%cl              /* save led value */

0:      inb     $(K_STATUS),%al         /* get kbd status */
        testb   $(K_IBUF_FULL),%al      /* input busy? */
        jne     0b                      /* loop until not */

        mov     $(K_CMD_LEDS),%al       /* K_CMD_LEDS */
        outb    %al,$(K_RDWR)           /* to kbd */

0:      inb     $(K_STATUS),%al         /* get kbd status */
        testb   $(K_OBUF_FULL),%al      /* output present? */
        je      0b                      /* loop if not */

        inb     $(K_RDWR),%al           /* read status (and discard) */

0:      inb     $(K_STATUS),%al         /* get kbd status */
        testb   $(K_IBUF_FULL),%al      /* input busy? */
        jne     0b                      /* loop until not */

        mov     %cl,%al                 /* move led value */
        outb    %al,$(K_RDWR)           /* to kbd */

        movl    $10000000,%ecx          /* spin */
0:      nop
        nop
        loop    0b                      /* a while */

        ret
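
/*
 * The same conversation in C, for readers unfamiliar with the 8042:
 * poll the status port until the input buffer drains, send command
 * 0xed to the data port, wait for and discard the reply byte, then
 * send the LED mask. A sketch using assumed inb/outb helpers (in user
 * space this also needs I/O-port privilege, e.g. iopl):
 *
 *        #include <stdint.h>
 *
 *        enum { K_RDWR = 0x60, K_STATUS = 0x64, K_CMD_LEDS = 0xed,
 *               K_IBUF_FULL = 0x02, K_OBUF_FULL = 0x01 };
 *
 *        extern uint8_t inb(uint16_t port);      // assumed port helpers
 *        extern void outb(uint16_t port, uint8_t val);
 *
 *        static void set_kbd_leds_c(uint8_t leds)
 *        {
 *                while (inb(K_STATUS) & K_IBUF_FULL)     // wait to send
 *                        ;
 *                outb(K_RDWR, K_CMD_LEDS);               // "set LEDs" command
 *                while (!(inb(K_STATUS) & K_OBUF_FULL))  // wait for reply
 *                        ;
 *                (void)inb(K_RDWR);                      // read and discard it
 *                while (inb(K_STATUS) & K_IBUF_FULL)     // wait to send
 *                        ;
 *                outb(K_RDWR, leds);                     // scroll/num/caps mask
 *        }
 */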