/*
 * Source: apple/xnu release xnu-1228.7.58, file osfmk/i386/start.s
 * (mirrored via git.saurik.com; web-viewer header converted to a comment).
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58
59 #include <platforms.h>
60 #include <mach_kdb.h>
61
62 #include <i386/asm.h>
63 #include <i386/proc_reg.h>
64 #include <i386/postcode.h>
65 #include <assym.s>
66
/* CX(addr,reg): address the reg'th 32-bit entry of array `addr`
 * using scaled-index addressing: addr(,reg,4). */
67 #define CX(addr,reg) addr(,reg,4)
68
69 #include <i386/mp.h>
70 #include <i386/mp_slave_boot.h>
71 #include <i386/cpuid.h>
72
73 /*
74 * GAS won't handle an intersegment jump with a relocatable offset.
 * LJMP hand-assembles the far jump: opcode 0xEA, a 32-bit offset,
 * then a 16-bit segment selector. Used to reload %cs (e.g. to
 * KERNEL_CS after enabling paging).
75 */
76 #define LJMP(segment,address) \
77 .byte 0xea ;\
78 .long address ;\
79 .word segment
80
81
82
/*
 * Physical/virtual address annotation macros. The boot mapping here is
 * identity (VA == PA), so both expand to the plain address; they exist
 * to mark intent at each use site.
 */
83 #define PA(addr) (addr)
84 #define VA(addr) (addr)
86 /*
87 * Interrupt and bootup stack for initial processor.
 * INTSTACK_SIZE bytes, page aligned (.align 12 == 4KB boundary).
 * low_intstack labels the base; low_eintstack labels the end, which
 * is the initial %esp since the stack grows downward.
88 */
89
90 /* in the __HIB section since the hibernate restore code uses this stack. */
91 .section __HIB, __data
92 .align 12
93
94 .globl EXT(low_intstack)
95 EXT(low_intstack):
96 .globl EXT(gIOHibernateRestoreStack)
97 EXT(gIOHibernateRestoreStack):
98
/* reserve INTSTACK_SIZE bytes by advancing the location counter */
99 .set ., .+INTSTACK_SIZE
100
101 .globl EXT(low_eintstack)
102 EXT(low_eintstack:)
103 .globl EXT(gIOHibernateRestoreStackEnd)
104 EXT(gIOHibernateRestoreStackEnd):
105
106 /*
107 * Pointers to GDT and IDT. These contain linear addresses.
 * Each is the 6-byte pseudo-descriptor consumed by lgdt/lidt:
 * a 16-bit limit (table size in bytes minus 1, i.e. 8*entries-1)
 * followed by a 32-bit linear base address. The leading ".word 0"
 * pads so the 32-bit base field lands on a 4-byte boundary.
108 */
109 .align ALIGN
110 .globl EXT(gdtptr)
111 /* align below properly */
112 .word 0
113 LEXT(gdtptr)
114 .word Times(8,GDTSZ)-1
115 .long EXT(master_gdt)
116
117 .align ALIGN
118 .globl EXT(idtptr)
119 /* align below properly */
120 .word 0
121 LEXT(idtptr)
122 .word Times(8,IDTSZ)-1
123 .long EXT(master_idt)
124
125 /* back to the regular __DATA section. */
126
127 .section __DATA, __data
128
129 /*
130 * Stack for last-gasp double-fault handler.
 * Page aligned, INTSTACK_SIZE bytes; the *_end label marks the
 * stack top (initial %esp for the double-fault task).
131 */
132 .align 12
133 .globl EXT(df_task_stack)
134 EXT(df_task_stack):
135 .set ., .+INTSTACK_SIZE
136 .globl EXT(df_task_stack_end)
137 EXT(df_task_stack_end):
138
139
140 /*
141 * Stack for machine-check handler.
 * Same layout as the double-fault stack above.
142 */
143 .align 12
144 .globl EXT(mc_task_stack)
145 EXT(mc_task_stack):
146 .set ., .+INTSTACK_SIZE
147 .globl EXT(mc_task_stack_end)
148 EXT(mc_task_stack_end):
149
150
151 #if MACH_KDB
152 /*
153 * Kernel debugger stack for each processor.
 * One INTSTACK_SIZE stack per possible CPU (MAX_CPUS total),
 * reserved by advancing the location counter.
154 */
155 .align 12
156 .globl EXT(db_stack_store)
157 EXT(db_stack_store):
158 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
159
160 /*
161 * Stack for last-ditch debugger task for each processor.
162 */
163 .align 12
164 .globl EXT(db_task_stack_store)
165 EXT(db_task_stack_store):
166 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
167
168 /*
169 * per-processor kernel debugger stacks
170 */
171 .align ALIGN
172 .globl EXT(kgdb_stack_store)
173 EXT(kgdb_stack_store):
174 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
175 #endif /* MACH_KDB */
176
177 .data
/*
 * Early-boot bookkeeping variables. All hold physical addresses and
 * are filled in by the boot code below before paging is enabled.
 */
178 physfree:
179 .long 0 /* phys addr of next free page; bumped by ALLOCPAGES() */
180
181 .globl EXT(IdlePTD)
182 EXT(IdlePTD):
183 .long 0 /* phys addr of kernel PTD */
184 #ifdef PAE
185 .globl EXT(IdlePDPT)
186 EXT(IdlePDPT):
187 .long 0 /* phys addr of kernel PDPT */
188 #endif
189 #ifdef X86_64
190 .globl EXT(IdlePML4)
191 EXT(IdlePML4):
192 .long 0 /* phys addr of kernel PML4 (64-bit mode) */
193 .globl EXT(IdlePDPT64)
194 EXT(IdlePDPT64):
195 .long 0 /* phys addr of 64-bit 3rd-level directory */
196 #endif
197
198 KPTphys:
199 .long 0 /* phys addr of kernel page tables */
200
201 .globl EXT(KernelRelocOffset)
202 EXT(KernelRelocOffset):
203 .long 0 /* Kernel relocation offset (0 if loaded at link addr) */
204
205
206 /* Some handy macros */

/*
 * ALLOCPAGES(npages): carve npages pages off the physfree free-list
 * pointer and zero them.
 * On exit: %esi = physical base of the allocation; physfree advanced
 * past it. Clobbers %eax, %ecx, %edi and flags; assumes flat %es
 * (rep stosl stores through %es:%edi).
 */
208 #define ALLOCPAGES(npages) \
209 movl PA(physfree), %esi ; \
210 movl $((npages) * PAGE_SIZE), %eax ; \
211 addl %esi, %eax ; \
212 movl %eax, PA(physfree) ; \
213 movl %esi, %edi ; \
214 movl $((npages) * PAGE_SIZE / 4),%ecx ; \
215 xorl %eax,%eax ; \
216 cld ; \
217 rep ; \
218 stosl
219
220 /*
221 * fillkpt
222 * eax = page frame address
223 * ebx = index into page table
224 * ecx = how many pages to map
225 * base = base address of page dir/table
226 * prot = protection bits
 *
 * Writes ecx consecutive entries starting at base + (ebx << PTEINDX),
 * mapping successive physical pages from eax with PTE_V|prot set.
 * Clobbers eax, ebx, ecx and flags.
227 */
228 #define fillkpt(base, prot) \
229 shll $(PTEINDX),%ebx ; \
230 addl base,%ebx ; \
231 orl $(PTE_V) ,%eax ; \
232 orl prot,%eax ; \
233 1: movl %eax,(%ebx) ; \
234 addl $(PAGE_SIZE),%eax ; /* increment physical address */ \
235 addl $(PTESIZE),%ebx ; /* next pte */ \
236 loop 1b
237
238 /*
239 * fillkptphys(prot)
240 * eax = physical address
241 * ecx = how many pages to map
242 * prot = protection bits
 *
 * Identity-maps ecx pages starting at physical address eax into the
 * kernel page tables at KPTphys (the PTE index is derived from the
 * physical address itself). Clobbers eax, ebx, ecx and flags.
243 */
244 #define fillkptphys(prot) \
245 movl %eax, %ebx ; \
246 shrl $(PAGE_SHIFT), %ebx ; \
247 fillkpt(PA(KPTphys), prot)
248
249 /*
250 * BSP CPU start here.
251 * eax points to kernbootstruct
252 *
253 * Environment:
254 * protected mode, no paging, flat 32-bit address space.
255 * (Code/data/stack segments have base == 0, limit == 4G)
 *
 * If the image was not loaded at its statically-linked physical
 * address, the code below copies a small relocator (Lreloc_start..
 * Lreloc_end) to the first free page after the image, rewrites the
 * kernbootstruct pointers by the relocation offset, then runs the
 * relocator to move the whole image to its linked address before
 * jumping to pstart.
256 */
257 .text
258 .align ALIGN
259 .globl EXT(_start)
260 .globl EXT(_pstart)
261 LEXT(_start)
262 LEXT(_pstart)
263 mov %ds, %bx
264 mov %bx, %es
265 mov %eax, %ebp // Move kernbootstruct to ebp
266 POSTCODE(_PSTART_ENTRY)
267 movl KADDR(%ebp), %ebx // Load boot image phys addr
268 movl %ebx, %edx // Set edx with boot load phys addr
269 addl KSIZE(%ebp), %edx // Add boot image size
270 addl $(NBPG-1), %edx // Round to a page size
271 andl $(-NBPG), %edx // Set edx to first free page
272 movl %edx, %esp // Set temporary stack
273 addl $(NBPG), %esp // add page size
// call/pop discovers the actual runtime address of Ls1 so it can be
// compared with the link-time (static) address to detect relocation.
274 call Ls1
275 Ls1: popl %esi // Get return address
276 cmpl $(PA(Ls1)), %esi // Compare with static physical addr
277 je EXT(pstart) // Branch if equal (not relocated)
278 subl $(PA(Ls1)), %esi // Extract relocation offset
279 movl %esi, %esp // Store relocation offset in esp
280 leal (PA(Lreloc_start))(%esp),%esi
281 // Set esi to reloc_start boot phys addr
282 movl %edx, %edi // Set edi to first free page
283 movl $(Lreloc_end-Lreloc_start), %ecx
284 // Set ecx to copy code size
285 cld // count up
286 rep
287 movsb // copy reloc copy code
288 wbinvd // Write back and Invalidate cache
289 movl %ebx, %esi // Set esi to kernbootstruct kaddr
290 movl KADDR(%ebp), %edi // Load boot image phys addr
291 subl %esp, %edi // Adjust to static phys addr
292 movl KSIZE(%ebp), %ecx // Set ecx to kernbootstruct ksize
293 addl $(NBPG-1), %ecx // Add NBPG-1 to ecx
294 andl $(-NBPG), %ecx // Truncate ecx to a page aligned addr
295 shrl $2, %ecx // Divide ecx by 4 (movsl copies dwords)
296 movl %esp, (PA(EXT(KernelRelocOffset)))(%esp)
297 // Store relocation offset
298 movl %edi, KADDR(%ebp) // Relocate kaddr in kernbootstruct
299 subl %esp, MEMORYMAP(%ebp) // And relocate MemoryMap
300 subl %esp, DEVICETREEP(%ebp) // And relocate deviceTreeP
301 subl %esp, %ebp // Set ebp with relocated phys addr
302 jmp *%edx // Branch to relocated copy code
303 Lreloc_start:
304 POSTCODE(_PSTART_RELOC)
305 rep
306 movsl // Copy boot image at BASE_KERNEL_PADDR
307 wbinvd // Write back and Invalidate cache
308 movl $(PA(EXT(pstart))), %edx // Set branch target
309 jmp *%edx // Far jmp to pstart phys addr
310 Lreloc_end:
311 /* NOTREACHED */
312 hlt
313
314 .text
315 .globl __start
// Linker entry symbol: alias for the physical address of _pstart.
316 .set __start, PA(EXT(_pstart))
317
318 /*
319 * BSP CPU continues here after possible relocation.
320 * ebp points to kernbootstruct
 *
 * Builds the boot page tables (identity map from 0 to the end of the
 * kernel, page-directory self-map, commpage PDE), converts the boot
 * descriptor tables from fake to hardware format, enables paging
 * (PAE/NX where configured), then far-jumps to vstart in KERNEL_CS
 * and finally calls i386_init() on the bootup stack.
321 */
322 .align ALIGN
323 .globl EXT(pstart)
324 LEXT(pstart)
325 mov %ebp, %ebx /* get pointer to kernbootstruct */
326
327 POSTCODE(PSTART_ENTRY)
328
329 mov $0,%ax /* fs must be zeroed; */
330 mov %ax,%fs /* some bootstrappers don`t do this */
331 mov %ax,%gs
332
333 /*
334 * Get startup parameters.
 * physfree = page-rounded end of the loaded image.
335 */
336 movl KADDR(%ebx), %eax
337 addl KSIZE(%ebx), %eax
338 addl $(NBPG-1),%eax
339 andl $(-NBPG), %eax
340 movl %eax, PA(physfree)
341 cld
342
343 /* allocate kernel page table pages */
344 ALLOCPAGES(NKPT)
345 movl %esi,PA(KPTphys)
346
347 #ifdef X86_64
348 /* allocate PML4 page */
349 ALLOCPAGES(1)
350 movl %esi,EXT(IdlePML4)
351 /* allocate new 3rd level directory page */
352 ALLOCPAGES(1)
353 movl %esi,EXT(IdlePDPT64)
354 #endif
355
356 #ifdef PAE
357 /* allocate Page Table Directory Page */
358 ALLOCPAGES(1)
359 movl %esi,PA(EXT(IdlePDPT))
360 #endif
361
362 /* allocate kernel page directory page */
363 ALLOCPAGES(NPGPTD)
364 movl %esi,PA(EXT(IdlePTD))
365
366 /* map from zero to end of kernel */
367 xorl %eax,%eax
368 movl PA(physfree),%ecx
369 shrl $(PAGE_SHIFT),%ecx
370 fillkptphys( $(PTE_W) )
371
372 /* map page directory */
373 #ifdef PAE
374 movl PA(EXT(IdlePDPT)), %eax
375 movl $1, %ecx
376 fillkptphys( $(PTE_W) )
377
378 movl PA(EXT(IdlePDPT64)), %eax
379 movl $1, %ecx
380 fillkptphys( $(PTE_W) )
381 #endif
382 movl PA(EXT(IdlePTD)),%eax
383 movl $(NPGPTD), %ecx
384 fillkptphys( $(PTE_W) )
385
386 /* install a pde for temp double map of bottom of VA */
387 movl PA(KPTphys),%eax
388 xorl %ebx,%ebx
389 movl $(NKPT), %ecx
390 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
391
392 /* install pde's for page tables */
393 movl PA(KPTphys),%eax
394 movl $(KPTDI),%ebx
395 movl $(NKPT),%ecx
396 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
397
398 /* install a pde recursively mapping page directory as a page table */
399 movl PA(EXT(IdlePTD)),%eax
400 movl $(PTDPTDI),%ebx
401 movl $(NPGPTD),%ecx
402 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
403
404 #ifdef PAE
405 movl PA(EXT(IdlePTD)), %eax
406 xorl %ebx, %ebx
407 movl $(NPGPTD), %ecx
408 fillkpt(PA(EXT(IdlePDPT)), $0)
409 #endif
410
411 /* install a pde page for commpage use up in high memory */
412
413 movl PA(physfree),%eax /* grab next phys page */
414 movl %eax,%ebx
415 addl $(PAGE_SIZE),%ebx
416 movl %ebx,PA(physfree) /* show next free phys pg */
417 movl $(COMM_PAGE_BASE_ADDR),%ebx
418 shrl $(PDESHIFT),%ebx /* index into pde page */
419 movl $(1), %ecx /* # pdes to store */
420 fillkpt(PA(EXT(IdlePTD)), $(PTE_W|PTE_U)) /* user has access! */
421
422 movl PA(physfree),%edi
423 movl %edi,PA(EXT(first_avail)) /* save first available phys addr */
424
425 #ifdef PAE
426 /*
427 * We steal 0x4000 for a temp pdpt and 0x5000-0x8000
428 * for temp pde pages in the PAE case. Once we are
429 * running at the proper virtual address we switch to
430 * the PDPT/PDE's the master is using */
431
432 /* clear pdpt page to be safe */
433 xorl %eax, %eax
434 movl $(PAGE_SIZE),%ecx
435 movl $(0x4000),%edi
436 cld
437 rep
438 stosb
439
440 /* build temp pdpt */
441 movl $(0x5000), %eax
442 xorl %ebx, %ebx
443 movl $(NPGPTD), %ecx
444 fillkpt($(0x4000), $0)
445
446 /* copy the NPGPTD pages of pdes (8-byte PAE entries, two movl each) */
447 movl PA(EXT(IdlePTD)),%eax
448 movl $0x5000,%ebx
449 movl $((PTEMASK+1)*NPGPTD),%ecx
450 1: movl 0(%eax),%edx
451 movl %edx,0(%ebx)
452 movl 4(%eax),%edx
453 movl %edx,4(%ebx)
454 addl $(PTESIZE),%eax
455 addl $(PTESIZE),%ebx
456 loop 1b
457 #else
458 /* create temp pde for slaves to use
459 use unused lomem page and copy in IdlePTD */
460 movl PA(EXT(IdlePTD)),%eax
461 movl $0x4000,%ebx
462 movl $(PTEMASK+1),%ecx
463 1: movl 0(%eax),%edx
464 movl %edx,0(%ebx)
465 addl $(PTESIZE),%eax
466 addl $(PTESIZE),%ebx
467 loop 1b
468 #endif
469
470 POSTCODE(PSTART_PAGE_TABLES)
471
472 /*
473 * Fix initial descriptor tables.
 * fix_desc_common returns via jmp *%ebx because no usable stack
 * exists yet; each call site passes its own return label in %ebx.
474 */
475 lea PA(EXT(master_idt)),%esi /* fix IDT */
476 movl $(IDTSZ),%ecx
477 movl $(PA(fix_idt_ret)),%ebx
478 jmp fix_desc_common /* (cannot use stack) */
479 fix_idt_ret:
480
481 lea PA(EXT(master_gdt)),%esi /* fix GDT */
482 movl $(GDTSZ),%ecx
483 movl $(PA(fix_gdt_ret)),%ebx
484 jmp fix_desc_common /* (cannot use stack) */
485 fix_gdt_ret:
486
487 lea PA(EXT(master_ldt)),%esi /* fix LDT */
488 movl $(LDTSZ),%ecx
489 movl $(PA(fix_ldt_ret)),%ebx
490 jmp fix_desc_common /* (cannot use stack) */
491 fix_ldt_ret:
492
493 /*
494 * Load the (now hardware-format) boot descriptor tables.
495 */
496
497 lgdt PA(EXT(gdtptr)) /* load GDT */
498 lidt PA(EXT(idtptr)) /* load IDT */
499
500 POSTCODE(PSTART_BEFORE_PAGING)
501
502 /*
503 * Turn on paging.
504 */
505 #ifdef PAE
506 movl PA(EXT(IdlePDPT)), %eax
507 movl %eax, %cr3
508
509 movl %cr4, %eax
510 orl $(CR4_PAE), %eax
511 movl %eax, %cr4
512
513 movl $0x80000001, %eax
514 cpuid
515 and $(CPUID_EXTFEATURE_XD), %edx /* clear all but bit 20 */
516 cmp $0, %edx /* skip setting NXE if 20 is not set */
517 je 1f
518
519 movl $(MSR_IA32_EFER), %ecx /* MSR number in ecx */
520 rdmsr /* MSR value return in edx: eax */
521 orl $(MSR_IA32_EFER_NXE), %eax /* Set NXE bit in low 32-bits */
522 wrmsr /* Update Extended Feature Enable reg */
523 1:
524
525 #else
526 movl PA(EXT(IdlePTD)), %eax
527 movl %eax,%cr3
528 #endif
529
530 movl %cr0,%eax
531 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
532 movl %eax,%cr0 /* to enable paging */
533
534 LJMP(KERNEL_CS,EXT(vstart)) /* switch to kernel code segment */
535
536 /*
537 * BSP is now running with correct addresses.
538 */
539 LEXT(vstart)
540 POSTCODE(VSTART_ENTRY) ;
541
542 mov $(KERNEL_DS),%ax /* set kernel data segment */
543 mov %ax,%ds
544 mov %ax,%es
545 mov %ax,%ss
546 mov %ax,EXT(master_ktss)+TSS_SS0 /* set kernel stack segment */
547 /* for traps to kernel */
548
549 #if MACH_KDB
550 mov %ax,EXT(master_dbtss)+TSS_SS0 /* likewise for debug task switch */
551 mov %cr3,%eax /* get PDBR into debug TSS */
552 mov %eax,EXT(master_dbtss)+TSS_PDBR
553 mov $0,%eax
554 #endif
555 mov %cr3,%eax /* get PDBR into DF TSS */
556 mov %eax,EXT(master_dftss)+TSS_PDBR
557 mov %eax,EXT(master_mctss)+TSS_PDBR
558
559 movw $(KERNEL_LDT),%ax /* get LDT segment */
560 lldt %ax /* load LDT */
561 #if MACH_KDB
562 mov %ax,EXT(master_ktss)+TSS_LDT /* store LDT in two TSS, as well... */
563 mov %ax,EXT(master_dbtss)+TSS_LDT /* ...matters if we switch tasks */
564 #endif
565 movw $(KERNEL_TSS),%ax
566 ltr %ax /* set up KTSS */
567
568 mov $(CPU_DATA_GS),%ax
569 mov %ax,%gs
570
571 POSTCODE(VSTART_STACK_SWITCH)
572
573 lea EXT(low_eintstack),%esp /* switch to the bootup stack */
574 pushl %ebp /* push boot args addr */
575 xorl %ebp,%ebp /* clear stack frame ptr */
576
577 POSTCODE(VSTART_EXIT)
578
579 call EXT(i386_init) /* run C code */
580 /*NOTREACHED*/
581 hlt
582
583
584 /*
585 * AP (slave) CPUs enter here.
586 *
587 * Environment:
588 * protected mode, no paging, flat 32-bit address space.
589 * (Code/data/stack segments have base == 0, limit == 4G)
 *
 * Uses the temporary page directory at phys 0x4000 (built by the BSP
 * in pstart) just long enough to enable paging, then switches to the
 * master page tables and the per-cpu descriptor tables before calling
 * i386_init_slave() on this CPU's interrupt stack.
590 */
591 .align ALIGN
592 .globl EXT(slave_pstart)
593 LEXT(slave_pstart)
594 cli /* disable interrupts, so we don`t */
595 /* need IDT for a while */
596
597 POSTCODE(SLAVE_PSTART_ENTRY)
598 /*
599 * Turn on paging.
600 */
601 #ifdef PAE
602 movl %cr4, %eax
603 orl $(CR4_PAE), %eax
604 movl %eax, %cr4
605
606 movl $(MSR_IA32_EFER), %ecx /* MSR number in ecx */
607 rdmsr /* MSR value return in edx: eax */
608 orl $(MSR_IA32_EFER_NXE), %eax /* Set NXE bit in low 32-bits */
609 wrmsr /* Update Extended Feature Enable reg */
610 #endif
611 movl $(0x4000),%eax /* tmp until we get mapped */
612 movl %eax,%cr3
613
614 movl %cr0,%eax
615 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
616 movl %eax,%cr0 /* to enable paging */
617
618 POSTCODE(SLAVE_PSTART_EXIT)
619
620 movl $(EXT(spag_start)),%edx /* first paged code address */
621 jmp *%edx /* flush prefetch queue */
622
623 /*
624 * We are now paging, and can run with correct addresses.
625 */
626 LEXT(spag_start)
627
628 lgdt PA(EXT(gdtptr)) /* load GDT */
629 lidt PA(EXT(idtptr)) /* load IDT */
630
631 LJMP(KERNEL_CS,EXT(slave_vstart)) /* switch to kernel code segment */
632
633
634 /*
635 * Slave is now running with correct addresses.
636 */
637 LEXT(slave_vstart)
638
639 POSTCODE(SLAVE_VSTART_ENTRY)
640
/* switch from the temp 0x4000 directory to the master page tables */
641 #ifdef PAE
642 movl PA(EXT(IdlePDPT)), %eax
643 movl %eax, %cr3
644 #else
645 movl PA(EXT(IdlePTD)), %eax
646 movl %eax, %cr3
647 #endif
648
649 mov $(KERNEL_DS),%ax /* set kernel data segment */
650 mov %ax,%ds
651 mov %ax,%es
652 mov %ax,%ss
653
654 /*
655 * We're not quite through with the boot stack
656 * but we need to reset the stack pointer to the correct virtual
657 * address.
658 * And we need to offset above the address of pstart.
659 */
660 movl $(VA(MP_BOOTSTACK+MP_BOOT+4)), %esp
661
662 /*
663 * Switch to the per-cpu descriptor tables
 * (the 6-byte lgdt/lidt pseudo-descriptors are built in place
 * on the boot stack at 0(%esp)).
664 */
665 POSTCODE(SLAVE_VSTART_DESC_INIT)
666
667 CPU_NUMBER_FROM_LAPIC(%eax)
668 movl CX(EXT(cpu_data_ptr),%eax),%ecx
669
670 movw $(GDTSZ*8-1),0(%esp) /* set GDT size in GDT descriptor */
671 movl CPU_DESC_INDEX+CDI_GDT(%ecx),%edx
672 movl %edx,2(%esp) /* point to local GDT (linear addr) */
673 lgdt 0(%esp) /* load new GDT */
674
675 movw $(IDTSZ*8-1),0(%esp) /* set IDT size in IDT descriptor */
676 movl CPU_DESC_INDEX+CDI_IDT(%ecx),%edx
677 movl %edx,2(%esp) /* point to local IDT (linear addr) */
678 lidt 0(%esp) /* load new IDT */
679
680 movw $(KERNEL_LDT),%ax /* get LDT segment */
681 lldt %ax /* load LDT */
682
683 movw $(KERNEL_TSS),%ax
684 ltr %ax /* load new KTSS */
685
686 mov $(CPU_DATA_GS),%ax
687 mov %ax,%gs
688
689 /*
690 * Get stack top from per-cpu data and switch
691 */
692 POSTCODE(SLAVE_VSTART_STACK_SWITCH)
693
694 movl %gs:CPU_INT_STACK_TOP,%esp
695 xorl %ebp,%ebp /* for completeness */
696
697 POSTCODE(SLAVE_VSTART_EXIT)
698
699 call EXT(i386_init_slave) /* start MACH */
700 /*NOTREACHED*/
701 hlt
702
703 /*
704 * Convert a descriptor from fake to real format.
 * C prototype: void fix_desc(void *table, int num_desc);
705 *
706 * Calls from assembly code:
707 * %ebx = return address (physical) CANNOT USE STACK
708 * %esi = descriptor table address (physical)
709 * %ecx = number of descriptors
710 *
711 * Calls from C:
712 * 0(%esp) = return address
713 * 4(%esp) = descriptor table address (physical)
714 * 8(%esp) = number of descriptors
715 *
716 * Fake descriptor format:
717 * bytes 0..3 base 31..0
718 * bytes 4..5 limit 15..0
719 * byte 6 access byte 2 | limit 19..16
720 * byte 7 access byte 1
721 *
722 * Real descriptor format:
723 * bytes 0..1 limit 15..0
724 * bytes 2..3 base 15..0
725 * byte 4 base 23..16
726 * byte 5 access byte 1
727 * byte 6 access byte 2 | limit 19..16
728 * byte 7 base 31..24
729 *
730 * Fake gate format:
731 * bytes 0..3 offset
732 * bytes 4..5 selector
733 * byte 6 word count << 4 (to match fake descriptor)
734 * byte 7 access byte 1
735 *
736 * Real gate format:
737 * bytes 0..1 offset 15..0
738 * bytes 2..3 selector
739 * byte 4 word count
740 * byte 5 access byte 1
741 * bytes 6..7 offset 31..16
742 */
743 .globl EXT(fix_desc)
744 LEXT(fix_desc)
745 pushl %ebp /* set up */
746 movl %esp,%ebp /* stack frame */
747 pushl %esi /* save registers */
748 pushl %ebx
749 movl B_ARG0,%esi /* point to first descriptor */
750 movl B_ARG1,%ecx /* get number of descriptors */
751 lea 0f,%ebx /* get return address */
752 jmp fix_desc_common /* call internal routine */
753 0: popl %ebx /* restore registers */
754 popl %esi
755 leave /* pop stack frame */
756 ret /* return */
757
/*
 * fix_desc_common: convert %ecx descriptors at %esi, in place, from
 * fake to real format (layouts documented above fix_desc). Returns
 * via jmp *%ebx so it can run before any stack exists. Clobbers
 * %eax, %edx, %ecx, %esi and flags.
 */
758 fix_desc_common:
759 0:
760 movw 6(%esi),%dx /* get access byte */
761 movb %dh,%al
762 andb $0x14,%al
763 cmpb $0x04,%al /* gate or descriptor? */
764 je 1f
765
766 /* descriptor */
767 movl 0(%esi),%eax /* get base in eax */
768 rol $16,%eax /* swap 15..0 with 31..16 */
769 /* (15..0 in correct place) */
770 movb %al,%dl /* combine bits 23..16 with ACC1 */
771 /* in dh/dl */
772 movb %ah,7(%esi) /* store bits 31..24 in correct place */
773 movw 4(%esi),%ax /* move limit bits 0..15 to word 0 */
774 movl %eax,0(%esi) /* store (bytes 0..3 correct) */
775 movw %dx,4(%esi) /* store bytes 4..5 */
776 jmp 2f
777
778 /* gate */
779 1:
780 movw 4(%esi),%ax /* get selector */
781 shrb $4,%dl /* shift word count to proper place */
782 movw %dx,4(%esi) /* store word count / ACC1 */
783 movw 2(%esi),%dx /* get offset 16..31 */
784 movw %dx,6(%esi) /* store in correct place */
785 movw %ax,2(%esi) /* store selector in correct place */
786 2:
787 addl $8,%esi /* bump to next descriptor */
788 loop 0b /* repeat */
789 jmp *%ebx /* all done */
790
791 /*
792 * put arg in kbd leds and spin a while
793 * eats eax, ecx, edx
 *
 * Debug aid: writes the LED-state argument to the i8042 PS/2
 * keyboard controller (command 0xED), busy-waiting on the status
 * port between steps, then spins in a delay loop so the LEDs stay
 * visible. Polling only; safe with interrupts off.
794 */
795 #define K_RDWR 0x60 /* i8042 data port */
796 #define K_CMD_LEDS 0xed /* "set LEDs" command byte */
797 #define K_STATUS 0x64 /* i8042 status/command port */
798 #define K_IBUF_FULL 0x02 /* input (to kbd) buffer full */
799 #define K_OBUF_FULL 0x01 /* output (from kbd) buffer full */
800
801 ENTRY(set_kbd_leds)
802 mov S_ARG0,%cl /* save led value */
803
804 0: inb $(K_STATUS),%al /* get kbd status */
805 testb $(K_IBUF_FULL),%al /* input busy? */
806 jne 0b /* loop until not */
807
808 mov $(K_CMD_LEDS),%al /* K_CMD_LEDS */
809 outb %al,$(K_RDWR) /* to kbd */
810
811 0: inb $(K_STATUS),%al /* get kbd status */
812 testb $(K_OBUF_FULL),%al /* output present? */
813 je 0b /* loop if not */
814
815 inb $(K_RDWR),%al /* read status (and discard) */
816
817 0: inb $(K_STATUS),%al /* get kbd status */
818 testb $(K_IBUF_FULL),%al /* input busy? */
819 jne 0b /* loop until not */
820
821 mov %cl,%al /* move led value */
822 outb %al,$(K_RDWR) /* to kbd */
823
824 movl $10000000,%ecx /* spin */
825 0: nop
826 nop
827 loop 0b /* a while */
828
829 ret
829 ret