/*
 * Provenance: osfmk/i386/start.s from Apple xnu-792.21.3
 * (captured via git.saurik.com git-blame view of apple/xnu.git;
 * blame line numbers and commit hashes remain interleaved below).
 */
1c79356b
A
1/*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
8f6c56a5 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
8f6c56a5
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
8ad349bb 24 * limitations under the License.
8f6c56a5
A
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58
59#include <platforms.h>
1c79356b
A
60#include <mach_kdb.h>
61
62#include <i386/asm.h>
63#include <i386/proc_reg.h>
91447636 64#include <i386/postcode.h>
1c79356b
A
65#include <assym.s>
66
1c79356b
A
/* CX(addr,reg): address element 'reg' of a 32-bit-entry array at 'addr',
 * i.e. GAS scaled-index form addr(,reg,4). */
67#define CX(addr,reg) addr(,reg,4)
68
55e303ae 69#include <i386/mp.h>
91447636 70#include <i386/mp_slave_boot.h>
1c79356b
A
71
72/*
73 * GAS won't handle an intersegment jump with a relocatable offset.
74 */
/* Hand-assembled far jump: 0xEA opcode, 32-bit offset, 16-bit selector.
 * Used to reload %cs (e.g. immediately after enabling paging). */
75#define LJMP(segment,address) \
76 .byte 0xea ;\
77 .long address ;\
78 .word segment
79
80
81
21362eb3
A
/* Offsets for converting between kernel virtual and physical addresses
 * during early boot, before/while paging is being set up. */
82#define KVTOPHYS (-KERNELBASE)
83#define KVTOLINEAR LINEAR_KERNELBASE
8f6c56a5 84
21362eb3
A
85
/* PA(): kernel virtual -> physical; VA(): physical -> kernel virtual. */
86#define PA(addr) ((addr)+KVTOPHYS)
87#define VA(addr) ((addr)-KVTOPHYS)
88
88
89 .data
90#if 0 /* Anyone need this? */
91 .align 2
92 .globl EXT(_kick_buffer_)
93EXT(_kick_buffer_):
94 .long 1
95 .long 3
96 .set .,.+16836
97#endif /* XXX */
1c79356b
A
98/*
99 * Interrupt and bootup stack for initial processor.
100 */
3a60a9f5 101 /* in the __HIB section since the hibernate restore code uses this stack. */
91447636 102 .section __HIB, __data
21362eb3 103 .align ALIGN
91447636 104
21362eb3
A
/* Base of the INTSTACK_SIZE-byte boot/interrupt stack; doubles as the
 * hibernate-restore stack (same storage, two labels). */
105 .globl EXT(intstack)
106EXT(intstack):
3a60a9f5
A
107 .globl EXT(gIOHibernateRestoreStack)
108EXT(gIOHibernateRestoreStack):
91447636 109
/* ".set ., .+N" advances the location counter: reserves N bytes. */
1c79356b 110 .set ., .+INTSTACK_SIZE
91447636 111
21362eb3
A
112 .globl EXT(eintstack)
/* NOTE(review): colon written inside EXT() here, unlike EXT(intstack):
 * above — EXT(x) pastes a prefix, so this still yields a valid label,
 * but the inconsistency is worth confirming against the macro. */
113EXT(eintstack:)
3a60a9f5
A
114 .globl EXT(gIOHibernateRestoreStackEnd)
115EXT(gIOHibernateRestoreStackEnd):
1c79356b 116
91447636
A
117/*
118 * Pointers to GDT and IDT. These contain linear addresses.
119 */
/* 6-byte pseudo-descriptors for lgdt/lidt: 16-bit limit (bytes-1)
 * followed by a 32-bit linear base address. */
120 .align ALIGN
121 .globl EXT(gdtptr)
122LEXT(gdtptr)
123 .word Times(8,GDTSZ)-1
21362eb3 124 .long EXT(gdt)
91447636
A
125
126 .align ALIGN
127 .globl EXT(idtptr)
128LEXT(idtptr)
129 .word Times(8,IDTSZ)-1
21362eb3 130 .long EXT(idt)
91447636 131
21362eb3 132 /* back to the regular __DATA section. */
91447636
A
133
134 .section __DATA, __data
1c79356b 135
1c79356b
A
136
137#if MACH_KDB
138/*
139 * Kernel debugger stack for each processor.
140 */
/* One INTSTACK_SIZE stack per possible CPU, reserved contiguously. */
21362eb3 141 .align ALIGN
1c79356b
A
142 .globl EXT(db_stack_store)
143EXT(db_stack_store):
91447636 144 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
1c79356b
A
145
146/*
147 * Stack for last-ditch debugger task for each processor.
148 */
21362eb3 149 .align ALIGN
1c79356b
A
150 .globl EXT(db_task_stack_store)
151EXT(db_task_stack_store):
91447636 152 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
1c79356b
A
153
154/*
155 * per-processor kernel debugger stacks
156 */
157 .align ALIGN
158 .globl EXT(kgdb_stack_store)
159EXT(kgdb_stack_store):
91447636
A
160 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
161#endif /* MACH_KDB */
1c79356b 162
1c79356b 163 .data
21362eb3
A
164 /*
165 * start_lock is very special. We initialize the
166 * lock at allocation time rather than at run-time.
167 * Although start_lock should be an instance of a
168 * hw_lock, we hand-code all manipulation of the lock
169 * because the hw_lock code may require function calls;
170 * and we'd rather not introduce another dependency on
171 * a working stack at this point.
172 */
173 .globl EXT(start_lock)
174EXT(start_lock):
175 .long 0 /* synchronizes processor startup */
176
177 .globl EXT(master_is_up)
178EXT(master_is_up):
179 .long 0 /* 1 when OK for other processors */
180 /* to start */
181 .globl EXT(mp_boot_pde)
182EXT(mp_boot_pde):
183 .long 0
184
/* Early physical-memory bookkeeping, filled in by pstart below. */
185_KERNend: .long 0 /* phys addr end of kernel (just after bss) */
186physfree: .long 0 /* phys addr of next free page */
187
188 .globl _IdlePTD
189_IdlePTD: .long 0 /* phys addr of kernel PTD */
91447636 190#ifdef PAE
21362eb3
A
191 .globl _IdlePDPT
192_IdlePDPT: .long 0 /* phys addr of kernel PDPT */
91447636
A
193#endif
194
21362eb3
A
195 .globl _KPTphys
196
197_KPTphys: .long 0 /* phys addr of kernel page tables */
91447636
A
91447636
A
198
199
200/* Some handy macros */
201
21362eb3
A
/* ALLOCPAGES(npages): carve npages pages off the physfree cursor and
 * zero-fill them. Returns base phys addr in %esi; clobbers %eax, %ecx,
 * %edi and advances physfree. Runs with paging off (phys addressing). */
202#define ALLOCPAGES(npages) \
203 movl PA(physfree), %esi ; \
204 movl $((npages) * PAGE_SIZE), %eax ; \
205 addl %esi, %eax ; \
206 movl %eax, PA(physfree) ; \
207 movl %esi, %edi ; \
208 movl $((npages) * PAGE_SIZE / 4),%ecx ; \
209 xorl %eax,%eax ; \
210 cld ; \
211 rep ; \
91447636
A
212 stosl
213
214/*
215 * fillkpt
216 * eax = page frame address
217 * ebx = index into page table
218 * ecx = how many pages to map
219 * base = base address of page dir/table
220 * prot = protection bits
221 */
/* Writes ecx consecutive PTEs (valid bit + prot), advancing the physical
 * address by a page per entry. Clobbers %eax, %ebx, %ecx. */
222#define fillkpt(base, prot) \
21362eb3 223 shll $(PTEINDX),%ebx ; \
91447636
A
224 addl base,%ebx ; \
225 orl $(PTE_V) ,%eax ; \
226 orl prot,%eax ; \
2271: movl %eax,(%ebx) ; \
228 addl $(PAGE_SIZE),%eax ; /* increment physical address */ \
229 addl $(PTESIZE),%ebx ; /* next pte */ \
230 loop 1b
231
232/*
233 * fillkptphys(prot)
234 * eax = physical address
235 * ecx = how many pages to map
236 * prot = protection bits
237 */
/* Identity-maps: page-table index is derived from the phys addr itself. */
238#define fillkptphys(prot) \
239 movl %eax, %ebx ; \
240 shrl $(PAGE_SHIFT), %ebx ; \
21362eb3 241 fillkpt(PA(EXT(KPTphys)), prot)
91447636 242
91447636 242
21362eb3 243
1c79356b 244/*
21362eb3 245 * All CPUs start here.
1c79356b
A
246 *
247 * Environment:
248 * protected mode, no paging, flat 32-bit address space.
249 * (Code/data/stack segments have base == 0, limit == 4G)
250 */
/* On entry %eax carries the bootloader's KERNBOOTSTRUCT pointer. */
251 .text
252 .align ALIGN
21362eb3 253 .globl EXT(pstart)
1c79356b
A
254 .globl EXT(_start)
255LEXT(_start)
256LEXT(pstart)
21362eb3 257 mov %eax, %ebx /* save pointer to kernbootstruct */
91447636 258
21362eb3 259 POSTCODE(PSTART_ENTRY);
91447636 260
1c79356b
A
261 mov $0,%ax /* fs must be zeroed; */
262 mov %ax,%fs /* some bootstrappers don`t do this */
263 mov %ax,%gs
264
21362eb3
A
/* Acquire start_lock: spin while held, then take it with an atomic
 * xchgl; a nonzero old value means we lost the race and must respin. */
265 jmp 1f
2660: cmpl $0,PA(EXT(start_lock))
267 jne 0b
/* NOTE(review): 'movb' paired with 32-bit %eax — operand-size mismatch;
 * verify against the assembler ('movl $1,%eax' would be conventional). */
2681: movb $1,%eax
269 xchgl %eax,PA(EXT(start_lock)) /* locked */
270 testl %eax,%eax
271 jnz 0b
272
/* First CPU through becomes the master; later CPUs divert to slave_start. */
273 cmpl $0,PA(EXT(master_is_up)) /* are we first? */
274 jne EXT(slave_start) /* no -- system already up. */
275 movl $1,PA(EXT(master_is_up)) /* others become slaves */
276 jmp 3f
2773:
278
1c79356b
A
279/*
280 * Get startup parameters.
281 */
/* Compute end-of-kernel (page-rounded load addr + size from the
 * bootstruct), seed the physfree allocator, then build the boot page
 * tables: identity map of [0, KERNend), mappings for the directory
 * pages themselves, a temporary low double-map, the kernel-VA PDEs,
 * and a recursive PDE so the directory is visible as a page table. */
21362eb3
A
282
283 movl %ebx,PA(EXT(boot_args_start)) /* Save KERNBOOTSTRUCT */
284
91447636
A
285 movl KADDR(%ebx), %eax
286 addl KSIZE(%ebx), %eax
287 addl $(NBPG-1),%eax
288 andl $(-NBPG), %eax
21362eb3 289 movl %eax, PA(EXT(KERNend))
91447636
A
290 movl %eax, PA(physfree)
291 cld
1c79356b 292
91447636
A
293/* allocate kernel page table pages */
294 ALLOCPAGES(NKPT)
21362eb3 295 movl %esi,PA(EXT(KPTphys))
1c79356b 296
91447636
A
297#ifdef PAE
298/* allocate Page Table Directory Page */
299 ALLOCPAGES(1)
300 movl %esi,PA(EXT(IdlePDPT))
301#endif
1c79356b 302
91447636
A
303/* allocate kernel page directory page */
304 ALLOCPAGES(NPGPTD)
305 movl %esi,PA(EXT(IdlePTD))
1c79356b 306
91447636
A
307/* map from zero to end of kernel */
308 xorl %eax,%eax
309 movl PA(physfree),%ecx
310 shrl $(PAGE_SHIFT),%ecx
311 fillkptphys( $(PTE_W) )
312
313/* map page directory */
314#ifdef PAE
315 movl PA(EXT(IdlePDPT)), %eax
316 movl $1, %ecx
317 fillkptphys( $(PTE_W) )
318#endif
319 movl PA(EXT(IdlePTD)),%eax
320 movl $(NPGPTD), %ecx
321 fillkptphys( $(PTE_W) )
322
323/* install a pde for temp double map of bottom of VA */
21362eb3 324 movl PA(EXT(KPTphys)),%eax
91447636
A
325 xorl %ebx,%ebx
326 movl $(NKPT), %ecx
327 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
328
329/* install pde's for page tables */
21362eb3 330 movl PA(EXT(KPTphys)),%eax
91447636
A
331 movl $(KPTDI),%ebx
332 movl $(NKPT),%ecx
333 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
334
335/* install a pde recursively mapping page directory as a page table */
336 movl PA(EXT(IdlePTD)),%eax
337 movl $(PTDPTDI),%ebx
338 movl $(NPGPTD),%ecx
339 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
340
341#ifdef PAE
/* Point the PDPT's NPGPTD entries at the page-directory pages. */
342 movl PA(EXT(IdlePTD)), %eax
343 xorl %ebx, %ebx
344 movl $(NPGPTD), %ecx
345 fillkpt(PA(EXT(IdlePDPT)), $0)
346#endif
1c79356b 347
91447636 348/* install a pde page for commpage use up in high memory */
1c79356b 349
91447636
A
350 movl PA(physfree),%eax /* grab next phys page */
351 movl %eax,%ebx
352 addl $(PAGE_SIZE),%ebx
353 movl %ebx,PA(physfree) /* show next free phys pg */
354 movl $(COMM_PAGE_BASE_ADDR),%ebx
355 shrl $(PDESHIFT),%ebx /* index into pde page */
356 movl $(1), %ecx /* # pdes to store */
357 fillkpt(PA(EXT(IdlePTD)), $(PTE_W|PTE_U)) /* user has access! */
1c79356b 358
91447636 359 movl PA(physfree),%edi
1c79356b
A
360 movl %edi,PA(EXT(first_avail)) /* save first available phys addr */
361
91447636 362#ifdef PAE
1c79356b 363/*
91447636
A
364 * We steal 0x4000 for a temp pdpt and 0x5000-0x8000
365 * for temp pde pages in the PAE case. Once we are
366 * running at the proper virtual address we switch to
367 * the PDPT/PDE's the master is using */
368
369 /* clear pdpt page to be safe */
370 xorl %eax, %eax
371 movl $(PAGE_SIZE),%ecx
372 movl $(0x4000),%edi
373 cld
374 rep
375 stosb
376
377 /* build temp pdpt */
378 movl $(0x5000), %eax
379 xorl %ebx, %ebx
380 movl $(NPGPTD), %ecx
381 fillkpt($(0x4000), $0)
382
383 /* copy the NPGPTD pages of pdes */
/* PAE entries are 8 bytes, hence the paired 4-byte moves per entry. */
384 movl PA(EXT(IdlePTD)),%eax
385 movl $0x5000,%ebx
386 movl $((PTEMASK+1)*NPGPTD),%ecx
3871: movl 0(%eax),%edx
388 movl %edx,0(%ebx)
389 movl 4(%eax),%edx
390 movl %edx,4(%ebx)
391 addl $(PTESIZE),%eax
392 addl $(PTESIZE),%ebx
393 loop 1b
394#else
395/* create temp pde for slaves to use
396 use unused lomem page and copy in IdlePTD */
397 movl PA(EXT(IdlePTD)),%eax
398 movl $0x4000,%ebx
399 movl $(PTEMASK+1),%ecx
4001: movl 0(%eax),%edx
401 movl %edx,0(%ebx)
402 addl $(PTESIZE),%eax
403 addl $(PTESIZE),%ebx
404 loop 1b
405#endif
406
21362eb3 407 POSTCODE(PSTART_PAGE_TABLES);
1c79356b
A
408
409/*
410 * Fix initial descriptor tables.
411 */
/* Convert IDT/GDT/LDT entries from build-time "fake" layout to the
 * hardware layout via fix_desc_common. No stack exists yet, so the
 * return address is passed in %ebx (physical). */
21362eb3 412 lea PA(EXT(idt)),%esi /* fix IDT */
1c79356b
A
413 movl $(IDTSZ),%ecx
414 movl $(PA(fix_idt_ret)),%ebx
415 jmp fix_desc_common /* (cannot use stack) */
416fix_idt_ret:
417
21362eb3 418 lea PA(EXT(gdt)),%esi /* fix GDT */
1c79356b
A
419 movl $(GDTSZ),%ecx
420 movl $(PA(fix_gdt_ret)),%ebx
421 jmp fix_desc_common /* (cannot use stack) */
422fix_gdt_ret:
423
21362eb3 424 lea PA(EXT(ldt)),%esi /* fix LDT */
1c79356b
A
425 movl $(LDTSZ),%ecx
426 movl $(PA(fix_ldt_ret)),%ebx
427 jmp fix_desc_common /* (cannot use stack) */
428fix_ldt_ret:
429
430/*
91447636 431 *
1c79356b 432 */
1c79356b 433
91447636
A
434 lgdt PA(EXT(gdtptr)) /* load GDT */
435 lidt PA(EXT(idtptr)) /* load IDT */
1c79356b 436
21362eb3 437 POSTCODE(PSTART_BEFORE_PAGING);
1c79356b
A
438
439/*
91447636 440 * Turn on paging.
1c79356b 441 */
/* Load %cr3 (PDPT under PAE, else the page directory), set CR4.PAE if
 * applicable, then set CR0.PG|WP|PE and far-jump to the kernel-VA
 * alias (vstart) to reload %cs and flush the prefetch queue. */
91447636
A
442#ifdef PAE
443 movl PA(EXT(IdlePDPT)), %eax
444 movl %eax, %cr3
445
446 movl %cr4, %eax
21362eb3 447 orl $(CR4_PAE), %eax
91447636
A
448 movl %eax, %cr4
449#else
450 movl PA(EXT(IdlePTD)), %eax
451 movl %eax,%cr3
452#endif
453
454 movl %cr0,%eax
455 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
456 movl %eax,%cr0 /* to enable paging */
457
1c79356b
A
458 LJMP(KERNEL_CS,EXT(vstart)) /* switch to kernel code segment */
459
460/*
21362eb3 461 * Master is now running with correct addresses.
1c79356b
A
462 */
/* Load the kernel data/stack segments and TSS/LDT, switch onto the
 * bootup interrupt stack, then enter C (i386_preinit, i386_init). */
463LEXT(vstart)
91447636
A
464 POSTCODE(VSTART_ENTRY) ;
465
1c79356b
A
466 mov $(KERNEL_DS),%ax /* set kernel data segment */
467 mov %ax,%ds
468 mov %ax,%es
469 mov %ax,%ss
21362eb3 470 mov %ax,EXT(ktss)+TSS_SS0 /* set kernel stack segment */
1c79356b
A
471 /* for traps to kernel */
472#if MACH_KDB
21362eb3 473 mov %ax,EXT(dbtss)+TSS_SS0 /* likewise for debug task switch */
1c79356b 474 mov %cr3,%eax /* get PDBR into debug TSS */
21362eb3 475 mov %eax,EXT(dbtss)+TSS_PDBR
1c79356b
A
476 mov $0,%eax
477#endif
478
479 movw $(KERNEL_LDT),%ax /* get LDT segment */
480 lldt %ax /* load LDT */
481#if MACH_KDB
21362eb3
A
482 mov %ax,EXT(ktss)+TSS_LDT /* store LDT in two TSS, as well... */
483 mov %ax,EXT(dbtss)+TSS_LDT /* ...matters if we switch tasks */
1c79356b
A
484#endif
485 movw $(KERNEL_TSS),%ax
486 ltr %ax /* set up KTSS */
487
91447636 488 mov $(CPU_DATA_GS),%ax
1c79356b
A
489 mov %ax,%gs
490
21362eb3 491 POSTCODE(VSTART_STACK_SWITCH);
91447636 492
21362eb3
A
493 lea EXT(eintstack),%esp /* switch to the bootup stack */
494 call EXT(i386_preinit)
91447636 495
21362eb3 496 POSTCODE(VSTART_EXIT);
91447636 497
55e303ae 498 call EXT(i386_init) /* run C code */
1c79356b
A
499 /*NOTREACHED*/
500 hlt
501
21362eb3
A
/* Linker entry point __start aliases the physical address of pstart. */
502 .text
503 .globl __start
504 .set __start, PA(EXT(pstart))
8f6c56a5 505
21362eb3 506
8f6c56a5 507/*
21362eb3
A
508 * master_up is used by the master cpu to signify that it is done
509 * with the interrupt stack, etc. See the code in pstart and svstart
510 * that this interlocks with.
1c79356b
A
511 */
/* Releases start_lock with an atomic xchgl; no return value. */
512 .align ALIGN
21362eb3
A
513 .globl EXT(master_up)
514LEXT(master_up)
515 pushl %ebp /* set up */
516 movl %esp,%ebp /* stack frame */
517 movl $0,%ecx /* unlock start_lock */
518 xchgl %ecx,EXT(start_lock) /* since we are no longer using */
519 /* bootstrap stack */
520 leave /* pop stack frame */
521 ret
522
523/*
524 * We aren't the first. Call slave_main to initialize the processor
525 * and get Mach going on it.
526 */
/* Slave CPUs enter here (still physically addressed, via pstart's
 * branch). Uses the temporary page tables the master built at 0x4000
 * to enable paging, then jumps to the paged alias spag_start. */
527 .align ALIGN
528 .globl EXT(slave_start)
529LEXT(slave_start)
1c79356b
A
530 cli /* disable interrupts, so we don`t */
531 /* need IDT for a while */
91447636 532
21362eb3 533 POSTCODE(SLAVE_START_ENTRY);
91447636
A
534/*
535 * Turn on paging.
536 */
21362eb3
A
537 movl $(EXT(spag_start)),%edx /* first paged code address */
538
91447636 539#ifdef PAE
21362eb3
A
540 movl $(0x4000), %eax
541 movl %eax, %cr3
542
91447636 543 movl %cr4, %eax
21362eb3 544 orl $(CR4_PAE), %eax
91447636 545 movl %eax, %cr4
21362eb3 546#else
91447636
A
547 movl $(0x4000),%eax /* tmp until we get mapped */
548 movl %eax,%cr3
21362eb3 549#endif
1c79356b
A
550
551 movl %cr0,%eax
91447636 552 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
1c79356b
A
553 movl %eax,%cr0 /* to enable paging */
554
21362eb3 555 POSTCODE(SLAVE_START_EXIT);
91447636
A
556
557 jmp *%edx /* flush prefetch queue */
1c79356b
A
558
559/*
560 * We are now paging, and can run with correct addresses.
561 */
/* Load master's GDT/IDT pointers and far-jump to reload %cs. */
562LEXT(spag_start)
563
91447636
A
564 lgdt PA(EXT(gdtptr)) /* load GDT */
565 lidt PA(EXT(idtptr)) /* load IDT */
566
21362eb3 567 LJMP(KERNEL_CS,EXT(svstart)) /* switch to kernel code segment */
1c79356b 568
91447636 569
1c79356b
A
570/*
571 * Slave is now running with correct addresses.
572 */
/* Switch to the master's real page tables, load per-cpu descriptor
 * tables (located via the local APIC id), move onto this cpu's
 * interrupt stack, release start_lock, and enter i386_init_slave. */
21362eb3 573LEXT(svstart)
91447636 574
21362eb3 575 POSTCODE(SVSTART_ENTRY);
91447636
A
576
577#ifdef PAE
578 movl PA(EXT(IdlePDPT)), %eax
579 movl %eax, %cr3
580#else
581 movl PA(EXT(IdlePTD)), %eax
582 movl %eax, %cr3
583#endif
584
1c79356b
A
585 mov $(KERNEL_DS),%ax /* set kernel data segment */
586 mov %ax,%ds
587 mov %ax,%es
588 mov %ax,%ss
589
91447636
A
590 /*
591 * We're not quite through with the boot stack
592 * but we need to reset the stack pointer to the correct virtual
593 * address.
594 * And we need to offset above the address of pstart.
595 */
596 movl $(VA(MP_BOOTSTACK+MP_BOOT+4)), %esp
1c79356b
A
597
598/*
91447636 599 * Switch to the per-cpu descriptor tables
1c79356b 600 */
21362eb3 601 POSTCODE(SVSTART_DESC_INIT);
91447636
A
602
603 CPU_NUMBER_FROM_LAPIC(%eax)
604 movl CX(EXT(cpu_data_ptr),%eax),%ecx
21362eb3 605 movl CPU_DESC_TABLEP(%ecx), %ecx
1c79356b 606
/* Build the 6-byte lgdt/lidt operand in-place on the stack. */
1c79356b 607 movw $(GDTSZ*8-1),0(%esp) /* set GDT size in GDT descriptor */
21362eb3 608 leal MP_GDT(%ecx),%edx
91447636 609 movl %edx,2(%esp) /* point to local GDT (linear addr) */
1c79356b
A
610 lgdt 0(%esp) /* load new GDT */
611
612 movw $(IDTSZ*8-1),0(%esp) /* set IDT size in IDT descriptor */
21362eb3 613 leal MP_IDT(%ecx),%edx
91447636 614 movl %edx,2(%esp) /* point to local IDT (linear addr) */
1c79356b
A
615 lidt 0(%esp) /* load new IDT */
616
55e303ae
A
617 movw $(KERNEL_LDT),%ax /* get LDT segment */
618 lldt %ax /* load LDT */
619
1c79356b
A
620 movw $(KERNEL_TSS),%ax
621 ltr %ax /* load new KTSS */
622
91447636 623 mov $(CPU_DATA_GS),%ax
1c79356b
A
624 mov %ax,%gs
625
91447636
A
626/*
627 * Get stack top from pre-cpu data and switch
628 */
21362eb3 629 POSTCODE(SVSTART_STACK_SWITCH);
91447636
A
630
631 movl %gs:CPU_INT_STACK_TOP,%esp
632 xorl %ebp,%ebp /* for completeness */
633
21362eb3
A
634 movl $0,%eax /* unlock start_lock */
635 xchgl %eax,EXT(start_lock) /* since we are no longer using */
636 /* bootstrap stack */
637 POSTCODE(SVSTART_EXIT);
91447636
A
638
639 call EXT(i386_init_slave) /* start MACH */
1c79356b
A
640 /*NOTREACHED*/
641 hlt
1c79356b
A
642
643/*
644 * Convert a descriptor from fake to real format.
645 *
646 * Calls from assembly code:
647 * %ebx = return address (physical) CANNOT USE STACK
648 * %esi = descriptor table address (physical)
649 * %ecx = number of descriptors
650 *
651 * Calls from C:
652 * 0(%esp) = return address
653 * 4(%esp) = descriptor table address (physical)
654 * 8(%esp) = number of descriptors
655 *
656 * Fake descriptor format:
657 * bytes 0..3 base 31..0
658 * bytes 4..5 limit 15..0
659 * byte 6 access byte 2 | limit 19..16
660 * byte 7 access byte 1
661 *
662 * Real descriptor format:
663 * bytes 0..1 limit 15..0
664 * bytes 2..3 base 15..0
665 * byte 4 base 23..16
666 * byte 5 access byte 1
667 * byte 6 access byte 2 | limit 19..16
668 * byte 7 base 31..24
669 *
670 * Fake gate format:
671 * bytes 0..3 offset
672 * bytes 4..5 selector
673 * byte 6 word count << 4 (to match fake descriptor)
674 * byte 7 access byte 1
675 *
676 * Real gate format:
677 * bytes 0..1 offset 15..0
678 * bytes 2..3 selector
679 * byte 4 word count
680 * byte 5 access byte 1
681 * bytes 6..7 offset 31..16
682 */
/* C-callable wrapper: fix_desc(table, count). Saves %esi/%ebx, loads
 * args, and delegates to fix_desc_common with the return in %ebx. */
683 .globl EXT(fix_desc)
684LEXT(fix_desc)
685 pushl %ebp /* set up */
686 movl %esp,%ebp /* stack frame */
687 pushl %esi /* save registers */
688 pushl %ebx
689 movl B_ARG0,%esi /* point to first descriptor */
690 movl B_ARG1,%ecx /* get number of descriptors */
691 lea 0f,%ebx /* get return address */
692 jmp fix_desc_common /* call internal routine */
6930: popl %ebx /* restore registers */
694 popl %esi
695 leave /* pop stack frame */
696 ret /* return */
697
/* Stackless core: rewrites %ecx entries at %esi in place; the 0x14
 * mask on access-byte bits distinguishes gates (0x04) from segment
 * descriptors. Returns via jmp *%ebx. */
698fix_desc_common:
6990:
700 movw 6(%esi),%dx /* get access byte */
701 movb %dh,%al
702 andb $0x14,%al
703 cmpb $0x04,%al /* gate or descriptor? */
704 je 1f
705
706/* descriptor */
707 movl 0(%esi),%eax /* get base in eax */
708 rol $16,%eax /* swap 15..0 with 31..16 */
709 /* (15..0 in correct place) */
710 movb %al,%dl /* combine bits 23..16 with ACC1 */
711 /* in dh/dl */
712 movb %ah,7(%esi) /* store bits 31..24 in correct place */
713 movw 4(%esi),%ax /* move limit bits 0..15 to word 0 */
714 movl %eax,0(%esi) /* store (bytes 0..3 correct) */
715 movw %dx,4(%esi) /* store bytes 4..5 */
716 jmp 2f
717
718/* gate */
7191:
720 movw 4(%esi),%ax /* get selector */
721 shrb $4,%dl /* shift word count to proper place */
722 movw %dx,4(%esi) /* store word count / ACC1 */
723 movw 2(%esi),%dx /* get offset 16..31 */
724 movw %dx,6(%esi) /* store in correct place */
725 movw %ax,2(%esi) /* store selector in correct place */
7262:
727 addl $8,%esi /* bump to next descriptor */
728 loop 0b /* repeat */
729 jmp *%ebx /* all done */
730
730
731/*
732 * put arg in kbd leds and spin a while
733 * eats eax, ecx, edx
734 */
/* Debug aid: drives the PC/AT (8042) keyboard-controller LED command
 * by polling the status port, then busy-waits. Port/bit constants: */
735#define K_RDWR 0x60
736#define K_CMD_LEDS 0xed
737#define K_STATUS 0x64
738#define K_IBUF_FULL 0x02 /* input (to kbd) buffer full */
739#define K_OBUF_FULL 0x01 /* output (from kbd) buffer full */
740
741ENTRY(set_kbd_leds)
742 mov S_ARG0,%cl /* save led value */
743
7440: inb $(K_STATUS),%al /* get kbd status */
745 testb $(K_IBUF_FULL),%al /* input busy? */
746 jne 0b /* loop until not */
747
748 mov $(K_CMD_LEDS),%al /* K_CMD_LEDS */
749 outb %al,$(K_RDWR) /* to kbd */
750
7510: inb $(K_STATUS),%al /* get kbd status */
752 testb $(K_OBUF_FULL),%al /* output present? */
753 je 0b /* loop if not */
754
755 inb $(K_RDWR),%al /* read status (and discard) */
756
7570: inb $(K_STATUS),%al /* get kbd status */
758 testb $(K_IBUF_FULL),%al /* input busy? */
759 jne 0b /* loop until not */
760
761 mov %cl,%al /* move led value */
762 outb %al,$(K_RDWR) /* to kbd */
763
/* Crude delay so the LED state is visible before returning. */
764 movl $10000000,%ecx /* spin */
7650: nop
766 nop
767 loop 0b /* a while */
768
769 ret