/*
 * osfmk/i386/start.s — i386 boot/startup code, Apple XNU (xnu-792.12.6).
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60
#include <platforms.h>
#include <mach_kdb.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <i386/postcode.h>
#include <assym.s>

/* Scaled-index addressing: addr(,reg,4) — used below to index
 * 4-byte-entry arrays (e.g. cpu_data_ptr) by cpu number. */
#define	CX(addr,reg)	addr(,reg,4)

#include <i386/mp.h>
#include <i386/mp_slave_boot.h>

/*
 * GAS won't handle an intersegment jump with a relocatable offset.
 * Hand-assemble the far jump: opcode 0xea, 32-bit offset, 16-bit segment.
 */
#define	LJMP(segment,address)	\
	.byte	0xea		;\
	.long	address		;\
	.word	segment


/* Virtual <-> physical conversions for use while the kernel is still
 * running at its physical load address (paging off): the image is
 * linked at KERNELBASE, so phys = virt - KERNELBASE. */
#define	KVTOPHYS	(-KERNELBASE)
#define	KVTOLINEAR	LINEAR_KERNELBASE


#define	PA(addr)	((addr)+KVTOPHYS)
#define	VA(addr)	((addr)-KVTOPHYS)
90
91 .data
92 #if 0 /* Anyone need this? */
93 .align 2
94 .globl EXT(_kick_buffer_)
95 EXT(_kick_buffer_):
96 .long 1
97 .long 3
98 .set .,.+16836
99 #endif /* XXX */
100 /*
101 * Interrupt and bootup stack for initial processor.
102 */
103 /* in the __HIB section since the hibernate restore code uses this stack. */
104 .section __HIB, __data
105 .align ALIGN
106
107 .globl EXT(intstack)
108 EXT(intstack):
109 .globl EXT(gIOHibernateRestoreStack)
110 EXT(gIOHibernateRestoreStack):
111
112 .set ., .+INTSTACK_SIZE
113
114 .globl EXT(eintstack)
115 EXT(eintstack:)
116 .globl EXT(gIOHibernateRestoreStackEnd)
117 EXT(gIOHibernateRestoreStackEnd):
118
/*
 * Pointers to GDT and IDT (pseudo-descriptors for lgdt/lidt).
 * These contain linear addresses.
 */
	.align	ALIGN
	.globl	EXT(gdtptr)
LEXT(gdtptr)
	.word	Times(8,GDTSZ)-1	/* limit: GDTSZ 8-byte entries */
	.long	EXT(gdt)		/* base */

	.align	ALIGN
	.globl	EXT(idtptr)
LEXT(idtptr)
	.word	Times(8,IDTSZ)-1	/* limit: IDTSZ 8-byte entries */
	.long	EXT(idt)		/* base */

	/* back to the regular __DATA section. */

	.section __DATA, __data
138
#if	MACH_KDB
/*
 * Kernel debugger stack for each processor.
 */
	.align	ALIGN
	.globl	EXT(db_stack_store)
EXT(db_stack_store):
	.set	., .+(INTSTACK_SIZE*MAX_CPUS)

/*
 * Stack for last-ditch debugger task for each processor.
 */
	.align	ALIGN
	.globl	EXT(db_task_stack_store)
EXT(db_task_stack_store):
	.set	., .+(INTSTACK_SIZE*MAX_CPUS)

/*
 * per-processor kernel debugger stacks
 */
	.align	ALIGN
	.globl	EXT(kgdb_stack_store)
EXT(kgdb_stack_store):
	.set	., .+(INTSTACK_SIZE*MAX_CPUS)
#endif	/* MACH_KDB */
164
	.data
/*
 * start_lock is very special.  We initialize the
 * lock at allocation time rather than at run-time.
 * Although start_lock should be an instance of a
 * hw_lock, we hand-code all manipulation of the lock
 * because the hw_lock code may require function calls;
 * and we'd rather not introduce another dependency on
 * a working stack at this point.
 */
	.globl	EXT(start_lock)
EXT(start_lock):
	.long	0			/* synchronizes processor startup */

	.globl	EXT(master_is_up)
EXT(master_is_up):
	.long	0			/* 1 when OK for other processors */
					/* to start */
	.globl	EXT(mp_boot_pde)
EXT(mp_boot_pde):
	.long	0

_KERNend:	.long	0		/* phys addr end of kernel (just after bss) */
physfree:	.long	0		/* phys addr of next free page */

	.globl	_IdlePTD
_IdlePTD:	.long	0		/* phys addr of kernel PTD */
#ifdef	PAE
	.globl	_IdlePDPT
_IdlePDPT:	.long	0		/* phys addr of kernel PDPT */
#endif

	.globl	_KPTphys

_KPTphys:	.long	0		/* phys addr of kernel page tables */
201
/* Some handy macros */

/*
 * ALLOCPAGES(npages)
 * Carve npages pages from the physfree cursor and zero them.
 * On exit: %esi = phys addr of first page allocated; physfree advanced.
 * Clobbers %eax, %ecx, %edi, direction flag.
 */
#define ALLOCPAGES(npages)			\
	movl	PA(physfree), %esi	      ; \
	movl	$((npages) * PAGE_SIZE), %eax ; \
	addl	%esi, %eax		      ; \
	movl	%eax, PA(physfree)	      ; \
	movl	%esi, %edi		      ; \
	movl	$((npages) * PAGE_SIZE / 4),%ecx ; \
	xorl	%eax,%eax		      ; \
	cld				      ; \
	rep				      ; \
	stosl

/*
 * fillkpt — store ecx consecutive PTEs/PDEs into a table.
 *	eax = page frame address
 *	ebx = index into page table
 *	ecx = how many pages to map
 *	base = base address of page dir/table
 *	prot = protection bits
 * Entries are marked valid (PTE_V).  Clobbers eax, ebx, ecx.
 */
#define	fillkpt(base, prot)		  \
	shll	$(PTEINDX),%ebx		; \
	addl	base,%ebx		; \
	orl	$(PTE_V) ,%eax		; \
	orl	prot,%eax		; \
1:	movl	%eax,(%ebx)		; \
	addl	$(PAGE_SIZE),%eax	; /* increment physical address */ \
	addl	$(PTESIZE),%ebx		; /* next pte */ \
	loop	1b

/*
 * fillkptphys(prot) — identity-map into KPTphys starting at phys addr eax.
 *	eax = physical address
 *	ecx = how many pages to map
 *	prot = protection bits
 */
#define	fillkptphys(prot)		  \
	movl	%eax, %ebx		; \
	shrl	$(PAGE_SHIFT), %ebx	; \
	fillkpt(PA(EXT(KPTphys)), prot)
244
245
/*
 * All CPUs start here.
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 * %eax = pointer to kernbootstruct from the booter.
 * No usable stack yet.
 */
	.text
	.align	ALIGN
	.globl	EXT(pstart)
	.globl	EXT(_start)
LEXT(_start)
LEXT(pstart)
	mov	%eax, %ebx		/* save pointer to kernbootstruct */

	POSTCODE(PSTART_ENTRY);

	mov	$0,%ax			/* fs must be zeroed; */
	mov	%ax,%fs			/* some bootstrappers don't do this */
	mov	%ax,%gs

	/*
	 * Take start_lock: spin while held, then atomically exchange in 1.
	 * (Hand-coded; hw_lock routines would need a working stack.)
	 */
	jmp	1f
0:	cmpl	$0,PA(EXT(start_lock))
	jne	0b
1:	movl	$1,%eax			/* was "movb $1,%eax": byte mnemonic
					 * with a 32-bit register is an
					 * operand-size mismatch; xchgl below
					 * uses the full %eax */
	xchgl	%eax,PA(EXT(start_lock)) /* locked */
	testl	%eax,%eax
	jnz	0b			/* lost the race -- spin again */

	cmpl	$0,PA(EXT(master_is_up))	/* are we first? */
	jne	EXT(slave_start)		/* no -- system already up. */
	movl	$1,PA(EXT(master_is_up))	/* others become slaves */
	jmp	3f
3:
/*
 * Get startup parameters.
 */

	movl	%ebx,PA(EXT(boot_args_start))  /* Save KERNBOOTSTRUCT */

	movl	KADDR(%ebx), %eax	/* kernel load address ... */
	addl	KSIZE(%ebx), %eax	/* ... plus kernel size */
	addl	$(NBPG-1),%eax		/* round up to a page boundary */
	andl	$(-NBPG), %eax
	movl	%eax, PA(EXT(KERNend))	/* first phys page past the kernel */
	movl	%eax, PA(physfree)	/* page allocation cursor starts here */
	cld

/* allocate kernel page table pages */
	ALLOCPAGES(NKPT)
	movl	%esi,PA(EXT(KPTphys))

#ifdef PAE
/* allocate Page Table Directory Page */
	ALLOCPAGES(1)
	movl	%esi,PA(EXT(IdlePDPT))
#endif

/* allocate kernel page directory page */
	ALLOCPAGES(NPGPTD)
	movl	%esi,PA(EXT(IdlePTD))

/* map from zero to end of kernel (identity map of all low memory used) */
	xorl	%eax,%eax
	movl	PA(physfree),%ecx
	shrl	$(PAGE_SHIFT),%ecx
	fillkptphys( $(PTE_W) )

/* map page directory */
#ifdef PAE
	movl	PA(EXT(IdlePDPT)), %eax
	movl	$1, %ecx
	fillkptphys( $(PTE_W) )
#endif
	movl	PA(EXT(IdlePTD)),%eax
	movl	$(NPGPTD), %ecx
	fillkptphys( $(PTE_W) )

/* install a pde for temp double map of bottom of VA */
	movl	PA(EXT(KPTphys)),%eax
	xorl	%ebx,%ebx
	movl	$(NKPT), %ecx
	fillkpt(PA(EXT(IdlePTD)), $(PTE_W))

/* install pde's for page tables (kernel VA range at index KPTDI) */
	movl	PA(EXT(KPTphys)),%eax
	movl	$(KPTDI),%ebx
	movl	$(NKPT),%ecx
	fillkpt(PA(EXT(IdlePTD)), $(PTE_W))

/* install a pde recursively mapping page directory as a page table */
	movl	PA(EXT(IdlePTD)),%eax
	movl	$(PTDPTDI),%ebx
	movl	$(NPGPTD),%ecx
	fillkpt(PA(EXT(IdlePTD)), $(PTE_W))

#ifdef PAE
	/* point the PDPT's NPGPTD entries at the page directory pages */
	movl	PA(EXT(IdlePTD)), %eax
	xorl	%ebx, %ebx
	movl	$(NPGPTD), %ecx
	fillkpt(PA(EXT(IdlePDPT)), $0)
#endif

/* install a pde page for commpage use up in high memory */

	movl	PA(physfree),%eax	/* grab next phys page */
	movl	%eax,%ebx
	addl	$(PAGE_SIZE),%ebx
	movl	%ebx,PA(physfree)	/* show next free phys pg */
	movl	$(COMM_PAGE_BASE_ADDR),%ebx
	shrl	$(PDESHIFT),%ebx	/* index into pde page */
	movl	$(1), %ecx		/* # pdes to store */
	fillkpt(PA(EXT(IdlePTD)), $(PTE_W|PTE_U)) /* user has access! */

	movl	PA(physfree),%edi
	movl	%edi,PA(EXT(first_avail)) /* save first available phys addr */

#ifdef PAE
/*
 * We steal 0x4000 for a temp pdpt and 0x5000-0x8000
 * for temp pde pages in the PAE case.  Once we are
 * running at the proper virtual address we switch to
 * the PDPT/PDE's the master is using */

	/* clear pdpt page to be safe */
	xorl	%eax, %eax
	movl	$(PAGE_SIZE),%ecx
	movl	$(0x4000),%edi
	cld
	rep
	stosb

	/* build temp pdpt */
	movl	$(0x5000), %eax
	xorl	%ebx, %ebx
	movl	$(NPGPTD), %ecx
	fillkpt($(0x4000), $0)

	/* copy the NPGPTD pages of pdes */
	movl	PA(EXT(IdlePTD)),%eax
	movl	$0x5000,%ebx
	movl	$((PTEMASK+1)*NPGPTD),%ecx
1:	movl	0(%eax),%edx		/* PAE entries are 8 bytes: */
	movl	%edx,0(%ebx)		/* copy low word... */
	movl	4(%eax),%edx
	movl	%edx,4(%ebx)		/* ...and high word */
	addl	$(PTESIZE),%eax
	addl	$(PTESIZE),%ebx
	loop	1b
#else
/* create temp pde for slaves to use
   use unused lomem page and copy in IdlePTD */
	movl	PA(EXT(IdlePTD)),%eax
	movl	$0x4000,%ebx
	movl	$(PTEMASK+1),%ecx
1:	movl	0(%eax),%edx
	movl	%edx,0(%ebx)
	addl	$(PTESIZE),%eax
	addl	$(PTESIZE),%ebx
	loop	1b
#endif

	POSTCODE(PSTART_PAGE_TABLES);
410
/*
 * Fix initial descriptor tables: convert each table from the
 * compile-time "fake" layout to the hardware format (see fix_desc
 * below).  No stack yet, so the return address is passed in %ebx.
 */
	lea	PA(EXT(idt)),%esi	/* fix IDT */
	movl	$(IDTSZ),%ecx
	movl	$(PA(fix_idt_ret)),%ebx
	jmp	fix_desc_common		/* (cannot use stack) */
fix_idt_ret:

	lea	PA(EXT(gdt)),%esi	/* fix GDT */
	movl	$(GDTSZ),%ecx
	movl	$(PA(fix_gdt_ret)),%ebx
	jmp	fix_desc_common		/* (cannot use stack) */
fix_gdt_ret:

	lea	PA(EXT(ldt)),%esi	/* fix LDT */
	movl	$(LDTSZ),%ecx
	movl	$(PA(fix_ldt_ret)),%ebx
	jmp	fix_desc_common		/* (cannot use stack) */
fix_ldt_ret:

/*
 * Load the fixed-up descriptor tables.
 */

	lgdt	PA(EXT(gdtptr))		/* load GDT */
	lidt	PA(EXT(idtptr))		/* load IDT */

	POSTCODE(PSTART_BEFORE_PAGING);

/*
 * Turn on paging.
 */
#ifdef PAE
	movl	PA(EXT(IdlePDPT)), %eax	/* PDPT is the paging root under PAE */
	movl	%eax, %cr3

	movl	%cr4, %eax
	orl	$(CR4_PAE), %eax	/* enable PAE mode */
	movl	%eax, %cr4
#else
	movl	PA(EXT(IdlePTD)), %eax
	movl	%eax,%cr3
#endif

	movl	%cr0,%eax
	orl	$(CR0_PG|CR0_WP|CR0_PE),%eax
	movl	%eax,%cr0		/* to enable paging */

	LJMP(KERNEL_CS,EXT(vstart))	/* switch to kernel code segment */
461
/*
 * Master is now running with correct (virtual) addresses.
 * Set up segments, LDT, TSS, then switch to the bootup stack
 * and enter C.
 */
LEXT(vstart)
	POSTCODE(VSTART_ENTRY)	;

	mov	$(KERNEL_DS),%ax	/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss
	mov	%ax,EXT(ktss)+TSS_SS0	/* set kernel stack segment */
					/* for traps to kernel */
#if	MACH_KDB
	mov	%ax,EXT(dbtss)+TSS_SS0	/* likewise for debug task switch */
	mov	%cr3,%eax		/* get PDBR into debug TSS */
	mov	%eax,EXT(dbtss)+TSS_PDBR
	mov	$0,%eax
#endif

	movw	$(KERNEL_LDT),%ax	/* get LDT segment */
	lldt	%ax			/* load LDT */
#if	MACH_KDB
	mov	%ax,EXT(ktss)+TSS_LDT	/* store LDT in two TSS, as well... */
	mov	%ax,EXT(dbtss)+TSS_LDT	/* ...matters if we switch tasks */
#endif
	movw	$(KERNEL_TSS),%ax
	ltr	%ax			/* set up KTSS */

	mov	$(CPU_DATA_GS),%ax	/* gs selects per-cpu data */
	mov	%ax,%gs

	POSTCODE(VSTART_STACK_SWITCH);

	lea	EXT(eintstack),%esp	/* switch to the bootup stack */
	call	EXT(i386_preinit)

	POSTCODE(VSTART_EXIT);

	call	EXT(i386_init)		/* run C code */
	/*NOTREACHED*/
	hlt

	.text
	.globl __start
	.set __start, PA(EXT(pstart))	/* physical entry point for the booter */
507
508
/*
 * master_up is used by the master cpu to signify that it is done
 * with the interrupt stack, etc. See the code in pstart and svstart
 * that this interlocks with.
 * C-callable (void master_up(void)): releases start_lock so a waiting
 * slave may proceed.  The unlock is a hand-coded atomic exchange for
 * the same reason start_lock itself is hand-coded.
 */
	.align	ALIGN
	.globl	EXT(master_up)
LEXT(master_up)
	pushl	%ebp			/* set up */
	movl	%esp,%ebp		/* stack frame */
	movl	$0,%ecx			/* unlock start_lock */
	xchgl	%ecx,EXT(start_lock)	/* since we are no longer using */
					/* bootstrap stack */
	leave				/* pop stack frame */
	ret
524
/*
 * We aren't the first.  Call slave_main to initialize the processor
 * and get Mach going on it.
 * Entered from pstart while still holding start_lock; paging is off.
 */
	.align	ALIGN
	.globl	EXT(slave_start)
LEXT(slave_start)
	cli				/* disable interrupts, so we don't */
					/* need IDT for a while */

	POSTCODE(SLAVE_START_ENTRY);
/*
 * Turn on paging, using the temporary tables the master built at
 * phys 0x4000 (see the temp pdpt/pde setup in pstart above).
 */
	movl	$(EXT(spag_start)),%edx /* first paged code address */

#ifdef PAE
	movl	$(0x4000), %eax		/* temp pdpt built by the master */
	movl	%eax, %cr3

	movl	%cr4, %eax
	orl	$(CR4_PAE), %eax	/* enable PAE mode */
	movl	%eax, %cr4
#else
	movl	$(0x4000),%eax		/* tmp until we get mapped */
	movl	%eax,%cr3
#endif

	movl	%cr0,%eax
	orl	$(CR0_PG|CR0_WP|CR0_PE),%eax
	movl	%eax,%cr0		/* to enable paging */

	POSTCODE(SLAVE_START_EXIT);

	jmp	*%edx			/* flush prefetch queue */

/*
 * We are now paging, and can run with correct addresses.
 */
LEXT(spag_start)

	lgdt	PA(EXT(gdtptr))		/* load GDT */
	lidt	PA(EXT(idtptr))		/* load IDT */

	LJMP(KERNEL_CS,EXT(svstart))	/* switch to kernel code segment */
570
571
/*
 * Slave is now running with correct addresses.
 * Switch from the temp tables to the master's page tables, set up
 * segments and the per-cpu descriptor tables, then move to the
 * per-cpu interrupt stack and enter C.
 */
LEXT(svstart)

	POSTCODE(SVSTART_ENTRY);

#ifdef PAE
	movl	PA(EXT(IdlePDPT)), %eax	/* switch to the real paging root */
	movl	%eax, %cr3
#else
	movl	PA(EXT(IdlePTD)), %eax
	movl	%eax, %cr3
#endif

	mov	$(KERNEL_DS),%ax	/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss

	/*
	 * We're not quite through with the boot stack
	 * but we need to reset the stack pointer to the correct virtual
	 * address.
	 * And we need to offset above the address of pstart.
	 */
	movl	$(VA(MP_BOOTSTACK+MP_BOOT+4)), %esp

/*
 * Switch to the per-cpu descriptor tables
 */
	POSTCODE(SVSTART_DESC_INIT);

	CPU_NUMBER_FROM_LAPIC(%eax)	/* %eax = this cpu's number */
	movl	CX(EXT(cpu_data_ptr),%eax),%ecx
	movl	CPU_DESC_TABLEP(%ecx), %ecx

	/* build GDT/IDT pseudo-descriptors in scratch space on the stack */
	movw	$(GDTSZ*8-1),0(%esp)	/* set GDT size in GDT descriptor */
	leal	MP_GDT(%ecx),%edx
	movl	%edx,2(%esp)		/* point to local GDT (linear addr) */
	lgdt	0(%esp)			/* load new GDT */

	movw	$(IDTSZ*8-1),0(%esp)	/* set IDT size in IDT descriptor */
	leal	MP_IDT(%ecx),%edx
	movl	%edx,2(%esp)		/* point to local IDT (linear addr) */
	lidt	0(%esp)			/* load new IDT */

	movw	$(KERNEL_LDT),%ax	/* get LDT segment */
	lldt	%ax			/* load LDT */

	movw	$(KERNEL_TSS),%ax
	ltr	%ax			/* load new KTSS */

	mov	$(CPU_DATA_GS),%ax	/* gs selects per-cpu data */
	mov	%ax,%gs

/*
 * Get stack top from per-cpu data and switch
 */
	POSTCODE(SVSTART_STACK_SWITCH);

	movl	%gs:CPU_INT_STACK_TOP,%esp
	xorl	%ebp,%ebp		/* for completeness */

	movl	$0,%eax			/* unlock start_lock */
	xchgl	%eax,EXT(start_lock)	/* since we are no longer using */
					/* bootstrap stack */
	POSTCODE(SVSTART_EXIT);

	call	EXT(i386_init_slave)	/* start MACH */
	/*NOTREACHED*/
	hlt
644
/*
 * Convert a descriptor from fake to real format.
 *
 * Calls from assembly code:
 * %ebx = return address (physical) CANNOT USE STACK
 * %esi	= descriptor table address (physical)
 * %ecx = number of descriptors
 *
 * Calls from C:
 * 0(%esp) = return address
 * 4(%esp) = descriptor table address (physical)
 * 8(%esp) = number of descriptors
 *
 * Fake descriptor format:
 *	bytes 0..3		base 31..0
 *	bytes 4..5		limit 15..0
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			access byte 1
 *
 * Real descriptor format:
 *	bytes 0..1		limit 15..0
 *	bytes 2..3		base 15..0
 *	byte  4			base 23..16
 *	byte  5			access byte 1
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			base 31..24
 *
 * Fake gate format:
 *	bytes 0..3		offset
 *	bytes 4..5		selector
 *	byte  6			word count << 4 (to match fake descriptor)
 *	byte  7			access byte 1
 *
 * Real gate format:
 *	bytes 0..1		offset 15..0
 *	bytes 2..3		selector
 *	byte  4			word count
 *	byte  5			access byte 1
 *	bytes 6..7		offset 31..16
 */
	.globl	EXT(fix_desc)
LEXT(fix_desc)
	/* C-callable wrapper: marshal stack args into the register
	 * convention fix_desc_common expects. */
	pushl	%ebp			/* set up */
	movl	%esp,%ebp		/* stack frame */
	pushl	%esi			/* save registers */
	pushl	%ebx
	movl	B_ARG0,%esi		/* point to first descriptor */
	movl	B_ARG1,%ecx		/* get number of descriptors */
	lea	0f,%ebx			/* get return address */
	jmp	fix_desc_common		/* call internal routine */
0:	popl	%ebx			/* restore registers */
	popl	%esi
	leave				/* pop stack frame */
	ret				/* return */
699
/*
 * Internal loop: convert %ecx descriptors at %esi in place, then
 * jump to %ebx.  Stackless — callable before any stack exists.
 * Clobbers %eax, %edx, %ecx, %esi.
 */
fix_desc_common:
0:
	movw	6(%esi),%dx		/* get access byte */
	movb	%dh,%al
	andb	$0x14,%al		/* S bit + part of type field... */
	cmpb	$0x04,%al		/* gate or descriptor? */
	je	1f

/* descriptor */
	movl	0(%esi),%eax		/* get base in eax */
	rol	$16,%eax		/* swap 15..0 with 31..16 */
					/* (15..0 in correct place) */
	movb	%al,%dl			/* combine bits 23..16 with ACC1 */
					/* in dh/dl */
	movb	%ah,7(%esi)		/* store bits 31..24 in correct place */
	movw	4(%esi),%ax		/* move limit bits 0..15 to word 0 */
	movl	%eax,0(%esi)		/* store (bytes 0..3 correct) */
	movw	%dx,4(%esi)		/* store bytes 4..5 */
	jmp	2f

/* gate */
1:
	movw	4(%esi),%ax		/* get selector */
	shrb	$4,%dl			/* shift word count to proper place */
	movw	%dx,4(%esi)		/* store word count / ACC1 */
	movw	2(%esi),%dx		/* get offset 16..31 */
	movw	%dx,6(%esi)		/* store in correct place */
	movw	%ax,2(%esi)		/* store selector in correct place */
2:
	addl	$8,%esi			/* bump to next descriptor */
	loop	0b			/* repeat */
	jmp	*%ebx			/* all done */
732
/*
 * put arg in kbd leds and spin a while
 * eats eax, ecx, edx
 * Talks directly to the i8042 keyboard controller ports; used as a
 * crude visual debugging aid during early boot.
 */
#define	K_RDWR		0x60		/* kbd data port */
#define	K_CMD_LEDS	0xed		/* "set LEDs" command */
#define	K_STATUS	0x64		/* kbd controller status port */
#define	K_IBUF_FULL	0x02		/* input (to kbd) buffer full */
#define	K_OBUF_FULL	0x01		/* output (from kbd) buffer full */

ENTRY(set_kbd_leds)
	mov	S_ARG0,%cl		/* save led value */

0:	inb	$(K_STATUS),%al		/* get kbd status */
	testb	$(K_IBUF_FULL),%al	/* input busy? */
	jne	0b			/* loop until not */

	mov	$(K_CMD_LEDS),%al	/* K_CMD_LEDS */
	outb	%al,$(K_RDWR)		/* to kbd */

0:	inb	$(K_STATUS),%al		/* get kbd status */
	testb	$(K_OBUF_FULL),%al	/* output present? */
	je	0b			/* loop if not */

	inb	$(K_RDWR),%al		/* read status (and discard) */

0:	inb	$(K_STATUS),%al		/* get kbd status */
	testb	$(K_IBUF_FULL),%al	/* input busy? */
	jne	0b			/* loop until not */

	mov	%cl,%al			/* move led value */
	outb	%al,$(K_RDWR)		/* to kbd */

	movl	$10000000,%ecx		/* spin */
0:	nop
	nop
	loop	0b			/* a while */

	ret