/*
 * apple/xnu (release xnu-792.6.56) -- osfmk/i386/start.s
 * (provenance: git.saurik.com web view of the Apple XNU repository;
 *  original page header converted to a comment)
 */
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
11 * file.
12 *
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
20 *
21 * @APPLE_LICENSE_HEADER_END@
22 */
23 /*
24 * @OSF_COPYRIGHT@
25 */
26 /*
27 * Mach Operating System
28 * Copyright (c) 1991,1990 Carnegie Mellon University
29 * All Rights Reserved.
30 *
31 * Permission to use, copy, modify and distribute this software and its
32 * documentation is hereby granted, provided that both the copyright
33 * notice and this permission notice appear in all copies of the
34 * software, derivative works or modified versions, and any portions
35 * thereof, and that both notices appear in supporting documentation.
36 *
37 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
38 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
39 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
40 *
41 * Carnegie Mellon requests users of this software to return to
42 *
43 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
44 * School of Computer Science
45 * Carnegie Mellon University
46 * Pittsburgh PA 15213-3890
47 *
48 * any improvements or extensions that they make and grant Carnegie Mellon
49 * the rights to redistribute these changes.
50 */
51 /*
52 */
53
54 #include <platforms.h>
55 #include <mach_kdb.h>
56
57 #include <i386/asm.h>
58 #include <i386/proc_reg.h>
59 #include <i386/postcode.h>
60 #include <assym.s>
61
/* CX(addr,reg): index a table of 32-bit entries at addr by reg (scale 4). */
62 #define CX(addr,reg) addr(,reg,4)
63
64 #include <i386/mp.h>
65 #include <i386/mp_slave_boot.h>
66
67 /*
68 * GAS won't handle an intersegment jump with a relocatable offset.
69 */
/*
 * Hand-assembled far jump: 0xEA is the JMP ptr16:32 opcode, followed by
 * the 32-bit offset and then the 16-bit segment selector.
 */
70 #define LJMP(segment,address) \
71 .byte 0xea ;\
72 .long address ;\
73 .word segment
74
75
76
/* Kernel-virtual <-> physical address conversion, usable before paging is on. */
77 #define KVTOPHYS (-KERNELBASE)
78 #define KVTOLINEAR LINEAR_KERNELBASE
79
80
/* PA(): kernel virtual -> physical; VA(): physical -> kernel virtual. */
81 #define PA(addr) ((addr)+KVTOPHYS)
82 #define VA(addr) ((addr)-KVTOPHYS)
83
84 .data
85 #if 0 /* Anyone need this? */
86 .align 2
87 .globl EXT(_kick_buffer_)
88 EXT(_kick_buffer_):
89 .long 1
90 .long 3
91 .set .,.+16836
92 #endif /* XXX */
93 /*
94 * Interrupt and bootup stack for initial processor.
95 */
96 /* in the __HIB section since the hibernate restore code uses this stack. */
97 .section __HIB, __data
98 .align ALIGN
99
/* Base of the reserved stack region (stacks grow down toward this label). */
100 .globl EXT(intstack)
101 EXT(intstack):
102 .globl EXT(gIOHibernateRestoreStack)
103 EXT(gIOHibernateRestoreStack):
104
/* Reserve INTSTACK_SIZE bytes by advancing the location counter. */
105 .set ., .+INTSTACK_SIZE
106
/*
 * End (top) of the reserved region; vstart loads %esp with this address.
 * Fixed: colon belongs after the EXT() expansion, matching EXT(intstack):
 * and EXT(gIOHibernateRestoreStackEnd): below.
 */
107 .globl EXT(eintstack)
108 EXT(eintstack):
109 .globl EXT(gIOHibernateRestoreStackEnd)
110 EXT(gIOHibernateRestoreStackEnd):
111
112 /*
113 * Pointers to GDT and IDT. These contain linear addresses.
114 */
/*
 * Each is a pseudo-descriptor as consumed by lgdt/lidt: a 16-bit limit
 * (table size in bytes minus one, i.e. 8*entries - 1) followed by the
 * 32-bit linear base address of the table.
 */
115 .align ALIGN
116 .globl EXT(gdtptr)
117 LEXT(gdtptr)
118 .word Times(8,GDTSZ)-1
119 .long EXT(gdt)
120
121 .align ALIGN
122 .globl EXT(idtptr)
123 LEXT(idtptr)
124 .word Times(8,IDTSZ)-1
125 .long EXT(idt)
126
127 /* back to the regular __DATA section. */
128
129 .section __DATA, __data
130
131
132 #if MACH_KDB
133 /*
134 * Kernel debugger stack for each processor.
135 */
/* ".set ., .+N" reserves N bytes of zero-filled space in this section. */
136 .align ALIGN
137 .globl EXT(db_stack_store)
138 EXT(db_stack_store):
139 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
140
141 /*
142 * Stack for last-ditch debugger task for each processor.
143 */
144 .align ALIGN
145 .globl EXT(db_task_stack_store)
146 EXT(db_task_stack_store):
147 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
148
149 /*
150 * per-processor kernel debugger stacks
151 */
152 .align ALIGN
153 .globl EXT(kgdb_stack_store)
154 EXT(kgdb_stack_store):
155 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
156 #endif /* MACH_KDB */
157
158 .data
159 /*
160 * start_lock is very special. We initialize the
161 * lock at allocation time rather than at run-time.
162 * Although start_lock should be an instance of a
163 * hw_lock, we hand-code all manipulation of the lock
164 * because the hw_lock code may require function calls;
165 * and we'd rather not introduce another dependency on
166 * a working stack at this point.
167 */
/* Acquired with xchgl in pstart; released in master_up / svstart. */
168 .globl EXT(start_lock)
169 EXT(start_lock):
170 .long 0 /* synchronizes processor startup */
171
172 .globl EXT(master_is_up)
173 EXT(master_is_up):
174 .long 0 /* 1 when OK for other processors */
175 /* to start */
176 .globl EXT(mp_boot_pde)
177 EXT(mp_boot_pde):
178 .long 0
179
/* Filled in by pstart from the boot-args kernel address/size. */
180 _KERNend: .long 0 /* phys addr end of kernel (just after bss) */
181 physfree: .long 0 /* phys addr of next free page */
182
183 .globl _IdlePTD
184 _IdlePTD: .long 0 /* phys addr of kernel PTD */
185 #ifdef PAE
186 .globl _IdlePDPT
187 _IdlePDPT: .long 0 /* phys addr of kernel PDPT */
188 #endif
189
190 .globl _KPTphys
191
192 _KPTphys: .long 0 /* phys addr of kernel page tables */
193
194
195 /* Some handy macros */
196
/*
 * ALLOCPAGES(npages): carve npages from physfree and zero them.
 * On exit: %esi = phys base of the allocation, physfree advanced.
 * Clobbers %eax, %ecx, %edi; uses rep stosl so direction flag is cleared.
 */
197 #define ALLOCPAGES(npages) \
198 movl PA(physfree), %esi ; \
199 movl $((npages) * PAGE_SIZE), %eax ; \
200 addl %esi, %eax ; \
201 movl %eax, PA(physfree) ; \
202 movl %esi, %edi ; \
203 movl $((npages) * PAGE_SIZE / 4),%ecx ; \
204 xorl %eax,%eax ; \
205 cld ; \
206 rep ; \
207 stosl
208
209 /*
210 * fillkpt
211 * eax = page frame address
212 * ebx = index into page table
213 * ecx = how many pages to map
214 * base = base address of page dir/table
215 * prot = protection bits
216 */
/*
 * Writes %ecx entries starting at index %ebx of `base`; each entry is
 * %eax | PTE_V | prot, with %eax stepping by PAGE_SIZE per entry.
 * Clobbers %eax, %ebx, %ecx (loop counter).
 */
217 #define fillkpt(base, prot) \
218 shll $(PTEINDX),%ebx ; \
219 addl base,%ebx ; \
220 orl $(PTE_V) ,%eax ; \
221 orl prot,%eax ; \
222 1: movl %eax,(%ebx) ; \
223 addl $(PAGE_SIZE),%eax ; /* increment physical address */ \
224 addl $(PTESIZE),%ebx ; /* next pte */ \
225 loop 1b
226
227 /*
228 * fillkptphys(prot)
229 * eax = physical address
230 * ecx = how many pages to map
231 * prot = protection bits
232 */
/* Identity-style fill into KPTphys: index derived from the phys addr itself. */
233 #define fillkptphys(prot) \
234 movl %eax, %ebx ; \
235 shrl $(PAGE_SHIFT), %ebx ; \
236 fillkpt(PA(EXT(KPTphys)), prot)
237
238
239 /*
240 * All CPUs start here.
241 *
242 * Environment:
243 * protected mode, no paging, flat 32-bit address space.
244 * (Code/data/stack segments have base == 0, limit == 4G)
245 */
/*
 * On entry %eax carries the bootstrap's pointer to the kernbootstruct;
 * no usable stack exists yet, so everything below is register-only and
 * all memory references go through PA() (paging is still off).
 */
246 .text
247 .align ALIGN
248 .globl EXT(pstart)
249 .globl EXT(_start)
250 LEXT(_start)
251 LEXT(pstart)
252 mov %eax, %ebx /* save pointer to kernbootstruct */
253
254 POSTCODE(PSTART_ENTRY);
255
256 mov $0,%ax /* fs must be zeroed; */
257 mov %ax,%fs /* some bootstrappers don`t do this */
258 mov %ax,%gs
259
/*
 * Spin until start_lock is free, then grab it with an atomic xchgl.
 * Fixed: was "movb $1,%eax" -- the byte suffix is invalid with a 32-bit
 * register, and the following xchgl/testl operate on the full %eax.
 */
260 jmp 1f
261 0: cmpl $0,PA(EXT(start_lock))
262 jne 0b
263 1: movl $1,%eax
264 xchgl %eax,PA(EXT(start_lock)) /* locked */
265 testl %eax,%eax
266 jnz 0b
267
268 cmpl $0,PA(EXT(master_is_up)) /* are we first? */
269 jne EXT(slave_start) /* no -- system already up. */
270 movl $1,PA(EXT(master_is_up)) /* others become slaves */
271 jmp 3f
272 3:
273
274 /*
275 * Get startup parameters.
276 */
/*
 * Master CPU only: compute the physical end of the kernel image, then
 * build the boot page tables entirely through PA() physical addressing.
 */
277
278 movl %ebx,PA(EXT(boot_args_start)) /* Save KERNBOOTSTRUCT */
279
/* KERNend = round_up(kernel load addr + size, NBPG); free memory starts there. */
280 movl KADDR(%ebx), %eax
281 addl KSIZE(%ebx), %eax
282 addl $(NBPG-1),%eax
283 andl $(-NBPG), %eax
284 movl %eax, PA(EXT(KERNend))
285 movl %eax, PA(physfree)
286 cld
287
288 /* allocate kernel page table pages */
289 ALLOCPAGES(NKPT)
290 movl %esi,PA(EXT(KPTphys))
291
292 #ifdef PAE
293 /* allocate Page Table Directory Page */
294 ALLOCPAGES(1)
295 movl %esi,PA(EXT(IdlePDPT))
296 #endif
297
298 /* allocate kernel page directory page */
299 ALLOCPAGES(NPGPTD)
300 movl %esi,PA(EXT(IdlePTD))
301
302 /* map from zero to end of kernel */
303 xorl %eax,%eax
304 movl PA(physfree),%ecx
305 shrl $(PAGE_SHIFT),%ecx
306 fillkptphys( $(PTE_W) )
307
308 /* map page directory */
309 #ifdef PAE
310 movl PA(EXT(IdlePDPT)), %eax
311 movl $1, %ecx
312 fillkptphys( $(PTE_W) )
313 #endif
314 movl PA(EXT(IdlePTD)),%eax
315 movl $(NPGPTD), %ecx
316 fillkptphys( $(PTE_W) )
317
318 /* install a pde for temp double map of bottom of VA */
319 movl PA(EXT(KPTphys)),%eax
320 xorl %ebx,%ebx
321 movl $(NKPT), %ecx
322 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
323
324 /* install pde's for page tables */
325 movl PA(EXT(KPTphys)),%eax
326 movl $(KPTDI),%ebx
327 movl $(NKPT),%ecx
328 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
329
330 /* install a pde recursively mapping page directory as a page table */
331 movl PA(EXT(IdlePTD)),%eax
332 movl $(PTDPTDI),%ebx
333 movl $(NPGPTD),%ecx
334 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
335
336 #ifdef PAE
337 movl PA(EXT(IdlePTD)), %eax
338 xorl %ebx, %ebx
339 movl $(NPGPTD), %ecx
340 fillkpt(PA(EXT(IdlePDPT)), $0)
341 #endif
342
343 /* install a pde page for commpage use up in high memory */
344
345 movl PA(physfree),%eax /* grab next phys page */
346 movl %eax,%ebx
347 addl $(PAGE_SIZE),%ebx
348 movl %ebx,PA(physfree) /* show next free phys pg */
349 movl $(COMM_PAGE_BASE_ADDR),%ebx
350 shrl $(PDESHIFT),%ebx /* index into pde page */
351 movl $(1), %ecx /* # pdes to store */
352 fillkpt(PA(EXT(IdlePTD)), $(PTE_W|PTE_U)) /* user has access! */
353
354 movl PA(physfree),%edi
355 movl %edi,PA(EXT(first_avail)) /* save first available phys addr */
356
357 #ifdef PAE
358 /*
359 * We steal 0x4000 for a temp pdpt and 0x5000-0x8000
360 * for temp pde pages in the PAE case. Once we are
361 * running at the proper virtual address we switch to
362 * the PDPT/PDE's the master is using */
363
364 /* clear pdpt page to be safe */
365 xorl %eax, %eax
366 movl $(PAGE_SIZE),%ecx
367 movl $(0x4000),%edi
368 cld
369 rep
370 stosb
371
372 /* build temp pdpt */
373 movl $(0x5000), %eax
374 xorl %ebx, %ebx
375 movl $(NPGPTD), %ecx
376 fillkpt($(0x4000), $0)
377
378 /* copy the NPGPTD pages of pdes */
/* PAE entries are 8 bytes, so copy two dwords per entry. */
379 movl PA(EXT(IdlePTD)),%eax
380 movl $0x5000,%ebx
381 movl $((PTEMASK+1)*NPGPTD),%ecx
382 1: movl 0(%eax),%edx
383 movl %edx,0(%ebx)
384 movl 4(%eax),%edx
385 movl %edx,4(%ebx)
386 addl $(PTESIZE),%eax
387 addl $(PTESIZE),%ebx
388 loop 1b
389 #else
390 /* create temp pde for slaves to use
391 use unused lomem page and copy in IdlePTD */
392 movl PA(EXT(IdlePTD)),%eax
393 movl $0x4000,%ebx
394 movl $(PTEMASK+1),%ecx
395 1: movl 0(%eax),%edx
396 movl %edx,0(%ebx)
397 addl $(PTESIZE),%eax
398 addl $(PTESIZE),%ebx
399 loop 1b
400 #endif
401
402 POSTCODE(PSTART_PAGE_TABLES);
403
404 /*
405 * Fix initial descriptor tables.
406 */
/*
 * Convert IDT/GDT/LDT from the build-time "fake" layout to the CPU's
 * real descriptor layout via fix_desc_common. There is still no stack,
 * so the return address is passed in %ebx (physical).
 */
407 lea PA(EXT(idt)),%esi /* fix IDT */
408 movl $(IDTSZ),%ecx
409 movl $(PA(fix_idt_ret)),%ebx
410 jmp fix_desc_common /* (cannot use stack) */
411 fix_idt_ret:
412
413 lea PA(EXT(gdt)),%esi /* fix GDT */
414 movl $(GDTSZ),%ecx
415 movl $(PA(fix_gdt_ret)),%ebx
416 jmp fix_desc_common /* (cannot use stack) */
417 fix_gdt_ret:
418
419 lea PA(EXT(ldt)),%esi /* fix LDT */
420 movl $(LDTSZ),%ecx
421 movl $(PA(fix_ldt_ret)),%ebx
422 jmp fix_desc_common /* (cannot use stack) */
423 fix_ldt_ret:
424
425 /*
426 *
427 */
428
429 lgdt PA(EXT(gdtptr)) /* load GDT */
430 lidt PA(EXT(idtptr)) /* load IDT */
431
432 POSTCODE(PSTART_BEFORE_PAGING);
433
434 /*
435 * Turn on paging.
436 */
/* Load CR3 with the top-level table (PDPT under PAE, else PTD), then set PG. */
437 #ifdef PAE
438 movl PA(EXT(IdlePDPT)), %eax
439 movl %eax, %cr3
440
441 movl %cr4, %eax
442 orl $(CR4_PAE), %eax
443 movl %eax, %cr4
444 #else
445 movl PA(EXT(IdlePTD)), %eax
446 movl %eax,%cr3
447 #endif
448
449 movl %cr0,%eax
450 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
451 movl %eax,%cr0 /* to enable paging */
452
/* Far jump: reload %cs and land at vstart's kernel virtual address. */
453 LJMP(KERNEL_CS,EXT(vstart)) /* switch to kernel code segment */
454
455 /*
456 * Master is now running with correct addresses.
457 */
/*
 * Paging is on and we execute at kernel virtual addresses: load the
 * kernel data segments, LDT and TSS, then switch to the bootup interrupt
 * stack and enter C (i386_preinit, then i386_init -- never returns).
 */
458 LEXT(vstart)
459 POSTCODE(VSTART_ENTRY) ;
460
461 mov $(KERNEL_DS),%ax /* set kernel data segment */
462 mov %ax,%ds
463 mov %ax,%es
464 mov %ax,%ss
465 mov %ax,EXT(ktss)+TSS_SS0 /* set kernel stack segment */
466 /* for traps to kernel */
467 #if MACH_KDB
468 mov %ax,EXT(dbtss)+TSS_SS0 /* likewise for debug task switch */
469 mov %cr3,%eax /* get PDBR into debug TSS */
470 mov %eax,EXT(dbtss)+TSS_PDBR
471 mov $0,%eax
472 #endif
473
474 movw $(KERNEL_LDT),%ax /* get LDT segment */
475 lldt %ax /* load LDT */
476 #if MACH_KDB
477 mov %ax,EXT(ktss)+TSS_LDT /* store LDT in two TSS, as well... */
478 mov %ax,EXT(dbtss)+TSS_LDT /* ...matters if we switch tasks */
479 #endif
480 movw $(KERNEL_TSS),%ax
481 ltr %ax /* set up KTSS */
482
483 mov $(CPU_DATA_GS),%ax
484 mov %ax,%gs
485
486 POSTCODE(VSTART_STACK_SWITCH);
487
488 lea EXT(eintstack),%esp /* switch to the bootup stack */
489 call EXT(i386_preinit)
490
491 POSTCODE(VSTART_EXIT);
492
493 call EXT(i386_init) /* run C code */
494 /*NOTREACHED*/
495 hlt
496
/* Mach-O entry point alias: __start resolves to pstart's physical address. */
497 .text
498 .globl __start
499 .set __start, PA(EXT(pstart))
500
501
502 /*
503 * master_up is used by the master cpu to signify that it is done
504 * with the interrupt stack, etc. See the code in pstart and svstart
505 * that this interlocks with.
506 */
/* Releases start_lock with an atomic xchgl; callable from C (no args). */
507 .align ALIGN
508 .globl EXT(master_up)
509 LEXT(master_up)
510 pushl %ebp /* set up */
511 movl %esp,%ebp /* stack frame */
512 movl $0,%ecx /* unlock start_lock */
513 xchgl %ecx,EXT(start_lock) /* since we are no longer using */
514 /* bootstrap stack */
515 leave /* pop stack frame */
516 ret
517
518 /*
519 * We aren't the first. Call slave_main to initialize the processor
520 * and get Mach going on it.
521 */
/*
 * Slave CPUs enter here (from pstart, holding start_lock) with paging
 * still off. They enable paging via the temporary tables the master
 * left at phys 0x4000, then far-jump into svstart at virtual addresses.
 */
522 .align ALIGN
523 .globl EXT(slave_start)
524 LEXT(slave_start)
525 cli /* disable interrupts, so we don`t */
526 /* need IDT for a while */
527
528 POSTCODE(SLAVE_START_ENTRY);
529 /*
530 * Turn on paging.
531 */
532 movl $(EXT(spag_start)),%edx /* first paged code address */
533
534 #ifdef PAE
535 movl $(0x4000), %eax
536 movl %eax, %cr3
537
538 movl %cr4, %eax
539 orl $(CR4_PAE), %eax
540 movl %eax, %cr4
541 #else
542 movl $(0x4000),%eax /* tmp until we get mapped */
543 movl %eax,%cr3
544 #endif
545
546 movl %cr0,%eax
547 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
548 movl %eax,%cr0 /* to enable paging */
549
550 POSTCODE(SLAVE_START_EXIT);
551
552 jmp *%edx /* flush prefetch queue */
553
554 /*
555 * We are now paging, and can run with correct addresses.
556 */
557 LEXT(spag_start)
558
559 lgdt PA(EXT(gdtptr)) /* load GDT */
560 lidt PA(EXT(idtptr)) /* load IDT */
561
562 LJMP(KERNEL_CS,EXT(svstart)) /* switch to kernel code segment */
563
564
565 /*
566 * Slave is now running with correct addresses.
567 */
/*
 * Switch from the temporary 0x4000 tables to the master's page tables,
 * set up this CPU's segments and per-cpu descriptor tables, move onto
 * the per-cpu interrupt stack, release start_lock, and enter C.
 */
568 LEXT(svstart)
569
570 POSTCODE(SVSTART_ENTRY);
571
572 #ifdef PAE
573 movl PA(EXT(IdlePDPT)), %eax
574 movl %eax, %cr3
575 #else
576 movl PA(EXT(IdlePTD)), %eax
577 movl %eax, %cr3
578 #endif
579
580 mov $(KERNEL_DS),%ax /* set kernel data segment */
581 mov %ax,%ds
582 mov %ax,%es
583 mov %ax,%ss
584
585 /*
586 * We're not quite through with the boot stack
587 * but we need to reset the stack pointer to the correct virtual
588 * address.
589 * And we need to offset above the address of pstart.
590 */
591 movl $(VA(MP_BOOTSTACK+MP_BOOT+4)), %esp
592
593 /*
594 * Switch to the per-cpu descriptor tables
595 */
596 POSTCODE(SVSTART_DESC_INIT);
597
598 CPU_NUMBER_FROM_LAPIC(%eax)
599 movl CX(EXT(cpu_data_ptr),%eax),%ecx
600 movl CPU_DESC_TABLEP(%ecx), %ecx
601
/* Build an lgdt/lidt pseudo-descriptor in-place on the stack: word limit, dword base. */
602 movw $(GDTSZ*8-1),0(%esp) /* set GDT size in GDT descriptor */
603 leal MP_GDT(%ecx),%edx
604 movl %edx,2(%esp) /* point to local GDT (linear addr) */
605 lgdt 0(%esp) /* load new GDT */
606
607 movw $(IDTSZ*8-1),0(%esp) /* set IDT size in IDT descriptor */
608 leal MP_IDT(%ecx),%edx
609 movl %edx,2(%esp) /* point to local IDT (linear addr) */
610 lidt 0(%esp) /* load new IDT */
611
612 movw $(KERNEL_LDT),%ax /* get LDT segment */
613 lldt %ax /* load LDT */
614
615 movw $(KERNEL_TSS),%ax
616 ltr %ax /* load new KTSS */
617
618 mov $(CPU_DATA_GS),%ax
619 mov %ax,%gs
620
621 /*
622 * Get stack top from pre-cpu data and switch
623 */
624 POSTCODE(SVSTART_STACK_SWITCH);
625
626 movl %gs:CPU_INT_STACK_TOP,%esp
627 xorl %ebp,%ebp /* for completeness */
628
629 movl $0,%eax /* unlock start_lock */
630 xchgl %eax,EXT(start_lock) /* since we are no longer using */
631 /* bootstrap stack */
632 POSTCODE(SVSTART_EXIT);
633
634 call EXT(i386_init_slave) /* start MACH */
635 /*NOTREACHED*/
636 hlt
637
638 /*
639 * Convert a descriptor from fake to real format.
640 *
641 * Calls from assembly code:
642 * %ebx = return address (physical) CANNOT USE STACK
643 * %esi = descriptor table address (physical)
644 * %ecx = number of descriptors
645 *
646 * Calls from C:
647 * 0(%esp) = return address
648 * 4(%esp) = descriptor table address (physical)
649 * 8(%esp) = number of descriptors
650 *
651 * Fake descriptor format:
652 * bytes 0..3 base 31..0
653 * bytes 4..5 limit 15..0
654 * byte 6 access byte 2 | limit 19..16
655 * byte 7 access byte 1
656 *
657 * Real descriptor format:
658 * bytes 0..1 limit 15..0
659 * bytes 2..3 base 15..0
660 * byte 4 base 23..16
661 * byte 5 access byte 1
662 * byte 6 access byte 2 | limit 19..16
663 * byte 7 base 31..24
664 *
665 * Fake gate format:
666 * bytes 0..3 offset
667 * bytes 4..5 selector
668 * byte 6 word count << 4 (to match fake descriptor)
669 * byte 7 access byte 1
670 *
671 * Real gate format:
672 * bytes 0..1 offset 15..0
673 * bytes 2..3 selector
674 * byte 4 word count
675 * byte 5 access byte 1
676 * bytes 6..7 offset 31..16
677 */
/* C entry point: sets up a frame, then shares fix_desc_common below. */
678 .globl EXT(fix_desc)
679 LEXT(fix_desc)
680 pushl %ebp /* set up */
681 movl %esp,%ebp /* stack frame */
682 pushl %esi /* save registers */
683 pushl %ebx
684 movl B_ARG0,%esi /* point to first descriptor */
685 movl B_ARG1,%ecx /* get number of descriptors */
686 lea 0f,%ebx /* get return address */
687 jmp fix_desc_common /* call internal routine */
688 0: popl %ebx /* restore registers */
689 popl %esi
690 leave /* pop stack frame */
691 ret /* return */
692
/* Stackless core: rewrites %ecx entries at %esi in place, returns via jmp *%ebx. */
693 fix_desc_common:
694 0:
695 movw 6(%esi),%dx /* get access byte */
696 movb %dh,%al
697 andb $0x14,%al
698 cmpb $0x04,%al /* gate or descriptor? */
699 je 1f
700
701 /* descriptor */
702 movl 0(%esi),%eax /* get base in eax */
703 rol $16,%eax /* swap 15..0 with 31..16 */
704 /* (15..0 in correct place) */
705 movb %al,%dl /* combine bits 23..16 with ACC1 */
706 /* in dh/dl */
707 movb %ah,7(%esi) /* store bits 31..24 in correct place */
708 movw 4(%esi),%ax /* move limit bits 0..15 to word 0 */
709 movl %eax,0(%esi) /* store (bytes 0..3 correct) */
710 movw %dx,4(%esi) /* store bytes 4..5 */
711 jmp 2f
712
713 /* gate */
714 1:
715 movw 4(%esi),%ax /* get selector */
716 shrb $4,%dl /* shift word count to proper place */
717 movw %dx,4(%esi) /* store word count / ACC1 */
718 movw 2(%esi),%dx /* get offset 16..31 */
719 movw %dx,6(%esi) /* store in correct place */
720 movw %ax,2(%esi) /* store selector in correct place */
721 2:
722 addl $8,%esi /* bump to next descriptor */
723 loop 0b /* repeat */
724 jmp *%ebx /* all done */
725
726 /*
727 * put arg in kbd leds and spin a while
728 * eats eax, ecx, edx
729 */
/* i8042 keyboard controller ports and status bits. */
730 #define K_RDWR 0x60
731 #define K_CMD_LEDS 0xed
732 #define K_STATUS 0x64
733 #define K_IBUF_FULL 0x02 /* input (to kbd) buffer full */
734 #define K_OBUF_FULL 0x01 /* output (from kbd) buffer full */
735
/*
 * set_kbd_leds(value): debugging aid. Sends the K_CMD_LEDS command and
 * the led bitmask to the keyboard via port I/O, polling the status port
 * before each byte, then spins ~10M loop iterations as a visible delay.
 */
736 ENTRY(set_kbd_leds)
737 mov S_ARG0,%cl /* save led value */
738
739 0: inb $(K_STATUS),%al /* get kbd status */
740 testb $(K_IBUF_FULL),%al /* input busy? */
741 jne 0b /* loop until not */
742
743 mov $(K_CMD_LEDS),%al /* K_CMD_LEDS */
744 outb %al,$(K_RDWR) /* to kbd */
745
746 0: inb $(K_STATUS),%al /* get kbd status */
747 testb $(K_OBUF_FULL),%al /* output present? */
748 je 0b /* loop if not */
749
750 inb $(K_RDWR),%al /* read status (and discard) */
751
752 0: inb $(K_STATUS),%al /* get kbd status */
753 testb $(K_IBUF_FULL),%al /* input busy? */
754 jne 0b /* loop until not */
755
756 mov %cl,%al /* move led value */
757 outb %al,$(K_RDWR) /* to kbd */
758
759 movl $10000000,%ecx /* spin */
760 0: nop
761 nop
762 loop 0b /* a while */
763
764 ret