]> git.saurik.com Git - apple/xnu.git/blob - osfmk/i386/start.s
xnu-792.tar.gz
[apple/xnu.git] / osfmk / i386 / start.s
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 #include <platforms.h>
54 #include <mach_kdb.h>
55
56 #include <i386/asm.h>
57 #include <i386/proc_reg.h>
58 #include <i386/postcode.h>
59 #include <assym.s>
60
/* CX(addr,reg): address of element 'reg' in a table of 4-byte entries. */
61 #define CX(addr,reg) addr(,reg,4)
62
63 #include <i386/mp.h>
64 #include <i386/mp_slave_boot.h>
65
66 /*
67 * GAS won't handle an intersegment jump with a relocatable offset.
68 */
/*
 * Hand-encode a far jump: 0xEA opcode, 32-bit offset, then 16-bit
 * segment selector.  Used below to reload %cs with KERNEL_CS.
 */
69 #define LJMP(segment,address) \
70 .byte 0xea ;\
71 .long address ;\
72 .word segment
73
74
75
/*
 * Offset between kernel virtual and physical addresses: the kernel is
 * linked at KERNELBASE but executes at its physical load address until
 * paging is enabled below.
 */
76 #define KVTOPHYS (-KERNELBASE)
77 #define KVTOLINEAR LINEAR_KERNELBASE
78
79
/* PA()/VA(): convert a link-time virtual address to physical and back. */
80 #define PA(addr) ((addr)+KVTOPHYS)
81 #define VA(addr) ((addr)-KVTOPHYS)
82
83 .data
84 #if 0 /* Anyone need this? */
85 .align 2
86 .globl EXT(_kick_buffer_)
87 EXT(_kick_buffer_):
88 .long 1
89 .long 3
90 .set .,.+16836
91 #endif /* XXX */
92 /*
93 * Interrupt and bootup stack for initial processor.
94 */
/* NOTE(review): __HIB is presumably the hibernation-resident segment -- confirm. */
95 .section __HIB, __data
96 .align ALIGN
97
98 .globl EXT(intstack)
99 EXT(intstack):
100
/* Reserve INTSTACK_SIZE bytes; eintstack (below) labels the top. */
101 .set ., .+INTSTACK_SIZE
102
103 .globl EXT(eintstack)
/*
 * End (top) of the bootup interrupt stack; vstart loads this into %esp.
 * The colon must follow the EXT() macro invocation: the original
 * "EXT(eintstack:)" pastes the colon into the macro argument and only
 * yields a label by accident of token concatenation -- it breaks if
 * EXT() ever appends anything to its argument.
 */
104 EXT(eintstack):
105
106 /*
107 * Pointers to GDT and IDT. These contain linear addresses.
108 */
/*
 * 6-byte pseudo-descriptors in the format required by lgdt/lidt:
 * 16-bit limit (entries * 8 bytes, minus 1) followed by 32-bit base.
 */
109 .align ALIGN
110 .globl EXT(gdtptr)
111 LEXT(gdtptr)
112 .word Times(8,GDTSZ)-1
113 .long EXT(gdt)
114
115 .align ALIGN
116 .globl EXT(idtptr)
117 LEXT(idtptr)
118 .word Times(8,IDTSZ)-1
119 .long EXT(idt)
120
121 /* back to the regular __DATA section. */
122
123 .section __DATA, __data
124
125
126 #if MACH_KDB
127 /*
128 * Kernel debugger stack for each processor.
129 */
130 .align ALIGN
131 .globl EXT(db_stack_store)
132 EXT(db_stack_store):
/* One INTSTACK_SIZE stack per possible CPU, carved out contiguously. */
133 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
134
135 /*
136 * Stack for last-ditch debugger task for each processor.
137 */
138 .align ALIGN
139 .globl EXT(db_task_stack_store)
140 EXT(db_task_stack_store):
141 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
142
143 /*
144 * per-processor kernel debugger stacks
145 */
146 .align ALIGN
147 .globl EXT(kgdb_stack_store)
148 EXT(kgdb_stack_store):
149 .set ., .+(INTSTACK_SIZE*MAX_CPUS)
150 #endif /* MACH_KDB */
151
152 .data
153 /*
154 * start_lock is very special. We initialize the
155 * lock at allocation time rather than at run-time.
156 * Although start_lock should be an instance of a
157 * hw_lock, we hand-code all manipulation of the lock
158 * because the hw_lock code may require function calls;
159 * and we'd rather not introduce another dependency on
160 * a working stack at this point.
161 */
162 .globl EXT(start_lock)
163 EXT(start_lock):
164 .long 0 /* synchronizes processor startup */
165
166 .globl EXT(master_is_up)
167 EXT(master_is_up):
168 .long 0 /* 1 when OK for other processors */
169 /* to start */
170 .globl EXT(mp_boot_pde)
171 EXT(mp_boot_pde):
172 .long 0
173
/* Physical-address bookkeeping for the early bump allocator (ALLOCPAGES). */
174 _KERNend: .long 0 /* phys addr end of kernel (just after bss) */
175 physfree: .long 0 /* phys addr of next free page */
176
/* Underscore names: referenced both as _IdlePTD and via EXT() below. */
177 .globl _IdlePTD
178 _IdlePTD: .long 0 /* phys addr of kernel PTD */
179 #ifdef PAE
180 .globl _IdlePDPT
181 _IdlePDPT: .long 0 /* phys addr of kernel PDPT */
182 #endif
183
184 .globl _KPTphys
185
186 _KPTphys: .long 0 /* phys addr of kernel page tables */
187
188
189 /* Some handy macros */
190
/*
 * ALLOCPAGES(npages): bump-allocate 'npages' zero-filled pages from the
 * physfree watermark.  Returns the physical base address in %esi.
 * Clobbers %eax, %ecx, %edi and flags; advances physfree.
 */
191 #define ALLOCPAGES(npages) \
192 movl PA(physfree), %esi ; \
193 movl $((npages) * PAGE_SIZE), %eax ; \
194 addl %esi, %eax ; \
195 movl %eax, PA(physfree) ; \
196 movl %esi, %edi ; \
197 movl $((npages) * PAGE_SIZE / 4),%ecx ; \
198 xorl %eax,%eax ; \
199 cld ; \
200 rep ; \
201 stosl
202
203 /*
204 * fillkpt
205 * eax = page frame address
206 * ecx = how many pages to map
207 * ecx = how many pages to map
208 * base = base address of page dir/table
209 * prot = protection bits
210 */
/*
 * Stores %ecx entries into the table at 'base' starting at index %ebx,
 * mapping successive physical pages starting at %eax with PTE_V|prot.
 * Clobbers %eax, %ebx, %ecx and flags ('1:' is a local label).
 */
211 #define fillkpt(base, prot) \
212 shll $(PTEINDX),%ebx ; \
213 addl base,%ebx ; \
214 orl $(PTE_V) ,%eax ; \
215 orl prot,%eax ; \
216 1: movl %eax,(%ebx) ; \
217 addl $(PAGE_SIZE),%eax ; /* increment physical address */ \
218 addl $(PTESIZE),%ebx ; /* next pte */ \
219 loop 1b
220
221 /*
222 * fillkptphys(prot)
223 * eax = physical address
224 * ecx = how many pages to map
225 * prot = protection bits
226 */
/* Identity-map: derives the table index from the physical address itself. */
227 #define fillkptphys(prot) \
228 movl %eax, %ebx ; \
229 shrl $(PAGE_SHIFT), %ebx ; \
230 fillkpt(PA(EXT(KPTphys)), prot)
231
232
233 /*
234 * All CPUs start here.
235 *
236 * Environment:
237 * protected mode, no paging, flat 32-bit address space.
238 * (Code/data/stack segments have base == 0, limit == 4G)
239 */
/*
 * Entry from the bootstrap loader.  On entry %eax carries the pointer
 * to the kernbootstruct (per the comment below); there is no usable
 * stack until vstart/svstart, so control flow uses jumps with return
 * addresses passed in registers.
 */
240 .text
241 .align ALIGN
242 .globl EXT(pstart)
243 .globl EXT(_start)
244 LEXT(_start)
245 LEXT(pstart)
246 mov %eax, %ebx /* save pointer to kernbootstruct */
247
248 POSTCODE(PSTART_ENTRY);
249
250 mov $0,%ax /* fs must be zeroed; */
251 mov %ax,%fs /* some bootstrappers don`t do this */
252 mov %ax,%gs
253
/*
 * Acquire the global startup lock with a test-and-set loop: spin at 0:
 * while the lock word is non-zero, then attempt the atomic exchange at
 * 1:.  If the old value was non-zero another CPU won the race -- spin
 * again.  (xchg with a memory operand is implicitly locked on x86.)
 */
254 jmp 1f
255 0: cmpl $0,PA(EXT(start_lock))
256 jne 0b
/*
 * Was "movb $1,%eax": a 'b' suffix with a 32-bit register is an
 * operand-size mismatch GAS rejects, and the xchgl below swaps the
 * full 32 bits -- movl is what is intended.
 */
257 1: movl $1,%eax
258 xchgl %eax,PA(EXT(start_lock)) /* locked */
259 testl %eax,%eax
260 jnz 0b
261
/* First CPU to get here becomes the master; later CPUs branch to slave_start. */
262 cmpl $0,PA(EXT(master_is_up)) /* are we first? */
263 jne EXT(slave_start) /* no -- system already up. */
264 movl $1,PA(EXT(master_is_up)) /* others become slaves */
/* The jmp 3f / 3: pair is a vestigial no-op (jump to the next line). */
265 jmp 3f
266 3:
267
268 /*
269 * Get startup parameters.
270 */
271
272 movl %ebx,PA(EXT(boot_args_start)) /* Save KERNBOOTSTRUCT */
273
/* KERNend = page-rounded end of the loaded kernel (KADDR + KSIZE). */
274 movl KADDR(%ebx), %eax
275 addl KSIZE(%ebx), %eax
276 addl $(NBPG-1),%eax
277 andl $(-NBPG), %eax
278 movl %eax, PA(EXT(KERNend))
279 movl %eax, PA(physfree)
280 cld
281
282 /* allocate kernel page table pages */
283 ALLOCPAGES(NKPT)
284 movl %esi,PA(EXT(KPTphys))
285
286 #ifdef PAE
287 /* allocate Page Table Directory Page */
288 ALLOCPAGES(1)
289 movl %esi,PA(EXT(IdlePDPT))
290 #endif
291
292 /* allocate kernel page directory page */
293 ALLOCPAGES(NPGPTD)
294 movl %esi,PA(EXT(IdlePTD))
295
/* Identity-map physical 0..physfree so we survive the paging turn-on. */
296 /* map from zero to end of kernel */
297 xorl %eax,%eax
298 movl PA(physfree),%ecx
299 shrl $(PAGE_SHIFT),%ecx
300 fillkptphys( $(PTE_W) )
301
302 /* map page directory */
303 #ifdef PAE
304 movl PA(EXT(IdlePDPT)), %eax
305 movl $1, %ecx
306 fillkptphys( $(PTE_W) )
307 #endif
308 movl PA(EXT(IdlePTD)),%eax
309 movl $(NPGPTD), %ecx
310 fillkptphys( $(PTE_W) )
311
312 /* install a pde for temp double map of bottom of VA */
313 movl PA(EXT(KPTphys)),%eax
314 xorl %ebx,%ebx
315 movl $(NKPT), %ecx
316 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
317
/* Same page tables also appear at KPTDI: kernel virtual == KERNELBASE view. */
318 /* install pde's for page tables */
319 movl PA(EXT(KPTphys)),%eax
320 movl $(KPTDI),%ebx
321 movl $(NKPT),%ecx
322 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
323
324 /* install a pde recursively mapping page directory as a page table */
325 movl PA(EXT(IdlePTD)),%eax
326 movl $(PTDPTDI),%ebx
327 movl $(NPGPTD),%ecx
328 fillkpt(PA(EXT(IdlePTD)), $(PTE_W))
329
330 #ifdef PAE
331 movl PA(EXT(IdlePTD)), %eax
332 xorl %ebx, %ebx
333 movl $(NPGPTD), %ecx
334 fillkpt(PA(EXT(IdlePDPT)), $0)
335 #endif
336
337 /* install a pde page for commpage use up in high memory */
338
339 movl PA(physfree),%eax /* grab next phys page */
340 movl %eax,%ebx
341 addl $(PAGE_SIZE),%ebx
342 movl %ebx,PA(physfree) /* show next free phys pg */
343 movl $(COMM_PAGE_BASE_ADDR),%ebx
344 shrl $(PDESHIFT),%ebx /* index into pde page */
345 movl $(1), %ecx /* # pdes to store */
346 fillkpt(PA(EXT(IdlePTD)), $(PTE_W|PTE_U)) /* user has access! */
347
348 movl PA(physfree),%edi
349 movl %edi,PA(EXT(first_avail)) /* save first available phys addr */
350
351 #ifdef PAE
352 /*
353 * We steal 0x4000 for a temp pdpt and 0x5000-0x8000
354 * for temp pde pages in the PAE case. Once we are
355 * running at the proper virtual address we switch to
356 * the PDPT/PDE's the master is using */
357
358 /* clear pdpt page to be safe */
359 xorl %eax, %eax
360 movl $(PAGE_SIZE),%ecx
361 movl $(0x4000),%edi
362 cld
363 rep
364 stosb
365
366 /* build temp pdpt */
367 movl $(0x5000), %eax
368 xorl %ebx, %ebx
369 movl $(NPGPTD), %ecx
370 fillkpt($(0x4000), $0)
371
/* PAE entries are 8 bytes: copy both halves of each pde. */
372 /* copy the NPGPTD pages of pdes */
373 movl PA(EXT(IdlePTD)),%eax
374 movl $0x5000,%ebx
375 movl $((PTEMASK+1)*NPGPTD),%ecx
376 1: movl 0(%eax),%edx
377 movl %edx,0(%ebx)
378 movl 4(%eax),%edx
379 movl %edx,4(%ebx)
380 addl $(PTESIZE),%eax
381 addl $(PTESIZE),%ebx
382 loop 1b
383 #else
384 /* create temp pde for slaves to use
385 use unused lomem page and copy in IdlePTD */
386 movl PA(EXT(IdlePTD)),%eax
387 movl $0x4000,%ebx
388 movl $(PTEMASK+1),%ecx
389 1: movl 0(%eax),%edx
390 movl %edx,0(%ebx)
391 addl $(PTESIZE),%eax
392 addl $(PTESIZE),%ebx
393 loop 1b
394 #endif
395
396 POSTCODE(PSTART_PAGE_TABLES);
397
398 /*
399 * Fix initial descriptor tables.
400 */
/*
 * fix_desc_common converts statically-initialized "fake" descriptors to
 * the hardware layout.  There is still no stack, so the return address
 * is passed (as a physical address) in %ebx and reached via jmp.
 */
401 lea PA(EXT(idt)),%esi /* fix IDT */
402 movl $(IDTSZ),%ecx
403 movl $(PA(fix_idt_ret)),%ebx
404 jmp fix_desc_common /* (cannot use stack) */
405 fix_idt_ret:
406
407 lea PA(EXT(gdt)),%esi /* fix GDT */
408 movl $(GDTSZ),%ecx
409 movl $(PA(fix_gdt_ret)),%ebx
410 jmp fix_desc_common /* (cannot use stack) */
411 fix_gdt_ret:
412
413 lea PA(EXT(ldt)),%esi /* fix LDT */
414 movl $(LDTSZ),%ecx
415 movl $(PA(fix_ldt_ret)),%ebx
416 jmp fix_desc_common /* (cannot use stack) */
417 fix_ldt_ret:
418
419 /*
420 *
421 */
422
423 lgdt PA(EXT(gdtptr)) /* load GDT */
424 lidt PA(EXT(idtptr)) /* load IDT */
425
426 POSTCODE(PSTART_BEFORE_PAGING);
427
428 /*
429 * Turn on paging.
430 */
/* Point %cr3 at the top-level structure (PDPT under PAE, else the PTD). */
431 #ifdef PAE
432 movl PA(EXT(IdlePDPT)), %eax
433 movl %eax, %cr3
434
435 movl %cr4, %eax
436 orl $(CR4_PAE), %eax
437 movl %eax, %cr4
438 #else
439 movl PA(EXT(IdlePTD)), %eax
440 movl %eax,%cr3
441 #endif
442
/* CR0_WP keeps supervisor writes honoring read-only pages. */
443 movl %cr0,%eax
444 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
445 movl %eax,%cr0 /* to enable paging */
446
/* Far jump reloads %cs and lands at the kernel-virtual address of vstart. */
447 LJMP(KERNEL_CS,EXT(vstart)) /* switch to kernel code segment */
448
449 /*
450 * Master is now running with correct addresses.
451 */
/*
 * Paging is on; set up the master's segments, LDT, TSS and per-cpu
 * %gs, switch to the bootup interrupt stack, then enter C.
 */
452 LEXT(vstart)
453 POSTCODE(VSTART_ENTRY) ;
454
455 mov $(KERNEL_DS),%ax /* set kernel data segment */
456 mov %ax,%ds
457 mov %ax,%es
458 mov %ax,%ss
459 mov %ax,EXT(ktss)+TSS_SS0 /* set kernel stack segment */
460 /* for traps to kernel */
461 #if MACH_KDB
462 mov %ax,EXT(dbtss)+TSS_SS0 /* likewise for debug task switch */
463 mov %cr3,%eax /* get PDBR into debug TSS */
464 mov %eax,EXT(dbtss)+TSS_PDBR
465 mov $0,%eax
466 #endif
467
468 movw $(KERNEL_LDT),%ax /* get LDT segment */
469 lldt %ax /* load LDT */
470 #if MACH_KDB
471 mov %ax,EXT(ktss)+TSS_LDT /* store LDT in two TSS, as well... */
472 mov %ax,EXT(dbtss)+TSS_LDT /* ...matters if we switch tasks */
473 #endif
474 movw $(KERNEL_TSS),%ax
475 ltr %ax /* set up KTSS */
476
477 mov $(CPU_DATA_GS),%ax
478 mov %ax,%gs
479
480 POSTCODE(VSTART_STACK_SWITCH);
481
/* First real stack: top of the interrupt stack reserved in __HIB above. */
482 lea EXT(eintstack),%esp /* switch to the bootup stack */
483 call EXT(i386_preinit)
484
485 POSTCODE(VSTART_EXIT);
486
487 call EXT(i386_init) /* run C code */
488 /*NOTREACHED*/
489 hlt
490
/* Linker entry point: the physical address of pstart. */
491 .text
492 .globl __start
493 .set __start, PA(EXT(pstart))
494
495
496 /*
497 * master_up is used by the master cpu to signify that it is done
498 * with the interrupt stack, etc. See the code in pstart and svstart
499 * that this interlocks with.
500 */
/*
 * Releases start_lock so the next waiting CPU may proceed.  The release
 * uses xchgl (implicitly locked) rather than a plain store, matching
 * the hand-coded acquire in pstart.  Clobbers %ecx.
 */
501 .align ALIGN
502 .globl EXT(master_up)
503 LEXT(master_up)
504 pushl %ebp /* set up */
505 movl %esp,%ebp /* stack frame */
506 movl $0,%ecx /* unlock start_lock */
507 xchgl %ecx,EXT(start_lock) /* since we are no longer using */
508 /* bootstrap stack */
509 leave /* pop stack frame */
510 ret
511
512 /*
513 * We aren't the first. Call slave_main to initialize the processor
514 * and get Mach going on it.
515 */
/*
 * Secondary (slave) CPU path: enable paging using the temporary page
 * directory the master built at physical 0x4000, then far-jump into
 * the kernel code segment at svstart.
 */
516 .align ALIGN
517 .globl EXT(slave_start)
518 LEXT(slave_start)
519 cli /* disable interrupts, so we don`t */
520 /* need IDT for a while */
521
522 POSTCODE(SLAVE_START_ENTRY);
523 /*
524 * Turn on paging.
525 */
526 movl $(EXT(spag_start)),%edx /* first paged code address */
527
528 #ifdef PAE
529 movl $(0x4000), %eax
530 movl %eax, %cr3
531
532 movl %cr4, %eax
533 orl $(CR4_PAE), %eax
534 movl %eax, %cr4
535 #else
536 movl $(0x4000),%eax /* tmp until we get mapped */
537 movl %eax,%cr3
538 #endif
539
540 movl %cr0,%eax
541 orl $(CR0_PG|CR0_WP|CR0_PE),%eax
542 movl %eax,%cr0 /* to enable paging */
543
544 POSTCODE(SLAVE_START_EXIT);
545
/* Indirect jump via %edx serializes fetch after the paging switch. */
546 jmp *%edx /* flush prefetch queue */
547
548 /*
549 * We are now paging, and can run with correct addresses.
550 */
551 LEXT(spag_start)
552
553 lgdt PA(EXT(gdtptr)) /* load GDT */
554 lidt PA(EXT(idtptr)) /* load IDT */
555
556 LJMP(KERNEL_CS,EXT(svstart)) /* switch to kernel code segment */
557
558
559 /*
560 * Slave is now running with correct addresses.
561 */
/*
 * Switch this slave from the temporary page directory to the master's,
 * install its per-cpu GDT/IDT/LDT/TSS, move onto its own interrupt
 * stack, release start_lock, and enter C at i386_init_slave.
 */
562 LEXT(svstart)
563
564 POSTCODE(SVSTART_ENTRY);
565
566 #ifdef PAE
567 movl PA(EXT(IdlePDPT)), %eax
568 movl %eax, %cr3
569 #else
570 movl PA(EXT(IdlePTD)), %eax
571 movl %eax, %cr3
572 #endif
573
574 mov $(KERNEL_DS),%ax /* set kernel data segment */
575 mov %ax,%ds
576 mov %ax,%es
577 mov %ax,%ss
578
579 /*
580 * We're not quite through with the boot stack
581 * but we need to reset the stack pointer to the correct virtual
582 * address.
583 * And we need to offset above the address of pstart.
584 */
585 movl $(VA(MP_BOOTSTACK+MP_BOOT+4)), %esp
586
587 /*
588 * Switch to the per-cpu descriptor tables
589 */
590 POSTCODE(SVSTART_DESC_INIT);
591
592 CPU_NUMBER_FROM_LAPIC(%eax)
593 movl CX(EXT(cpu_data_ptr),%eax),%ecx
594 movl CPU_DESC_TABLEP(%ecx), %ecx
595
/* Build the 6-byte lgdt/lidt pseudo-descriptors in scratch space at 0(%esp). */
596 movw $(GDTSZ*8-1),0(%esp) /* set GDT size in GDT descriptor */
597 leal MP_GDT(%ecx),%edx
598 movl %edx,2(%esp) /* point to local GDT (linear addr) */
599 lgdt 0(%esp) /* load new GDT */
600
601 movw $(IDTSZ*8-1),0(%esp) /* set IDT size in IDT descriptor */
602 leal MP_IDT(%ecx),%edx
603 movl %edx,2(%esp) /* point to local IDT (linear addr) */
604 lidt 0(%esp) /* load new IDT */
605
606 movw $(KERNEL_LDT),%ax /* get LDT segment */
607 lldt %ax /* load LDT */
608
609 movw $(KERNEL_TSS),%ax
610 ltr %ax /* load new KTSS */
611
612 mov $(CPU_DATA_GS),%ax
613 mov %ax,%gs
614
615 /*
616 * Get stack top from pre-cpu data and switch
617 */
618 POSTCODE(SVSTART_STACK_SWITCH);
619
620 movl %gs:CPU_INT_STACK_TOP,%esp
621 xorl %ebp,%ebp /* for completeness */
622
/* Atomic release of start_lock (matches the xchgl acquire in pstart). */
623 movl $0,%eax /* unlock start_lock */
624 xchgl %eax,EXT(start_lock) /* since we are no longer using */
625 /* bootstrap stack */
626 POSTCODE(SVSTART_EXIT);
627
628 call EXT(i386_init_slave) /* start MACH */
629 /*NOTREACHED*/
630 hlt
631
632 /*
633 * Convert a descriptor from fake to real format.
634 *
635 * Calls from assembly code:
636 * %ebx = return address (physical) CANNOT USE STACK
637 * %esi = descriptor table address (physical)
638 * %ecx = number of descriptors
639 *
640 * Calls from C:
641 * 0(%esp) = return address
642 * 4(%esp) = descriptor table address (physical)
643 * 8(%esp) = number of descriptors
644 *
645 * Fake descriptor format:
646 * bytes 0..3 base 31..0
647 * bytes 4..5 limit 15..0
648 * byte 6 access byte 2 | limit 19..16
649 * byte 7 access byte 1
650 *
651 * Real descriptor format:
652 * bytes 0..1 limit 15..0
653 * bytes 2..3 base 15..0
654 * byte 4 base 23..16
655 * byte 5 access byte 1
656 * byte 6 access byte 2 | limit 19..16
657 * byte 7 base 31..24
658 *
659 * Fake gate format:
660 * bytes 0..3 offset
661 * bytes 4..5 selector
662 * byte 6 word count << 4 (to match fake descriptor)
663 * byte 7 access byte 1
664 *
665 * Real gate format:
666 * bytes 0..1 offset 15..0
667 * bytes 2..3 selector
668 * byte 4 word count
669 * byte 5 access byte 1
670 * bytes 6..7 offset 31..16
671 */
/* C-callable wrapper: fix_desc(table, count).  Clobbers %eax, %ecx, %edx. */
672 .globl EXT(fix_desc)
673 LEXT(fix_desc)
674 pushl %ebp /* set up */
675 movl %esp,%ebp /* stack frame */
676 pushl %esi /* save registers */
677 pushl %ebx
678 movl B_ARG0,%esi /* point to first descriptor */
679 movl B_ARG1,%ecx /* get number of descriptors */
680 lea 0f,%ebx /* get return address */
681 jmp fix_desc_common /* call internal routine */
682 0: popl %ebx /* restore registers */
683 popl %esi
684 leave /* pop stack frame */
685 ret /* return */
686
/*
 * Stackless core: rewrites %ecx 8-byte entries at %esi in place, then
 * jumps to the address in %ebx.  Byte 6 bit tests distinguish gates
 * from segment descriptors (mask 0x14 of access byte 2, value 0x04).
 */
687 fix_desc_common:
688 0:
689 movw 6(%esi),%dx /* get access byte */
690 movb %dh,%al
691 andb $0x14,%al
692 cmpb $0x04,%al /* gate or descriptor? */
693 je 1f
694
695 /* descriptor */
696 movl 0(%esi),%eax /* get base in eax */
697 rol $16,%eax /* swap 15..0 with 31..16 */
698 /* (15..0 in correct place) */
699 movb %al,%dl /* combine bits 23..16 with ACC1 */
700 /* in dh/dl */
701 movb %ah,7(%esi) /* store bits 31..24 in correct place */
702 movw 4(%esi),%ax /* move limit bits 0..15 to word 0 */
703 movl %eax,0(%esi) /* store (bytes 0..3 correct) */
704 movw %dx,4(%esi) /* store bytes 4..5 */
705 jmp 2f
706
707 /* gate */
708 1:
709 movw 4(%esi),%ax /* get selector */
710 shrb $4,%dl /* shift word count to proper place */
711 movw %dx,4(%esi) /* store word count / ACC1 */
712 movw 2(%esi),%dx /* get offset 16..31 */
713 movw %dx,6(%esi) /* store in correct place */
714 movw %ax,2(%esi) /* store selector in correct place */
715 2:
716 addl $8,%esi /* bump to next descriptor */
717 loop 0b /* repeat */
718 jmp *%ebx /* all done */
719
720 /*
721 * put arg in kbd leds and spin a while
722 * eats eax, ecx, edx
723 */
/* 8042 keyboard-controller ports: data at 0x60, status/command at 0x64. */
724 #define K_RDWR 0x60
725 #define K_CMD_LEDS 0xed
726 #define K_STATUS 0x64
727 #define K_IBUF_FULL 0x02 /* input (to kbd) buffer full */
728 #define K_OBUF_FULL 0x01 /* output (from kbd) buffer full */
729
/*
 * Debug aid: write S_ARG0 into the keyboard LEDs.  Each write to the
 * data port is preceded by polling the status port until the input
 * buffer drains; the command's ACK byte is read and discarded.
 */
730 ENTRY(set_kbd_leds)
731 mov S_ARG0,%cl /* save led value */
732
733 0: inb $(K_STATUS),%al /* get kbd status */
734 testb $(K_IBUF_FULL),%al /* input busy? */
735 jne 0b /* loop until not */
736
737 mov $(K_CMD_LEDS),%al /* K_CMD_LEDS */
738 outb %al,$(K_RDWR) /* to kbd */
739
740 0: inb $(K_STATUS),%al /* get kbd status */
741 testb $(K_OBUF_FULL),%al /* output present? */
742 je 0b /* loop if not */
743
744 inb $(K_RDWR),%al /* read status (and discard) */
745
746 0: inb $(K_STATUS),%al /* get kbd status */
747 testb $(K_IBUF_FULL),%al /* input busy? */
748 jne 0b /* loop until not */
749
750 mov %cl,%al /* move led value */
751 outb %al,$(K_RDWR) /* to kbd */
752
/* Busy-wait so the LED state is visible before the caller continues. */
753 movl $10000000,%ecx /* spin */
754 0: nop
755 nop
756 loop 0b /* a while */
757
758 ret