/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License").  You may not use this file except in compliance with the
 * License.  Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <platforms.h>
#include <cpus.h>
#include <mach_kdb.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <assym.s>

#if	NCPUS > 1

#define	CX(addr,reg)	addr(,reg,4)

#else

#define	CPU_NUMBER(reg)
#define	CX(addr,reg)	addr

#endif	/* NCPUS > 1 */
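
/*
 * For illustration: with NCPUS > 1, CX() scales a cpu number into a table
 * of longword (4-byte) entries, so CX(EXT(interrupt_stack),%eax) expands
 * to EXT(interrupt_stack)(,%eax,4), the %eax'th pointer in the per-cpu
 * interrupt_stack array (see its use in svstart below).  On a uniprocessor
 * it collapses to the bare symbol, and CPU_NUMBER() to nothing.
 */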

#include <i386/AT386/mp/mp.h>

/*
 * GAS won't handle an intersegment jump with a relocatable offset.
 */
#define	LJMP(segment,address)	\
	.byte	0xea		;\
	.long	address		;\
	.word	segment
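
/*
 * LJMP hand-assembles the far jump: 0xea is the direct "jmp ptr16:32"
 * opcode in a 32-bit code segment, followed by the 32-bit offset and then
 * the 16-bit segment selector, which is exactly the byte/long/word layout
 * emitted above.
 */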


#define	KVTOPHYS	(-KERNELBASE)
#define	KVTOLINEAR	LINEAR_KERNELBASE


#define	PA(addr)	(addr)+KVTOPHYS
#define	VA(addr)	(addr)-KVTOPHYS
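
/*
 * A sketch of the intent (the exact KERNELBASE value comes from the
 * platform headers): the kernel is linked at virtual addresses relative to
 * KERNELBASE, but until paging is enabled the CPU issues physical
 * addresses.  PA(sym) rewrites a link-time symbol address into the
 * physical address it is loaded at, and VA() is the inverse, which is why
 * the pre-paging code below touches variables as PA(EXT(name)).
 */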

	.data
	.align	2
	.globl	EXT(_kick_buffer_)
EXT(_kick_buffer_):
	.long	1
	.long	3
	.set	.,.+16836
/*
 * Interrupt and bootup stack for initial processor.
 */
	.align	ALIGN
	.globl	EXT(intstack)
EXT(intstack):
	.set	., .+INTSTACK_SIZE
	.globl	EXT(eintstack)
EXT(eintstack):

#if	NCPUS == 1
	.globl	EXT(int_stack_high)	/* all interrupt stacks */
EXT(int_stack_high):			/* must lie below this */
	.long	EXT(eintstack)		/* address */

	.globl	EXT(int_stack_top)	/* top of interrupt stack */
EXT(int_stack_top):
	.long	EXT(eintstack)
#endif

#if	MACH_KDB
/*
 * Kernel debugger stack for each processor.
 */
	.align	ALIGN
	.globl	EXT(db_stack_store)
EXT(db_stack_store):
	.set	., .+(INTSTACK_SIZE*NCPUS)

/*
 * Stack for last-ditch debugger task for each processor.
 */
	.align	ALIGN
	.globl	EXT(db_task_stack_store)
EXT(db_task_stack_store):
	.set	., .+(INTSTACK_SIZE*NCPUS)
#endif	/* MACH_KDB */

/*
 * per-processor kernel debugger stacks
 */
	.align	ALIGN
	.globl	EXT(kgdb_stack_store)
EXT(kgdb_stack_store):
	.set	., .+(INTSTACK_SIZE*NCPUS)


/*
 * Pointers to GDT and IDT.  These contain linear addresses.
 */
	.align	ALIGN
	.globl	EXT(gdtptr)
LEXT(gdtptr)
	.word	Times(8,GDTSZ)-1
	.long	EXT(gdt)+KVTOLINEAR

	.align	ALIGN
	.globl	EXT(idtptr)
LEXT(idtptr)
	.word	Times(8,IDTSZ)-1
	.long	EXT(idt)+KVTOLINEAR

#if	NCPUS > 1
	.data
/*
 * start_lock is very special.  We initialize the
 * lock at allocation time rather than at run-time.
 * Although start_lock should be an instance of a
 * hw_lock, we hand-code all manipulation of the lock
 * because the hw_lock code may require function calls;
 * and we'd rather not introduce another dependency on
 * a working stack at this point.
 */
	.globl	EXT(start_lock)
EXT(start_lock):
	.long	0			/* synchronizes processor startup */

	.globl	EXT(master_is_up)
EXT(master_is_up):
	.long	0			/* 1 when OK for other processors */
					/* to start */
	.globl	EXT(mp_boot_pde)
EXT(mp_boot_pde):
	.long	0
#endif	/* NCPUS > 1 */

/*
 * All CPUs start here.
 *
 * Environment:
 *	protected mode, no paging, flat 32-bit address space.
 *	(Code/data/stack segments have base == 0, limit == 4G)
 */
	.text
	.align	ALIGN
	.globl	EXT(pstart)
	.globl	EXT(_start)
LEXT(_start)
LEXT(pstart)
	mov	$0,%ax			/* fs must be zeroed; */
	mov	%ax,%fs			/* some bootstrappers don`t do this */
	mov	%ax,%gs

#if	NCPUS > 1
	jmp	1f
0:	cmpl	$0,PA(EXT(start_lock))
	jne	0b
1:	movl	$1,%eax
	xchgl	%eax,PA(EXT(start_lock)) /* locked */
	testl	%eax,%eax
	jnz	0b
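
/*
 * Informal note on the sequence above: xchgl is atomic on the bus, so the
 * CPU that swaps a 0 out of start_lock owns the lock; everyone else sees a
 * nonzero value and spins on the plain cmpl (rather than on the locked
 * exchange) until the holder stores 0 back, either in master_up or in
 * svstart below.
 */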

	cmpl	$0,PA(EXT(master_is_up))	/* are we first? */
	jne	EXT(slave_start)		/* no -- system already up. */
	movl	$1,PA(EXT(master_is_up))	/* others become slaves */
#endif	/* NCPUS > 1 */

/*
 * Get startup parameters.
 */

#include <i386/AT386/asm_startup.h>

/*
 * Build initial page table directory and page tables.
 * %ebx holds first available physical address.
 */

	addl	$(NBPG-1),%ebx		/* round first avail physical addr */
	andl	$(-NBPG),%ebx		/* to machine page size */
	leal	-KVTOPHYS(%ebx),%eax	/* convert to virtual address */
	movl	%eax,PA(EXT(kpde))	/* save as kernel page table directory */
	movl	%ebx,%cr3		/* set physical address in CR3 now */

	movl	%ebx,%edi		/* clear page table directory */
	movl	$(PTES_PER_PAGE),%ecx	/* one page of ptes */
	xorl	%eax,%eax
	cld
	rep
	stosl				/* edi now points to next page */

/*
 * Use next few pages for page tables.
 */
	addl	$(KERNELBASEPDE),%ebx	/* point to pde for kernel base */
	movl	%edi,%esi		/* point to end of current pte page */

/*
 * Enter 1-1 mappings for kernel and for kernel page tables.
 */
	movl	$(INTEL_PTE_KERNEL),%eax /* set up pte prototype */
0:
	cmpl	%esi,%edi		/* at end of pte page? */
	jb	1f			/* if so: */
	movl	%edi,%edx		/* get pte address (physical) */
	andl	$(-NBPG),%edx		/* mask out offset in page */
	orl	$(INTEL_PTE_KERNEL),%edx /* add pte bits */
	movl	%edx,(%ebx)		/* set pde */
	addl	$4,%ebx			/* point to next pde */
	movl	%edi,%esi		/* point to */
	addl	$(NBPG),%esi		/* end of new pte page */
1:
	movl	%eax,(%edi)		/* set pte */
	addl	$4,%edi			/* advance to next pte */
	addl	$(NBPG),%eax		/* advance to next phys page */
	cmpl	%edi,%eax		/* have we mapped this pte page yet? */
	jb	0b			/* loop if not */
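
/*
 * A reading of the loop above, for reference: %eax walks physical memory a
 * page at a time (as a pte with INTEL_PTE_KERNEL bits), %edi is the next
 * free pte slot, and %esi marks the end of the current pte page.  Whenever
 * %edi spills past %esi a fresh pte page is claimed and its pde entry set.
 * The loop stops once every physical page up to and including the pte
 * pages themselves has been mapped at its KERNELBASE-relative address.
 */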

/*
 * Zero rest of last pte page.
 */
	xor	%eax,%eax		/* don`t map yet */
2:	cmpl	%esi,%edi		/* at end of pte page? */
	jae	3f
	movl	%eax,(%edi)		/* zero mapping */
	addl	$4,%edi
	jmp	2b
3:

#if	NCPUS > 1
/*
 * Grab (waste?) another page for a bootstrap page directory
 * for the other CPUs.  We don't want the running CPUs to see
 * addresses 0..3fffff mapped 1-1.
 */
	movl	%edi,PA(EXT(mp_boot_pde)) /* save its physical address */
	movl	$(PTES_PER_PAGE),%ecx	/* and clear it */
	rep
	stosl
#endif	/* NCPUS > 1 */
	movl	%edi,PA(EXT(first_avail)) /* save first available phys addr */

/*
 * pmap_bootstrap will enter rest of mappings.
 */

/*
 * Fix initial descriptor tables.
 */
	lea	PA(EXT(idt)),%esi	/* fix IDT */
	movl	$(IDTSZ),%ecx
	movl	$(PA(fix_idt_ret)),%ebx
	jmp	fix_desc_common		/* (cannot use stack) */
fix_idt_ret:

	lea	PA(EXT(gdt)),%esi	/* fix GDT */
	movl	$(GDTSZ),%ecx
	movl	$(PA(fix_gdt_ret)),%ebx
	jmp	fix_desc_common		/* (cannot use stack) */
fix_gdt_ret:

	lea	PA(EXT(ldt)),%esi	/* fix LDT */
	movl	$(LDTSZ),%ecx
	movl	$(PA(fix_ldt_ret)),%ebx
	jmp	fix_desc_common		/* (cannot use stack) */
fix_ldt_ret:

/*
 * Turn on paging.
 */
	movl	%cr3,%eax		/* retrieve kernel PDE phys address */
	movl	KERNELBASEPDE(%eax),%ecx
	movl	%ecx,(%eax)		/* set it also as pte for location */
					/* 0..3fffff, so that the code */
					/* that enters paged mode is mapped */
					/* to identical addresses after */
					/* paged mode is enabled */

	addl	$4,%eax			/* 400000..7fffff */
	movl	KERNELBASEPDE(%eax),%ecx
	movl	%ecx,(%eax)

	movl	$ EXT(pag_start),%ebx	/* first paged code address */

	movl	%cr0,%eax
	orl	$(CR0_PG),%eax		/* set PG bit in CR0 */
	orl	$(CR0_WP),%eax
	movl	%eax,%cr0		/* to enable paging */

	jmp	*%ebx			/* flush prefetch queue */
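
/*
 * Informal note: the pde copies just above leave linear 0..7fffff mapped
 * 1-1, so the instructions between the mov to %cr0 and the jump, which
 * still execute at low physical addresses, stay mapped the instant PG is
 * set.  The indirect jump then flushes the prefetch queue and moves
 * execution to pag_start's link-time (virtual) address.
 */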

/*
 * We are now paging, and can run with correct addresses.
 */
LEXT(pag_start)
	lgdt	EXT(gdtptr)		/* load GDT */
	lidt	EXT(idtptr)		/* load IDT */
	LJMP(KERNEL_CS,EXT(vstart))	/* switch to kernel code segment */

/*
 * Master is now running with correct addresses.
 */
LEXT(vstart)
	mov	$(KERNEL_DS),%ax	/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss
	mov	%ax,EXT(ktss)+TSS_SS0	/* set kernel stack segment */
					/* for traps to kernel */
#if	MACH_KDB
	mov	%ax,EXT(dbtss)+TSS_SS0	/* likewise for debug task switch */
	mov	%cr3,%eax		/* get PDBR into debug TSS */
	mov	%eax,EXT(dbtss)+TSS_PDBR
	mov	$0,%eax
#endif

	movw	$(KERNEL_LDT),%ax	/* get LDT segment */
	lldt	%ax			/* load LDT */
#if	MACH_KDB
	mov	%ax,EXT(ktss)+TSS_LDT	/* store LDT in two TSS, as well... */
	mov	%ax,EXT(dbtss)+TSS_LDT	/* ...matters if we switch tasks */
#endif
	movw	$(KERNEL_TSS),%ax
	ltr	%ax			/* set up KTSS */

	mov	$ CPU_DATA,%ax
	mov	%ax,%gs

	lea	EXT(eintstack),%esp	/* switch to the bootup stack */
	call	EXT(machine_startup)	/* run C code */
	/*NOTREACHED*/
	hlt

#if	NCPUS > 1
/*
 * master_up is used by the master cpu to signify that it is done
 * with the interrupt stack, etc.  See the code in pstart and svstart
 * that this interlocks with.
 */
	.align	ALIGN
	.globl	EXT(master_up)
LEXT(master_up)
	pushl	%ebp			/* set up */
	movl	%esp,%ebp		/* stack frame */
	movl	$0,%ecx			/* unlock start_lock */
	xchgl	%ecx,EXT(start_lock)	/* since we are no longer using */
					/* bootstrap stack */
	leave				/* pop stack frame */
	ret

/*
 * We aren't the first.  Call slave_main to initialize the processor
 * and get Mach going on it.
 */
	.align	ALIGN
	.globl	EXT(slave_start)
LEXT(slave_start)
	cli				/* disable interrupts, so we don`t */
					/* need IDT for a while */
	movl	EXT(kpde)+KVTOPHYS,%ebx	/* get PDE virtual address */
	addl	$(KVTOPHYS),%ebx	/* convert to physical address */

	movl	PA(EXT(mp_boot_pde)),%edx /* point to the bootstrap PDE */
	movl	KERNELBASEPDE(%ebx),%eax
					/* point to pte for KERNELBASE */
	movl	%eax,KERNELBASEPDE(%edx)
					/* set in bootstrap PDE */
	movl	%eax,(%edx)		/* set it also as pte for location */
					/* 0..3fffff, so that the code */
					/* that enters paged mode is mapped */
					/* to identical addresses after */
					/* paged mode is enabled */
	movl	%edx,%cr3		/* use bootstrap PDE to enable paging */

	movl	$ EXT(spag_start),%edx	/* first paged code address */

	movl	%cr0,%eax
	orl	$(CR0_PG),%eax		/* set PG bit in CR0 */
	orl	$(CR0_WP),%eax
	movl	%eax,%cr0		/* to enable paging */

	jmp	*%edx			/* flush prefetch queue. */

/*
 * We are now paging, and can run with correct addresses.
 */
LEXT(spag_start)

	lgdt	EXT(gdtptr)		/* load GDT */
	lidt	EXT(idtptr)		/* load IDT */
	LJMP(KERNEL_CS,EXT(svstart))	/* switch to kernel code segment */

/*
 * Slave is now running with correct addresses.
 */
LEXT(svstart)
	mov	$(KERNEL_DS),%ax	/* set kernel data segment */
	mov	%ax,%ds
	mov	%ax,%es
	mov	%ax,%ss

	movl	%ebx,%cr3		/* switch to the real kernel PDE */

	CPU_NUMBER(%eax)
	movl	CX(EXT(interrupt_stack),%eax),%esp	/* get stack */
	addl	$(INTSTACK_SIZE),%esp	/* point to top */
	xorl	%ebp,%ebp		/* for completeness */

	movl	$0,%ecx			/* unlock start_lock */
	xchgl	%ecx,EXT(start_lock)	/* since we are no longer using */
					/* bootstrap stack */

/*
 * switch to the per-cpu descriptor tables
 */

	pushl	%eax			/* pass CPU number */
	call	EXT(mp_desc_init)	/* set up local table */
					/* pointer returned in %eax */
	subl	$4,%esp			/* get space to build pseudo-descriptors */
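
/*
 * For reference: together with the argument word still on the stack from
 * the pushl above, the subl leaves 8 bytes of scratch; the 16-bit limit
 * written at 0(%esp) and the 32-bit linear base at 2(%esp) form the 6-byte
 * pseudo-descriptor that lgdt and lidt expect.
 */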

	CPU_NUMBER(%eax)
	movw	$(GDTSZ*8-1),0(%esp)	/* set GDT size in GDT descriptor */
	movl	CX(EXT(mp_gdt),%eax),%edx
	addl	$ KVTOLINEAR,%edx
	movl	%edx,2(%esp)		/* point to local GDT (linear address) */
	lgdt	0(%esp)			/* load new GDT */

	movw	$(IDTSZ*8-1),0(%esp)	/* set IDT size in IDT descriptor */
	movl	CX(EXT(mp_idt),%eax),%edx
	addl	$ KVTOLINEAR,%edx
	movl	%edx,2(%esp)		/* point to local IDT (linear address) */
	lidt	0(%esp)			/* load new IDT */

	movw	$(KERNEL_LDT),%ax
	lldt	%ax			/* load new LDT */

	movw	$(KERNEL_TSS),%ax
	ltr	%ax			/* load new KTSS */

	mov	$ CPU_DATA,%ax
	mov	%ax,%gs

	call	EXT(slave_main)		/* start MACH */
	/*NOTREACHED*/
	hlt
#endif	/* NCPUS > 1 */

/*
 * Convert a descriptor from fake to real format.
 *
 * Calls from assembly code:
 * %ebx = return address (physical) CANNOT USE STACK
 * %esi = descriptor table address (physical)
 * %ecx = number of descriptors
 *
 * Calls from C:
 * 0(%esp) = return address
 * 4(%esp) = descriptor table address (physical)
 * 8(%esp) = number of descriptors
 *
 * Fake descriptor format:
 *	bytes 0..3		base 31..0
 *	bytes 4..5		limit 15..0
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			access byte 1
 *
 * Real descriptor format:
 *	bytes 0..1		limit 15..0
 *	bytes 2..3		base 15..0
 *	byte  4			base 23..16
 *	byte  5			access byte 1
 *	byte  6			access byte 2 | limit 19..16
 *	byte  7			base 31..24
 *
 * Fake gate format:
 *	bytes 0..3		offset
 *	bytes 4..5		selector
 *	byte  6			word count << 4 (to match fake descriptor)
 *	byte  7			access byte 1
 *
 * Real gate format:
 *	bytes 0..1		offset 15..0
 *	bytes 2..3		selector
 *	byte  4			word count
 *	byte  5			access byte 1
 *	bytes 6..7		offset 31..16
 */
	.globl	EXT(fix_desc)
LEXT(fix_desc)
	pushl	%ebp			/* set up */
	movl	%esp,%ebp		/* stack frame */
	pushl	%esi			/* save registers */
	pushl	%ebx
	movl	B_ARG0,%esi		/* point to first descriptor */
	movl	B_ARG1,%ecx		/* get number of descriptors */
	lea	0f,%ebx			/* get return address */
	jmp	fix_desc_common		/* call internal routine */
0:	popl	%ebx			/* restore registers */
	popl	%esi
	leave				/* pop stack frame */
	ret				/* return */

fix_desc_common:
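/*
 * A note on the test below, based on the access-byte layout: byte 7 is
 * access byte 1 (P | DPL | S | type).  Masking with 0x14 keeps the S bit
 * (0x10) and type bit 2 (0x04); a result of 0x04 means a system descriptor
 * whose type has bit 2 set, which is how the gate types (call, task,
 * interrupt, trap) differ from the segment descriptors handled here.
 */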
0:
	movw	6(%esi),%dx		/* get access byte */
	movb	%dh,%al
	andb	$0x14,%al
	cmpb	$0x04,%al		/* gate or descriptor? */
	je	1f

/* descriptor */
	movl	0(%esi),%eax		/* get base in eax */
	rol	$16,%eax		/* swap 15..0 with 31..16 */
					/* (15..0 in correct place) */
	movb	%al,%dl			/* combine bits 23..16 with ACC1 */
					/* in dh/dl */
	movb	%ah,7(%esi)		/* store bits 31..24 in correct place */
	movw	4(%esi),%ax		/* move limit bits 0..15 to word 0 */
	movl	%eax,0(%esi)		/* store (bytes 0..3 correct) */
	movw	%dx,4(%esi)		/* store bytes 4..5 */
	jmp	2f

/* gate */
1:
	movw	4(%esi),%ax		/* get selector */
	shrb	$4,%dl			/* shift word count to proper place */
	movw	%dx,4(%esi)		/* store word count / ACC1 */
	movw	2(%esi),%dx		/* get offset 16..31 */
	movw	%dx,6(%esi)		/* store in correct place */
	movw	%ax,2(%esi)		/* store selector in correct place */
2:
	addl	$8,%esi			/* bump to next descriptor */
	loop	0b			/* repeat */
	jmp	*%ebx			/* all done */

/*
 * put arg in kbd leds and spin a while
 * eats eax, ecx, edx
 */
#define	K_RDWR		0x60
#define	K_CMD_LEDS	0xed
#define	K_STATUS	0x64
#define	K_IBUF_FULL	0x02		/* input (to kbd) buffer full */
#define	K_OBUF_FULL	0x01		/* output (from kbd) buffer full */
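
/*
 * Rough protocol sketch for the routine below (standard 8042-style
 * keyboard controller): wait for the input buffer to drain, write the
 * "set LEDs" command (0xed) to the data port, wait for and discard the
 * controller's response byte, wait for the input buffer again, then write
 * the LED bitmask itself.  The closing counted loop is just a crude delay.
 */
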
ENTRY(set_kbd_leds)
	mov	S_ARG0,%cl		/* save led value */

0:	inb	$(K_STATUS),%al		/* get kbd status */
	testb	$(K_IBUF_FULL),%al	/* input busy? */
	jne	0b			/* loop until not */

	mov	$(K_CMD_LEDS),%al	/* K_CMD_LEDS */
	outb	%al,$(K_RDWR)		/* to kbd */

0:	inb	$(K_STATUS),%al		/* get kbd status */
	testb	$(K_OBUF_FULL),%al	/* output present? */
	je	0b			/* loop if not */

	inb	$(K_RDWR),%al		/* read status (and discard) */

0:	inb	$(K_STATUS),%al		/* get kbd status */
	testb	$(K_IBUF_FULL),%al	/* input busy? */
	jne	0b			/* loop until not */

	mov	%cl,%al			/* move led value */
	outb	%al,$(K_RDWR)		/* to kbd */

	movl	$10000000,%ecx		/* spin */
0:	nop
	nop
	loop	0b			/* a while */

	ret