/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */

#include <platforms.h>
#include <cpus.h>
#include <mach_kdb.h>

#include <i386/asm.h>
#include <i386/proc_reg.h>
#include <assym.s>

#if NCPUS > 1

#define CX(addr,reg)    addr(,reg,4)

#else

#define CPU_NUMBER(reg)
#define CX(addr,reg)    addr

#endif  /* NCPUS > 1 */
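
/*
 * CX() indexes a per-cpu array of 32-bit entries: on an NCPUS > 1
 * build, CX(EXT(interrupt_stack),%eax) assembles to
 * EXT(interrupt_stack)(,%eax,4), i.e. base + cpu_number * 4, while on
 * a uniprocessor build it collapses to the plain address and
 * CPU_NUMBER() expands to nothing.
 */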

#include <i386/mp.h>

/*
 * GAS won't handle an intersegment jump with a relocatable offset.
 */
#define LJMP(segment,address)   \
        .byte   0xea            ;\
        .long   address         ;\
        .word   segment
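
/*
 * The macro hand-assembles the far jump: in a 32-bit code segment,
 * opcode 0xea takes a ptr16:32 operand, i.e. a 4-byte offset followed
 * by a 2-byte segment selector.  LJMP(KERNEL_CS,EXT(vstart)) below
 * therefore emits
 *
 *      0xea, <4-byte offset of EXT(vstart)>, <2-byte KERNEL_CS>
 */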


#define KVTOPHYS        (-KERNELBASE)
#define KVTOLINEAR      LINEAR_KERNELBASE


#define PA(addr)        (addr)+KVTOPHYS
#define VA(addr)        (addr)-KVTOPHYS
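
/*
 * Illustration only (the actual KERNELBASE value comes from the kernel
 * configuration): with KERNELBASE = 0xc0000000, KVTOPHYS is 0x40000000
 * modulo 2^32, so PA(0xc0100000) = 0x00100000 and
 * VA(0x00100000) = 0xc0100000.
 */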

        .data
        .align  2
        .globl  EXT(_kick_buffer_)
EXT(_kick_buffer_):
        .long   1
        .long   3
        .set    ., .+16836
/*
 * Interrupt and bootup stack for initial processor.
 */
        .align  ALIGN
        .globl  EXT(intstack)
EXT(intstack):
        .set    ., .+INTSTACK_SIZE
        .globl  EXT(eintstack)
EXT(eintstack):

#if NCPUS == 1
        .globl  EXT(int_stack_high)     /* all interrupt stacks */
EXT(int_stack_high):                    /* must lie below this */
        .long   EXT(eintstack)          /* address */

        .globl  EXT(int_stack_top)      /* top of interrupt stack */
EXT(int_stack_top):
        .long   EXT(eintstack)
#endif

#if MACH_KDB
/*
 * Kernel debugger stack for each processor.
 */
        .align  ALIGN
        .globl  EXT(db_stack_store)
EXT(db_stack_store):
        .set    ., .+(INTSTACK_SIZE*NCPUS)

/*
 * Stack for last-ditch debugger task for each processor.
 */
        .align  ALIGN
        .globl  EXT(db_task_stack_store)
EXT(db_task_stack_store):
        .set    ., .+(INTSTACK_SIZE*NCPUS)
#endif  /* MACH_KDB */

/*
 * per-processor kernel debugger stacks
 */
        .align  ALIGN
        .globl  EXT(kgdb_stack_store)
EXT(kgdb_stack_store):
        .set    ., .+(INTSTACK_SIZE*NCPUS)


/*
 * Pointers to GDT and IDT.  These contain linear addresses.
 */
        .align  ALIGN
        .globl  EXT(gdtptr)
LEXT(gdtptr)
        .word   Times(8,GDTSZ)-1
        .long   EXT(gdt)+KVTOLINEAR

        .align  ALIGN
        .globl  EXT(idtptr)
LEXT(idtptr)
        .word   Times(8,IDTSZ)-1
        .long   EXT(idt)+KVTOLINEAR
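
/*
 * Each block above is the 6-byte pseudo-descriptor operand that
 * lgdt/lidt expect: a 16-bit limit (table size in bytes minus one)
 * followed by a 32-bit linear base address -- hence the +KVTOLINEAR
 * applied to the tables' kernel virtual addresses.
 */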

#if NCPUS > 1
        .data
/*
 * start_lock is very special.  We initialize the
 * lock at allocation time rather than at run-time.
 * Although start_lock should be an instance of a
 * hw_lock, we hand-code all manipulation of the lock
 * because the hw_lock code may require function calls;
 * and we'd rather not introduce another dependency on
 * a working stack at this point.
 */
        .globl  EXT(start_lock)
EXT(start_lock):
        .long   0                       /* synchronizes processor startup */
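
/*
 * Roughly, the hand-coded sequence in pstart behaves like this C
 * sketch, where xchg() stands for the atomic xchgl instruction
 * (illustrative only -- the real code runs without a stack):
 *
 *      do {
 *              while (start_lock != 0)
 *                      ;                       // wait until it looks free
 *      } while (xchg(&start_lock, 1) != 0);    // atomic test-and-set
 *      ...
 *      xchg(&start_lock, 0);                   // released in master_up
 *                                              // or svstart
 */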

        .globl  EXT(master_is_up)
EXT(master_is_up):
        .long   0                       /* 1 when OK for other processors */
                                        /* to start */
        .globl  EXT(mp_boot_pde)
EXT(mp_boot_pde):
        .long   0
#endif  /* NCPUS > 1 */

/*
 * All CPUs start here.
 *
 * Environment:
 *      protected mode, no paging, flat 32-bit address space.
 *      (Code/data/stack segments have base == 0, limit == 4G)
 */
        .text
        .align  ALIGN
        .globl  EXT(pstart)
        .globl  EXT(_start)
LEXT(_start)
LEXT(pstart)
        mov     %eax, %ebx              /* save pointer to kernbootstruct */
        mov     $0,%ax                  /* fs must be zeroed; */
        mov     %ax,%fs                 /* some bootstrappers don`t do this */
        mov     %ax,%gs

#if NCPUS > 1
        jmp     1f
0:      cmpl    $0,PA(EXT(start_lock))
        jne     0b
1:      movl    $1,%eax
        xchgl   %eax,PA(EXT(start_lock)) /* locked */
        testl   %eax,%eax
        jnz     0b

        cmpl    $0,PA(EXT(master_is_up))        /* are we first? */
        jne     EXT(slave_start)                /* no -- system already up. */
        movl    $1,PA(EXT(master_is_up))        /* others become slaves */
#endif  /* NCPUS > 1 */

/*
 * Get startup parameters.
 */

#include <i386/AT386/asm_startup.h>

/*
 * Build initial page table directory and page tables.
 * %ebx holds first available physical address.
 */
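
/*
 * (addr + NBPG-1) & ~(NBPG-1) rounds up to a page boundary: with
 * NBPG = 4096, 0x00104234 becomes 0x00105000, while an already
 * aligned 0x00104000 is unchanged.
 */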

        addl    $(NBPG-1),%ebx          /* round first avail physical addr */
        andl    $(-NBPG),%ebx           /* to machine page size */
        leal    -KVTOPHYS(%ebx),%eax    /* convert to virtual address */
        movl    %eax,PA(EXT(kpde))      /* save as kernel page table directory */
        movl    %ebx,%cr3               /* set physical address in CR3 now */

        movl    %ebx,%edi               /* clear page table directory */
        movl    $(PTES_PER_PAGE),%ecx   /* one page of ptes */
        xorl    %eax,%eax
        cld
        rep
        stosl                           /* edi now points to next page */

/*
 * Use next few pages for page tables.
 */
        addl    $(KERNELBASEPDE),%ebx   /* point to pde for kernel base */
        movl    %edi,%esi               /* point to end of current pte page */

/*
 * Enter 1-1 mappings for kernel and for kernel page tables.
 */
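
/*
 * In rough C, with plain integers standing in for physical addresses
 * and store() for a 32-bit store (illustrative only -- the real code
 * runs stackless on physical addresses):
 *
 *      pte  = INTEL_PTE_KERNEL;        // maps physical page 0
 *      ptep = end = first_free;        // ptes are written at first_free
 *      pdep = pde_for(KERNELBASE);     // pde slot covering KERNELBASE
 *      do {
 *              if (ptep >= end) {      // current pte page exhausted:
 *                      store(pdep, (ptep & ~(NBPG-1)) | INTEL_PTE_KERNEL);
 *                      pdep += 4;      // claim next page as a pte page
 *                      end = ptep + NBPG;
 *              }
 *              store(ptep, pte);       // 1-1 map the next physical page
 *              ptep += 4;
 *              pte  += NBPG;
 *      } while (pte < ptep);           // stop once the pte pages map
 *                                      // themselves
 */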
        movl    $(INTEL_PTE_KERNEL),%eax /* set up pte prototype */
0:
        cmpl    %esi,%edi               /* at end of pte page? */
        jb      1f                      /* if not, just store the next pte */
                                        /* if so, start a new pte page: */
        movl    %edi,%edx               /* get pte address (physical) */
        andl    $(-NBPG),%edx           /* mask out offset in page */
        orl     $(INTEL_PTE_KERNEL),%edx /* add pte bits */
        movl    %edx,(%ebx)             /* set pde */
        addl    $4,%ebx                 /* point to next pde */
        movl    %edi,%esi               /* point to */
        addl    $(NBPG),%esi            /* end of new pte page */
1:
        movl    %eax,(%edi)             /* set pte */
        addl    $4,%edi                 /* advance to next pte */
        addl    $(NBPG),%eax            /* advance to next phys page */
        cmpl    %edi,%eax               /* have we mapped this pte page yet? */
        jb      0b                      /* loop if not */

/*
 * Zero rest of last pte page.
 */
        xor     %eax,%eax               /* don`t map yet */
2:      cmpl    %esi,%edi               /* at end of pte page? */
        jae     3f
        movl    %eax,(%edi)             /* zero mapping */
        addl    $4,%edi
        jmp     2b
3:

#if NCPUS > 1
/*
 * Grab (waste?) another page for a bootstrap page directory
 * for the other CPUs.  We don't want the running CPUs to see
 * addresses 0..3fffff mapped 1-1.
 */
        movl    %edi,PA(EXT(mp_boot_pde)) /* save its physical address */
        movl    $(PTES_PER_PAGE),%ecx   /* and clear it */
        rep
        stosl
#endif  /* NCPUS > 1 */
        movl    %edi,PA(EXT(first_avail)) /* save first available phys addr */

/*
 * pmap_bootstrap will enter rest of mappings.
 */

/*
 * Fix initial descriptor tables.
 */
        lea     PA(EXT(idt)),%esi       /* fix IDT */
        movl    $(IDTSZ),%ecx
        movl    $(PA(fix_idt_ret)),%ebx
        jmp     fix_desc_common         /* (cannot use stack) */
fix_idt_ret:

        lea     PA(EXT(gdt)),%esi       /* fix GDT */
        movl    $(GDTSZ),%ecx
        movl    $(PA(fix_gdt_ret)),%ebx
        jmp     fix_desc_common         /* (cannot use stack) */
fix_gdt_ret:

        lea     PA(EXT(ldt)),%esi       /* fix LDT */
        movl    $(LDTSZ),%ecx
        movl    $(PA(fix_ldt_ret)),%ebx
        jmp     fix_desc_common         /* (cannot use stack) */
fix_ldt_ret:

/*
 * Turn on paging.
 */
        movl    %cr3,%eax               /* retrieve kernel PDE phys address */
        movl    KERNELBASEPDE(%eax),%ecx
        movl    %ecx,(%eax)             /* set it also as pte for location */
                                        /* 0..3fffff, so that the code */
                                        /* that enters paged mode is mapped */
                                        /* to identical addresses after */
                                        /* paged mode is enabled */

        addl    $4,%eax                 /* 400000..7fffff */
        movl    KERNELBASEPDE(%eax),%ecx
        movl    %ecx,(%eax)

        movl    $ EXT(pag_start),%ebx   /* first paged code address */

        movl    %cr0,%eax
        orl     $(CR0_PG),%eax          /* set PG bit in CR0 */
        orl     $(CR0_WP),%eax
        movl    %eax,%cr0               /* to enable paging */

        jmp     *%ebx                   /* flush prefetch queue */

/*
 * We are now paging, and can run with correct addresses.
 */
LEXT(pag_start)
        lgdt    EXT(gdtptr)             /* load GDT */
        lidt    EXT(idtptr)             /* load IDT */
        LJMP(KERNEL_CS,EXT(vstart))     /* switch to kernel code segment */

/*
 * Master is now running with correct addresses.
 */
LEXT(vstart)
        mov     $(KERNEL_DS),%ax        /* set kernel data segment */
        mov     %ax,%ds
        mov     %ax,%es
        mov     %ax,%ss
        mov     %ax,EXT(ktss)+TSS_SS0   /* set kernel stack segment */
                                        /* for traps to kernel */
#if MACH_KDB
        mov     %ax,EXT(dbtss)+TSS_SS0  /* likewise for debug task switch */
        mov     %cr3,%eax               /* get PDBR into debug TSS */
        mov     %eax,EXT(dbtss)+TSS_PDBR
        mov     $0,%eax
#endif

        movw    $(KERNEL_LDT),%ax       /* get LDT segment */
        lldt    %ax                     /* load LDT */
#if MACH_KDB
        mov     %ax,EXT(ktss)+TSS_LDT   /* store LDT in two TSS, as well... */
        mov     %ax,EXT(dbtss)+TSS_LDT  /* ...matters if we switch tasks */
#endif
        movw    $(KERNEL_TSS),%ax
        ltr     %ax                     /* set up KTSS */

        mov     $ CPU_DATA,%ax
        mov     %ax,%gs

        lea     EXT(eintstack),%esp     /* switch to the bootup stack */
        call    EXT(i386_init)          /* run C code */
        /*NOTREACHED*/
        hlt

#if NCPUS > 1
/*
 * master_up is used by the master cpu to signify that it is done
 * with the interrupt stack, etc.  See the code in pstart and svstart
 * that this interlocks with.
 */
        .align  ALIGN
        .globl  EXT(master_up)
LEXT(master_up)
        pushl   %ebp                    /* set up */
        movl    %esp,%ebp               /* stack frame */
        movl    $0,%ecx                 /* unlock start_lock */
        xchgl   %ecx,EXT(start_lock)    /* since we are no longer using */
                                        /* bootstrap stack */
        leave                           /* pop stack frame */
        ret

/*
 * We aren't the first.  Call slave_main to initialize the processor
 * and get Mach going on it.
 */
        .align  ALIGN
        .globl  EXT(slave_start)
LEXT(slave_start)
        cli                             /* disable interrupts, so we don`t */
                                        /* need IDT for a while */
        movl    EXT(kpde)+KVTOPHYS,%ebx /* get PDE virtual address */
        addl    $(KVTOPHYS),%ebx        /* convert to physical address */

        movl    PA(EXT(mp_boot_pde)),%edx /* point to the bootstrap PDE */
        movl    KERNELBASEPDE(%ebx),%eax
                                        /* point to pte for KERNELBASE */
        movl    %eax,KERNELBASEPDE(%edx)
                                        /* set in bootstrap PDE */
        movl    %eax,(%edx)             /* set it also as pte for location */
                                        /* 0..3fffff, so that the code */
                                        /* that enters paged mode is mapped */
                                        /* to identical addresses after */
                                        /* paged mode is enabled */
        movl    %edx,%cr3               /* use bootstrap PDE to enable paging */

        movl    $ EXT(spag_start),%edx  /* first paged code address */

        movl    %cr0,%eax
        orl     $(CR0_PG),%eax          /* set PG bit in CR0 */
        orl     $(CR0_WP),%eax
        movl    %eax,%cr0               /* to enable paging */

        jmp     *%edx                   /* flush prefetch queue. */

/*
 * We are now paging, and can run with correct addresses.
 */
LEXT(spag_start)

        lgdt    EXT(gdtptr)             /* load GDT */
        lidt    EXT(idtptr)             /* load IDT */
        LJMP(KERNEL_CS,EXT(svstart))    /* switch to kernel code segment */

/*
 * Slave is now running with correct addresses.
 */
LEXT(svstart)
        mov     $(KERNEL_DS),%ax        /* set kernel data segment */
        mov     %ax,%ds
        mov     %ax,%es
        mov     %ax,%ss

        movl    %ebx,%cr3               /* switch to the real kernel PDE */

        CPU_NUMBER(%eax)
        movl    CX(EXT(interrupt_stack),%eax),%esp /* get stack */
        addl    $(INTSTACK_SIZE),%esp   /* point to top */
        xorl    %ebp,%ebp               /* for completeness */

        movl    $0,%ecx                 /* unlock start_lock */
        xchgl   %ecx,EXT(start_lock)    /* since we are no longer using */
                                        /* bootstrap stack */

/*
 * switch to the per-cpu descriptor tables
 */

        pushl   %eax                    /* pass CPU number */
        call    EXT(mp_desc_init)       /* set up local table */
                                        /* pointer returned in %eax */
        subl    $4,%esp                 /* get space to build pseudo-descriptors */

        CPU_NUMBER(%eax)
        movw    $(GDTSZ*8-1),0(%esp)    /* set GDT size in GDT descriptor */
        movl    CX(EXT(mp_gdt),%eax),%edx
        addl    $ KVTOLINEAR,%edx
        movl    %edx,2(%esp)            /* point to local GDT (linear address) */
        lgdt    0(%esp)                 /* load new GDT */

        movw    $(IDTSZ*8-1),0(%esp)    /* set IDT size in IDT descriptor */
        movl    CX(EXT(mp_idt),%eax),%edx
        addl    $ KVTOLINEAR,%edx
        movl    %edx,2(%esp)            /* point to local IDT (linear address) */
        lidt    0(%esp)                 /* load new IDT */

        movw    $(KERNEL_LDT),%ax       /* get LDT segment */
        lldt    %ax                     /* load LDT */

        movw    $(KERNEL_TSS),%ax
        ltr     %ax                     /* load new KTSS */

        mov     $ CPU_DATA,%ax
        mov     %ax,%gs

        call    EXT(slave_main)         /* start MACH */
        /*NOTREACHED*/
        hlt
#endif  /* NCPUS > 1 */

/*
 * Convert a descriptor from fake to real format.
 *
 * Calls from assembly code:
 *      %ebx = return address (physical) CANNOT USE STACK
 *      %esi = descriptor table address (physical)
 *      %ecx = number of descriptors
 *
 * Calls from C:
 *      0(%esp) = return address
 *      4(%esp) = descriptor table address (physical)
 *      8(%esp) = number of descriptors
 *
 * Fake descriptor format:
 *      bytes 0..3      base 31..0
 *      bytes 4..5      limit 15..0
 *      byte  6         access byte 2 | limit 19..16
 *      byte  7         access byte 1
 *
 * Real descriptor format:
 *      bytes 0..1      limit 15..0
 *      bytes 2..3      base 15..0
 *      byte  4         base 23..16
 *      byte  5         access byte 1
 *      byte  6         access byte 2 | limit 19..16
 *      byte  7         base 31..24
 *
 * Fake gate format:
 *      bytes 0..3      offset
 *      bytes 4..5      selector
 *      byte  6         word count << 4 (to match fake descriptor)
 *      byte  7         access byte 1
 *
 * Real gate format:
 *      bytes 0..1      offset 15..0
 *      bytes 2..3      selector
 *      byte  4         word count
 *      byte  5         access byte 1
 *      bytes 6..7      offset 31..16
 */
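
/*
 * Worked example with illustrative values: a fake descriptor with
 * base = 0x12345678, limit = 0xabcde, access byte 1 = 0x9b and
 * access byte 2 = 0xc0 is stored as the byte sequence
 *
 *      78 56 34 12  de bc  ca  9b
 *
 * and is rearranged in place into the real format
 *
 *      de bc  78 56  34  9b  ca  12
 */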
        .globl  EXT(fix_desc)
LEXT(fix_desc)
        pushl   %ebp                    /* set up */
        movl    %esp,%ebp               /* stack frame */
        pushl   %esi                    /* save registers */
        pushl   %ebx
        movl    B_ARG0,%esi             /* point to first descriptor */
        movl    B_ARG1,%ecx             /* get number of descriptors */
        lea     0f,%ebx                 /* get return address */
        jmp     fix_desc_common         /* call internal routine */
0:      popl    %ebx                    /* restore registers */
        popl    %esi
        leave                           /* pop stack frame */
        ret                             /* return */

fix_desc_common:
0:
        movw    6(%esi),%dx             /* get access byte */
        movb    %dh,%al
        andb    $0x14,%al
        cmpb    $0x04,%al               /* gate or descriptor? */
        je      1f

/* descriptor */
        movl    0(%esi),%eax            /* get base in eax */
        rol     $16,%eax                /* swap 15..0 with 31..16 */
                                        /* (15..0 in correct place) */
        movb    %al,%dl                 /* combine bits 23..16 with ACC1 */
                                        /* in dh/dl */
        movb    %ah,7(%esi)             /* store bits 31..24 in correct place */
        movw    4(%esi),%ax             /* move limit bits 0..15 to word 0 */
        movl    %eax,0(%esi)            /* store (bytes 0..3 correct) */
        movw    %dx,4(%esi)             /* store bytes 4..5 */
        jmp     2f

/* gate */
1:
        movw    4(%esi),%ax             /* get selector */
        shrb    $4,%dl                  /* shift word count to proper place */
        movw    %dx,4(%esi)             /* store word count / ACC1 */
        movw    2(%esi),%dx             /* get offset 16..31 */
        movw    %dx,6(%esi)             /* store in correct place */
        movw    %ax,2(%esi)             /* store selector in correct place */
2:
        addl    $8,%esi                 /* bump to next descriptor */
        loop    0b                      /* repeat */
        jmp     *%ebx                   /* all done */

/*
 * put arg in kbd leds and spin a while
 * eats eax, ecx, edx
 */
#define K_RDWR          0x60
#define K_CMD_LEDS      0xed
#define K_STATUS        0x64
#define K_IBUF_FULL     0x02            /* input (to kbd) buffer full */
#define K_OBUF_FULL     0x01            /* output (from kbd) buffer full */

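/*
 * The 0xed "set LEDs" keyboard command takes one data byte in which
 * bit 0 = Scroll Lock, bit 1 = Num Lock and bit 2 = Caps Lock, so e.g.
 * set_kbd_leds(0x07) would light all three.
 */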
ENTRY(set_kbd_leds)
        mov     S_ARG0,%cl              /* save led value */

0:      inb     $(K_STATUS),%al         /* get kbd status */
        testb   $(K_IBUF_FULL),%al      /* input busy? */
        jne     0b                      /* loop until not */

        mov     $(K_CMD_LEDS),%al       /* K_CMD_LEDS */
        outb    %al,$(K_RDWR)           /* to kbd */

0:      inb     $(K_STATUS),%al         /* get kbd status */
        testb   $(K_OBUF_FULL),%al      /* output present? */
        je      0b                      /* loop if not */

        inb     $(K_RDWR),%al           /* read status (and discard) */

0:      inb     $(K_STATUS),%al         /* get kbd status */
        testb   $(K_IBUF_FULL),%al      /* input busy? */
        jne     0b                      /* loop until not */

        mov     %cl,%al                 /* move led value */
        outb    %al,$(K_RDWR)           /* to kbd */

        movl    $10000000,%ecx          /* spin */
0:      nop
        nop
        loop    0b                      /* a while */

        ret