/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef __x86_64__
#include <i386/mp.h>
#include <i386/cpu_data.h>
#include <i386/bit_routines.h>
#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
#include <i386/serial_io.h>
#endif /* __x86_64__ */

#include <libkern/OSAtomic.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <console/video_console.h>
#include <console/serial_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/cpu_data.h>

#ifndef MAX_CPU_SLOTS
#define MAX_CPU_SLOTS (MAX_CPUS)
#endif

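/*
 * Shared console ring: writers append under write_lock, and a single
 * drainer at a time empties it to the console device under read_lock.
 */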
static struct {
	char * buffer;
	int len;
	int used;
	char * write_ptr;
	char * read_ptr;
	decl_simple_lock_data(, read_lock);
	decl_simple_lock_data(, write_lock);
} console_ring;

hw_lock_data_t cnputc_lock;
static volatile uint32_t console_output = 0;

/*
 * New allocation mechanism for console buffers
 * Total allocation: 1 * PAGE_SIZE
 * - Each cpu gets a CPU_CONS_BUF_SIZE buffer
 * - The kernel-wide console ring gets PAGE_SIZE - MAX_CPU_SLOTS * CPU_CONS_BUF_SIZE
 *
 * At the return from console_init() the memory is set up as follows:
 * +----------------------------+-------------+-------------+-------------+-------------+
 * |console ring buffer---------|f2eec075-----|f2eec075-----|f2eec075-----|f2eec075-----|
 * +----------------------------+-------------+-------------+-------------+-------------+
 * Each cpu allocation finds the first free slot (marked f2eec075) and claims
 * that buffer.
 */

#define CPU_CONS_BUF_SIZE 256
#define CPU_BUF_FREE_HEX 0xf2eec075

#define KERN_CONSOLE_BUF_SIZE vm_map_round_page(CPU_CONS_BUF_SIZE * (MAX_CPU_SLOTS + 1), PAGE_SIZE - 1)
#define KERN_CONSOLE_RING_SIZE (KERN_CONSOLE_BUF_SIZE - (CPU_CONS_BUF_SIZE * MAX_CPU_SLOTS))

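/*
 * Worked example (illustrative values only, assuming PAGE_SIZE = 4096
 * and MAX_CPU_SLOTS = 8; both vary by configuration):
 *   KERN_CONSOLE_BUF_SIZE  = round_page(256 * 9) = 4096
 *   KERN_CONSOLE_RING_SIZE = 4096 - (256 * 8)    = 2048
 * i.e. half the page holds the shared ring, the rest the per-cpu buffers.
 */
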
/*
 * A serial line running at 115200 bps can output ~11.5 characters per millisecond.
 * Synchronous serial logging with preemption and interrupts disabled fundamentally
 * prevents us from hitting expected scheduling deadlines, but we can at least
 * tone it down a bit.
 *
 * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148)
 *
 * Keep interrupt-disabled periods shorter than 1 ms.
 */
#define MAX_INT_DISABLED_FLUSH_SIZE 8
#define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE)

typedef struct console_buf {
	char * buf_base;
	char * buf_end;
	char * buf_ptr;
#define CPU_BUFFER_LEN (CPU_CONS_BUF_SIZE - 3 * (sizeof(char *)))
	char buf[CPU_BUFFER_LEN];
} console_buf_t;

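/*
 * By construction, sizeof(console_buf_t) == CPU_CONS_BUF_SIZE: on a
 * 64-bit kernel (8-byte pointers, assumed here for the arithmetic),
 * CPU_BUFFER_LEN = 256 - 3 * 8 = 232 characters per cpu.
 */
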
extern int serial_getc(void);
extern void serial_putc(char);

static void _serial_putc(int, int, int);

struct console_ops cons_ops[] = {
	{
	    .putc = _serial_putc, .getc = _serial_getc,
	},
	{
	    .putc = vcputc, .getc = vcgetc,
	},
};

uint32_t nconsops = (sizeof cons_ops / sizeof cons_ops[0]);

uint32_t cons_ops_index = VC_CONS_OPS;

static bool console_suspended = false;

static void
console_ring_lock_init(void)
{
	simple_lock_init(&console_ring.read_lock, 0);
	simple_lock_init(&console_ring.write_lock, 0);
}

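/*
 * One-time initialization: the compare-and-swap on console_ring.len
 * guarantees that only the first caller allocates and carves up the
 * backing page; later callers return immediately.
 */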
void
console_init(void)
{
	int ret, i;
	uint32_t * p;

	if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len))
		return;

	assert(console_ring.len > 0);

	ret = kmem_alloc(kernel_map, (vm_offset_t *)&console_ring.buffer, KERN_CONSOLE_BUF_SIZE, VM_KERN_MEMORY_OSFMK);
	if (ret != KERN_SUCCESS) {
		panic("console_init() failed to allocate ring buffer, error %d\n", ret);
	}

	/* set up memory for per cpu console buffers */
	for (i = 0; i < MAX_CPU_SLOTS; i++) {
		p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
		*p = CPU_BUF_FREE_HEX;
	}

	console_ring.used = 0;
	console_ring.read_ptr = console_ring.buffer;
	console_ring.write_ptr = console_ring.buffer;
	console_ring_lock_init();
	hw_lock_init(&cnputc_lock);
}

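/*
 * Claim a free per-cpu slot at the tail of the console page by
 * compare-and-swapping its CPU_BUF_FREE_HEX marker to 0, then lay
 * the buf_base/buf_ptr/buf_end fields over the claimed slot.
 */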
void *
console_cpu_alloc(__unused boolean_t boot_processor)
{
	console_buf_t * cbp;
	int i;
	uint32_t * p;

	console_init();
	assert(console_ring.buffer != NULL);

	/* select the next slot from the per cpu buffers at the end of console_ring.buffer */
	for (i = 0; i < MAX_CPU_SLOTS; i++) {
		p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
		if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p))
			break;
	}
	assert(i < MAX_CPU_SLOTS);

	cbp = (console_buf_t *)(uintptr_t)p;
	if ((uintptr_t)cbp >= (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE) {
		printf("console_cpu_alloc() failed to allocate cpu buffer\n");
		return NULL;
	}

	cbp->buf_base = (char *)&cbp->buf;
	cbp->buf_ptr = cbp->buf_base;
	cbp->buf_end = cbp->buf_base + CPU_BUFFER_LEN;
	return (void *)cbp;
}

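/* Release a per-cpu slot by restoring its free marker. */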
void
console_cpu_free(void * buf)
{
	assert((uintptr_t)buf > (uintptr_t)console_ring.buffer);
	assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE);
	if (buf != NULL)
		*(uint32_t *)buf = CPU_BUF_FREE_HEX;
}

static inline int
console_ring_space(void)
{
	return console_ring.len - console_ring.used;
}

static boolean_t
console_ring_put(char ch)
{
	if (console_ring.used < console_ring.len) {
		console_ring.used++;
		*console_ring.write_ptr++ = ch;
		if (console_ring.write_ptr - console_ring.buffer == console_ring.len)
			console_ring.write_ptr = console_ring.buffer;
		return TRUE;
	} else {
		return FALSE;
	}
}

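/* Append one character to a per-cpu buffer; NULs and overflow are rejected. */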
static inline boolean_t
cpu_buffer_put(console_buf_t * cbp, char ch)
{
	if (ch != '\0' && cbp->buf_ptr < cbp->buf_end) {
		*(cbp->buf_ptr++) = ch;
		return TRUE;
	} else {
		return FALSE;
	}
}

static inline int
cpu_buffer_size(console_buf_t * cbp)
{
	return (int)(cbp->buf_ptr - cbp->buf_base);
}

static inline void
_cnputs(char * c, int size)
{
	/* The console device output routines are assumed to be
	 * non-reentrant.
	 */
#ifdef __x86_64__
	uint32_t lock_timeout_ticks = UINT32_MAX;
#else
	uint32_t lock_timeout_ticks = LockTimeOut;
#endif

	mp_disable_preemption();
	if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks)) {
		/* If we timed out on the lock, and we're in the debugger,
		 * copy lock data for debugging and break the lock.
		 */
		hw_lock_data_t _shadow_lock;
		memcpy(&_shadow_lock, &cnputc_lock, sizeof(cnputc_lock));
		if (debug_mode) {
			/* Since hw_lock_to takes a pre-emption count... */
			mp_enable_preemption();
			hw_lock_init(&cnputc_lock);
			hw_lock_lock(&cnputc_lock);
		} else {
			panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock,
			    _shadow_lock.lock_data, current_thread());
		}
	}

	while (size-- > 0) {
		cons_ops[cons_ops_index].putc(0, 0, *c);
		if (*c == '\n')
			cons_ops[cons_ops_index].putc(0, 0, '\r');
		c++;
	}

	hw_lock_unlock(&cnputc_lock);
	mp_enable_preemption();
}

void
cnputc_unbuffered(char c)
{
	_cnputs(&c, 1);
}

void
cnputcusr(char c)
{
	boolean_t state;

	/* Spin (with pre-emption enabled) waiting for console_ring_try_empty()
	 * to complete output. There is a small window here where we could
	 * end up with a stale value of console_output, but it's unlikely,
	 * and _cnputs(), which outputs to the console device, is internally
	 * synchronized. There's something of a conflict between the
	 * character-at-a-time (with pre-emption enabled) unbuffered
	 * output model here, and the buffered output from cnputc(),
	 * whose consumers include printf() (which outputs a sequence
	 * with pre-emption disabled, and should be safe to call with
	 * interrupts off); we don't want to disable pre-emption indefinitely
	 * here, and spinlocks and mutexes are inappropriate.
	 */
	while (console_output != 0)
		;

	/*
	 * We disable interrupts to avoid issues caused by rendezvous IPIs
	 * and an interruptible core holding the lock while an uninterruptible
	 * core wants it. Stackshot is the prime example of this.
	 */
	state = ml_set_interrupts_enabled(FALSE);
	_cnputs(&c, 1);
	ml_set_interrupts_enabled(state);
}

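/*
 * Drain the shared ring to the console device in chunks of at most
 * MAX_INT_DISABLED_FLUSH_SIZE characters, re-enabling interrupts between
 * chunks so other cpus can keep writing. Unless panicking or suspending,
 * give up the drain role after MAX_TOTAL_FLUSH_SIZE characters.
 */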
static void
console_ring_try_empty(void)
{
#ifdef __x86_64__
	boolean_t handle_tlb_flushes = (ml_get_interrupts_enabled() == FALSE);
#endif /* __x86_64__ */

	int nchars_out = 0;
	int total_chars_out = 0;
	int size_before_wrap = 0;

	do {
#ifdef __x86_64__
		if (handle_tlb_flushes)
			handle_pending_TLB_flushes();
#endif /* __x86_64__ */

		/*
		 * Try to get the read lock on the ring buffer to empty it.
		 * If this fails, someone else is already emptying...
		 */
		if (!simple_lock_try(&console_ring.read_lock)) {
			/*
			 * If multiple cores are spinning trying to empty the buffer,
			 * we may suffer lock starvation (get the read lock, but
			 * never the write lock, with other cores unable to get the
			 * read lock). As a result, insert a delay on failure, to
			 * let other cores have a turn.
			 */
			delay(1);
			return;
		}

		boolean_t state = ml_set_interrupts_enabled(FALSE);

		/* Indicate that we're in the process of writing a block of data to the console. */
		(void)hw_atomic_add(&console_output, 1);

		simple_lock_try_lock_loop(&console_ring.write_lock);

		/* Try a small chunk at a time, so we allow writes from other cpus into the buffer. */
		nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE);

		/* Account for data to be read before wrap around. */
		size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
		if (nchars_out > size_before_wrap)
			nchars_out = size_before_wrap;

		if (nchars_out > 0) {
			_cnputs(console_ring.read_ptr, nchars_out);
			console_ring.read_ptr =
			    console_ring.buffer + ((console_ring.read_ptr - console_ring.buffer + nchars_out) % console_ring.len);
			console_ring.used -= nchars_out;
			total_chars_out += nchars_out;
		}

		simple_unlock(&console_ring.write_lock);

		(void)hw_atomic_sub(&console_output, 1);

		simple_unlock(&console_ring.read_lock);

		ml_set_interrupts_enabled(state);

		/*
		 * In case we end up being the console drain thread
		 * for far too long, break out. Except in panic/suspend cases
		 * where we should clear out the full buffer.
		 */
		if (debug_mode == 0 && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE))
			break;

	} while (nchars_out > 0);
}

void
console_suspend(void)
{
	console_suspended = true;
	console_ring_try_empty();
}

void
console_resume(void)
{
	console_suspended = false;
}

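/*
 * Blocking write of an arbitrary buffer into the console ring, draining
 * the ring whenever the next chunk does not fit.
 */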
void
console_write(char * str, int size)
{
	console_init();
	int chunk_size = size;
	int i = 0;

	if (size > console_ring.len)
		chunk_size = CPU_CONS_BUF_SIZE;

	while (size > 0) {
		boolean_t state = ml_set_interrupts_enabled(FALSE);

		/* Don't overrun the source buffer on the final partial chunk. */
		if (chunk_size > size)
			chunk_size = size;

		simple_lock_try_lock_loop(&console_ring.write_lock);
		while (chunk_size > console_ring_space()) {
			simple_unlock(&console_ring.write_lock);
			ml_set_interrupts_enabled(state);

			console_ring_try_empty();

			state = ml_set_interrupts_enabled(FALSE);
			simple_lock_try_lock_loop(&console_ring.write_lock);
		}

		for (i = 0; i < chunk_size; i++)
			console_ring_put(str[i]);

		str = &str[i];
		size -= chunk_size;
		simple_unlock(&console_ring.write_lock);
		ml_set_interrupts_enabled(state);
	}

	console_ring_try_empty();
}

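/*
 * Buffered single-character output: characters accumulate in the per-cpu
 * buffer and are flushed to the shared ring (and from there to the device)
 * on newline or when the per-cpu buffer fills.
 */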
void
cnputc(char c)
{
	console_buf_t * cbp;
	cpu_data_t * cpu_data_p;
	boolean_t state;
	boolean_t needs_print = TRUE;
	char * cp;

restart:
	mp_disable_preemption();
	cpu_data_p = current_cpu_datap();
	cbp = (console_buf_t *)cpu_data_p->cpu_console_buf;
	if (console_suspended || cbp == NULL) {
		mp_enable_preemption();
		/* Put directly if the console ring is not initialized or we're heading into suspend. */
		_cnputs(&c, 1);
		return;
	}

#ifndef __x86_64__
	/* Is there a panic backtrace going on? */
	if (cpu_data_p->PAB_active) {
		/* If another processor was in the process of emptying the
		 * console ring buffer when it received the panic backtrace
		 * signal, that processor will be spinning in DebugXCall()
		 * waiting for the panicking processor to finish printing
		 * the backtrace. But the panicking processor will never
		 * be able to obtain the ring buffer lock since it is
		 * owned by a processor that's spinning in DebugXCall().
		 * Blow away any locks that other processors may have on
		 * the console ring buffer so that the backtrace can
		 * complete.
		 */
		console_ring_lock_init();
	}
#endif /* __x86_64__ */

	state = ml_set_interrupts_enabled(FALSE);

	/*
	 * Add to the per-cpu buffer.
	 * If the cpu buffer is full, we'll flush, then try
	 * another put. If it fails a second time... screw
	 * it.
	 */
	if (needs_print && !cpu_buffer_put(cbp, c)) {
		simple_lock_try_lock_loop(&console_ring.write_lock);

		if (cpu_buffer_size(cbp) > console_ring_space()) {
			simple_unlock(&console_ring.write_lock);
			ml_set_interrupts_enabled(state);
			mp_enable_preemption();

			console_ring_try_empty();
			goto restart;
		}

		for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
			console_ring_put(*cp);
		cbp->buf_ptr = cbp->buf_base;
		simple_unlock(&console_ring.write_lock);

		cpu_buffer_put(cbp, c);
	}

	needs_print = FALSE;

	if (c != '\n') {
		ml_set_interrupts_enabled(state);
		mp_enable_preemption();
		return;
	}

	/* We printed a newline, time to flush the CPU buffer to the global buffer. */
	simple_lock_try_lock_loop(&console_ring.write_lock);

	/*
	 * Is there enough space in the shared ring buffer?
	 * Try to empty if not.
	 * Note, we want the entire local buffer to fit to
	 * avoid another cpu interjecting.
	 */

	if (cpu_buffer_size(cbp) > console_ring_space()) {
		simple_unlock(&console_ring.write_lock);
		ml_set_interrupts_enabled(state);
		mp_enable_preemption();

		console_ring_try_empty();

		goto restart;
	}

	for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
		console_ring_put(*cp);

	cbp->buf_ptr = cbp->buf_base;
	simple_unlock(&console_ring.write_lock);
	ml_set_interrupts_enabled(state);
	mp_enable_preemption();

	console_ring_try_empty();

	return;
}

int
_serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t raw)
{
	int c;
	do {
		c = serial_getc();
	} while (wait && c < 0);

	return c;
}

static void
_serial_putc(__unused int a, __unused int b, int c)
{
	serial_putc(c);
}

int
cngetc(void)
{
	return cons_ops[cons_ops_index].getc(0, 0, TRUE, FALSE);
}

int
cnmaygetc(void)
{
	return cons_ops[cons_ops_index].getc(0, 0, FALSE, FALSE);
}

int
vcgetc(__unused int l, __unused int u, __unused boolean_t wait, __unused boolean_t raw)
{
	char c;

	if (0 == (*PE_poll_input)(0, &c))
		return c;
	else
		return 0;
}

/* So we can re-write the serial device functions at boot-time */
void
console_set_serial_ops(struct console_ops * newops)
{
	cons_ops[SERIAL_CONS_OPS] = *newops;
}