]>
Commit | Line | Data |
---|---|---|
39037602 A |
1 | /* |
2 | * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #ifdef __x86_64__ | |
30 | #include <i386/mp.h> | |
31 | #include <i386/cpu_data.h> | |
32 | #include <i386/bit_routines.h> | |
33 | #include <i386/machine_cpu.h> | |
34 | #include <i386/machine_routines.h> | |
35 | #include <i386/misc_protos.h> | |
36 | #include <i386/serial_io.h> | |
37 | #endif /* __x86_64__ */ | |
38 | ||
39 | #include <libkern/OSAtomic.h> | |
40 | #include <vm/vm_kern.h> | |
41 | #include <vm/vm_map.h> | |
42 | #include <console/video_console.h> | |
43 | #include <console/serial_protos.h> | |
44 | #include <kern/kalloc.h> | |
45 | #include <kern/thread.h> | |
46 | #include <kern/cpu_data.h> | |
813fb2f6 | 47 | #include <libkern/section_keywords.h> |
39037602 A |
48 | |
49 | ||
50 | ||
#ifndef MAX_CPU_SLOTS
#define MAX_CPU_SLOTS (MAX_CPUS)
#endif

/*
 * Kernel-wide console ring buffer.  Producers (cnputc, console_write)
 * append under write_lock; the drain path (console_ring_try_empty)
 * takes read_lock while copying characters out to the console device.
 */
static struct {
	char * buffer;      /* base of ring storage; per-cpu buffers live past 'len' */
	int len;            /* ring capacity in bytes; also the "initialized" flag (see console_init) */
	int used;           /* bytes currently queued in the ring */
	char * write_ptr;   /* next byte to write; wraps at buffer + len */
	char * read_ptr;    /* next byte to drain; wraps at buffer + len */
	decl_simple_lock_data(, read_lock);
	decl_simple_lock_data(, write_lock);
} console_ring;

/* Serializes access to the (non-reentrant) console device in _cnputs(). */
hw_lock_data_t cnputc_lock;
/* Number of cores currently pushing a block of output to the device; polled by cnputcusr(). */
static volatile uint32_t console_output = 0;
67 | ||
/*
 * New allocation mechanism for console buffers
 * Total allocation: 1 * PAGE_SIZE
 * - Each cpu gets CPU_CONS_BUF_SIZE buffer
 * - Kernel wide console ring gets PAGE_SIZE - MAX_CPU_SLOTS * CPU_CONS_BUF_SIZE
 *
 * At the return from console_init() the memory is setup as follows:
 * +----------------------------+-------------+-------------+-------------+-------------+
 * |console ring buffer---------|f2eec075-----|f2eec075-----|f2eec075-----|f2eec075-----|
 * +----------------------------+-------------+-------------+-------------+-------------+
 * Each cpu allocation will find the first (f2eec075) and use that buffer.
 *
 */

#define CPU_CONS_BUF_SIZE 256
/* Sentinel stamped on an unclaimed per-cpu slot (see diagram above). */
#define CPU_BUF_FREE_HEX 0xf2eec075

/* Whole allocation (ring + all per-cpu slots), rounded up to a page. */
#define KERN_CONSOLE_BUF_SIZE vm_map_round_page(CPU_CONS_BUF_SIZE *(MAX_CPU_SLOTS + 1), PAGE_SIZE - 1)
/* Bytes left over for the shared ring after carving out the per-cpu slots. */
#define KERN_CONSOLE_RING_SIZE (KERN_CONSOLE_BUF_SIZE - (CPU_CONS_BUF_SIZE * MAX_CPU_SLOTS))

/*
 * A serial line running at 115200 bps can output ~11.5 characters per millisecond.
 * Synchronous serial logging with preemption+interrupts disabled fundamentally prevents us
 * from hitting expected scheduling deadlines, but we can at least tone it down a bit.
 *
 * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148)
 *
 * Keep interrupt disabled periods shorter than 1ms
 */
#define MAX_INT_DISABLED_FLUSH_SIZE 8
/* Per-call drain cap for console_ring_try_empty() outside panic/suspend. */
#define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE)
99 | ||
/*
 * Per-cpu staging buffer.  cnputc() accumulates characters here and
 * flushes them into the shared ring on newline (or when full), so a
 * line from one cpu stays contiguous in the ring.  The whole struct
 * is sized to fit exactly in one CPU_CONS_BUF_SIZE slot at the tail
 * of console_ring.buffer.
 */
typedef struct console_buf {
	char * buf_base;  /* first byte of buf[] */
	char * buf_end;   /* one past the last byte of buf[] */
	char * buf_ptr;   /* next free byte; buf_base <= buf_ptr <= buf_end */
#define CPU_BUFFER_LEN (CPU_CONS_BUF_SIZE - 3 * (sizeof(char *)))
	char buf[CPU_BUFFER_LEN];
} console_buf_t;

/* Low-level serial driver entry points (platform code). */
extern int serial_getc(void);
extern void serial_putc(char);

static void _serial_putc(int, int, int);
112 | ||
/*
 * Console backend dispatch table: slot 0 is the serial console,
 * slot 1 (VC_CONS_OPS) the video console.  Read-only after early boot.
 */
SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = {
	{
	    .putc = _serial_putc, .getc = _serial_getc,
	},
	{
	    .putc = vcputc, .getc = vcgetc,
	},
};

SECURITY_READ_ONLY_EARLY(uint32_t) nconsops = (sizeof cons_ops / sizeof cons_ops[0]);

/* Index of the active backend in cons_ops[]; defaults to the video console. */
uint32_t cons_ops_index = VC_CONS_OPS;


/* Set while heading into suspend; makes cnputc() bypass the ring buffer. */
static bool console_suspended = false;
128 | ||
129 | static void | |
130 | console_ring_lock_init(void) | |
131 | { | |
132 | simple_lock_init(&console_ring.read_lock, 0); | |
133 | simple_lock_init(&console_ring.write_lock, 0); | |
134 | } | |
135 | ||
/*
 * One-time setup of the shared ring and the per-cpu buffer pool.
 * Idempotent: the compare-and-swap on console_ring.len lets exactly
 * one caller perform the allocation; later callers return at once.
 */
void
console_init(void)
{
	int ret, i;
	uint32_t * p;

	/* len doubles as the "already initialized" flag (0 -> ring size). */
	if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len))
		return;

	assert(console_ring.len > 0);

	ret = kmem_alloc(kernel_map, (vm_offset_t *)&console_ring.buffer, KERN_CONSOLE_BUF_SIZE, VM_KERN_MEMORY_OSFMK);
	if (ret != KERN_SUCCESS) {
		panic("console_ring_init() failed to allocate ring buffer, error %d\n", ret);
	}

	/* setup memory for per cpu console buffers: stamp each slot free */
	for (i = 0; i < MAX_CPU_SLOTS; i++) {
		p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
		*p = CPU_BUF_FREE_HEX;
	}

	console_ring.used = 0;
	console_ring.read_ptr = console_ring.buffer;
	console_ring.write_ptr = console_ring.buffer;
	console_ring_lock_init();
	hw_lock_init(&cnputc_lock);
}
164 | ||
/*
 * Claim one free per-cpu console buffer from the pool at the tail of
 * console_ring.buffer and return it initialized, or NULL on failure.
 * Slots are claimed lock-free by CAS-ing the CPU_BUF_FREE_HEX sentinel
 * to 0, so concurrent callers each get a distinct slot.
 */
void *
console_cpu_alloc(__unused boolean_t boot_processor)
{
	console_buf_t * cbp;
	int i;
	uint32_t * p;

	/* Safe even if already initialized (console_init is idempotent). */
	console_init();
	assert(console_ring.buffer != NULL);

	/* select the next slot from the per cpu buffers at end of console_ring.buffer */
	for (i = 0; i < MAX_CPU_SLOTS; i++) {
		p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
		if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p))
			break;
	}
	assert(i < MAX_CPU_SLOTS);

	cbp = (console_buf_t *)(uintptr_t)p;
	/* Belt-and-braces bounds check for release builds (assert above is debug-only). */
	if ((uintptr_t)cbp >= (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE) {
		printf("console_cpu_alloc() failed to allocate cpu buffer\n");
		return NULL;
	}

	cbp->buf_base = (char *)&cbp->buf;
	cbp->buf_ptr = cbp->buf_base;
	cbp->buf_end = cbp->buf_base + CPU_BUFFER_LEN;
	return (void *)cbp;
}
194 | ||
195 | void | |
196 | console_cpu_free(void * buf) | |
197 | { | |
198 | assert((uintptr_t)buf > (uintptr_t)console_ring.buffer); | |
199 | assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE); | |
200 | if (buf != NULL) | |
201 | *(uint32_t *)buf = CPU_BUF_FREE_HEX; | |
202 | } | |
203 | ||
204 | static inline int | |
205 | console_ring_space(void) | |
206 | { | |
207 | return console_ring.len - console_ring.used; | |
208 | } | |
209 | ||
210 | static boolean_t | |
211 | console_ring_put(char ch) | |
212 | { | |
213 | if (console_ring.used < console_ring.len) { | |
214 | console_ring.used++; | |
215 | *console_ring.write_ptr++ = ch; | |
216 | if (console_ring.write_ptr - console_ring.buffer == console_ring.len) | |
217 | console_ring.write_ptr = console_ring.buffer; | |
218 | return TRUE; | |
219 | } else { | |
220 | return FALSE; | |
221 | } | |
222 | } | |
223 | ||
224 | static inline boolean_t | |
225 | cpu_buffer_put(console_buf_t * cbp, char ch) | |
226 | { | |
227 | if (ch != '\0' && cbp->buf_ptr < cbp->buf_end) { | |
228 | *(cbp->buf_ptr++) = ch; | |
229 | return TRUE; | |
230 | } else { | |
231 | return FALSE; | |
232 | } | |
233 | } | |
234 | ||
235 | static inline int | |
236 | cpu_buffer_size(console_buf_t * cbp) | |
237 | { | |
238 | return (int)(cbp->buf_ptr - cbp->buf_base); | |
239 | } | |
240 | ||
/*
 * Write 'size' raw characters to the active console backend, expanding
 * '\n' into "\r\n".  Output is serialized under cnputc_lock with
 * preemption disabled because the device putc routines are not
 * reentrant.
 */
static inline void
_cnputs(char * c, int size)
{
	/* The console device output routines are assumed to be
	 * non-reentrant.
	 */
#ifdef __x86_64__
	uint32_t lock_timeout_ticks = UINT32_MAX;   /* effectively: wait forever */
#else
	uint32_t lock_timeout_ticks = LockTimeOut;
#endif

	mp_disable_preemption();
	if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks)) {
		/* If we timed out on the lock, and we're in the debugger,
		 * copy lock data for debugging and break the lock.
		 */
		hw_lock_data_t _shadow_lock;
		memcpy(&_shadow_lock, &cnputc_lock, sizeof(cnputc_lock));
		if (debug_mode) {
			/* Since hw_lock_to takes a pre-emption count...*/
			mp_enable_preemption();
			/* Forcibly re-arm and take the lock so debugger output can proceed. */
			hw_lock_init(&cnputc_lock);
			hw_lock_lock(&cnputc_lock);
		} else {
			panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock,
			      _shadow_lock.lock_data, current_thread());
		}
	}

	while (size-- > 0) {
		cons_ops[cons_ops_index].putc(0, 0, *c);
		if (*c == '\n')
			cons_ops[cons_ops_index].putc(0, 0, '\r');
		c++;
	}

	hw_lock_unlock(&cnputc_lock);
	mp_enable_preemption();
}
281 | ||
/* Emit a single character straight to the console device, bypassing
 * the per-cpu and ring buffers. */
void
cnputc_unbuffered(char c)
{
	char ch = c;

	_cnputs(&ch, 1);
}
287 | ||
/*
 * Unbuffered single-character output path (e.g. for debugger/user
 * console I/O): waits for any in-flight ring drain to finish, then
 * writes the character directly with interrupts disabled.
 */
void
cnputcusr(char c)
{
	boolean_t state;

	/* Spin (with pre-emption enabled) waiting for console_ring_try_empty()
	 * to complete output. There is a small window here where we could
	 * end up with a stale value of console_output, but it's unlikely,
	 * and _cnputs(), which outputs to the console device, is internally
	 * synchronized. There's something of a conflict between the
	 * character-at-a-time (with pre-emption enabled) unbuffered
	 * output model here, and the buffered output from cnputc(),
	 * whose consumers include printf() ( which outputs a sequence
	 * with pre-emption disabled, and should be safe to call with
	 * interrupts off); we don't want to disable pre-emption indefinitely
	 * here, and spinlocks and mutexes are inappropriate.
	 */
	while (console_output != 0)
		;

	/*
	 * We disable interrupts to avoid issues caused by rendevous IPIs
	 * and an interruptible core holding the lock while an uninterruptible
	 * core wants it.  Stackshot is the prime example of this.
	 */
	state = ml_set_interrupts_enabled(FALSE);
	_cnputs(&c, 1);
	ml_set_interrupts_enabled(state);
}
317 | ||
/*
 * Drain the shared ring to the console device in small chunks.
 * Non-blocking on the read lock: if another core is already draining,
 * back off and return.  Interrupts are disabled only per-chunk
 * (MAX_INT_DISABLED_FLUSH_SIZE bytes) to bound latency; outside of
 * panic/suspend the total drained per call is also capped.
 */
static void
console_ring_try_empty(void)
{
#ifdef __x86_64__
	boolean_t handle_tlb_flushes = (ml_get_interrupts_enabled() == FALSE);
#endif /* __x86_64__ */

	int nchars_out = 0;
	int total_chars_out = 0;
	int size_before_wrap = 0;

	do {
#ifdef __x86_64__
		/* Don't stall cross-cpu TLB shootdowns while spinning here. */
		if (handle_tlb_flushes)
			handle_pending_TLB_flushes();
#endif /* __x86_64__ */

		/*
		 * Try to get the read lock on the ring buffer to empty it.
		 * If this fails someone else is already emptying...
		 */
		if (!simple_lock_try(&console_ring.read_lock)) {
			/*
			 * If multiple cores are spinning trying to empty the buffer,
			 * we may suffer lock starvation (get the read lock, but
			 * never the write lock, with other cores unable to get the
			 * read lock).  As a result, insert a delay on failure, to
			 * let other cores have a turn.
			 */
			delay(1);
			return;
		}

		boolean_t state = ml_set_interrupts_enabled(FALSE);

		/* Indicate that we're in the process of writing a block of data to the console. */
		(void)hw_atomic_add(&console_output, 1);

		simple_lock_try_lock_loop(&console_ring.write_lock);

		/* try small chunk at a time, so we allow writes from other cpus into the buffer */
		nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE);

		/* account for data to be read before wrap around */
		size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
		if (nchars_out > size_before_wrap)
			nchars_out = size_before_wrap;

		if (nchars_out > 0) {
			_cnputs(console_ring.read_ptr, nchars_out);
			/* Advance (and wrap) the read cursor past what we just emitted. */
			console_ring.read_ptr =
			    console_ring.buffer + ((console_ring.read_ptr - console_ring.buffer + nchars_out) % console_ring.len);
			console_ring.used -= nchars_out;
			total_chars_out += nchars_out;
		}

		simple_unlock(&console_ring.write_lock);

		(void)hw_atomic_sub(&console_output, 1);

		simple_unlock(&console_ring.read_lock);

		ml_set_interrupts_enabled(state);

		/*
		 * In case we end up being the console drain thread
		 * for far too long, break out. Except in panic/suspend cases
		 * where we should clear out full buffer.
		 */
		if (debug_mode == 0 && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE))
			break;

	} while (nchars_out > 0);
}
392 | ||
393 | ||
394 | void | |
395 | console_suspend() | |
396 | { | |
397 | console_suspended = true; | |
398 | console_ring_try_empty(); | |
399 | } | |
400 | ||
401 | void | |
402 | console_resume() | |
403 | { | |
404 | console_suspended = false; | |
405 | } | |
406 | ||
407 | void | |
408 | console_write(char * str, int size) | |
409 | { | |
410 | console_init(); | |
411 | int chunk_size = size; | |
412 | int i = 0; | |
413 | ||
414 | if (size > console_ring.len) | |
415 | chunk_size = CPU_CONS_BUF_SIZE; | |
416 | ||
417 | while (size > 0) { | |
418 | boolean_t state = ml_set_interrupts_enabled(FALSE); | |
419 | ||
420 | simple_lock_try_lock_loop(&console_ring.write_lock); | |
421 | while (chunk_size > console_ring_space()) { | |
422 | simple_unlock(&console_ring.write_lock); | |
423 | ml_set_interrupts_enabled(state); | |
424 | ||
425 | console_ring_try_empty(); | |
426 | ||
427 | state = ml_set_interrupts_enabled(FALSE); | |
428 | simple_lock_try_lock_loop(&console_ring.write_lock); | |
429 | } | |
430 | ||
431 | for (i = 0; i < chunk_size; i++) | |
432 | console_ring_put(str[i]); | |
433 | ||
434 | str = &str[i]; | |
435 | size -= chunk_size; | |
436 | simple_unlock(&console_ring.write_lock); | |
437 | ml_set_interrupts_enabled(state); | |
438 | } | |
439 | ||
440 | console_ring_try_empty(); | |
441 | } | |
442 | ||
/*
 * Buffered console output: stage the character in this cpu's local
 * buffer and flush the whole buffer into the shared ring on newline
 * (or when the local buffer fills).  Falls back to direct device
 * output when the ring isn't set up or we're suspending.  If the ring
 * lacks space we drop all locks, drain, and restart from the top.
 */
void
cnputc(char c)
{
	console_buf_t * cbp;
	cpu_data_t * cpu_data_p;
	boolean_t state;
	boolean_t needs_print = TRUE;
	char * cp;

restart:
	mp_disable_preemption();
	cpu_data_p = current_cpu_datap();
	cbp = (console_buf_t *)cpu_data_p->cpu_console_buf;
	if (console_suspended || cbp == NULL) {
		mp_enable_preemption();
		/* Put directly if console ring is not initialized or we're heading into suspend */
		_cnputs(&c, 1);
		return;
	}

#ifndef __x86_64__
	/* Is there a panic backtrace going on? */
	if (cpu_data_p->PAB_active) {
		/* If another processor was in the process of emptying the
		 * console ring buffer when it received the panic backtrace
		 * signal, that processor will be spinning in DebugXCall()
		 * waiting for the panicking processor to finish printing
		 * the backtrace.  But panicking processor will never
		 * be able to obtain the ring buffer lock since it is
		 * owned by a processor that's spinning in DebugXCall().
		 * Blow away any locks that other processors may have on
		 * the console ring buffer so that the backtrace can
		 * complete.
		 */
		console_ring_lock_init();
	}
#endif /* __x86_64__ */

	state = ml_set_interrupts_enabled(FALSE);

	/*
	 * add to stack buf
	 * If the cpu buffer is full, we'll flush, then try
	 * another put.  If it fails a second time... screw
	 * it.
	 */
	if (needs_print && !cpu_buffer_put(cbp, c)) {
		simple_lock_try_lock_loop(&console_ring.write_lock);

		/* Not enough ring space for the local buffer: drain and retry. */
		if (cpu_buffer_size(cbp) > console_ring_space()) {
			simple_unlock(&console_ring.write_lock);
			ml_set_interrupts_enabled(state);
			mp_enable_preemption();

			console_ring_try_empty();
			goto restart;
		}

		for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
			console_ring_put(*cp);
		cbp->buf_ptr = cbp->buf_base;
		simple_unlock(&console_ring.write_lock);

		/* Second (and last) attempt at staging 'c'; failure is tolerated. */
		cpu_buffer_put(cbp, c);
	}

	needs_print = FALSE;

	if (c != '\n') {
		ml_set_interrupts_enabled(state);
		mp_enable_preemption();
		return;
	}

	/* We printed a newline, time to flush the CPU buffer to the global buffer */
	simple_lock_try_lock_loop(&console_ring.write_lock);

	/*
	 * Is there enough space in the shared ring buffer?
	 * Try to empty if not.
	 * Note, we want the entire local buffer to fit to
	 * avoid another cpu interjecting.
	 */

	if (cpu_buffer_size(cbp) > console_ring_space()) {
		simple_unlock(&console_ring.write_lock);
		ml_set_interrupts_enabled(state);
		mp_enable_preemption();

		console_ring_try_empty();

		goto restart;
	}

	for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
		console_ring_put(*cp);

	cbp->buf_ptr = cbp->buf_base;
	simple_unlock(&console_ring.write_lock);
	ml_set_interrupts_enabled(state);
	mp_enable_preemption();

	console_ring_try_empty();

	return;
}
549 | ||
550 | int | |
551 | _serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t raw) | |
552 | { | |
553 | int c; | |
554 | do { | |
555 | c = serial_getc(); | |
556 | } while (wait && c < 0); | |
557 | ||
558 | ||
559 | return c; | |
560 | } | |
561 | ||
562 | static void | |
563 | _serial_putc(__unused int a, __unused int b, int c) | |
564 | { | |
565 | serial_putc(c); | |
566 | } | |
567 | ||
568 | int | |
569 | cngetc(void) | |
570 | { | |
571 | return cons_ops[cons_ops_index].getc(0, 0, TRUE, FALSE); | |
572 | } | |
573 | ||
574 | int | |
575 | cnmaygetc(void) | |
576 | { | |
577 | return cons_ops[cons_ops_index].getc(0, 0, FALSE, FALSE); | |
578 | } | |
579 | ||
580 | int | |
581 | vcgetc(__unused int l, __unused int u, __unused boolean_t wait, __unused boolean_t raw) | |
582 | { | |
583 | char c; | |
584 | ||
585 | if (0 == (*PE_poll_input)(0, &c)) | |
586 | return c; | |
587 | else | |
588 | return 0; | |
589 | } | |
590 |