/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef __x86_64__
#include <i386/mp.h>
#include <i386/cpu_data.h>
#include <i386/bit_routines.h>
#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
#include <i386/serial_io.h>
#endif /* __x86_64__ */

#include <libkern/OSAtomic.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <console/video_console.h>
#include <console/serial_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/cpu_data.h>
#include <libkern/section_keywords.h>

#if __arm__ || __arm64__
#include <machine/machine_routines.h>
#include <arm/cpu_data_internal.h>
#endif

#ifndef MAX_CPU_SLOTS
#define MAX_CPU_SLOTS (MAX_CPUS)
#endif

static struct {
	char * buffer;
	int len;
	int used;
	char * write_ptr;
	char * read_ptr;
	decl_simple_lock_data(, read_lock);
	decl_simple_lock_data(, write_lock);
} console_ring;

hw_lock_data_t cnputc_lock;
static volatile uint32_t console_output = 0;

/*
 * Allocation mechanism for the console buffers
 * Total allocation: 1 * PAGE_SIZE
 * - Each cpu gets a CPU_CONS_BUF_SIZE buffer
 * - The kernel-wide console ring gets PAGE_SIZE - MAX_CPU_SLOTS * CPU_CONS_BUF_SIZE
 *
 * On return from console_init() the memory is set up as follows:
 * +----------------------------+-------------+-------------+-------------+-------------+
 * |console ring buffer---------|f2eec075-----|f2eec075-----|f2eec075-----|f2eec075-----|
 * +----------------------------+-------------+-------------+-------------+-------------+
 * Each cpu allocation finds the first free slot (marked f2eec075) and claims that buffer.
 */

#define CPU_CONS_BUF_SIZE 256
#define CPU_BUF_FREE_HEX 0xf2eec075

#define KERN_CONSOLE_BUF_SIZE vm_map_round_page(CPU_CONS_BUF_SIZE * (MAX_CPU_SLOTS + 1), PAGE_SIZE - 1)
#define KERN_CONSOLE_RING_SIZE (KERN_CONSOLE_BUF_SIZE - (CPU_CONS_BUF_SIZE * MAX_CPU_SLOTS))

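/*
 * Worked example of the sizing above (illustrative only; assumes
 * PAGE_SIZE = 4096 and MAX_CPU_SLOTS = 8, neither of which is
 * guaranteed on every configuration):
 *   KERN_CONSOLE_BUF_SIZE  = round_page(256 * (8 + 1)) = round_page(2304) = 4096
 *   per-cpu slot area      = 256 * 8                                      = 2048
 *   KERN_CONSOLE_RING_SIZE = 4096 - 2048                                  = 2048
 * i.e. the ring gets whatever is left of the allocation after the
 * per-cpu slots are carved off its tail.
 */
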
/*
 * A serial line running at 115200 bps can output ~11.5 characters per millisecond.
 * Synchronous serial logging with preemption and interrupts disabled fundamentally
 * prevents us from hitting expected scheduling deadlines, but we can at least tone
 * it down a bit.
 *
 * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148)
 *
 * Keep interrupt-disabled periods shorter than 1 ms.
 */
#define MAX_INT_DISABLED_FLUSH_SIZE 8
#define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE)

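/*
 * Back-of-the-envelope check (assuming 8N1 framing, i.e. 10 bits on the
 * wire per character): 115200 bps / 10 ≈ 11520 chars/s ≈ 11.5 chars/ms.
 * Flushing MAX_INT_DISABLED_FLUSH_SIZE = 8 characters per
 * interrupts-disabled window therefore costs roughly 8 / 11.5 ≈ 0.7 ms,
 * which stays under the 1 ms target above.
 */
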
typedef struct console_buf {
	char * buf_base;
	char * buf_end;
	char * buf_ptr;
#define CPU_BUFFER_LEN (CPU_CONS_BUF_SIZE - 3 * (sizeof(char *)))
	char buf[CPU_BUFFER_LEN];
} console_buf_t;

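/*
 * Sanity-check sketch: CPU_BUFFER_LEN is sized so that a console_buf_t
 * occupies exactly one CPU_CONS_BUF_SIZE slot, which the slot arithmetic
 * in console_init() and console_cpu_alloc() relies on. A compile-time
 * check along these lines would catch layout drift (hypothetical; not
 * part of the original file):
 *
 *   _Static_assert(sizeof(console_buf_t) == CPU_CONS_BUF_SIZE,
 *                  "console_buf_t must fill its per-cpu slot exactly");
 */
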
extern int serial_getc(void);
extern void serial_putc(char);

static void _serial_putc(int, int, int);

SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = {
	{
	    .putc = _serial_putc, .getc = _serial_getc,
	},
	{
	    .putc = vcputc, .getc = vcgetc,
	},
};

SECURITY_READ_ONLY_EARLY(uint32_t) nconsops = (sizeof cons_ops / sizeof cons_ops[0]);

uint32_t cons_ops_index = VC_CONS_OPS;

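/*
 * cons_ops_index selects the active backend above: index 0 is the serial
 * path (SERIAL_CONS_OPS in serial_protos.h), index 1 the video console
 * (VC_CONS_OPS, the default here). A sketch of how a caller might switch
 * to the serial console (illustrative; the real switching logic lives
 * outside this file):
 *
 *   if (serialmode & SERIALMODE_OUTPUT)
 *       cons_ops_index = SERIAL_CONS_OPS;
 */
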
#ifdef __arm__
// NMI static variables
#define NMI_STRING_SIZE 32
char nmi_string[NMI_STRING_SIZE] = "afDIGHr84A84jh19Kphgp428DNPdnapq";
static int nmi_counter = 0;
#endif /* __arm__ */

static bool console_suspended = false;

/* Wrapper for ml_set_interrupts_enabled */
static void
console_restore_interrupts_state(boolean_t state)
{
#if INTERRUPT_MASKED_DEBUG
	/*
	 * The serial console holds interrupts disabled for far too long
	 * and would trip the spin-debugger. If we are about to reenable
	 * interrupts, clear the timer and avoid panicking on the delay.
	 * Otherwise, let the code that printed with interrupts disabled
	 * take the panic when it reenables interrupts.
	 * Hopefully one day this is fixed so that this workaround is unnecessary.
	 */
	if (state == TRUE)
		ml_spin_debug_clear_self();
#endif /* INTERRUPT_MASKED_DEBUG */
	ml_set_interrupts_enabled(state);
}

static void
console_ring_lock_init(void)
{
	simple_lock_init(&console_ring.read_lock, 0);
	simple_lock_init(&console_ring.write_lock, 0);
}

void
console_init(void)
{
	int ret, i;
	uint32_t * p;

	if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len))
		return;

	assert(console_ring.len > 0);

	ret = kmem_alloc(kernel_map, (vm_offset_t *)&console_ring.buffer, KERN_CONSOLE_BUF_SIZE, VM_KERN_MEMORY_OSFMK);
	if (ret != KERN_SUCCESS) {
		panic("console_init() failed to allocate ring buffer, error %d\n", ret);
	}

	/* set up memory for the per-cpu console buffers */
	for (i = 0; i < MAX_CPU_SLOTS; i++) {
		p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
		*p = CPU_BUF_FREE_HEX;
	}

	console_ring.used = 0;
	console_ring.read_ptr = console_ring.buffer;
	console_ring.write_ptr = console_ring.buffer;
	console_ring_lock_init();
	hw_lock_init(&cnputc_lock);
}

void *
console_cpu_alloc(__unused boolean_t boot_processor)
{
	console_buf_t * cbp;
	int i;
	uint32_t * p = NULL;

	console_init();
	assert(console_ring.buffer != NULL);

	/* select the next free slot from the per-cpu buffers at the end of console_ring.buffer */
	for (i = 0; i < MAX_CPU_SLOTS; i++) {
		p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
		if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p))
			break;
	}
	assert(i < MAX_CPU_SLOTS);

	cbp = (console_buf_t *)(uintptr_t)p;
	if ((uintptr_t)cbp >= (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE) {
		printf("console_cpu_alloc() failed to allocate cpu buffer\n");
		return NULL;
	}

	cbp->buf_base = (char *)&cbp->buf;
	cbp->buf_ptr = cbp->buf_base;
	cbp->buf_end = cbp->buf_base + CPU_BUFFER_LEN;
	return (void *)cbp;
}

void
console_cpu_free(void * buf)
{
	assert((uintptr_t)buf > (uintptr_t)console_ring.buffer);
	assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE);
	if (buf != NULL)
		*(uint32_t *)buf = CPU_BUF_FREE_HEX;
}

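/*
 * Usage sketch (hypothetical caller; the real call sites live in the
 * per-architecture cpu bring-up/tear-down paths, which stash the result
 * in the cpu_data cpu_console_buf field consumed by cnputc() below):
 *
 *   void *cbuf = console_cpu_alloc(FALSE);
 *   if (cbuf != NULL) {
 *       current_cpu_datap()->cpu_console_buf = cbuf;
 *       ...
 *       console_cpu_free(cbuf);   // marks the slot CPU_BUF_FREE_HEX again
 *   }
 */
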
static inline int
console_ring_space(void)
{
	return console_ring.len - console_ring.used;
}

static boolean_t
console_ring_put(char ch)
{
	if (console_ring.used < console_ring.len) {
		console_ring.used++;
		*console_ring.write_ptr++ = ch;
		if (console_ring.write_ptr - console_ring.buffer == console_ring.len)
			console_ring.write_ptr = console_ring.buffer;
		return TRUE;
	} else {
		return FALSE;
	}
}

static inline boolean_t
cpu_buffer_put(console_buf_t * cbp, char ch)
{
	if (ch != '\0' && cbp->buf_ptr < cbp->buf_end) {
		*(cbp->buf_ptr++) = ch;
		return TRUE;
	} else {
		return FALSE;
	}
}

static inline int
cpu_buffer_size(console_buf_t * cbp)
{
	return (int)(cbp->buf_ptr - cbp->buf_base);
}

static inline void
_cnputs(char * c, int size)
{
	/*
	 * The console device output routines are assumed to be
	 * non-reentrant.
	 */
#ifdef __x86_64__
	uint32_t lock_timeout_ticks = UINT32_MAX;
#else
	uint32_t lock_timeout_ticks = LockTimeOut;
#endif

	mp_disable_preemption();
	if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks)) {
		/*
		 * If we timed out on the lock, and we're in the debugger,
		 * copy lock data for debugging and break the lock.
		 */
		hw_lock_data_t _shadow_lock;
		memcpy(&_shadow_lock, &cnputc_lock, sizeof(cnputc_lock));
		if (kernel_debugger_entry_count) {
			/* Since hw_lock_to takes a pre-emption count... */
			mp_enable_preemption();
			hw_lock_init(&cnputc_lock);
			hw_lock_lock(&cnputc_lock);
		} else {
			panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock,
			    _shadow_lock.lock_data, current_thread());
		}
	}

	while (size-- > 0) {
		cons_ops[cons_ops_index].putc(0, 0, *c);
		if (*c == '\n')
			cons_ops[cons_ops_index].putc(0, 0, '\r');
		c++;
	}

	hw_lock_unlock(&cnputc_lock);
	mp_enable_preemption();
}

void
cnputc_unbuffered(char c)
{
	_cnputs(&c, 1);
}

void
cnputcusr(char c)
{
	cnputsusr(&c, 1);
}

void
cnputsusr(char *s, int size)
{
	if (size > 1) {
		console_write(s, size);
		return;
	}

	boolean_t state;

	/*
	 * Spin (with pre-emption enabled) waiting for console_ring_try_empty()
	 * to complete output. There is a small window here where we could
	 * end up with a stale value of console_output, but it's unlikely,
	 * and _cnputs(), which outputs to the console device, is internally
	 * synchronized. There's something of a conflict between the
	 * character-at-a-time (with pre-emption enabled) unbuffered
	 * output model here and the buffered output from cnputc(),
	 * whose consumers include printf() (which outputs a sequence
	 * with pre-emption disabled, and should be safe to call with
	 * interrupts off); we don't want to disable pre-emption indefinitely
	 * here, and spinlocks and mutexes are inappropriate.
	 */
	while (console_output != 0) {
		delay(1);
	}

	/*
	 * We disable interrupts to avoid issues caused by rendezvous IPIs
	 * and an interruptible core holding the lock while an uninterruptible
	 * core wants it. Stackshot is the prime example of this.
	 */
	state = ml_set_interrupts_enabled(FALSE);
	_cnputs(s, 1);
	console_restore_interrupts_state(state);
}

static void
console_ring_try_empty(void)
{
#ifdef __x86_64__
	boolean_t handle_tlb_flushes = (ml_get_interrupts_enabled() == FALSE);
#endif /* __x86_64__ */

	int nchars_out = 0;
	int total_chars_out = 0;
	int size_before_wrap = 0;

	do {
#ifdef __x86_64__
		if (handle_tlb_flushes)
			handle_pending_TLB_flushes();
#endif /* __x86_64__ */

		/*
		 * Try to get the read lock on the ring buffer to empty it.
		 * If this fails, someone else is already emptying it...
		 */
		if (!simple_lock_try(&console_ring.read_lock)) {
			/*
			 * If multiple cores are spinning trying to empty the buffer,
			 * we may suffer lock starvation (get the read lock, but
			 * never the write lock, with other cores unable to get the
			 * read lock). As a result, insert a delay on failure, to
			 * let other cores have a turn.
			 */
			delay(1);
			return;
		}

		boolean_t state = ml_set_interrupts_enabled(FALSE);

		/* Indicate that we're in the process of writing a block of data to the console. */
		(void)hw_atomic_add(&console_output, 1);

		simple_lock_try_lock_loop(&console_ring.write_lock);

		/* Try a small chunk at a time, so writes from other cpus can enter the buffer. */
		nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE);

		/* Account for the data readable before the wrap-around. */
		size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
		if (nchars_out > size_before_wrap)
			nchars_out = size_before_wrap;

		if (nchars_out > 0) {
			_cnputs(console_ring.read_ptr, nchars_out);
			console_ring.read_ptr =
			    console_ring.buffer + ((console_ring.read_ptr - console_ring.buffer + nchars_out) % console_ring.len);
			console_ring.used -= nchars_out;
			total_chars_out += nchars_out;
		}

		simple_unlock(&console_ring.write_lock);

		(void)hw_atomic_sub(&console_output, 1);

		simple_unlock(&console_ring.read_lock);

		console_restore_interrupts_state(state);

		/*
		 * In case we end up being the console drain thread
		 * for far too long, break out. Except in panic/suspend cases,
		 * where we should drain the full buffer.
		 */
		if (!kernel_debugger_entry_count && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE))
			break;

	} while (nchars_out > 0);
}

void
console_suspend()
{
	console_suspended = true;
	console_ring_try_empty();
}

void
console_resume()
{
	console_suspended = false;
}

void
console_write(char * str, int size)
{
	console_init();
	int chunk_size = size;
	int i = 0;

	if (size > console_ring.len)
		chunk_size = CPU_CONS_BUF_SIZE;

	while (size > 0) {
		/* Clamp the final chunk so we never read past the end of the caller's buffer. */
		if (chunk_size > size)
			chunk_size = size;

		boolean_t state = ml_set_interrupts_enabled(FALSE);

		simple_lock_try_lock_loop(&console_ring.write_lock);
		while (chunk_size > console_ring_space()) {
			simple_unlock(&console_ring.write_lock);
			console_restore_interrupts_state(state);

			console_ring_try_empty();

			state = ml_set_interrupts_enabled(FALSE);
			simple_lock_try_lock_loop(&console_ring.write_lock);
		}

		for (i = 0; i < chunk_size; i++)
			console_ring_put(str[i]);

		str = &str[i];
		size -= chunk_size;
		simple_unlock(&console_ring.write_lock);
		console_restore_interrupts_state(state);
	}

	console_ring_try_empty();
}

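/*
 * Usage sketch (illustrative): console_write() is the bulk-output entry
 * point, e.g. for a NUL-terminated message:
 *
 *   const char msg[] = "hello from the kernel\n";
 *   console_write((char *)msg, (int)sizeof(msg) - 1);
 *
 * Writes longer than the ring are pushed through in CPU_CONS_BUF_SIZE
 * chunks, with the ring drained between chunks.
 */
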
void
cnputc(char c)
{
	console_buf_t * cbp;
	cpu_data_t * cpu_data_p;
	boolean_t state;
	boolean_t needs_print = TRUE;
	char * cp;

restart:
	mp_disable_preemption();
	cpu_data_p = current_cpu_datap();
	cbp = (console_buf_t *)cpu_data_p->cpu_console_buf;
	if (console_suspended || cbp == NULL) {
		mp_enable_preemption();
		/* Put directly if the console ring is not initialized or we're heading into suspend. */
		_cnputs(&c, 1);
		return;
	}

#ifndef __x86_64__
	/* Is there a panic backtrace going on? */
	if (cpu_data_p->PAB_active) {
		/*
		 * If another processor was in the process of emptying the
		 * console ring buffer when it received the panic backtrace
		 * signal, that processor will be spinning in DebugXCall()
		 * waiting for the panicking processor to finish printing
		 * the backtrace. But the panicking processor will never
		 * be able to obtain the ring buffer lock since it is
		 * owned by a processor that's spinning in DebugXCall().
		 * Blow away any locks that other processors may have on
		 * the console ring buffer so that the backtrace can
		 * complete.
		 */
		console_ring_lock_init();
	}
#endif /* !__x86_64__ */

	state = ml_set_interrupts_enabled(FALSE);

	/*
	 * Add the character to the per-cpu buffer.
	 * If the cpu buffer is full, we'll flush it to the ring, then try
	 * another put. If that fails a second time, the character is dropped.
	 */
	if (needs_print && !cpu_buffer_put(cbp, c)) {
		simple_lock_try_lock_loop(&console_ring.write_lock);

		if (cpu_buffer_size(cbp) > console_ring_space()) {
			simple_unlock(&console_ring.write_lock);
			console_restore_interrupts_state(state);
			mp_enable_preemption();

			console_ring_try_empty();
			goto restart;
		}

		for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
			console_ring_put(*cp);
		cbp->buf_ptr = cbp->buf_base;
		simple_unlock(&console_ring.write_lock);

		cpu_buffer_put(cbp, c);
	}

	needs_print = FALSE;

	if (c != '\n') {
		console_restore_interrupts_state(state);
		mp_enable_preemption();
		return;
	}

	/* We printed a newline; time to flush the cpu buffer to the shared ring. */
	simple_lock_try_lock_loop(&console_ring.write_lock);

	/*
	 * Is there enough space in the shared ring buffer?
	 * Try to empty it if not.
	 * Note: we want the entire local buffer to fit, to
	 * avoid another cpu interjecting.
	 */
	if (cpu_buffer_size(cbp) > console_ring_space()) {
		simple_unlock(&console_ring.write_lock);
		console_restore_interrupts_state(state);
		mp_enable_preemption();

		console_ring_try_empty();

		goto restart;
	}

	for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
		console_ring_put(*cp);

	cbp->buf_ptr = cbp->buf_base;
	simple_unlock(&console_ring.write_lock);

	console_restore_interrupts_state(state);
	mp_enable_preemption();

	console_ring_try_empty();

	return;
}

int
_serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t raw)
{
	int c;
	do {
		c = serial_getc();
	} while (wait && c < 0);

#ifdef __arm__
	// Check for the NMI string
	if (c == nmi_string[nmi_counter]) {
		nmi_counter++;
		if (nmi_counter == NMI_STRING_SIZE) {
			// We've got the NMI string, now do an NMI
			Debugger("Automatic NMI");
			nmi_counter = 0;
			return '\n';
		}
	} else if (c != -1) {
		nmi_counter = 0;
	}
#endif

	return c;
}

static void
_serial_putc(__unused int a, __unused int b, int c)
{
	serial_putc(c);
}

int
cngetc(void)
{
	return cons_ops[cons_ops_index].getc(0, 0, TRUE, FALSE);
}

int
cnmaygetc(void)
{
	return cons_ops[cons_ops_index].getc(0, 0, FALSE, FALSE);
}

int
vcgetc(__unused int l, __unused int u, __unused boolean_t wait, __unused boolean_t raw)
{
	char c;

	if (0 == (*PE_poll_input)(0, &c))
		return c;
	else
		return 0;
}