/*
 * Source: osfmk/console/serial_console.c (apple/xnu)
 * commit ec139794c07a363f9ea65d0e8649b53ce5b1a70e
 */
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef __x86_64__
30 #include <i386/mp.h>
31 #include <i386/cpu_data.h>
32 #include <i386/bit_routines.h>
33 #include <i386/machine_cpu.h>
34 #include <i386/machine_routines.h>
35 #include <i386/misc_protos.h>
36 #include <i386/serial_io.h>
37 #endif /* __x86_64__ */
38
39 #include <libkern/OSAtomic.h>
40 #include <vm/vm_kern.h>
41 #include <vm/vm_map.h>
42 #include <console/video_console.h>
43 #include <console/serial_protos.h>
44 #include <kern/kalloc.h>
45 #include <kern/thread.h>
46 #include <kern/cpu_data.h>
47
48
49
50 #ifndef MAX_CPU_SLOTS
51 #define MAX_CPU_SLOTS (MAX_CPUS)
52 #endif
53
/*
 * Kernel-wide console ring buffer shared by all CPUs.
 * Writers append under write_lock; console_ring_try_empty() drains
 * under read_lock (and takes write_lock while consuming).
 */
static struct {
	char * buffer;     /* base of ring storage (start of the single page allocation) */
	int len;           /* ring capacity in bytes; doubles as the init-done flag (see console_init) */
	int used;          /* bytes currently queued and not yet drained */
	char * write_ptr;  /* next byte to write; wraps to buffer at buffer+len */
	char * read_ptr;   /* next byte to drain; wraps to buffer at buffer+len */
	decl_simple_lock_data(, read_lock);   /* serializes drainers */
	decl_simple_lock_data(, write_lock);  /* serializes writers and protects used/ptrs */
} console_ring;

/* Serializes access to the underlying console device in _cnputs(). */
hw_lock_data_t cnputc_lock;
/* Count of CPUs currently inside a _cnputs() block write; cnputcusr() spins on it. */
static volatile uint32_t console_output = 0;
66
67 /*
68 * New allocation mechanism for console buffers
69 * Total allocation: 1 * PAGE_SIZE
70 * - Each cpu gets CPU_CONS_BUF_SIZE buffer
71 * - Kernel wide console ring gets PAGE_SIZE - MAX_CPU_SLOTS * CPU_CONS_BUF_SIZE
72 *
73 * At the return from console_init() the memory is setup as follows:
74 * +----------------------------+-------------+-------------+-------------+-------------+
75 * |console ring buffer---------|f2eec075-----|f2eec075-----|f2eec075-----|f2eec075-----|
76 * +----------------------------+-------------+-------------+-------------+-------------+
77 * Each cpu allocation will find the first (f2eec075) and use that buffer.
78 *
79 */
80
81 #define CPU_CONS_BUF_SIZE 256
82 #define CPU_BUF_FREE_HEX 0xf2eec075
83
84 #define KERN_CONSOLE_BUF_SIZE vm_map_round_page(CPU_CONS_BUF_SIZE *(MAX_CPU_SLOTS + 1), PAGE_SIZE - 1)
85 #define KERN_CONSOLE_RING_SIZE (KERN_CONSOLE_BUF_SIZE - (CPU_CONS_BUF_SIZE * MAX_CPU_SLOTS))
86
87 /*
88 * A serial line running at 115200 bps can output ~11.5 characters per millisecond.
89 * Synchronous serial logging with preemption+interrupts disabled fundamentally prevents us
90 * from hitting expected scheduling deadlines, but we can at least tone it down a bit.
91 *
92 * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148)
93 *
94 * Keep interrupt disabled periods shorter than 1ms
95 */
96 #define MAX_INT_DISABLED_FLUSH_SIZE 8
97 #define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE)
98
/*
 * Per-cpu staging buffer: cnputc() accumulates characters here and
 * flushes them into the shared ring on newline (or when full), so a
 * line from one CPU is not interleaved with other CPUs mid-line.
 * Sized so the whole struct occupies exactly CPU_CONS_BUF_SIZE bytes.
 */
typedef struct console_buf {
	char * buf_base;  /* first byte of buf[] */
	char * buf_end;   /* one past the last byte of buf[] */
	char * buf_ptr;   /* next free byte; buf_ptr == buf_end means full */
#define CPU_BUFFER_LEN (CPU_CONS_BUF_SIZE - 3 * (sizeof(char *)))
	char buf[CPU_BUFFER_LEN];
} console_buf_t;

extern int serial_getc(void);
extern void serial_putc(char);

static void _serial_putc(int, int, int);

/*
 * Output/input backends, indexed by cons_ops_index:
 * SERIAL_CONS_OPS -> serial line, VC_CONS_OPS -> video console.
 */
struct console_ops cons_ops[] = {
	{
	    .putc = _serial_putc, .getc = _serial_getc,
	},
	{
	    .putc = vcputc, .getc = vcgetc,
	},
};

uint32_t nconsops = (sizeof cons_ops / sizeof cons_ops[0]);

/* Currently selected backend; defaults to the video console. */
uint32_t cons_ops_index = VC_CONS_OPS;


/* When true (heading into suspend), cnputc() bypasses the ring and
 * writes straight to the device, and drain loops run to completion. */
static bool console_suspended = false;
127
128 static void
129 console_ring_lock_init(void)
130 {
131 simple_lock_init(&console_ring.read_lock, 0);
132 simple_lock_init(&console_ring.write_lock, 0);
133 }
134
/*
 * One-time setup of the console ring and the per-cpu buffer slots.
 * Safe to call repeatedly (and from multiple CPUs): only the caller
 * that CASes console_ring.len from 0 to its final size proceeds.
 */
void
console_init(void)
{
	int ret, i;
	uint32_t * p;

	/* len doubles as the "already initialized" flag. */
	if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len))
		return;

	assert(console_ring.len > 0);

	/* Single allocation: ring storage followed by MAX_CPU_SLOTS cpu buffers
	 * (see layout diagram above). */
	ret = kmem_alloc(kernel_map, (vm_offset_t *)&console_ring.buffer, KERN_CONSOLE_BUF_SIZE, VM_KERN_MEMORY_OSFMK);
	if (ret != KERN_SUCCESS) {
		panic("console_ring_init() failed to allocate ring buffer, error %d\n", ret);
	}

	/* Stamp every per-cpu slot free with the magic marker so
	 * console_cpu_alloc() can claim them via CAS. */
	for (i = 0; i < MAX_CPU_SLOTS; i++) {
		p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
		*p = CPU_BUF_FREE_HEX;
	}

	console_ring.used = 0;
	console_ring.read_ptr = console_ring.buffer;
	console_ring.write_ptr = console_ring.buffer;
	console_ring_lock_init();
	hw_lock_init(&cnputc_lock);
}
163
164 void *
165 console_cpu_alloc(__unused boolean_t boot_processor)
166 {
167 console_buf_t * cbp;
168 int i;
169 uint32_t * p;
170
171 console_init();
172 assert(console_ring.buffer != NULL);
173
174 /* select the next slot from the per cpu buffers at end of console_ring.buffer */
175 for (i = 0; i < MAX_CPU_SLOTS; i++) {
176 p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
177 if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p))
178 break;
179 }
180 assert(i < MAX_CPU_SLOTS);
181
182 cbp = (console_buf_t *)(uintptr_t)p;
183 if ((uintptr_t)cbp >= (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE) {
184 printf("console_cpu_alloc() failed to allocate cpu buffer\n");
185 return NULL;
186 }
187
188 cbp->buf_base = (char *)&cbp->buf;
189 cbp->buf_ptr = cbp->buf_base;
190 cbp->buf_end = cbp->buf_base + CPU_BUFFER_LEN;
191 return (void *)cbp;
192 }
193
194 void
195 console_cpu_free(void * buf)
196 {
197 assert((uintptr_t)buf > (uintptr_t)console_ring.buffer);
198 assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE);
199 if (buf != NULL)
200 *(uint32_t *)buf = CPU_BUF_FREE_HEX;
201 }
202
203 static inline int
204 console_ring_space(void)
205 {
206 return console_ring.len - console_ring.used;
207 }
208
209 static boolean_t
210 console_ring_put(char ch)
211 {
212 if (console_ring.used < console_ring.len) {
213 console_ring.used++;
214 *console_ring.write_ptr++ = ch;
215 if (console_ring.write_ptr - console_ring.buffer == console_ring.len)
216 console_ring.write_ptr = console_ring.buffer;
217 return TRUE;
218 } else {
219 return FALSE;
220 }
221 }
222
223 static inline boolean_t
224 cpu_buffer_put(console_buf_t * cbp, char ch)
225 {
226 if (ch != '\0' && cbp->buf_ptr < cbp->buf_end) {
227 *(cbp->buf_ptr++) = ch;
228 return TRUE;
229 } else {
230 return FALSE;
231 }
232 }
233
234 static inline int
235 cpu_buffer_size(console_buf_t * cbp)
236 {
237 return (int)(cbp->buf_ptr - cbp->buf_base);
238 }
239
/*
 * Write `size` bytes straight to the console device through the current
 * cons_ops backend, expanding '\n' to "\r\n".  Serialized by cnputc_lock
 * with preemption disabled, since the device putc routines are assumed
 * non-reentrant.
 */
static inline void
_cnputs(char * c, int size)
{
	/* The console device output routines are assumed to be
	 * non-reentrant.
	 */
	mp_disable_preemption();
	if (!hw_lock_to(&cnputc_lock, LockTimeOut)) {
		/* If we timed out on the lock, and we're in the debugger,
		 * copy lock data for debugging and break the lock.
		 */
		hw_lock_data_t _shadow_lock;
		memcpy(&_shadow_lock, &cnputc_lock, sizeof(cnputc_lock));
		if (debug_mode) {
			/* Since hw_lock_to takes a pre-emption count...*/
			mp_enable_preemption();
			/* Re-initialize to steal the lock from its (possibly wedged) owner,
			 * then take it normally; the shadow copy preserves the old owner
			 * for post-mortem inspection. */
			hw_lock_init(&cnputc_lock);
			hw_lock_lock(&cnputc_lock);
		} else {
			panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock,
			      _shadow_lock.lock_data, current_thread());
		}
	}

	/* Emit each byte, inserting a carriage return after every newline. */
	while (size-- > 0) {
		cons_ops[cons_ops_index].putc(0, 0, *c);
		if (*c == '\n')
			cons_ops[cons_ops_index].putc(0, 0, '\r');
		c++;
	}

	hw_lock_unlock(&cnputc_lock);
	mp_enable_preemption();
}
274
/* Emit a single character directly to the console device, bypassing
 * the per-cpu buffer and the shared ring entirely. */
void
cnputc_unbuffered(char c)
{
	char ch = c;

	_cnputs(&ch, 1);
}
280
281 void
282 cnputcusr(char c)
283 {
284 boolean_t state;
285
286 /* Spin (with pre-emption enabled) waiting for console_ring_try_empty()
287 * to complete output. There is a small window here where we could
288 * end up with a stale value of console_output, but it's unlikely,
289 * and _cnputs(), which outputs to the console device, is internally
290 * synchronized. There's something of a conflict between the
291 * character-at-a-time (with pre-emption enabled) unbuffered
292 * output model here, and the buffered output from cnputc(),
293 * whose consumers include printf() ( which outputs a sequence
294 * with pre-emption disabled, and should be safe to call with
295 * interrupts off); we don't want to disable pre-emption indefinitely
296 * here, and spinlocks and mutexes are inappropriate.
297 */
298 while (console_output != 0)
299 ;
300
301 /*
302 * We disable interrupts to avoid issues caused by rendevous IPIs
303 * and an interruptible core holding the lock while an uninterruptible
304 * core wants it. Stackshot is the prime example of this.
305 */
306 state = ml_set_interrupts_enabled(FALSE);
307 _cnputs(&c, 1);
308 ml_set_interrupts_enabled(state);
309 }
310
/*
 * Drain the shared ring to the console device in small chunks.
 * Non-blocking with respect to other drainers: if another CPU already
 * holds read_lock we back off.  Interrupts are disabled only for the
 * duration of each small chunk so writers on other CPUs can interleave.
 */
static void
console_ring_try_empty(void)
{
#ifdef __x86_64__
	/* If called with interrupts off we must service TLB shootdowns
	 * ourselves while looping, or other CPUs can hang waiting on us. */
	boolean_t handle_tlb_flushes = (ml_get_interrupts_enabled() == FALSE);
#endif /* __x86_64__ */

	int nchars_out = 0;
	int total_chars_out = 0;
	int size_before_wrap = 0;

	do {
#ifdef __x86_64__
		if (handle_tlb_flushes)
			handle_pending_TLB_flushes();
#endif /* __x86_64__ */

		/*
		 * Try to get the read lock on the ring buffer to empty it.
		 * If this fails someone else is already emptying...
		 */
		if (!simple_lock_try(&console_ring.read_lock)) {
			/*
			 * If multiple cores are spinning trying to empty the buffer,
			 * we may suffer lock starvation (get the read lock, but
			 * never the write lock, with other cores unable to get the
			 * read lock). As a result, insert a delay on failure, to
			 * let other cores have a turn.
			 */
			delay(1);
			return;
		}

		boolean_t state = ml_set_interrupts_enabled(FALSE);

		/* Indicate that we're in the process of writing a block of data to the console. */
		(void)hw_atomic_add(&console_output, 1);

		simple_lock_try_lock_loop(&console_ring.write_lock);

		/* try small chunk at a time, so we allow writes from other cpus into the buffer */
		nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE);

		/* account for data to be read before wrap around */
		size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
		if (nchars_out > size_before_wrap)
			nchars_out = size_before_wrap;

		if (nchars_out > 0) {
			/* Emit one contiguous run, then advance/wrap the read cursor. */
			_cnputs(console_ring.read_ptr, nchars_out);
			console_ring.read_ptr =
			    console_ring.buffer + ((console_ring.read_ptr - console_ring.buffer + nchars_out) % console_ring.len);
			console_ring.used -= nchars_out;
			total_chars_out += nchars_out;
		}

		simple_unlock(&console_ring.write_lock);

		(void)hw_atomic_sub(&console_output, 1);

		simple_unlock(&console_ring.read_lock);

		ml_set_interrupts_enabled(state);

		/*
		 * In case we end up being the console drain thread
		 * for far too long, break out. Except in panic/suspend cases
		 * where we should clear out full buffer.
		 */
		if (debug_mode == 0 && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE))
			break;

	} while (nchars_out > 0);
}
385
386
387 void
388 console_suspend()
389 {
390 console_suspended = true;
391 console_ring_try_empty();
392 }
393
394 void
395 console_resume()
396 {
397 console_suspended = false;
398 }
399
400 void
401 console_write(char * str, int size)
402 {
403 console_init();
404 int chunk_size = size;
405 int i = 0;
406
407 if (size > console_ring.len)
408 chunk_size = CPU_CONS_BUF_SIZE;
409
410 while (size > 0) {
411 boolean_t state = ml_set_interrupts_enabled(FALSE);
412
413 simple_lock_try_lock_loop(&console_ring.write_lock);
414 while (chunk_size > console_ring_space()) {
415 simple_unlock(&console_ring.write_lock);
416 ml_set_interrupts_enabled(state);
417
418 console_ring_try_empty();
419
420 state = ml_set_interrupts_enabled(FALSE);
421 simple_lock_try_lock_loop(&console_ring.write_lock);
422 }
423
424 for (i = 0; i < chunk_size; i++)
425 console_ring_put(str[i]);
426
427 str = &str[i];
428 size -= chunk_size;
429 simple_unlock(&console_ring.write_lock);
430 ml_set_interrupts_enabled(state);
431 }
432
433 console_ring_try_empty();
434 }
435
/*
 * Buffered console output: stage `c` in this cpu's private buffer and
 * flush to the shared ring on newline (or when the private buffer
 * fills).  Falls back to direct device output when the ring is not yet
 * set up or we are heading into suspend.  May restart from scratch
 * (with preemption re-enabled) whenever the ring lacks space.
 */
void
cnputc(char c)
{
	console_buf_t * cbp;
	cpu_data_t * cpu_data_p;
	boolean_t state;
	boolean_t needs_print = TRUE;
	char * cp;

restart:
	mp_disable_preemption();
	cpu_data_p = current_cpu_datap();
	cbp = (console_buf_t *)cpu_data_p->cpu_console_buf;
	if (console_suspended || cbp == NULL) {
		mp_enable_preemption();
		/* Put directly if console ring is not initialized or we're heading into suspend */
		_cnputs(&c, 1);
		return;
	}

#ifndef __x86_64__
	/* Is there a panic backtrace going on? */
	if (cpu_data_p->PAB_active) {
		/* If another processor was in the process of emptying the
		 * console ring buffer when it received the panic backtrace
		 * signal, that processor will be spinning in DebugXCall()
		 * waiting for the panicking processor to finish printing
		 * the backtrace. But panicking processor will never
		 * be able to obtain the ring buffer lock since it is
		 * owned by a processor that's spinning in DebugXCall().
		 * Blow away any locks that other processors may have on
		 * the console ring buffer so that the backtrace can
		 * complete.
		 */
		console_ring_lock_init();
	}
#endif /* __x86_64__ */

	state = ml_set_interrupts_enabled(FALSE);

	/*
	 * add to stack buf
	 * If the cpu buffer is full, we'll flush, then try
	 * another put. If it fails a second time... screw
	 * it.
	 */
	if (needs_print && !cpu_buffer_put(cbp, c)) {
		simple_lock_try_lock_loop(&console_ring.write_lock);

		/* Ring too full for the whole cpu buffer: back out completely
		 * (restoring interrupts and preemption), drain, and restart. */
		if (cpu_buffer_size(cbp) > console_ring_space()) {
			simple_unlock(&console_ring.write_lock);
			ml_set_interrupts_enabled(state);
			mp_enable_preemption();

			console_ring_try_empty();
			goto restart;
		}

		/* Flush the staged cpu buffer into the ring, then retry the put. */
		for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
			console_ring_put(*cp);
		cbp->buf_ptr = cbp->buf_base;
		simple_unlock(&console_ring.write_lock);

		cpu_buffer_put(cbp, c);
	}

	/* After a restart, c is already staged; don't stage it twice. */
	needs_print = FALSE;

	if (c != '\n') {
		ml_set_interrupts_enabled(state);
		mp_enable_preemption();
		return;
	}

	/* We printed a newline, time to flush the CPU buffer to the global buffer */
	simple_lock_try_lock_loop(&console_ring.write_lock);

	/*
	 * Is there enough space in the shared ring buffer?
	 * Try to empty if not.
	 * Note, we want the entire local buffer to fit to
	 * avoid another cpu interjecting.
	 */

	if (cpu_buffer_size(cbp) > console_ring_space()) {
		simple_unlock(&console_ring.write_lock);
		ml_set_interrupts_enabled(state);
		mp_enable_preemption();

		console_ring_try_empty();

		goto restart;
	}

	for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
		console_ring_put(*cp);

	cbp->buf_ptr = cbp->buf_base;
	simple_unlock(&console_ring.write_lock);
	ml_set_interrupts_enabled(state);
	mp_enable_preemption();

	console_ring_try_empty();

	return;
}
542
543 int
544 _serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t raw)
545 {
546 int c;
547 do {
548 c = serial_getc();
549 } while (wait && c < 0);
550
551
552 return c;
553 }
554
555 static void
556 _serial_putc(__unused int a, __unused int b, int c)
557 {
558 serial_putc(c);
559 }
560
561 int
562 cngetc(void)
563 {
564 return cons_ops[cons_ops_index].getc(0, 0, TRUE, FALSE);
565 }
566
567 int
568 cnmaygetc(void)
569 {
570 return cons_ops[cons_ops_index].getc(0, 0, FALSE, FALSE);
571 }
572
573 int
574 vcgetc(__unused int l, __unused int u, __unused boolean_t wait, __unused boolean_t raw)
575 {
576 char c;
577
578 if (0 == (*PE_poll_input)(0, &c))
579 return c;
580 else
581 return 0;
582 }
583
584 /* So we can re-write the serial device functions at boot-time */
585 void
586 console_set_serial_ops(struct console_ops * newops)
587 {
588 cons_ops[SERIAL_CONS_OPS] = *newops;
589 }
590