/* apple/xnu (xnu-6153.101.6): osfmk/console/serial_console.c */
/*
 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef __x86_64__
#include <i386/mp.h>
#include <i386/cpu_data.h>
#include <i386/bit_routines.h>
#include <i386/machine_cpu.h>
#include <i386/machine_routines.h>
#include <i386/misc_protos.h>
#include <i386/serial_io.h>
#endif /* __x86_64__ */

#include <libkern/OSAtomic.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <console/video_console.h>
#include <console/serial_protos.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/cpu_data.h>
#include <libkern/section_keywords.h>

#if __arm__ || __arm64__
#include <machine/machine_routines.h>
#include <arm/cpu_data_internal.h>
#endif

#ifdef CONFIG_XNUPOST
#include <tests/xnupost.h>
kern_return_t console_serial_test(void);
kern_return_t console_serial_alloc_rel_tests(void);
kern_return_t console_serial_parallel_log_tests(void);
#define MAX_CPU_SLOTS (MAX_CPUS + 2)
#endif

#ifndef MAX_CPU_SLOTS
#define MAX_CPU_SLOTS (MAX_CPUS)
#endif

static struct {
    char * buffer;
    int len;
    int used;
    char * write_ptr;
    char * read_ptr;
    decl_simple_lock_data(, read_lock);
    decl_simple_lock_data(, write_lock);
} console_ring;
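
/*
 * Locking model (a summary inferred from the code below, not stated in
 * the original): producers take write_lock with interrupts disabled and
 * update write_ptr/used; console_ring_try_empty() serializes drainers
 * with read_lock, then also takes write_lock while it advances read_ptr
 * and decrements used.
 */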

hw_lock_data_t cnputc_lock;
static volatile uint32_t console_output = 0;

/*
 * New allocation mechanism for console buffers
 * Total allocation: 1 * PAGE_SIZE
 * - Each cpu gets a CPU_CONS_BUF_SIZE buffer
 * - The kernel-wide console ring gets PAGE_SIZE - MAX_CPU_SLOTS * CPU_CONS_BUF_SIZE
 *
 * At the return from console_init() the memory is set up as follows:
 * +----------------------------+-------------+-------------+-------------+-------------+
 * |console ring buffer---------|f2eec075-----|f2eec075-----|f2eec075-----|f2eec075-----|
 * +----------------------------+-------------+-------------+-------------+-------------+
 * Each cpu allocation will find the first free slot (marked f2eec075) and use that buffer.
 */

#define CPU_CONS_BUF_SIZE 256
#define CPU_BUF_FREE_HEX 0xf2eec075

#define KERN_CONSOLE_BUF_SIZE vm_map_round_page(CPU_CONS_BUF_SIZE * (MAX_CPU_SLOTS + 1), PAGE_SIZE - 1)
#define KERN_CONSOLE_RING_SIZE (KERN_CONSOLE_BUF_SIZE - (CPU_CONS_BUF_SIZE * MAX_CPU_SLOTS))
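
/*
 * Illustrative sketch (editor's addition, compiled out): how the i-th
 * per-cpu slot address is derived from the single allocation above.
 * This mirrors the arithmetic used in console_init() and
 * console_cpu_alloc() below; cpu_buffer_slot() is a hypothetical
 * helper, not part of the original source.
 */
#if 0
static uint32_t *
cpu_buffer_slot(int i)
{
    /* The ring occupies the first console_ring.len bytes of the
     * allocation; the MAX_CPU_SLOTS per-cpu buffers (console_buf_t,
     * defined just below) are packed immediately after it. */
    return (uint32_t *)((uintptr_t)console_ring.buffer +
        console_ring.len + (i * sizeof(console_buf_t)));
}
#endif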

/*
 * A serial line running at 115200 bps can output ~11.5 characters per millisecond.
 * Synchronous serial logging with preemption+interrupts disabled fundamentally prevents us
 * from hitting expected scheduling deadlines, but we can at least tone it down a bit.
 *
 * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148)
 *
 * Keep interrupt-disabled periods shorter than 1 ms.
 */
#define MAX_INT_DISABLED_FLUSH_SIZE 8
#define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE)
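
/*
 * Worked numbers (editor's note, assuming standard 8N1 framing): each
 * character costs 10 bit-times, so 115200 bps / 10 = 11520 chars/sec,
 * i.e. ~11.5 chars per millisecond as stated above. Flushing
 * MAX_INT_DISABLED_FLUSH_SIZE (8) characters per pass therefore holds
 * interrupts off for roughly 8 / 11.5 ~= 0.7 ms, under the 1 ms budget.
 */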

typedef struct console_buf {
    char * buf_base;
    char * buf_end;
    char * buf_ptr;
#define CPU_BUFFER_LEN (CPU_CONS_BUF_SIZE - 3 * (sizeof(char *)))
    char buf[CPU_BUFFER_LEN];
} console_buf_t;
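
/*
 * Editor's sketch (compiled out): CPU_BUFFER_LEN is sized so the three
 * header pointers plus buf[] exactly fill one CPU_CONS_BUF_SIZE slot,
 * which is what lets console_init() carve slots with plain
 * sizeof(console_buf_t) strides.
 */
#if 0
_Static_assert(sizeof(console_buf_t) == CPU_CONS_BUF_SIZE,
    "console_buf_t must exactly fill one per-cpu slot");
#endif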

extern int serial_getc(void);
extern void serial_putc(char);

static void _serial_putc(int, int, int);

SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = {
    {
        .putc = _serial_putc, .getc = _serial_getc,
    },
    {
        .putc = vcputc, .getc = vcgetc,
    },
};

SECURITY_READ_ONLY_EARLY(uint32_t) nconsops = (sizeof cons_ops / sizeof cons_ops[0]);

uint32_t cons_ops_index = VC_CONS_OPS;

#if defined(__x86_64__) || defined(__arm__)
// NMI static variables
#define NMI_STRING_SIZE 32
char nmi_string[NMI_STRING_SIZE] = "afDIGHr84A84jh19Kphgp428DNPdnapq";
static int nmi_counter = 0;
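
/*
 * The string above acts as a break-to-debugger escape: _serial_getc()
 * below matches incoming serial bytes against nmi_string and, once all
 * NMI_STRING_SIZE characters arrive in order, calls
 * Debugger("Automatic NMI").
 */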
#endif /* __x86_64__ || __arm__ */

static bool console_suspended = false;

/* Wrapper for ml_set_interrupts_enabled */
static void
console_restore_interrupts_state(boolean_t state)
{
#if INTERRUPT_MASKED_DEBUG
    /*
     * The serial console holds interrupts disabled for far too long
     * and would trip the spin-debugger. If we are about to reenable
     * interrupts then clear the timer and avoid panicking on the delay.
     * Otherwise, let the code that printed with interrupts disabled
     * take the panic when it reenables interrupts.
     * Hopefully one day this is fixed so that this workaround is unnecessary.
     */
    if (state == TRUE) {
        ml_spin_debug_clear_self();
    }
#endif /* INTERRUPT_MASKED_DEBUG */
    ml_set_interrupts_enabled(state);
}

static void
console_ring_lock_init(void)
{
    simple_lock_init(&console_ring.read_lock, 0);
    simple_lock_init(&console_ring.write_lock, 0);
}

void
console_init(void)
{
    int ret, i;
    uint32_t * p;

    if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len)) {
        return;
    }

    assert(console_ring.len > 0);

    ret = kmem_alloc(kernel_map, (vm_offset_t *)&console_ring.buffer, KERN_CONSOLE_BUF_SIZE, VM_KERN_MEMORY_OSFMK);
    if (ret != KERN_SUCCESS) {
        panic("console_ring_init() failed to allocate ring buffer, error %d\n", ret);
    }

    /* set up memory for the per-cpu console buffers */
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        *p = CPU_BUF_FREE_HEX;
    }

    console_ring.used = 0;
    console_ring.read_ptr = console_ring.buffer;
    console_ring.write_ptr = console_ring.buffer;
    console_ring_lock_init();
    hw_lock_init(&cnputc_lock);
}

void *
console_cpu_alloc(__unused boolean_t boot_processor)
{
    console_buf_t * cbp;
    int i;
    uint32_t * p = NULL;

    console_init();
    assert(console_ring.buffer != NULL);

    /* select the next free slot from the per-cpu buffers at the end of console_ring.buffer */
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p)) {
            break;
        }
    }
    assert(i < MAX_CPU_SLOTS);

    cbp = (console_buf_t *)(uintptr_t)p;
    if ((uintptr_t)cbp >= (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE) {
        printf("console_cpu_alloc() failed to allocate cpu buffer\n");
        return NULL;
    }

    cbp->buf_base = (char *)&cbp->buf;
    cbp->buf_ptr = cbp->buf_base;
    cbp->buf_end = cbp->buf_base + CPU_BUFFER_LEN;
    return (void *)cbp;
}

void
console_cpu_free(void * buf)
{
    assert((uintptr_t)buf > (uintptr_t)console_ring.buffer);
    assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE);
    if (buf != NULL) {
        *(uint32_t *)buf = CPU_BUF_FREE_HEX;
    }
}

static inline int
console_ring_space(void)
{
    return console_ring.len - console_ring.used;
}

static boolean_t
console_ring_put(char ch)
{
    if (console_ring.used < console_ring.len) {
        console_ring.used++;
        *console_ring.write_ptr++ = ch;
        if (console_ring.write_ptr - console_ring.buffer == console_ring.len) {
            console_ring.write_ptr = console_ring.buffer;
        }
        return TRUE;
    } else {
        return FALSE;
    }
}

static inline boolean_t
cpu_buffer_put(console_buf_t * cbp, char ch)
{
    if (ch != '\0' && cbp->buf_ptr < cbp->buf_end) {
        *(cbp->buf_ptr++) = ch;
        return TRUE;
    } else {
        return FALSE;
    }
}

static inline int
cpu_buffer_size(console_buf_t * cbp)
{
    return (int)(cbp->buf_ptr - cbp->buf_base);
}

static inline void
_cnputs(char * c, int size)
{
    /* The console device output routines are assumed to be
     * non-reentrant.
     */
#ifdef __x86_64__
    uint32_t lock_timeout_ticks = UINT32_MAX;
#else
    uint32_t lock_timeout_ticks = LockTimeOut * 2; // 250ms is not enough, 500 is just right
#endif

    mp_disable_preemption();
    if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks, LCK_GRP_NULL)) {
        /* If we timed out on the lock, and we're in the debugger,
         * copy lock data for debugging and break the lock.
         */
        hw_lock_data_t _shadow_lock;
        memcpy(&_shadow_lock, &cnputc_lock, sizeof(cnputc_lock));
        if (kernel_debugger_entry_count) {
            /* Since hw_lock_to takes a pre-emption count... */
            mp_enable_preemption();
            hw_lock_init(&cnputc_lock);
            hw_lock_lock(&cnputc_lock, LCK_GRP_NULL);
        } else {
            panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n",
                &_shadow_lock, _shadow_lock.lock_data, current_thread());
        }
    }

    while (size-- > 0) {
        if (*c == '\n') {
            cons_ops[cons_ops_index].putc(0, 0, '\r');
        }
        cons_ops[cons_ops_index].putc(0, 0, *c);
        c++;
    }

    hw_lock_unlock(&cnputc_lock);
    mp_enable_preemption();
}

void
cnputc_unbuffered(char c)
{
    _cnputs(&c, 1);
}

void
cnputcusr(char c)
{
    cnputsusr(&c, 1);
}

void
cnputsusr(char *s, int size)
{
    if (size > 1) {
        console_write(s, size);
        return;
    }

    boolean_t state;

    /* Spin (with pre-emption enabled) waiting for console_ring_try_empty()
     * to complete output. There is a small window here where we could
     * end up with a stale value of console_output, but it's unlikely,
     * and _cnputs(), which outputs to the console device, is internally
     * synchronized. There's something of a conflict between the
     * character-at-a-time (with pre-emption enabled) unbuffered
     * output model here, and the buffered output from cnputc(),
     * whose consumers include printf() (which outputs a sequence
     * with pre-emption disabled, and should be safe to call with
     * interrupts off); we don't want to disable pre-emption indefinitely
     * here, and spinlocks and mutexes are inappropriate.
     */
    while (console_output != 0) {
        delay(1);
    }

    /*
     * We disable interrupts to avoid issues caused by rendezvous IPIs
     * and an interruptible core holding the lock while an uninterruptible
     * core wants it. Stackshot is the prime example of this.
     */
    state = ml_set_interrupts_enabled(FALSE);
    _cnputs(s, 1);
    console_restore_interrupts_state(state);
}

static void
console_ring_try_empty(void)
{
#ifdef __x86_64__
    boolean_t handle_tlb_flushes = (ml_get_interrupts_enabled() == FALSE);
#endif /* __x86_64__ */

    int nchars_out = 0;
    int total_chars_out = 0;
    int size_before_wrap = 0;

    do {
#ifdef __x86_64__
        if (handle_tlb_flushes) {
            handle_pending_TLB_flushes();
        }
#endif /* __x86_64__ */

        /*
         * Try to get the read lock on the ring buffer to empty it.
         * If this fails, someone else is already emptying...
         */
        if (!simple_lock_try(&console_ring.read_lock, LCK_GRP_NULL)) {
            /*
             * If multiple cores are spinning trying to empty the buffer,
             * we may suffer lock starvation (get the read lock, but
             * never the write lock, with other cores unable to get the
             * read lock). As a result, insert a delay on failure, to
             * let other cores have a turn.
             */
            delay(1);
            return;
        }

        boolean_t state = ml_set_interrupts_enabled(FALSE);

        /* Indicate that we're in the process of writing a block of data to the console. */
        os_atomic_inc(&console_output, relaxed);

        simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);

        /* try a small chunk at a time, so we allow writes from other cpus into the buffer */
        nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE);

        /* account for data to be read before wrap around */
        size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
        if (nchars_out > size_before_wrap) {
            nchars_out = size_before_wrap;
        }
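
        /*
         * Worked example (editor's note): with len == 1024, read_ptr at
         * offset 1020 and used == 8, size_before_wrap is 4, so this pass
         * emits 4 characters, the modulo arithmetic below wraps read_ptr
         * back to offset 0, and the remaining 4 characters are picked up
         * on the next loop iteration.
         */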

        if (nchars_out > 0) {
            _cnputs(console_ring.read_ptr, nchars_out);
            console_ring.read_ptr =
                console_ring.buffer + ((console_ring.read_ptr - console_ring.buffer + nchars_out) % console_ring.len);
            console_ring.used -= nchars_out;
            total_chars_out += nchars_out;
        }

        simple_unlock(&console_ring.write_lock);

        os_atomic_dec(&console_output, relaxed);

        simple_unlock(&console_ring.read_lock);

        console_restore_interrupts_state(state);

        /*
         * In case we end up being the console drain thread
         * for far too long, break out. Except in panic/suspend cases
         * where we should clear out the full buffer.
         */
        if (!kernel_debugger_entry_count && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE)) {
            break;
        }
    } while (nchars_out > 0);
}

void
console_suspend(void)
{
    console_suspended = true;
    console_ring_try_empty();
}

void
console_resume(void)
{
    console_suspended = false;
}

void
console_write(char * str, int size)
{
    console_init();
    int chunk_size = size;
    int i = 0;

    if (size > console_ring.len) {
        chunk_size = CPU_CONS_BUF_SIZE;
    }

    while (size > 0) {
        /* clamp the final chunk to the bytes that remain */
        if (chunk_size > size) {
            chunk_size = size;
        }

        boolean_t state = ml_set_interrupts_enabled(FALSE);

        simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);
        while (chunk_size > console_ring_space()) {
            simple_unlock(&console_ring.write_lock);
            console_restore_interrupts_state(state);

            console_ring_try_empty();

            state = ml_set_interrupts_enabled(FALSE);
            simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);
        }

        for (i = 0; i < chunk_size; i++) {
            console_ring_put(str[i]);
        }

        str = &str[i];
        size -= chunk_size;
        simple_unlock(&console_ring.write_lock);
        console_restore_interrupts_state(state);
    }

    console_ring_try_empty();
}

void
cnputc(char c)
{
    console_buf_t * cbp;
    cpu_data_t * cpu_data_p;
    boolean_t state;
    boolean_t needs_print = TRUE;
    char * cp;

restart:
    mp_disable_preemption();
    cpu_data_p = current_cpu_datap();
    cbp = (console_buf_t *)cpu_data_p->cpu_console_buf;
    if (console_suspended || cbp == NULL) {
        mp_enable_preemption();
        /* Put directly if the console ring is not initialized or we're heading into suspend */
        _cnputs(&c, 1);
        return;
    }

#ifndef __x86_64__
    /* Is there a panic backtrace going on? */
    if (cpu_data_p->PAB_active) {
        /* If another processor was in the process of emptying the
         * console ring buffer when it received the panic backtrace
         * signal, that processor will be spinning in DebugXCall()
         * waiting for the panicking processor to finish printing
         * the backtrace. But the panicking processor will never
         * be able to obtain the ring buffer lock since it is
         * owned by a processor that's spinning in DebugXCall().
         * Blow away any locks that other processors may have on
         * the console ring buffer so that the backtrace can
         * complete.
         */
        console_ring_lock_init();
    }
#endif /* !__x86_64__ */

    state = ml_set_interrupts_enabled(FALSE);

    /*
     * Add to the per-cpu buffer.
     * If the cpu buffer is full, we'll flush, then try
     * another put. If it fails a second time... screw
     * it.
     */
    if (needs_print && !cpu_buffer_put(cbp, c)) {
        simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);

        if (cpu_buffer_size(cbp) > console_ring_space()) {
            simple_unlock(&console_ring.write_lock);
            console_restore_interrupts_state(state);
            mp_enable_preemption();

            console_ring_try_empty();
            goto restart;
        }

        for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) {
            console_ring_put(*cp);
        }
        cbp->buf_ptr = cbp->buf_base;
        simple_unlock(&console_ring.write_lock);

        cpu_buffer_put(cbp, c);
    }

    needs_print = FALSE;

    if (c != '\n') {
        console_restore_interrupts_state(state);
        mp_enable_preemption();
        return;
    }

    /* We printed a newline, time to flush the CPU buffer to the global buffer */
    simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);

    /*
     * Is there enough space in the shared ring buffer?
     * Try to empty it if not.
     * Note, we want the entire local buffer to fit to
     * avoid another cpu interjecting.
     */
    if (cpu_buffer_size(cbp) > console_ring_space()) {
        simple_unlock(&console_ring.write_lock);
        console_restore_interrupts_state(state);
        mp_enable_preemption();

        console_ring_try_empty();

        goto restart;
    }

    for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) {
        console_ring_put(*cp);
    }

    cbp->buf_ptr = cbp->buf_base;
    simple_unlock(&console_ring.write_lock);

    console_restore_interrupts_state(state);
    mp_enable_preemption();

    console_ring_try_empty();

    return;
}

int
_serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t raw)
{
    int c;
    do {
        c = serial_getc();
    } while (wait && c < 0);

#if defined(__x86_64__) || defined(__arm__)
    // Check for the NMI string
    if (c == nmi_string[nmi_counter]) {
        nmi_counter++;
        if (nmi_counter == NMI_STRING_SIZE) {
            // We've got the NMI string, now do an NMI
            Debugger("Automatic NMI");
            nmi_counter = 0;
            return '\n';
        }
    } else if (c != -1) {
        nmi_counter = 0;
    }
#endif

    return c;
}

static void
_serial_putc(__unused int a, __unused int b, int c)
{
    serial_putc(c);
}

int
cngetc(void)
{
    return cons_ops[cons_ops_index].getc(0, 0, TRUE, FALSE);
}

int
cnmaygetc(void)
{
    return cons_ops[cons_ops_index].getc(0, 0, FALSE, FALSE);
}

int
vcgetc(__unused int l, __unused int u, __unused boolean_t wait, __unused boolean_t raw)
{
    char c;

    if (0 == PE_stub_poll_input(0, &c)) {
        return c;
    } else {
        return 0;
    }
}

#ifdef CONFIG_XNUPOST
static uint32_t cons_test_ops_count = 0;

/*
 * Try to do multiple cpu buffer allocs and frees, intentionally
 * allowing for pre-emption.
 */
static void
alloc_free_func(void * arg, wait_result_t wres __unused)
{
    console_buf_t * cbp = NULL;
    int count = (int)(uintptr_t)arg;

    T_LOG("Doing %d iterations of console cpu alloc and free.", count);

    while (count-- > 0) {
        os_atomic_inc(&cons_test_ops_count, relaxed);
        cbp = (console_buf_t *)console_cpu_alloc(0);
        if (cbp == NULL) {
            T_ASSERT_NOTNULL(cbp, "cpu allocation failed");
        }
        console_cpu_free(cbp);
        cbp = NULL;
        /* give another thread a chance to come in */
        delay(10);
    }
}

/*
 * Log to the console by multiple methods: printf, unbuffered write, console_write()
 */
static void
log_to_console_func(void * arg __unused, wait_result_t wres __unused)
{
    uint64_t thread_id = current_thread()->thread_id;
    char somedata[10] = "123456789";
    for (int i = 0; i < 26; i++) {
        os_atomic_inc(&cons_test_ops_count, relaxed);
        printf(" thid: %llu printf iteration %d\n", thread_id, i);
        cnputc_unbuffered((char)('A' + i));
        cnputc_unbuffered('\n');
        console_write((char *)somedata, sizeof(somedata));
        delay(10);
    }
    printf("finished the log_to_console_func operations\n\n");
}

kern_return_t
console_serial_parallel_log_tests(void)
{
    thread_t thread;
    kern_return_t kr;
    cons_test_ops_count = 0;

    kr = kernel_thread_start(log_to_console_func, NULL, &thread);
    T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully");

    delay(100);

    log_to_console_func(NULL, 0);

    /* wait until the other thread has also finished */
    while (cons_test_ops_count < 52) {
        delay(1000);
    }

    thread_deallocate(thread);
    T_LOG("parallel_logging tests are now complete. From this point forward we expect full lines\n");
    return KERN_SUCCESS;
}

kern_return_t
console_serial_alloc_rel_tests(void)
{
    unsigned long i, free_buf_count = 0;
    uint32_t * p;
    console_buf_t * cbp;
    thread_t thread;
    kern_return_t kr;

    T_LOG("doing alloc/release tests");

    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        cbp = (console_buf_t *)(void *)p;
        /* p should either be an allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
        T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "");
        if (*p == CPU_BUF_FREE_HEX) {
            free_buf_count++;
        }
    }

    T_ASSERT_GE_ULONG(free_buf_count, 2, "At least 2 buffers should be free");
    cons_test_ops_count = 0;

    kr = kernel_thread_start(alloc_free_func, (void *)1000, &thread);
    T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully");

    /* yield the cpu to give the other thread a chance to get on-core */
    delay(100);

    alloc_free_func((void *)1000, 0);

    /* wait until the other thread finishes its tasks */
    while (cons_test_ops_count < 2000) {
        delay(1000);
    }

    thread_deallocate(thread);
    /* verify again that at least 2 slots are free */
    free_buf_count = 0;
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        cbp = (console_buf_t *)(void *)p;
        /* p should either be an allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
        T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "");
        if (*p == CPU_BUF_FREE_HEX) {
            free_buf_count++;
        }
    }
    T_ASSERT_GE_ULONG(free_buf_count, 2, "At least 2 buffers should be free after alloc free tests");

    return KERN_SUCCESS;
}

kern_return_t
console_serial_test(void)
{
    unsigned long i;
    char buffer[CPU_BUFFER_LEN];
    uint32_t * p;
    console_buf_t * cbp;

    T_LOG("Checking console_ring status.");
    T_ASSERT_EQ_INT(console_ring.len, KERN_CONSOLE_RING_SIZE, "Console ring size is not correct.");
    T_ASSERT_GT_INT(KERN_CONSOLE_BUF_SIZE, KERN_CONSOLE_RING_SIZE, "total console buffer allocation must exceed the ring size.");

    /* verify the per-cpu buffer slots at the end of console_ring.buffer */
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        cbp = (console_buf_t *)(void *)p;
        /* p should either be an allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
        T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "verified initialization of cpu buffers p=%p", (void *)p);
    }

    /* set up the buffer with printable digit characters */
    for (i = 0; i < CPU_BUFFER_LEN; i++) {
        buffer[i] = (char)('0' + (i % 10));
    }
    buffer[CPU_BUFFER_LEN - 1] = '\0';

    T_LOG("Printing %d char string to serial one char at a time.", (int)CPU_BUFFER_LEN);
    for (i = 0; i < CPU_BUFFER_LEN; i++) {
        printf("%c", buffer[i]);
    }
    printf("End\n");
    T_LOG("Printing %d char string to serial as a whole", (int)CPU_BUFFER_LEN);
    printf("%s\n", buffer);

    T_LOG("Using console_write call repeatedly for 100 iterations");
    for (i = 0; i < 100; i++) {
        console_write(&buffer[0], 14);
        if ((i % 6) == 0) {
            printf("\n");
        }
    }
    printf("\n");

    T_LOG("Using T_LOG to print buffer %s", buffer);
    return KERN_SUCCESS;
}
#endif