osfmk/console/serial_console.c (apple/xnu, xnu-7195.101.1)
1 /*
2 * Copyright (c) 2000-2019 Apple, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef __x86_64__
30 #include <i386/mp.h>
31 #include <i386/cpu_data.h>
32 #include <i386/bit_routines.h>
33 #include <i386/machine_routines.h>
34 #include <i386/misc_protos.h>
35 #include <i386/serial_io.h>
36 #endif /* __x86_64__ */
37
38 #include <machine/machine_cpu.h>
39 #include <libkern/OSAtomic.h>
40 #include <vm/vm_kern.h>
41 #include <vm/vm_map.h>
42 #include <console/video_console.h>
43 #include <console/serial_protos.h>
44 #include <kern/startup.h>
45 #include <kern/thread.h>
46 #include <kern/cpu_data.h>
47 #include <libkern/section_keywords.h>
48
49 #if __arm__ || __arm64__
50 #include <machine/machine_routines.h>
51 #include <arm/cpu_data_internal.h>
52 #endif
53
54 #ifdef CONFIG_XNUPOST
55 #include <tests/xnupost.h>
56 kern_return_t console_serial_test(void);
57 kern_return_t console_serial_alloc_rel_tests(void);
58 kern_return_t console_serial_parallel_log_tests(void);
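/*
 * Reserve two spare slots beyond MAX_CPUS, presumably so the alloc/free tests
 * below can always find free cpu buffers (console_serial_alloc_rel_tests()
 * expects at least two free slots).
 */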
59 #define MAX_CPU_SLOTS (MAX_CPUS + 2)
60 #endif
61
62 #ifndef MAX_CPU_SLOTS
63 #define MAX_CPU_SLOTS (MAX_CPUS)
64 #endif
65
66 static struct {
67 char * buffer;
68 int len;
69 int used;
70 int nreserved;
71 char * write_ptr;
72 char * read_ptr;
73 decl_simple_lock_data(, read_lock);
74 lck_ticket_t write_lock;
75 } console_ring;
76
77 /*
78 * New allocation mechanism for console buffers
79 * Total allocation: 1 * PAGE_SIZE
80  *   - Each cpu gets a CPU_CONS_BUF_SIZE-byte buffer
81  *   - The kernel-wide console ring gets PAGE_SIZE - MAX_CPU_SLOTS * CPU_CONS_BUF_SIZE
82 *
83  * On return from console_init() the memory is set up as follows:
84 * +----------------------------+-------------+-------------+-------------+-------------+
85 * |console ring buffer---------|f2eec075-----|f2eec075-----|f2eec075-----|f2eec075-----|
86 * +----------------------------+-------------+-------------+-------------+-------------+
87  * Each cpu allocation finds the first free slot (marked f2eec075) and uses that buffer.
88 *
89 */
90
91 #define CPU_CONS_BUF_SIZE 256
92 #define CPU_BUF_FREE_HEX 0xf2eec075
93
94 #define KERN_CONSOLE_BUF_SIZE vm_map_round_page(CPU_CONS_BUF_SIZE *(MAX_CPU_SLOTS + 1), PAGE_SIZE - 1)
95 #define KERN_CONSOLE_RING_SIZE (KERN_CONSOLE_BUF_SIZE - (CPU_CONS_BUF_SIZE * MAX_CPU_SLOTS))
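
/*
 * Worked example (illustrative only; MAX_CPU_SLOTS depends on the configuration):
 * with 4 KiB pages and MAX_CPU_SLOTS == 8,
 *   KERN_CONSOLE_BUF_SIZE  = round_page(256 * 9) = 4096 bytes
 *   KERN_CONSOLE_RING_SIZE = 4096 - (256 * 8)    = 2048 bytes
 * so half of the page holds the shared ring and half holds the per-cpu buffers.
 */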
96
97 /*
98 * A serial line running at 115200 bps can output ~11.5 characters per millisecond.
99 * Synchronous serial logging with preemption disabled fundamentally prevents us
100 * from hitting expected scheduling deadlines, but we can at least tone it down a bit.
101 *
102 * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148)
103 *
104  * Keep interrupt-disabled periods shorter than 1 ms.
105 */
106 #define MAX_NO_PREEMPT_FLUSH_SIZE 8
107 #define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE)
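
/*
 * The numbers behind these limits (assuming 8-N-1 framing, i.e. 10 bits per char):
 *   115200 bps / 10 bits per char = 11520 chars/s ~= 11.5 chars/ms
 *   an 8-char no-preempt chunk    ~= 8 / 11.5     ~= 0.7 ms of output,
 * which keeps each preemption-disabled burst under the 1 ms target above.
 */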
108
109 typedef struct console_buf {
110 char * buf_base;
111 char * buf_end;
112 char * buf_ptr;
113 #define CPU_BUFFER_LEN (CPU_CONS_BUF_SIZE - 3 * (sizeof(char *)))
114 char buf[CPU_BUFFER_LEN];
115 } console_buf_t;
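
/*
 * CPU_BUFFER_LEN is sized so that sizeof(console_buf_t) equals CPU_CONS_BUF_SIZE
 * (e.g. 256 - 3 * 8 = 232 bytes of buffer on LP64), which is why the per-cpu
 * slots after the ring are addressed in sizeof(console_buf_t) strides.
 */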
116
117 extern int serial_getc(void);
118 extern void serial_putc(char);
119
120 #if DEBUG || DEVELOPMENT
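/*
 * The "nointr_consio" boot-arg (DEBUG/DEVELOPMENT only) allows console I/O from
 * interrupts-disabled context instead of panicking or dropping the output; see
 * console_io_allowed() below.
 */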
121 TUNABLE(bool, allow_printf_from_interrupts_disabled_context, "nointr_consio", false);
122 #else
123 #define allow_printf_from_interrupts_disabled_context false
124 #endif
125
126 static void _serial_putc(int, int, int);
127
128 SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = {
129 {
130 .putc = _serial_putc, .getc = _serial_getc,
131 },
132 {
133 .putc = vcputc, .getc = vcgetc,
134 },
135 };
136
137 SECURITY_READ_ONLY_EARLY(uint32_t) nconsops = (sizeof cons_ops / sizeof cons_ops[0]);
138
139 #if __x86_64__
140 uint32_t cons_ops_index = VC_CONS_OPS;
141 #else
142 SECURITY_READ_ONLY_LATE(uint32_t) cons_ops_index = VC_CONS_OPS;
143 #endif
144
145 LCK_GRP_DECLARE(console_lck_grp, "console");
146
147 // NMI static variables
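// Typing this 32-character string on the serial console drops into the debugger
// (see the matching logic in _serial_getc() below).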
148 #define NMI_STRING_SIZE 32
149 char nmi_string[NMI_STRING_SIZE] = "afDIGHr84A84jh19Kphgp428DNPdnapq";
150 static int nmi_counter = 0;
151
152 static bool console_suspended = false;
153
154 static inline bool
155 console_io_allowed(void)
156 {
157 if (!allow_printf_from_interrupts_disabled_context &&
158 !console_suspended &&
159 startup_phase >= STARTUP_SUB_EARLY_BOOT &&
160 !ml_get_interrupts_enabled()) {
161 #if defined(__arm__) || defined(__arm64__) || DEBUG || DEVELOPMENT
162 panic("Console I/O from interrupt-disabled context");
163 #else
164 return false;
165 #endif
166 }
167
168 return true;
169 }
170
171 static void
172 console_ring_lock_init(void)
173 {
174 simple_lock_init(&console_ring.read_lock, 0);
175 lck_ticket_init(&console_ring.write_lock, &console_lck_grp);
176 }
177
178 void
179 console_init(void)
180 {
181 int ret, i;
182 uint32_t * p;
183
184 if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len)) {
185 return;
186 }
187
188 assert(console_ring.len > 0);
189
190 ret = kmem_alloc_flags(kernel_map, (vm_offset_t *)&console_ring.buffer,
191 KERN_CONSOLE_BUF_SIZE + 2 * PAGE_SIZE, VM_KERN_MEMORY_OSFMK,
192 KMA_KOBJECT | KMA_PERMANENT | KMA_GUARD_FIRST | KMA_GUARD_LAST);
193 if (ret != KERN_SUCCESS) {
194 panic("console_ring_init() failed to allocate ring buffer, error %d\n", ret);
195 }
196
197 console_ring.buffer += PAGE_SIZE;
198
199         /* set up memory for the per-cpu console buffers */
200 for (i = 0; i < MAX_CPU_SLOTS; i++) {
201 p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
202 *p = CPU_BUF_FREE_HEX;
203 }
204
205 console_ring.used = 0;
206 console_ring.nreserved = 0;
207 console_ring.read_ptr = console_ring.buffer;
208 console_ring.write_ptr = console_ring.buffer;
209 console_ring_lock_init();
210 }
211
212 void *
213 console_cpu_alloc(__unused boolean_t boot_processor)
214 {
215 console_buf_t * cbp;
216 int i;
217 uint32_t * p = NULL;
218
219 console_init();
220 assert(console_ring.buffer != NULL);
221
222 /* select the next slot from the per cpu buffers at end of console_ring.buffer */
223 for (i = 0; i < MAX_CPU_SLOTS; i++) {
224 p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
225 if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p)) {
226 break;
227 }
228 }
229 assert(i < MAX_CPU_SLOTS);
230
231 cbp = (console_buf_t *)(uintptr_t)p;
232 if ((uintptr_t)cbp >= (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE) {
233 printf("console_cpu_alloc() failed to allocate cpu buffer\n");
234 return NULL;
235 }
236
237 cbp->buf_base = (char *)&cbp->buf;
238 cbp->buf_ptr = cbp->buf_base;
239 cbp->buf_end = cbp->buf_base + CPU_BUFFER_LEN;
240 return (void *)cbp;
241 }
242
243 void
244 console_cpu_free(void * buf)
245 {
246 assert((uintptr_t)buf > (uintptr_t)console_ring.buffer);
247 assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE);
248 if (buf != NULL) {
249 *(uint32_t *)buf = CPU_BUF_FREE_HEX;
250 }
251 }
252
253 static inline char*
254 console_ring_reserve_space(int nchars)
255 {
256 char *write_ptr = NULL;
257 lck_ticket_lock(&console_ring.write_lock, &console_lck_grp);
258 if ((console_ring.len - console_ring.used) >= nchars) {
259 console_ring.used += nchars;
260 mp_disable_preemption(); // Don't allow preemption while holding a reservation; otherwise console_ring_try_empty() could take arbitrarily long
261 os_atomic_inc(&console_ring.nreserved, relaxed);
262 write_ptr = console_ring.write_ptr;
263 console_ring.write_ptr =
264 console_ring.buffer + ((console_ring.write_ptr - console_ring.buffer + nchars) % console_ring.len);
265 }
266 lck_ticket_unlock(&console_ring.write_lock);
267 return write_ptr;
268 }
269
270 static inline void
271 console_ring_unreserve_space(void)
272 {
273 os_atomic_dec(&console_ring.nreserved, relaxed);
274 mp_enable_preemption();
275 }
276
277 static inline void
278 console_ring_put(char **write_ptr, char ch)
279 {
280 assert(console_ring.nreserved > 0);
281 **write_ptr = ch;
282 ++(*write_ptr);
283 if ((*write_ptr - console_ring.buffer) == console_ring.len) {
284 *write_ptr = console_ring.buffer;
285 }
286 }
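
/*
 * A minimal usage sketch of the reservation API above (the helper name is
 * hypothetical, not part of xnu): reserve space, copy characters, then release
 * the reservation. Real callers such as console_write() drain the ring via
 * console_ring_try_empty() and retry when no space is available.
 */
static inline boolean_t
console_ring_put_string_sketch(const char *s, int nchars)
{
	char *write_ptr = console_ring_reserve_space(nchars);

	if (write_ptr == NULL) {
		/* no space reserved; a real caller would drain the ring and retry */
		return FALSE;
	}

	for (int i = 0; i < nchars; i++) {
		console_ring_put(&write_ptr, s[i]);
	}

	/* drop the reservation and re-enable the preemption disabled in reserve_space() */
	console_ring_unreserve_space();
	return TRUE;
}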
287
288 static inline boolean_t
289 cpu_buffer_put(console_buf_t * cbp, char ch)
290 {
291 if (ch != '\0' && cbp->buf_ptr < cbp->buf_end) {
292 *(cbp->buf_ptr++) = ch;
293 return TRUE;
294 } else {
295 return FALSE;
296 }
297 }
298
299 static inline int
300 cpu_buffer_size(console_buf_t * cbp)
301 {
302 return (int)(cbp->buf_ptr - cbp->buf_base);
303 }
304
305 static inline uint32_t
306 get_cons_ops_index(void)
307 {
308 uint32_t idx = cons_ops_index;
309
310 if (idx >= nconsops) {
311 panic("Bad cons_ops_index: %d", idx);
312 }
313
314 return idx;
315 }
316
317 static inline void
318 _cnputs(char * c, int size)
319 {
320 extern int disableConsoleOutput;
321
322 if (disableConsoleOutput) {
323 return;
324 }
325
326 assert(c != NULL);
327
328 const uint32_t idx = get_cons_ops_index();
329
330 while (size-- > 0) {
331 if (*c == '\n') {
332 cons_ops[idx].putc(0, 0, '\r');
333 }
334 cons_ops[idx].putc(0, 0, *c);
335 c++;
336 }
337 }
338
339 void
340 cnputc_unbuffered(char c)
341 {
342 _cnputs(&c, 1);
343 }
344
345
346 void
347 cnputcusr(char c)
348 {
349 cnputsusr(&c, 1);
350 }
351
352 void
353 cnputsusr(char *s, int size)
354 {
355 console_write(s, size);
356 }
357
358 static void
359 console_ring_try_empty(void)
360 {
361 char flush_buf[MAX_NO_PREEMPT_FLUSH_SIZE];
362
363 int nchars_out = 0;
364 int total_chars_out = 0;
365 int size_before_wrap = 0;
366 bool in_debugger = (kernel_debugger_entry_count > 0);
367
368
369 if (__improbable(!console_io_allowed())) {
370 return;
371 }
372
373 do {
374 /*
375 * Try to get the read lock on the ring buffer to empty it.
376 * If this fails someone else is already emptying...
377 */
378 if (!in_debugger && !simple_lock_try(&console_ring.read_lock, &console_lck_grp)) {
379 return;
380 }
381
382 if (__probable(!in_debugger)) {
383 lck_ticket_lock(&console_ring.write_lock, &console_lck_grp);
384 while (os_atomic_load(&console_ring.nreserved, relaxed) > 0) {
385 cpu_pause();
386 }
387 }
388
389                 /* try a small chunk at a time, so we allow writes from other cpus into the buffer */
390 nchars_out = MIN(console_ring.used, (int)sizeof(flush_buf));
391
392 /* account for data to be read before wrap around */
393 size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
394 if (nchars_out > size_before_wrap) {
395 nchars_out = size_before_wrap;
396 }
397
398 if (nchars_out > 0) {
399 memcpy(flush_buf, console_ring.read_ptr, nchars_out);
400 console_ring.read_ptr =
401 console_ring.buffer + ((console_ring.read_ptr - console_ring.buffer + nchars_out) % console_ring.len);
402 console_ring.used -= nchars_out;
403 }
404
405 if (__probable(!in_debugger)) {
406 lck_ticket_unlock(&console_ring.write_lock);
407 }
408
409 if (nchars_out > 0) {
410 total_chars_out += nchars_out;
411 _cnputs(flush_buf, nchars_out);
412 }
413
414 if (__probable(!in_debugger)) {
415 simple_unlock(&console_ring.read_lock);
416 }
417
418 /*
419                  * In case we end up being the console drain thread
420                  * for far too long, break out, except in panic/suspend cases
421                  * where we should drain the full buffer.
422 */
423 if (!console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE)) {
424 break;
425 }
426 } while (nchars_out > 0);
427 }
428
429
430 void
431 console_suspend()
432 {
433 console_suspended = true;
434 console_ring_try_empty();
435 }
436
437 void
438 console_resume()
439 {
440 console_suspended = false;
441 }
442
443 void
444 console_write(char * str, int size)
445 {
446 console_init();
447 char *write_ptr;
448 int chunk_size = CPU_CONS_BUF_SIZE;
449 int i = 0;
450
451 if (__improbable(!console_io_allowed())) {
452 return;
453 } else if (__improbable(console_suspended)) {
454 /*
455 * Put directly to console if we're heading into suspend or if we're in
456 * the kernel debugger for a panic/stackshot. If any of the other cores
457 * happened to halt while holding any of the console locks, attempting
458 * to use the normal path will result in sadness.
459 */
460 _cnputs(str, size);
461 return;
462 }
463
464 while (size > 0) {
465 if (size < chunk_size) {
466 chunk_size = size;
467 }
468 while ((write_ptr = console_ring_reserve_space(chunk_size)) == NULL) {
469 console_ring_try_empty();
470 }
471
472 for (i = 0; i < chunk_size; i++) {
473 console_ring_put(&write_ptr, str[i]);
474 }
475
476 console_ring_unreserve_space();
477 str = &str[i];
478 size -= chunk_size;
479 }
480
481 console_ring_try_empty();
482 }
483
484 void
485 cnputc(char c)
486 {
487 console_buf_t * cbp;
488 cpu_data_t * cpu_data_p;
489 boolean_t needs_print = TRUE;
490 char * write_ptr;
491 char * cp;
492
493 restart:
494 mp_disable_preemption();
495 cpu_data_p = current_cpu_datap();
496 cbp = (console_buf_t *)cpu_data_p->cpu_console_buf;
497 if (console_suspended || cbp == NULL) {
498 mp_enable_preemption();
499 /*
500 * Put directly if console ring is not initialized or we're heading into suspend.
501 * Also do this if we're in the kernel debugger for a panic or stackshot.
502 * If any of the other cores happened to halt while holding any of the console
503 * locks, attempting to use the normal path will result in sadness.
504 */
505 _cnputs(&c, 1);
506 return;
507 }
508
509 /*
510          * add to the per-cpu buf
511 * If the cpu buffer is full, we'll flush, then try
512 * another put. If it fails a second time... screw
513 * it.
514 */
515 if (needs_print && !cpu_buffer_put(cbp, c)) {
516 write_ptr = console_ring_reserve_space(cpu_buffer_size(cbp));
517 if (write_ptr == NULL) {
518 mp_enable_preemption();
519
520 console_ring_try_empty();
521 goto restart;
522 }
523
524 for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) {
525 console_ring_put(&write_ptr, *cp);
526 }
527 console_ring_unreserve_space();
528
529 cbp->buf_ptr = cbp->buf_base;
530 cpu_buffer_put(cbp, c);
531 }
532
533 needs_print = FALSE;
534
535 if (c != '\n') {
536 mp_enable_preemption();
537 return;
538 }
539
540 /* We printed a newline, time to flush the CPU buffer to the global buffer */
541
542 /*
543 * Is there enough space in the shared ring buffer?
544 * Try to empty if not.
545 * Note, we want the entire local buffer to fit to
546 * avoid another cpu interjecting.
547 */
548
549 write_ptr = console_ring_reserve_space(cpu_buffer_size(cbp));
550 if (write_ptr == NULL) {
551 mp_enable_preemption();
552
553 console_ring_try_empty();
554 goto restart;
555 }
556
557 for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) {
558 console_ring_put(&write_ptr, *cp);
559 }
560
561 console_ring_unreserve_space();
562 cbp->buf_ptr = cbp->buf_base;
563
564 mp_enable_preemption();
565
566 console_ring_try_empty();
567
568 return;
569 }
570
571 int
572 _serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t raw)
573 {
574 int c;
575 do {
576 c = serial_getc();
577 } while (wait && c < 0);
578
579 // Check for the NMI string
580 if (c == nmi_string[nmi_counter]) {
581 nmi_counter++;
582 if (nmi_counter == NMI_STRING_SIZE) {
583 // We've got the NMI string, now do an NMI
584 Debugger("Automatic NMI");
585 nmi_counter = 0;
586 return '\n';
587 }
588 } else if (c != -1) {
589 nmi_counter = 0;
590 }
591
592 return c;
593 }
594
595 static void
596 _serial_putc(__unused int a, __unused int b, int c)
597 {
598 serial_putc((char)c);
599 }
600
601 int
602 cngetc(void)
603 {
604 uint32_t idx = get_cons_ops_index();
605 return cons_ops[idx].getc(0, 0, TRUE, FALSE);
606 }
607
608 int
609 cnmaygetc(void)
610 {
611 uint32_t idx = get_cons_ops_index();
612 return cons_ops[idx].getc(0, 0, FALSE, FALSE);
613 }
614
615 int
616 vcgetc(__unused int l, __unused int u, __unused boolean_t wait, __unused boolean_t raw)
617 {
618 char c;
619
620 if (0 == PE_stub_poll_input(0, &c)) {
621 return c;
622 } else {
623 return 0;
624 }
625 }
626
627 #ifdef CONFIG_XNUPOST
628 static uint32_t cons_test_ops_count = 0;
629
630 /*
631  * Try to do multiple cpu buffer allocs and frees, and intentionally
632  * allow for preemption.
633 */
634 static void
635 alloc_free_func(void * arg, wait_result_t wres __unused)
636 {
637 console_buf_t * cbp = NULL;
638 int count = (int)arg;
639
640 T_LOG("Doing %d iterations of console cpu alloc and free.", count);
641
642 while (count-- > 0) {
643 os_atomic_inc(&cons_test_ops_count, relaxed);
644 cbp = (console_buf_t *)console_cpu_alloc(0);
645 if (cbp == NULL) {
646 T_ASSERT_NOTNULL(cbp, "cpu allocation failed");
647 }
648 console_cpu_free(cbp);
649 cbp = NULL;
650                 /* give another thread a chance to come in */
651 delay(10);
652 }
653 }
654
655 /*
656 * Log to console by multiple methods - printf, unbuffered write, console_write()
657 */
658 static void
659 log_to_console_func(void * arg __unused, wait_result_t wres __unused)
660 {
661 uint64_t thread_id = current_thread()->thread_id;
662 char somedata[10] = "123456789";
663 for (int i = 0; i < 26; i++) {
664 os_atomic_inc(&cons_test_ops_count, relaxed);
665 printf(" thid: %llu printf iteration %d\n", thread_id, i);
666 cnputc_unbuffered((char)('A' + i));
667 cnputc_unbuffered('\n');
668 console_write((char *)somedata, sizeof(somedata));
669 delay(10);
670 }
671 printf("finished the log_to_console_func operations\n\n");
672 }
673
674 kern_return_t
675 console_serial_parallel_log_tests(void)
676 {
677 thread_t thread;
678 kern_return_t kr;
679 cons_test_ops_count = 0;
680
681 kr = kernel_thread_start(log_to_console_func, NULL, &thread);
682 T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully");
683
684 delay(100);
685
686 log_to_console_func(NULL, 0);
687
688 /* wait until other thread has also finished */
689 while (cons_test_ops_count < 52) {
690 delay(1000);
691 }
692
693 thread_deallocate(thread);
694 T_LOG("parallel_logging tests is now complete. From this point forward we expect full lines\n");
695 return KERN_SUCCESS;
696 }
697
698 kern_return_t
699 console_serial_alloc_rel_tests(void)
700 {
701 unsigned long i, free_buf_count = 0;
702 uint32_t * p;
703 console_buf_t * cbp;
704 thread_t thread;
705 kern_return_t kr;
706
707 T_LOG("doing alloc/release tests");
708
709 for (i = 0; i < MAX_CPU_SLOTS; i++) {
710 p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
711 cbp = (console_buf_t *)(void *)p;
712 /* p should either be allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
713 T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "");
714 if (*p == CPU_BUF_FREE_HEX) {
715 free_buf_count++;
716 }
717 }
718
719 T_ASSERT_GE_ULONG(free_buf_count, 2, "At least 2 buffers should be free");
720 cons_test_ops_count = 0;
721
722 kr = kernel_thread_start(alloc_free_func, (void *)1000, &thread);
723 T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully");
724
725         /* yield the cpu to give the other thread a chance to get on-core */
726 delay(100);
727
728 alloc_free_func((void *)1000, 0);
729
730 /* wait until other thread finishes its tasks */
731 while (cons_test_ops_count < 2000) {
732 delay(1000);
733 }
734
735 thread_deallocate(thread);
736         /* verify again that at least 2 slots are free */
737 free_buf_count = 0;
738 for (i = 0; i < MAX_CPU_SLOTS; i++) {
739 p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
740 cbp = (console_buf_t *)(void *)p;
741 /* p should either be allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
742 T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "");
743 if (*p == CPU_BUF_FREE_HEX) {
744 free_buf_count++;
745 }
746 }
747 T_ASSERT_GE_ULONG(free_buf_count, 2, "At least 2 buffers should be free after alloc free tests");
748
749 return KERN_SUCCESS;
750 }
751
752 kern_return_t
753 console_serial_test(void)
754 {
755 unsigned long i;
756 char buffer[CPU_BUFFER_LEN];
757 uint32_t * p;
758 console_buf_t * cbp;
759
760 T_LOG("Checking console_ring status.");
761 T_ASSERT_EQ_INT(console_ring.len, KERN_CONSOLE_RING_SIZE, "Console ring size is not correct.");
762 T_ASSERT_GT_INT(KERN_CONSOLE_BUF_SIZE, KERN_CONSOLE_RING_SIZE, "kernel console buffer size is < allocation.");
763
764 /* select the next slot from the per cpu buffers at end of console_ring.buffer */
765 for (i = 0; i < MAX_CPU_SLOTS; i++) {
766 p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
767 cbp = (console_buf_t *)(void *)p;
768 /* p should either be allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
769 T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "verified initialization of cpu buffers p=%p", (void *)p);
770 }
771
772         /* fill the buffer with digit characters */
773 for (i = 0; i < CPU_BUFFER_LEN; i++) {
774 buffer[i] = (char)('0' + (i % 10));
775 }
776 buffer[CPU_BUFFER_LEN - 1] = '\0';
777
778 T_LOG("Printing %d char string to serial one char at a time.", CPU_BUFFER_LEN);
779 for (i = 0; i < CPU_BUFFER_LEN; i++) {
780 printf("%c", buffer[i]);
781 }
782 printf("End\n");
783 T_LOG("Printing %d char string to serial as a whole", CPU_BUFFER_LEN);
784 printf("%s\n", buffer);
785
786 T_LOG("Using console_write call repeatedly for 100 iterations");
787 for (i = 0; i < 100; i++) {
788 console_write(&buffer[0], 14);
789 if ((i % 6) == 0) {
790 printf("\n");
791 }
792 }
793 printf("\n");
794
795 T_LOG("Using T_LOG to print buffer %s", buffer);
796 return KERN_SUCCESS;
797 }
798 #endif