]> git.saurik.com Git - apple/xnu.git/blob - osfmk/console/serial_console.c
xnu-7195.81.3.tar.gz
[apple/xnu.git] / osfmk / console / serial_console.c
1 /*
2 * Copyright (c) 2000-2019 Apple, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef __x86_64__
30 #include <i386/mp.h>
31 #include <i386/cpu_data.h>
32 #include <i386/bit_routines.h>
33 #include <i386/machine_routines.h>
34 #include <i386/misc_protos.h>
35 #include <i386/serial_io.h>
36 #endif /* __x86_64__ */
37
38 #include <machine/machine_cpu.h>
39 #include <libkern/OSAtomic.h>
40 #include <vm/vm_kern.h>
41 #include <vm/vm_map.h>
42 #include <console/video_console.h>
43 #include <console/serial_protos.h>
44 #include <kern/startup.h>
45 #include <kern/thread.h>
46 #include <kern/cpu_data.h>
47 #include <libkern/section_keywords.h>
48
49 #if __arm__ || __arm64__
50 #include <machine/machine_routines.h>
51 #include <arm/cpu_data_internal.h>
52 #endif
53
54 #ifdef CONFIG_XNUPOST
55 #include <tests/xnupost.h>
56 kern_return_t console_serial_test(void);
57 kern_return_t console_serial_alloc_rel_tests(void);
58 kern_return_t console_serial_parallel_log_tests(void);
59 #define MAX_CPU_SLOTS (MAX_CPUS + 2)
60 #endif
61
62 #ifndef MAX_CPU_SLOTS
63 #define MAX_CPU_SLOTS (MAX_CPUS)
64 #endif
65
static struct {
    char * buffer;    /* ring storage; per-cpu slots live past the first `len` bytes */
    int len;          /* capacity of the ring portion; doubles as the init-once flag (see console_init) */
    int used;         /* bytes currently reserved or occupied in the ring */
    int nreserved;    /* outstanding writer reservations (see console_ring_reserve_space) */
    char * write_ptr; /* next byte producers write; wraps modulo len */
    char * read_ptr;  /* next byte the drainer reads; wraps modulo len */
    decl_simple_lock_data(, read_lock); /* serializes drainers in console_ring_try_empty */
    lck_ticket_t write_lock;            /* protects used/write_ptr while reserving space */
} console_ring;
76
77 /*
78 * New allocation mechanism for console buffers
79 * Total allocation: 1 * PAGE_SIZE
80 * - Each cpu gets CPU_CONS_BUF_SIZE buffer
81 * - Kernel wide console ring gets PAGE_SIZE - MAX_CPU_SLOTS * CPU_CONS_BUF_SIZE
82 *
83 * At the return from console_init() the memory is setup as follows:
84 * +----------------------------+-------------+-------------+-------------+-------------+
85 * |console ring buffer---------|f2eec075-----|f2eec075-----|f2eec075-----|f2eec075-----|
86 * +----------------------------+-------------+-------------+-------------+-------------+
87 * Each cpu allocation will find the first (f2eec075) and use that buffer.
88 *
89 */
90
91 #define CPU_CONS_BUF_SIZE 256
92 #define CPU_BUF_FREE_HEX 0xf2eec075
93
94 #define KERN_CONSOLE_BUF_SIZE vm_map_round_page(CPU_CONS_BUF_SIZE *(MAX_CPU_SLOTS + 1), PAGE_SIZE - 1)
95 #define KERN_CONSOLE_RING_SIZE (KERN_CONSOLE_BUF_SIZE - (CPU_CONS_BUF_SIZE * MAX_CPU_SLOTS))
96
97 /*
98 * A serial line running at 115200 bps can output ~11.5 characters per millisecond.
99 * Synchronous serial logging with preemption disabled fundamentally prevents us
100 * from hitting expected scheduling deadlines, but we can at least tone it down a bit.
101 *
102 * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148)
103 *
104 * Keep interrupt disabled periods shorter than 1ms
105 */
106 #define MAX_NO_PREEMPT_FLUSH_SIZE 8
107 #define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE)
108
/*
 * Per-cpu staging buffer: cnputc() accumulates characters here until a
 * newline arrives (or the buffer fills), then flushes the whole buffer to
 * the shared ring under a single reservation so lines don't interleave.
 */
typedef struct console_buf {
    char * buf_base; /* first byte of buf[] */
    char * buf_end;  /* one past the last usable byte */
    char * buf_ptr;  /* next free byte; buf_ptr == buf_end means full */
#define CPU_BUFFER_LEN (CPU_CONS_BUF_SIZE - 3 * (sizeof(char *)))
    char buf[CPU_BUFFER_LEN]; /* sized so the whole struct is CPU_CONS_BUF_SIZE bytes */
} console_buf_t;
116
117 extern int serial_getc(void);
118 extern void serial_putc(char);
119
120 #if DEBUG || DEVELOPMENT
121 TUNABLE(bool, allow_printf_from_interrupts_disabled_context, "nointr_consio", false);
122 #else
123 #define allow_printf_from_interrupts_disabled_context false
124 #endif
125
126 static void _serial_putc(int, int, int);
127
/*
 * Console backend table: entry 0 is the serial port (_serial_putc/_serial_getc),
 * entry 1 the video console (vcputc/vcgetc). cons_ops_index selects the
 * active backend; see get_cons_ops_index().
 */
SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = {
    {
        .putc = _serial_putc, .getc = _serial_getc,
    },
    {
        .putc = vcputc, .getc = vcgetc,
    },
};
136
137 SECURITY_READ_ONLY_EARLY(uint32_t) nconsops = (sizeof cons_ops / sizeof cons_ops[0]);
138
139 #if __x86_64__
140 uint32_t cons_ops_index = VC_CONS_OPS;
141 #else
142 SECURITY_READ_ONLY_LATE(uint32_t) cons_ops_index = VC_CONS_OPS;
143 #endif
144
145 LCK_GRP_DECLARE(console_lck_grp, "console");
146
147 // NMI static variables
148 #define NMI_STRING_SIZE 32
149 char nmi_string[NMI_STRING_SIZE] = "afDIGHr84A84jh19Kphgp428DNPdnapq";
150 static int nmi_counter = 0;
151
152 static bool console_suspended = false;
153
/*
 * Returns true when console I/O may proceed on the current CPU.
 *
 * Synchronous console output with interrupts disabled can stall the CPU for
 * unbounded time, so once early boot is past such I/O is refused — unless
 * the "nointr_consio" tunable overrides it or a suspend is in progress.
 * On ARM and on DEBUG/DEVELOPMENT builds this panics instead of returning
 * false, to make offenders visible.
 */
static inline bool
console_io_allowed(void)
{
    if (!allow_printf_from_interrupts_disabled_context &&
        !console_suspended &&
        startup_phase >= STARTUP_SUB_EARLY_BOOT &&
        !ml_get_interrupts_enabled()) {
#if defined(__arm__) || defined(__arm64__) || DEBUG || DEVELOPMENT
        panic("Console I/O from interrupt-disabled context");
#else
        return false;
#endif
    }

    return true;
}
170
171 static void
172 console_ring_lock_init(void)
173 {
174 simple_lock_init(&console_ring.read_lock, 0);
175 lck_ticket_init(&console_ring.write_lock, &console_lck_grp);
176 }
177
/*
 * One-time initialization of the shared console ring and the pool of
 * per-cpu console buffers. Safe to call repeatedly and concurrently:
 * the compare-and-swap on console_ring.len lets only the first caller
 * through; everyone else returns immediately.
 */
void
console_init(void)
{
    int ret, i;
    uint32_t * p;

    /* len doubles as the "already initialized" flag: 0 -> ring size, exactly once */
    if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len)) {
        return;
    }

    assert(console_ring.len > 0);

    /* the two extra pages become guard pages (KMA_GUARD_FIRST/KMA_GUARD_LAST) */
    ret = kmem_alloc_flags(kernel_map, (vm_offset_t *)&console_ring.buffer,
        KERN_CONSOLE_BUF_SIZE + 2 * PAGE_SIZE, VM_KERN_MEMORY_OSFMK,
        KMA_KOBJECT | KMA_PERMANENT | KMA_GUARD_FIRST | KMA_GUARD_LAST);
    if (ret != KERN_SUCCESS) {
        panic("console_ring_init() failed to allocate ring buffer, error %d\n", ret);
    }

    /* skip past the leading guard page */
    console_ring.buffer += PAGE_SIZE;

    /* setup memory for per cpu console buffers: mark every slot free */
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        *p = CPU_BUF_FREE_HEX;
    }

    console_ring.used = 0;
    console_ring.nreserved = 0;
    console_ring.read_ptr = console_ring.buffer;
    console_ring.write_ptr = console_ring.buffer;
    console_ring_lock_init();
}
211
212 void *
213 console_cpu_alloc(__unused boolean_t boot_processor)
214 {
215 console_buf_t * cbp;
216 int i;
217 uint32_t * p = NULL;
218
219 console_init();
220 assert(console_ring.buffer != NULL);
221
222 /* select the next slot from the per cpu buffers at end of console_ring.buffer */
223 for (i = 0; i < MAX_CPU_SLOTS; i++) {
224 p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
225 if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p)) {
226 break;
227 }
228 }
229 assert(i < MAX_CPU_SLOTS);
230
231 cbp = (console_buf_t *)(uintptr_t)p;
232 if ((uintptr_t)cbp >= (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE) {
233 printf("console_cpu_alloc() failed to allocate cpu buffer\n");
234 return NULL;
235 }
236
237 cbp->buf_base = (char *)&cbp->buf;
238 cbp->buf_ptr = cbp->buf_base;
239 cbp->buf_end = cbp->buf_base + CPU_BUFFER_LEN;
240 return (void *)cbp;
241 }
242
243 void
244 console_cpu_free(void * buf)
245 {
246 assert((uintptr_t)buf > (uintptr_t)console_ring.buffer);
247 assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE);
248 if (buf != NULL) {
249 *(uint32_t *)buf = CPU_BUF_FREE_HEX;
250 }
251 }
252
/*
 * Reserve nchars of space in the console ring.
 *
 * On success, returns the position to start writing at and leaves the
 * calling CPU with preemption disabled and console_ring.nreserved
 * incremented; the caller must pair this with console_ring_unreserve_space()
 * once the characters are written. Returns NULL (no reservation held,
 * preemption untouched) when the ring lacks space.
 */
static inline char*
console_ring_reserve_space(int nchars)
{
    char *write_ptr = NULL;
    lck_ticket_lock(&console_ring.write_lock, &console_lck_grp);
    if ((console_ring.len - console_ring.used) >= nchars) {
        console_ring.used += nchars;
        mp_disable_preemption(); // Don't allow preemption while holding a reservation; otherwise console_ring_try_empty() could take arbitrarily long
        os_atomic_inc(&console_ring.nreserved, relaxed);
        write_ptr = console_ring.write_ptr;
        /* advance the shared write pointer past the reserved region, wrapping modulo len */
        console_ring.write_ptr =
            console_ring.buffer + ((console_ring.write_ptr - console_ring.buffer + nchars) % console_ring.len);
    }
    lck_ticket_unlock(&console_ring.write_lock);
    return write_ptr;
}
269
/*
 * Release a reservation taken by console_ring_reserve_space().
 * The decrement must happen before preemption is re-enabled so drainers
 * spinning on nreserved observe the write as complete.
 */
static inline void
console_ring_unreserve_space(void)
{
    os_atomic_dec(&console_ring.nreserved, relaxed);
    mp_enable_preemption();
}
276
277 static inline void
278 console_ring_put(char **write_ptr, char ch)
279 {
280 assert(console_ring.nreserved > 0);
281 **write_ptr = ch;
282 ++(*write_ptr);
283 if ((*write_ptr - console_ring.buffer) == console_ring.len) {
284 *write_ptr = console_ring.buffer;
285 }
286 }
287
288 static inline boolean_t
289 cpu_buffer_put(console_buf_t * cbp, char ch)
290 {
291 if (ch != '\0' && cbp->buf_ptr < cbp->buf_end) {
292 *(cbp->buf_ptr++) = ch;
293 return TRUE;
294 } else {
295 return FALSE;
296 }
297 }
298
299 static inline int
300 cpu_buffer_size(console_buf_t * cbp)
301 {
302 return (int)(cbp->buf_ptr - cbp->buf_base);
303 }
304
305 static inline uint32_t
306 get_cons_ops_index(void)
307 {
308 uint32_t idx = cons_ops_index;
309
310 if (idx >= nconsops) {
311 panic("Bad cons_ops_index: %d", idx);
312 }
313
314 return idx;
315 }
316
317 static inline void
318 _cnputs(char * c, int size)
319 {
320 uint32_t idx = get_cons_ops_index();
321
322 while (size-- > 0) {
323 if (*c == '\n') {
324 cons_ops[idx].putc(0, 0, '\r');
325 }
326 cons_ops[idx].putc(0, 0, *c);
327 c++;
328 }
329 }
330
/* Emit a single character directly to the console device, bypassing the ring. */
void
cnputc_unbuffered(char c)
{
    _cnputs(&c, 1);
}
336
337
/* Single-character variant of cnputsusr (output on behalf of user space). */
void
cnputcusr(char c)
{
    cnputsusr(&c, 1);
}
343
/* Write a user-space originated string via the normal buffered console path. */
void
cnputsusr(char *s, int size)
{
    console_write(s, size);
}
349
/*
 * Drain the console ring to the underlying console device.
 *
 * Only one CPU drains at a time (read_lock); data is copied out in small
 * MAX_NO_PREEMPT_FLUSH_SIZE chunks so other CPUs can keep writing between
 * chunks, and a non-suspended drainer bails out after MAX_TOTAL_FLUSH_SIZE
 * characters so it does not become the console thread forever. When the
 * kernel debugger is active, all locking is skipped — another core may have
 * halted while holding these locks.
 */
static void
console_ring_try_empty(void)
{
    char flush_buf[MAX_NO_PREEMPT_FLUSH_SIZE];

    int nchars_out = 0;
    int total_chars_out = 0;
    int size_before_wrap = 0;
    bool in_debugger = (kernel_debugger_entry_count > 0);


    if (__improbable(!console_io_allowed())) {
        return;
    }

    do {
        /*
         * Try to get the read lock on the ring buffer to empty it.
         * If this fails someone else is already emptying...
         */
        if (!in_debugger && !simple_lock_try(&console_ring.read_lock, &console_lck_grp)) {
            return;
        }

        if (__probable(!in_debugger)) {
            lck_ticket_lock(&console_ring.write_lock, &console_lck_grp);
            /* wait for in-flight writers to finish filling their reserved space */
            while (os_atomic_load(&console_ring.nreserved, relaxed) > 0) {
                cpu_pause();
            }
        }

        /* try small chunk at a time, so we allow writes from other cpus into the buffer */
        nchars_out = MIN(console_ring.used, (int)sizeof(flush_buf));

        /* account for data to be read before wrap around */
        size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
        if (nchars_out > size_before_wrap) {
            nchars_out = size_before_wrap;
        }

        if (nchars_out > 0) {
            memcpy(flush_buf, console_ring.read_ptr, nchars_out);
            console_ring.read_ptr =
                console_ring.buffer + ((console_ring.read_ptr - console_ring.buffer + nchars_out) % console_ring.len);
            console_ring.used -= nchars_out;
        }

        if (__probable(!in_debugger)) {
            lck_ticket_unlock(&console_ring.write_lock);
        }

        /* device output happens after dropping the write lock so writers aren't blocked on slow I/O */
        if (nchars_out > 0) {
            total_chars_out += nchars_out;
            _cnputs(flush_buf, nchars_out);
        }

        if (__probable(!in_debugger)) {
            simple_unlock(&console_ring.read_lock);
        }

        /*
         * In case we end up being the console drain thread
         * for far too long, break out. Except in panic/suspend cases
         * where we should clear out full buffer.
         */
        if (!console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE)) {
            break;
        }
    } while (nchars_out > 0);
}
420
421
/*
 * Mark the console suspended and flush whatever is still buffered.
 * While suspended, output bypasses the ring (see console_write / cnputc).
 */
void
console_suspend()
{
    console_suspended = true;
    console_ring_try_empty();
}
428
/* Re-enable buffered console output after console_suspend(). */
void
console_resume()
{
    console_suspended = false;
}
434
/*
 * Buffered console write: copy `size` bytes of str into the shared ring in
 * chunks of at most CPU_CONS_BUF_SIZE, then drain. If console I/O is
 * disallowed the write is dropped; if the console is suspended the bytes go
 * straight to the device, since the ring locks may be unusable.
 */
void
console_write(char * str, int size)
{
    console_init();
    char *write_ptr;
    int chunk_size = CPU_CONS_BUF_SIZE;
    int i = 0;

    if (__improbable(!console_io_allowed())) {
        return;
    } else if (__improbable(console_suspended)) {
        /*
         * Put directly to console if we're heading into suspend or if we're in
         * the kernel debugger for a panic/stackshot. If any of the other cores
         * happened to halt while holding any of the console locks, attempting
         * to use the normal path will result in sadness.
         */
        _cnputs(str, size);
        return;
    }

    while (size > 0) {
        if (size < chunk_size) {
            chunk_size = size;
        }
        /* spin until the ring has room for the chunk, draining it ourselves as needed */
        while ((write_ptr = console_ring_reserve_space(chunk_size)) == NULL) {
            console_ring_try_empty();
        }

        for (i = 0; i < chunk_size; i++) {
            console_ring_put(&write_ptr, str[i]);
        }

        console_ring_unreserve_space();
        str = &str[i];
        size -= chunk_size;
    }

    console_ring_try_empty();
}
475
/*
 * Buffered single-character console output.
 *
 * Characters accumulate in the calling CPU's private buffer and are flushed
 * to the shared ring when a newline arrives or the cpu buffer fills. The
 * entire cpu buffer is reserved in the ring at once so lines from different
 * CPUs don't interleave mid-line. Falls back to direct device output when
 * the per-cpu buffer doesn't exist yet or the console is suspended.
 */
void
cnputc(char c)
{
    console_buf_t * cbp;
    cpu_data_t * cpu_data_p;
    boolean_t needs_print = TRUE;
    char * write_ptr;
    char * cp;

restart:
    mp_disable_preemption();
    cpu_data_p = current_cpu_datap();
    cbp = (console_buf_t *)cpu_data_p->cpu_console_buf;
    if (console_suspended || cbp == NULL) {
        mp_enable_preemption();
        /*
         * Put directly if console ring is not initialized or we're heading into suspend.
         * Also do this if we're in the kernel debugger for a panic or stackshot.
         * If any of the other cores happened to halt while holding any of the console
         * locks, attempting to use the normal path will result in sadness.
         */
        _cnputs(&c, 1);
        return;
    }

    /*
     * add to stack buf
     * If the cpu buffer is full, we'll flush, then try
     * another put. If it fails a second time... screw
     * it.
     */
    if (needs_print && !cpu_buffer_put(cbp, c)) {
        write_ptr = console_ring_reserve_space(cpu_buffer_size(cbp));
        if (write_ptr == NULL) {
            /* no ring space: drain (requires preemption enabled) and retry from scratch */
            mp_enable_preemption();

            console_ring_try_empty();
            goto restart;
        }

        for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) {
            console_ring_put(&write_ptr, *cp);
        }
        console_ring_unreserve_space();

        cbp->buf_ptr = cbp->buf_base;
        cpu_buffer_put(cbp, c);
    }

    needs_print = FALSE;

    if (c != '\n') {
        /* line not complete yet: leave it staged in the cpu buffer */
        mp_enable_preemption();
        return;
    }

    /* We printed a newline, time to flush the CPU buffer to the global buffer */

    /*
     * Is there enough space in the shared ring buffer?
     * Try to empty if not.
     * Note, we want the entire local buffer to fit to
     * avoid another cpu interjecting.
     */

    write_ptr = console_ring_reserve_space(cpu_buffer_size(cbp));
    if (write_ptr == NULL) {
        mp_enable_preemption();

        console_ring_try_empty();
        goto restart;
    }

    for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) {
        console_ring_put(&write_ptr, *cp);
    }

    console_ring_unreserve_space();
    cbp->buf_ptr = cbp->buf_base;

    mp_enable_preemption();

    console_ring_try_empty();

    return;
}
562
563 int
564 _serial_getc(__unused int a, __unused int b, boolean_t wait, __unused boolean_t raw)
565 {
566 int c;
567 do {
568 c = serial_getc();
569 } while (wait && c < 0);
570
571 // Check for the NMI string
572 if (c == nmi_string[nmi_counter]) {
573 nmi_counter++;
574 if (nmi_counter == NMI_STRING_SIZE) {
575 // We've got the NMI string, now do an NMI
576 Debugger("Automatic NMI");
577 nmi_counter = 0;
578 return '\n';
579 }
580 } else if (c != -1) {
581 nmi_counter = 0;
582 }
583
584 return c;
585 }
586
/* console_ops putc adapter for the serial backend (first two args unused). */
static void
_serial_putc(__unused int a, __unused int b, int c)
{
    serial_putc((char)c);
}
592
593 int
594 cngetc(void)
595 {
596 uint32_t idx = get_cons_ops_index();
597 return cons_ops[idx].getc(0, 0, TRUE, FALSE);
598 }
599
600 int
601 cnmaygetc(void)
602 {
603 uint32_t idx = get_cons_ops_index();
604 return cons_ops[idx].getc(0, 0, FALSE, FALSE);
605 }
606
607 int
608 vcgetc(__unused int l, __unused int u, __unused boolean_t wait, __unused boolean_t raw)
609 {
610 char c;
611
612 if (0 == PE_stub_poll_input(0, &c)) {
613 return c;
614 } else {
615 return 0;
616 }
617 }
618
619 #ifdef CONFIG_XNUPOST
620 static uint32_t cons_test_ops_count = 0;
621
622 /*
623 * Try to do multiple cpu buffer allocs and free and intentionally
624 * allow for pre-emption.
625 */
626 static void
627 alloc_free_func(void * arg, wait_result_t wres __unused)
628 {
629 console_buf_t * cbp = NULL;
630 int count = (int)arg;
631
632 T_LOG("Doing %d iterations of console cpu alloc and free.", count);
633
634 while (count-- > 0) {
635 os_atomic_inc(&cons_test_ops_count, relaxed);
636 cbp = (console_buf_t *)console_cpu_alloc(0);
637 if (cbp == NULL) {
638 T_ASSERT_NOTNULL(cbp, "cpu allocation failed");
639 }
640 console_cpu_free(cbp);
641 cbp = NULL;
642 /* give chance to another thread to come in */
643 delay(10);
644 }
645 }
646
647 /*
648 * Log to console by multiple methods - printf, unbuffered write, console_write()
649 */
650 static void
651 log_to_console_func(void * arg __unused, wait_result_t wres __unused)
652 {
653 uint64_t thread_id = current_thread()->thread_id;
654 char somedata[10] = "123456789";
655 for (int i = 0; i < 26; i++) {
656 os_atomic_inc(&cons_test_ops_count, relaxed);
657 printf(" thid: %llu printf iteration %d\n", thread_id, i);
658 cnputc_unbuffered((char)('A' + i));
659 cnputc_unbuffered('\n');
660 console_write((char *)somedata, sizeof(somedata));
661 delay(10);
662 }
663 printf("finished the log_to_console_func operations\n\n");
664 }
665
/*
 * XNUPOST: run log_to_console_func on two threads concurrently (one spawned
 * kernel thread plus the caller) and wait for both to finish. Each run does
 * 26 iterations, hence the wait for cons_test_ops_count to reach 52.
 */
kern_return_t
console_serial_parallel_log_tests(void)
{
    thread_t thread;
    kern_return_t kr;
    cons_test_ops_count = 0;

    kr = kernel_thread_start(log_to_console_func, NULL, &thread);
    T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully");

    /* give the spawned thread a chance to get on-core */
    delay(100);

    log_to_console_func(NULL, 0);

    /* wait until other thread has also finished */
    while (cons_test_ops_count < 52) {
        delay(1000);
    }

    thread_deallocate(thread);
    T_LOG("parallel_logging tests is now complete. From this point forward we expect full lines\n");
    return KERN_SUCCESS;
}
689
690 kern_return_t
691 console_serial_alloc_rel_tests(void)
692 {
693 unsigned long i, free_buf_count = 0;
694 uint32_t * p;
695 console_buf_t * cbp;
696 thread_t thread;
697 kern_return_t kr;
698
699 T_LOG("doing alloc/release tests");
700
701 for (i = 0; i < MAX_CPU_SLOTS; i++) {
702 p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
703 cbp = (console_buf_t *)(void *)p;
704 /* p should either be allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
705 T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "");
706 if (*p == CPU_BUF_FREE_HEX) {
707 free_buf_count++;
708 }
709 }
710
711 T_ASSERT_GE_ULONG(free_buf_count, 2, "At least 2 buffers should be free");
712 cons_test_ops_count = 0;
713
714 kr = kernel_thread_start(alloc_free_func, (void *)1000, &thread);
715 T_ASSERT_EQ_INT(kr, KERN_SUCCESS, "kernel_thread_start returned successfully");
716
717 /* yeild cpu to give other thread chance to get on-core */
718 delay(100);
719
720 alloc_free_func((void *)1000, 0);
721
722 /* wait until other thread finishes its tasks */
723 while (cons_test_ops_count < 2000) {
724 delay(1000);
725 }
726
727 thread_deallocate(thread);
728 /* verify again that atleast 2 slots are free */
729 free_buf_count = 0;
730 for (i = 0; i < MAX_CPU_SLOTS; i++) {
731 p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
732 cbp = (console_buf_t *)(void *)p;
733 /* p should either be allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
734 T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "");
735 if (*p == CPU_BUF_FREE_HEX) {
736 free_buf_count++;
737 }
738 }
739 T_ASSERT_GE_ULONG(free_buf_count, 2, "At least 2 buffers should be free after alloc free tests");
740
741 return KERN_SUCCESS;
742 }
743
/*
 * XNUPOST: sanity-check the console ring configuration and exercise the
 * output paths — char-at-a-time printf, whole-string printf, and repeated
 * console_write calls. Primarily a consistency/no-crash test; the printed
 * output is checked by the harness or by eye.
 */
kern_return_t
console_serial_test(void)
{
    unsigned long i;
    char buffer[CPU_BUFFER_LEN];
    uint32_t * p;
    console_buf_t * cbp;

    T_LOG("Checking console_ring status.");
    T_ASSERT_EQ_INT(console_ring.len, KERN_CONSOLE_RING_SIZE, "Console ring size is not correct.");
    T_ASSERT_GT_INT(KERN_CONSOLE_BUF_SIZE, KERN_CONSOLE_RING_SIZE, "kernel console buffer size is < allocation.");

    /* select the next slot from the per cpu buffers at end of console_ring.buffer */
    for (i = 0; i < MAX_CPU_SLOTS; i++) {
        p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
        cbp = (console_buf_t *)(void *)p;
        /* p should either be allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
        T_ASSERT(*p == CPU_BUF_FREE_HEX || cbp->buf_base == &cbp->buf[0], "verified initialization of cpu buffers p=%p", (void *)p);
    }

    /* setup buffer to be chars: repeating '0'..'9', NUL-terminated */
    for (i = 0; i < CPU_BUFFER_LEN; i++) {
        buffer[i] = (char)('0' + (i % 10));
    }
    buffer[CPU_BUFFER_LEN - 1] = '\0';

    T_LOG("Printing %d char string to serial one char at a time.", CPU_BUFFER_LEN);
    for (i = 0; i < CPU_BUFFER_LEN; i++) {
        printf("%c", buffer[i]);
    }
    printf("End\n");
    T_LOG("Printing %d char string to serial as a whole", CPU_BUFFER_LEN);
    printf("%s\n", buffer);

    T_LOG("Using console_write call repeatedly for 100 iterations");
    for (i = 0; i < 100; i++) {
        console_write(&buffer[0], 14);
        if ((i % 6) == 0) {
            printf("\n");
        }
    }
    printf("\n");

    T_LOG("Using T_LOG to print buffer %s", buffer);
    return KERN_SUCCESS;
}
790 #endif