2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
31 #include <i386/cpu_data.h>
32 #include <i386/bit_routines.h>
33 #include <i386/machine_cpu.h>
34 #include <i386/machine_routines.h>
35 #include <i386/misc_protos.h>
36 #include <i386/serial_io.h>
37 #endif /* __x86_64__ */
39 #include <libkern/OSAtomic.h>
40 #include <vm/vm_kern.h>
41 #include <vm/vm_map.h>
42 #include <console/video_console.h>
43 #include <console/serial_protos.h>
44 #include <kern/kalloc.h>
45 #include <kern/thread.h>
46 #include <kern/cpu_data.h>
47 #include <libkern/section_keywords.h>
49 #if __arm__ || __arm64__
50 #include <machine/machine_routines.h>
51 #include <arm/cpu_data_internal.h>
55 #include <tests/xnupost.h>
56 kern_return_t
console_serial_test(void);
57 kern_return_t
console_serial_alloc_rel_tests(void);
58 kern_return_t
console_serial_parallel_log_tests(void);
59 #define MAX_CPU_SLOTS (MAX_CPUS + 2)
63 #define MAX_CPU_SLOTS (MAX_CPUS)
72 decl_simple_lock_data(, read_lock
);
73 decl_simple_lock_data(, write_lock
);
76 hw_lock_data_t cnputc_lock
;
77 static volatile uint32_t console_output
= 0;
80 * New allocation mechanism for console buffers
81 * Total allocation: 1 * PAGE_SIZE
82 * - Each cpu gets CPU_CONS_BUF_SIZE buffer
83 * - Kernel wide console ring gets PAGE_SIZE - MAX_CPU_SLOTS * CPU_CONS_BUF_SIZE
85 * At the return from console_init() the memory is setup as follows:
86 * +----------------------------+-------------+-------------+-------------+-------------+
87 * |console ring buffer---------|f2eec075-----|f2eec075-----|f2eec075-----|f2eec075-----|
88 * +----------------------------+-------------+-------------+-------------+-------------+
89 * Each cpu allocation will find the first (f2eec075) and use that buffer.
93 #define CPU_CONS_BUF_SIZE 256
94 #define CPU_BUF_FREE_HEX 0xf2eec075
96 #define KERN_CONSOLE_BUF_SIZE vm_map_round_page(CPU_CONS_BUF_SIZE *(MAX_CPU_SLOTS + 1), PAGE_SIZE - 1)
97 #define KERN_CONSOLE_RING_SIZE (KERN_CONSOLE_BUF_SIZE - (CPU_CONS_BUF_SIZE * MAX_CPU_SLOTS))
100 * A serial line running at 115200 bps can output ~11.5 characters per millisecond.
101 * Synchronous serial logging with preemption+interrupts disabled fundamentally prevents us
102 * from hitting expected scheduling deadlines, but we can at least tone it down a bit.
104 * TODO: IOLog should use asynchronous serial logging instead of the synchronous serial console. (26555148)
106 * Keep interrupt disabled periods shorter than 1ms
108 #define MAX_INT_DISABLED_FLUSH_SIZE 8
109 #define MAX_TOTAL_FLUSH_SIZE (MAX(2, MAX_CPU_SLOTS) * CPU_CONS_BUF_SIZE)
111 typedef struct console_buf
{
115 #define CPU_BUFFER_LEN (CPU_CONS_BUF_SIZE - 3 * (sizeof(char *)))
116 char buf
[CPU_BUFFER_LEN
];
119 extern int serial_getc(void);
120 extern void serial_putc(char);
122 static void _serial_putc(int, int, int);
124 SECURITY_READ_ONLY_EARLY(struct console_ops
) cons_ops
[] = {
126 .putc
= _serial_putc
, .getc
= _serial_getc
,
129 .putc
= vcputc
, .getc
= vcgetc
,
133 SECURITY_READ_ONLY_EARLY(uint32_t) nconsops
= (sizeof cons_ops
/ sizeof cons_ops
[0]);
135 uint32_t cons_ops_index
= VC_CONS_OPS
;
137 #if defined(__x86_64__) || defined(__arm__)
138 // NMI static variables
139 #define NMI_STRING_SIZE 32
140 char nmi_string
[NMI_STRING_SIZE
] = "afDIGHr84A84jh19Kphgp428DNPdnapq";
141 static int nmi_counter
= 0;
144 static bool console_suspended
= false;
146 /* Wrapper for ml_set_interrupts_enabled */
148 console_restore_interrupts_state(boolean_t state
)
150 #if INTERRUPT_MASKED_DEBUG
152 * Serial console holds interrupts disabled for far too long
153 * and would trip the spin-debugger. If we are about to reenable
154 * interrupts then clear the timer and avoid panicking on the delay.
155 * Otherwise, let the code that printed with interrupt disabled
156 * take the panic when it reenables interrupts.
157 * Hopefully one day this is fixed so that this workaround is unnecessary.
160 ml_spin_debug_clear_self();
162 #endif /* INTERRUPT_MASKED_DEBUG */
163 ml_set_interrupts_enabled(state
);
167 console_ring_lock_init(void)
169 simple_lock_init(&console_ring
.read_lock
, 0);
170 simple_lock_init(&console_ring
.write_lock
, 0);
179 if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE
, (UInt32
*)&console_ring
.len
)) {
183 assert(console_ring
.len
> 0);
185 ret
= kmem_alloc(kernel_map
, (vm_offset_t
*)&console_ring
.buffer
, KERN_CONSOLE_BUF_SIZE
, VM_KERN_MEMORY_OSFMK
);
186 if (ret
!= KERN_SUCCESS
) {
187 panic("console_ring_init() failed to allocate ring buffer, error %d\n", ret
);
190 /* setup memory for per cpu console buffers */
191 for (i
= 0; i
< MAX_CPU_SLOTS
; i
++) {
192 p
= (uint32_t *)((uintptr_t)console_ring
.buffer
+ console_ring
.len
+ (i
* sizeof(console_buf_t
)));
193 *p
= CPU_BUF_FREE_HEX
;
196 console_ring
.used
= 0;
197 console_ring
.read_ptr
= console_ring
.buffer
;
198 console_ring
.write_ptr
= console_ring
.buffer
;
199 console_ring_lock_init();
200 hw_lock_init(&cnputc_lock
);
204 console_cpu_alloc(__unused boolean_t boot_processor
)
211 assert(console_ring
.buffer
!= NULL
);
213 /* select the next slot from the per cpu buffers at end of console_ring.buffer */
214 for (i
= 0; i
< MAX_CPU_SLOTS
; i
++) {
215 p
= (uint32_t *)((uintptr_t)console_ring
.buffer
+ console_ring
.len
+ (i
* sizeof(console_buf_t
)));
216 if (OSCompareAndSwap(CPU_BUF_FREE_HEX
, 0, (UInt32
*)p
)) {
220 assert(i
< MAX_CPU_SLOTS
);
222 cbp
= (console_buf_t
*)(uintptr_t)p
;
223 if ((uintptr_t)cbp
>= (uintptr_t)console_ring
.buffer
+ KERN_CONSOLE_BUF_SIZE
) {
224 printf("console_cpu_alloc() failed to allocate cpu buffer\n");
228 cbp
->buf_base
= (char *)&cbp
->buf
;
229 cbp
->buf_ptr
= cbp
->buf_base
;
230 cbp
->buf_end
= cbp
->buf_base
+ CPU_BUFFER_LEN
;
235 console_cpu_free(void * buf
)
237 assert((uintptr_t)buf
> (uintptr_t)console_ring
.buffer
);
238 assert((uintptr_t)buf
< (uintptr_t)console_ring
.buffer
+ KERN_CONSOLE_BUF_SIZE
);
240 *(uint32_t *)buf
= CPU_BUF_FREE_HEX
;
245 console_ring_space(void)
247 return console_ring
.len
- console_ring
.used
;
251 console_ring_put(char ch
)
253 if (console_ring
.used
< console_ring
.len
) {
255 *console_ring
.write_ptr
++ = ch
;
256 if (console_ring
.write_ptr
- console_ring
.buffer
== console_ring
.len
) {
257 console_ring
.write_ptr
= console_ring
.buffer
;
265 static inline boolean_t
266 cpu_buffer_put(console_buf_t
* cbp
, char ch
)
268 if (ch
!= '\0' && cbp
->buf_ptr
< cbp
->buf_end
) {
269 *(cbp
->buf_ptr
++) = ch
;
277 cpu_buffer_size(console_buf_t
* cbp
)
279 return (int)(cbp
->buf_ptr
- cbp
->buf_base
);
283 _cnputs(char * c
, int size
)
285 /* The console device output routines are assumed to be
289 uint32_t lock_timeout_ticks
= UINT32_MAX
;
291 uint32_t lock_timeout_ticks
= LockTimeOut
* 2; // 250ms is not enough, 500 is just right
294 mp_disable_preemption();
295 if (!hw_lock_to(&cnputc_lock
, lock_timeout_ticks
, LCK_GRP_NULL
)) {
296 /* If we timed out on the lock, and we're in the debugger,
297 * copy lock data for debugging and break the lock.
299 hw_lock_data_t _shadow_lock
;
300 memcpy(&_shadow_lock
, &cnputc_lock
, sizeof(cnputc_lock
));
301 if (kernel_debugger_entry_count
) {
302 /* Since hw_lock_to takes a pre-emption count...*/
303 mp_enable_preemption();
304 hw_lock_init(&cnputc_lock
);
305 hw_lock_lock(&cnputc_lock
, LCK_GRP_NULL
);
307 panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock
,
308 _shadow_lock
.lock_data
, current_thread());
314 cons_ops
[cons_ops_index
].putc(0, 0, '\r');
316 cons_ops
[cons_ops_index
].putc(0, 0, *c
);
320 hw_lock_unlock(&cnputc_lock
);
321 mp_enable_preemption();
/* Emit a single character directly to the console device, bypassing the
 * per-CPU staging and the shared ring.
 * NOTE(review): body was dropped by the paste; reconstructed as the
 * one-line _cnputs call — verify against upstream. */
void
cnputc_unbuffered(char c)
{
    _cnputs(&c, 1);
}
338 cnputsusr(char *s
, int size
)
341 console_write(s
, size
);
347 /* Spin (with pre-emption enabled) waiting for console_ring_try_empty()
348 * to complete output. There is a small window here where we could
349 * end up with a stale value of console_output, but it's unlikely,
350 * and _cnputs(), which outputs to the console device, is internally
351 * synchronized. There's something of a conflict between the
352 * character-at-a-time (with pre-emption enabled) unbuffered
353 * output model here, and the buffered output from cnputc(),
354 * whose consumers include printf() ( which outputs a sequence
355 * with pre-emption disabled, and should be safe to call with
356 * interrupts off); we don't want to disable pre-emption indefinitely
357 * here, and spinlocks and mutexes are inappropriate.
359 while (console_output
!= 0) {
364 * We disable interrupts to avoid issues caused by rendevous IPIs
365 * and an interruptible core holding the lock while an uninterruptible
366 * core wants it. Stackshot is the prime example of this.
368 state
= ml_set_interrupts_enabled(FALSE
);
370 console_restore_interrupts_state(state
);
374 console_ring_try_empty(void)
377 boolean_t handle_tlb_flushes
= (ml_get_interrupts_enabled() == FALSE
);
378 #endif /* __x86_64__ */
381 int total_chars_out
= 0;
382 int size_before_wrap
= 0;
386 if (handle_tlb_flushes
) {
387 handle_pending_TLB_flushes();
389 #endif /* __x86_64__ */
392 * Try to get the read lock on the ring buffer to empty it.
393 * If this fails someone else is already emptying...
395 if (!simple_lock_try(&console_ring
.read_lock
, LCK_GRP_NULL
)) {
397 * If multiple cores are spinning trying to empty the buffer,
398 * we may suffer lock starvation (get the read lock, but
399 * never the write lock, with other cores unable to get the
400 * read lock). As a result, insert a delay on failure, to
401 * let other cores have a turn.
407 boolean_t state
= ml_set_interrupts_enabled(FALSE
);
409 /* Indicate that we're in the process of writing a block of data to the console. */
410 os_atomic_inc(&console_output
, relaxed
);
412 simple_lock_try_lock_loop(&console_ring
.write_lock
, LCK_GRP_NULL
);
414 /* try small chunk at a time, so we allow writes from other cpus into the buffer */
415 nchars_out
= MIN(console_ring
.used
, MAX_INT_DISABLED_FLUSH_SIZE
);
417 /* account for data to be read before wrap around */
418 size_before_wrap
= (int)((console_ring
.buffer
+ console_ring
.len
) - console_ring
.read_ptr
);
419 if (nchars_out
> size_before_wrap
) {
420 nchars_out
= size_before_wrap
;
423 if (nchars_out
> 0) {
424 _cnputs(console_ring
.read_ptr
, nchars_out
);
425 console_ring
.read_ptr
=
426 console_ring
.buffer
+ ((console_ring
.read_ptr
- console_ring
.buffer
+ nchars_out
) % console_ring
.len
);
427 console_ring
.used
-= nchars_out
;
428 total_chars_out
+= nchars_out
;
431 simple_unlock(&console_ring
.write_lock
);
433 os_atomic_dec(&console_output
, relaxed
);
435 simple_unlock(&console_ring
.read_lock
);
437 console_restore_interrupts_state(state
);
440 * In case we end up being the console drain thread
441 * for far too long, break out. Except in panic/suspend cases
442 * where we should clear out full buffer.
444 if (!kernel_debugger_entry_count
&& !console_suspended
&& (total_chars_out
>= MAX_TOTAL_FLUSH_SIZE
)) {
447 } while (nchars_out
> 0);
454 console_suspended
= true;
455 console_ring_try_empty();
461 console_suspended
= false;
465 console_write(char * str
, int size
)
468 int chunk_size
= size
;
471 if (size
> console_ring
.len
) {
472 chunk_size
= CPU_CONS_BUF_SIZE
;
476 boolean_t state
= ml_set_interrupts_enabled(FALSE
);
478 simple_lock_try_lock_loop(&console_ring
.write_lock
, LCK_GRP_NULL
);
479 while (chunk_size
> console_ring_space()) {
480 simple_unlock(&console_ring
.write_lock
);
481 console_restore_interrupts_state(state
);
483 console_ring_try_empty();
485 state
= ml_set_interrupts_enabled(FALSE
);
486 simple_lock_try_lock_loop(&console_ring
.write_lock
, LCK_GRP_NULL
);
489 for (i
= 0; i
< chunk_size
; i
++) {
490 console_ring_put(str
[i
]);
495 simple_unlock(&console_ring
.write_lock
);
496 console_restore_interrupts_state(state
);
499 console_ring_try_empty();
506 cpu_data_t
* cpu_data_p
;
508 boolean_t needs_print
= TRUE
;
512 mp_disable_preemption();
513 cpu_data_p
= current_cpu_datap();
514 cbp
= (console_buf_t
*)cpu_data_p
->cpu_console_buf
;
515 if (console_suspended
|| cbp
== NULL
) {
516 mp_enable_preemption();
517 /* Put directly if console ring is not initialized or we're heading into suspend */
523 /* Is there a panic backtrace going on? */
524 if (cpu_data_p
->PAB_active
) {
525 /* If another processor was in the process of emptying the
526 * console ring buffer when it received the panic backtrace
527 * signal, that processor will be spinning in DebugXCall()
528 * waiting for the panicking processor to finish printing
529 * the backtrace. But panicking processor will never
530 * be able to obtain the ring buffer lock since it is
531 * owned by a processor that's spinning in DebugXCall().
532 * Blow away any locks that other processors may have on
533 * the console ring buffer so that the backtrace can
536 console_ring_lock_init();
538 #endif /* __x86_64__ */
540 state
= ml_set_interrupts_enabled(FALSE
);
544 * If the cpu buffer is full, we'll flush, then try
545 * another put. If it fails a second time... screw
548 if (needs_print
&& !cpu_buffer_put(cbp
, c
)) {
549 simple_lock_try_lock_loop(&console_ring
.write_lock
, LCK_GRP_NULL
);
551 if (cpu_buffer_size(cbp
) > console_ring_space()) {
552 simple_unlock(&console_ring
.write_lock
);
553 console_restore_interrupts_state(state
);
554 mp_enable_preemption();
556 console_ring_try_empty();
560 for (cp
= cbp
->buf_base
; cp
< cbp
->buf_ptr
; cp
++) {
561 console_ring_put(*cp
);
563 cbp
->buf_ptr
= cbp
->buf_base
;
564 simple_unlock(&console_ring
.write_lock
);
566 cpu_buffer_put(cbp
, c
);
572 console_restore_interrupts_state(state
);
573 mp_enable_preemption();
577 /* We printed a newline, time to flush the CPU buffer to the global buffer */
578 simple_lock_try_lock_loop(&console_ring
.write_lock
, LCK_GRP_NULL
);
581 * Is there enough space in the shared ring buffer?
582 * Try to empty if not.
583 * Note, we want the entire local buffer to fit to
584 * avoid another cpu interjecting.
587 if (cpu_buffer_size(cbp
) > console_ring_space()) {
588 simple_unlock(&console_ring
.write_lock
);
589 console_restore_interrupts_state(state
);
590 mp_enable_preemption();
592 console_ring_try_empty();
597 for (cp
= cbp
->buf_base
; cp
< cbp
->buf_ptr
; cp
++) {
598 console_ring_put(*cp
);
601 cbp
->buf_ptr
= cbp
->buf_base
;
602 simple_unlock(&console_ring
.write_lock
);
604 console_restore_interrupts_state(state
);
605 mp_enable_preemption();
607 console_ring_try_empty();
613 _serial_getc(__unused
int a
, __unused
int b
, boolean_t wait
, __unused boolean_t raw
)
618 } while (wait
&& c
< 0);
620 #if defined(__x86_64__) || defined(__arm__)
621 // Check for the NMI string
622 if (c
== nmi_string
[nmi_counter
]) {
624 if (nmi_counter
== NMI_STRING_SIZE
) {
625 // We've got the NMI string, now do an NMI
626 Debugger("Automatic NMI");
630 } else if (c
!= -1) {
639 _serial_putc(__unused
int a
, __unused
int b
, int c
)
647 return cons_ops
[cons_ops_index
].getc(0, 0, TRUE
, FALSE
);
653 return cons_ops
[cons_ops_index
].getc(0, 0, FALSE
, FALSE
);
657 vcgetc(__unused
int l
, __unused
int u
, __unused boolean_t wait
, __unused boolean_t raw
)
661 if (0 == PE_stub_poll_input(0, &c
)) {
668 #ifdef CONFIG_XNUPOST
669 static uint32_t cons_test_ops_count
= 0;
672 * Try to do multiple cpu buffer allocs and free and intentionally
673 * allow for pre-emption.
676 alloc_free_func(void * arg
, wait_result_t wres __unused
)
678 console_buf_t
* cbp
= NULL
;
679 int count
= (int)arg
;
681 T_LOG("Doing %d iterations of console cpu alloc and free.", count
);
683 while (count
-- > 0) {
684 os_atomic_inc(&cons_test_ops_count
, relaxed
);
685 cbp
= (console_buf_t
*)console_cpu_alloc(0);
687 T_ASSERT_NOTNULL(cbp
, "cpu allocation failed");
689 console_cpu_free(cbp
);
691 /* give chance to another thread to come in */
697 * Log to console by multiple methods - printf, unbuffered write, console_write()
700 log_to_console_func(void * arg __unused
, wait_result_t wres __unused
)
702 uint64_t thread_id
= current_thread()->thread_id
;
703 char somedata
[10] = "123456789";
704 for (int i
= 0; i
< 26; i
++) {
705 os_atomic_inc(&cons_test_ops_count
, relaxed
);
706 printf(" thid: %llu printf iteration %d\n", thread_id
, i
);
707 cnputc_unbuffered((char)('A' + i
));
708 cnputc_unbuffered('\n');
709 console_write((char *)somedata
, sizeof(somedata
));
712 printf("finished the log_to_console_func operations\n\n");
716 console_serial_parallel_log_tests(void)
720 cons_test_ops_count
= 0;
722 kr
= kernel_thread_start(log_to_console_func
, NULL
, &thread
);
723 T_ASSERT_EQ_INT(kr
, KERN_SUCCESS
, "kernel_thread_start returned successfully");
727 log_to_console_func(NULL
, 0);
729 /* wait until other thread has also finished */
730 while (cons_test_ops_count
< 52) {
734 thread_deallocate(thread
);
735 T_LOG("parallel_logging tests is now complete. From this point forward we expect full lines\n");
740 console_serial_alloc_rel_tests(void)
742 unsigned long i
, free_buf_count
= 0;
748 T_LOG("doing alloc/release tests");
750 for (i
= 0; i
< MAX_CPU_SLOTS
; i
++) {
751 p
= (uint32_t *)((uintptr_t)console_ring
.buffer
+ console_ring
.len
+ (i
* sizeof(console_buf_t
)));
752 cbp
= (console_buf_t
*)(void *)p
;
753 /* p should either be allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
754 T_ASSERT(*p
== CPU_BUF_FREE_HEX
|| cbp
->buf_base
== &cbp
->buf
[0], "");
755 if (*p
== CPU_BUF_FREE_HEX
) {
760 T_ASSERT_GE_ULONG(free_buf_count
, 2, "At least 2 buffers should be free");
761 cons_test_ops_count
= 0;
763 kr
= kernel_thread_start(alloc_free_func
, (void *)1000, &thread
);
764 T_ASSERT_EQ_INT(kr
, KERN_SUCCESS
, "kernel_thread_start returned successfully");
766 /* yeild cpu to give other thread chance to get on-core */
769 alloc_free_func((void *)1000, 0);
771 /* wait until other thread finishes its tasks */
772 while (cons_test_ops_count
< 2000) {
776 thread_deallocate(thread
);
777 /* verify again that atleast 2 slots are free */
779 for (i
= 0; i
< MAX_CPU_SLOTS
; i
++) {
780 p
= (uint32_t *)((uintptr_t)console_ring
.buffer
+ console_ring
.len
+ (i
* sizeof(console_buf_t
)));
781 cbp
= (console_buf_t
*)(void *)p
;
782 /* p should either be allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
783 T_ASSERT(*p
== CPU_BUF_FREE_HEX
|| cbp
->buf_base
== &cbp
->buf
[0], "");
784 if (*p
== CPU_BUF_FREE_HEX
) {
788 T_ASSERT_GE_ULONG(free_buf_count
, 2, "At least 2 buffers should be free after alloc free tests");
794 console_serial_test(void)
797 char buffer
[CPU_BUFFER_LEN
];
801 T_LOG("Checking console_ring status.");
802 T_ASSERT_EQ_INT(console_ring
.len
, KERN_CONSOLE_RING_SIZE
, "Console ring size is not correct.");
803 T_ASSERT_GT_INT(KERN_CONSOLE_BUF_SIZE
, KERN_CONSOLE_RING_SIZE
, "kernel console buffer size is < allocation.");
805 /* select the next slot from the per cpu buffers at end of console_ring.buffer */
806 for (i
= 0; i
< MAX_CPU_SLOTS
; i
++) {
807 p
= (uint32_t *)((uintptr_t)console_ring
.buffer
+ console_ring
.len
+ (i
* sizeof(console_buf_t
)));
808 cbp
= (console_buf_t
*)(void *)p
;
809 /* p should either be allocated cpu buffer or have CPU_BUF_FREE_HEX in it */
810 T_ASSERT(*p
== CPU_BUF_FREE_HEX
|| cbp
->buf_base
== &cbp
->buf
[0], "verified initialization of cpu buffers p=%p", (void *)p
);
813 /* setup buffer to be chars */
814 for (i
= 0; i
< CPU_BUFFER_LEN
; i
++) {
815 buffer
[i
] = (char)('0' + (i
% 10));
817 buffer
[CPU_BUFFER_LEN
- 1] = '\0';
819 T_LOG("Printing %d char string to serial one char at a time.", CPU_BUFFER_LEN
);
820 for (i
= 0; i
< CPU_BUFFER_LEN
; i
++) {
821 printf("%c", buffer
[i
]);
824 T_LOG("Printing %d char string to serial as a whole", CPU_BUFFER_LEN
);
825 printf("%s\n", buffer
);
827 T_LOG("Using console_write call repeatedly for 100 iterations");
828 for (i
= 0; i
< 100; i
++) {
829 console_write(&buffer
[0], 14);
836 T_LOG("Using T_LOG to print buffer %s", buffer
);