static void _serial_putc(int, int, int);
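/* Console backends (descriptive note): cons_ops[0] drives the serial port via
 * _serial_putc/_serial_getc, cons_ops[1] the video console via vcputc/vcgetc;
 * cons_ops_index selects the active entry at output time.
 */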
SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = {
- {
- .putc = _serial_putc, .getc = _serial_getc,
- },
- {
- .putc = vcputc, .getc = vcgetc,
- },
+ {
+ .putc = _serial_putc, .getc = _serial_getc,
+ },
+ {
+ .putc = vcputc, .getc = vcgetc,
+ },
};
SECURITY_READ_ONLY_EARLY(uint32_t) nconsops = (sizeof cons_ops / sizeof cons_ops[0]);
* take the panic when it reenables interrupts.
* Hopefully one day this is fixed so that this workaround is unnecessary.
*/
- if (state == TRUE)
+ if (state == TRUE) {
ml_spin_debug_clear_self();
+ }
#endif /* INTERRUPT_MASKED_DEBUG */
ml_set_interrupts_enabled(state);
}
int ret, i;
uint32_t * p;
- if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len))
+ if (!OSCompareAndSwap(0, KERN_CONSOLE_RING_SIZE, (UInt32 *)&console_ring.len)) {
return;
+ }
assert(console_ring.len > 0);
/* select the next slot from the per cpu buffers at end of console_ring.buffer */
for (i = 0; i < MAX_CPU_SLOTS; i++) {
p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
- if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p))
+ if (OSCompareAndSwap(CPU_BUF_FREE_HEX, 0, (UInt32 *)p)) {
break;
+ }
}
assert(i < MAX_CPU_SLOTS);
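/* Slot i is now owned by this CPU; console_cpu_free() releases it by
 * restoring the CPU_BUF_FREE_HEX tag that the compare-and-swap cleared.
 */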
{
assert((uintptr_t)buf > (uintptr_t)console_ring.buffer);
assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE);
- if (buf != NULL)
+ if (buf != NULL) {
*(uint32_t *)buf = CPU_BUF_FREE_HEX;
+ }
}
static inline int
if (console_ring.used < console_ring.len) {
console_ring.used++;
*console_ring.write_ptr++ = ch;
- if (console_ring.write_ptr - console_ring.buffer == console_ring.len)
+ if (console_ring.write_ptr - console_ring.buffer == console_ring.len) {
console_ring.write_ptr = console_ring.buffer;
+ }
return TRUE;
} else {
return FALSE;
#endif
mp_disable_preemption();
- if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks)) {
+ if (!hw_lock_to(&cnputc_lock, lock_timeout_ticks, LCK_GRP_NULL)) {
/* If we timed out on the lock, and we're in the debugger,
* copy lock data for debugging and break the lock.
*/
/* Since hw_lock_to takes a pre-emption count... */
mp_enable_preemption();
hw_lock_init(&cnputc_lock);
- hw_lock_lock(&cnputc_lock);
+ hw_lock_lock(&cnputc_lock, LCK_GRP_NULL);
} else {
panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock,
- _shadow_lock.lock_data, current_thread());
+ _shadow_lock.lock_data, current_thread());
}
}
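/* Echo the buffer to the active console backend; a '\r' is emitted ahead of
 * every '\n' so serial consoles receive a CRLF line ending.
 */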
while (size-- > 0) {
- cons_ops[cons_ops_index].putc(0, 0, *c);
- if (*c == '\n')
+ if (*c == '\n') {
cons_ops[cons_ops_index].putc(0, 0, '\r');
+ }
+ cons_ops[cons_ops_index].putc(0, 0, *c);
c++;
}
}
-void cnputcusr(char c)
+void
+cnputcusr(char c)
{
cnputsusr(&c, 1);
}
void
cnputsusr(char *s, int size)
{
-
if (size > 1) {
console_write(s, size);
return;
do {
#ifdef __x86_64__
- if (handle_tlb_flushes)
+ if (handle_tlb_flushes) {
handle_pending_TLB_flushes();
+ }
#endif /* __x86_64__ */
/*
* Try to get the read lock on the ring buffer to empty it.
* If this fails someone else is already emptying...
*/
- if (!simple_lock_try(&console_ring.read_lock)) {
+ if (!simple_lock_try(&console_ring.read_lock, LCK_GRP_NULL)) {
/*
* If multiple cores are spinning trying to empty the buffer,
* we may suffer lock starvation (get the read lock, but
boolean_t state = ml_set_interrupts_enabled(FALSE);
/* Indicate that we're in the process of writing a block of data to the console. */
- (void)hw_atomic_add(&console_output, 1);
+ os_atomic_inc(&console_output, relaxed);
- simple_lock_try_lock_loop(&console_ring.write_lock);
+ simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);
/* try small chunk at a time, so we allow writes from other cpus into the buffer */
nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE);
/* account for data to be read before wrap around */
size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
- if (nchars_out > size_before_wrap)
+ if (nchars_out > size_before_wrap) {
nchars_out = size_before_wrap;
+ }
if (nchars_out > 0) {
_cnputs(console_ring.read_ptr, nchars_out);
simple_unlock(&console_ring.write_lock);
- (void)hw_atomic_sub(&console_output, 1);
+ os_atomic_dec(&console_output, relaxed);
simple_unlock(&console_ring.read_lock);
* for far too long, break out. Except in panic/suspend cases
* where we should clear out full buffer.
*/
- if (!kernel_debugger_entry_count && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE))
+ if (!kernel_debugger_entry_count && !console_suspended && (total_chars_out >= MAX_TOTAL_FLUSH_SIZE)) {
break;
-
+ }
} while (nchars_out > 0);
}
int chunk_size = size;
int i = 0;
- if (size > console_ring.len)
+ if (size > console_ring.len) {
chunk_size = CPU_CONS_BUF_SIZE;
+ }
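/* Copy the string into the shared ring one chunk at a time, draining the ring
 * via console_ring_try_empty() whenever the next chunk will not fit.
 */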
while (size > 0) {
boolean_t state = ml_set_interrupts_enabled(FALSE);
- simple_lock_try_lock_loop(&console_ring.write_lock);
+ simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);
while (chunk_size > console_ring_space()) {
simple_unlock(&console_ring.write_lock);
console_restore_interrupts_state(state);
console_ring_try_empty();
state = ml_set_interrupts_enabled(FALSE);
- simple_lock_try_lock_loop(&console_ring.write_lock);
+ simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);
}
- for (i = 0; i < chunk_size; i++)
+ for (i = 0; i < chunk_size; i++) {
console_ring_put(str[i]);
+ }
str = &str[i];
size -= chunk_size;
* it.
*/
if (needs_print && !cpu_buffer_put(cbp, c)) {
- simple_lock_try_lock_loop(&console_ring.write_lock);
+ simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);
if (cpu_buffer_size(cbp) > console_ring_space()) {
simple_unlock(&console_ring.write_lock);
goto restart;
}
- for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
+ for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) {
console_ring_put(*cp);
+ }
cbp->buf_ptr = cbp->buf_base;
simple_unlock(&console_ring.write_lock);
}
/* We printed a newline, time to flush the CPU buffer to the global buffer */
- simple_lock_try_lock_loop(&console_ring.write_lock);
+ simple_lock_try_lock_loop(&console_ring.write_lock, LCK_GRP_NULL);
/*
* Is there enough space in the shared ring buffer?
goto restart;
}
- for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++)
+ for (cp = cbp->buf_base; cp < cbp->buf_ptr; cp++) {
console_ring_put(*cp);
+ }
cbp->buf_ptr = cbp->buf_base;
simple_unlock(&console_ring.write_lock);
{
char c;
- if (0 == (*PE_poll_input)(0, &c))
+ if (0 == PE_stub_poll_input(0, &c)) {
return c;
- else
+ } else {
return 0;
+ }
}
#ifdef CONFIG_XNUPOST
T_LOG("Doing %d iterations of console cpu alloc and free.", count);
while (count-- > 0) {
- (void)hw_atomic_add(&cons_test_ops_count, 1);
+ os_atomic_inc(&cons_test_ops_count, relaxed);
cbp = (console_buf_t *)console_cpu_alloc(0);
if (cbp == NULL) {
T_ASSERT_NOTNULL(cbp, "cpu allocation failed");
uint64_t thread_id = current_thread()->thread_id;
char somedata[10] = "123456789";
for (int i = 0; i < 26; i++) {
- (void)hw_atomic_add(&cons_test_ops_count, 1);
+ os_atomic_inc(&cons_test_ops_count, relaxed);
printf(" thid: %llu printf iteration %d\n", thread_id, i);
cnputc_unbuffered((char)('A' + i));
cnputc_unbuffered('\n');
T_LOG("Using console_write call repeatedly for 100 iterations");
for (i = 0; i < 100; i++) {
console_write(&buffer[0], 14);
- if ((i % 6) == 0)
+ if ((i % 6) == 0) {
printf("\n");
+ }
}
printf("\n");