static void _serial_putc(int, int, int);
SECURITY_READ_ONLY_EARLY(struct console_ops) cons_ops[] = {
	{
		.putc = _serial_putc, .getc = _serial_getc,
	},
	{
		.putc = vcputc, .getc = vcgetc,
	},
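/*
 * Illustrative sketch (not xnu source): cons_ops[] above is a
 * function-pointer dispatch table, so the active console backend
 * (serial vs. video) can be swapped by index. All names below are
 * hypothetical stand-ins.
 */
#include <stdio.h>

struct demo_console_ops {
	void (*putc)(char c);
	int (*getc)(void);
};

static void demo_serial_putc(char c) { putchar(c); }
static int demo_serial_getc(void) { return getchar(); }

static struct demo_console_ops demo_cons_ops[] = {
	{ .putc = demo_serial_putc, .getc = demo_serial_getc },
};

static unsigned int demo_ops_index = 0; /* hypothetical backend selector */

int main(void)
{
	/* All console I/O funnels through the selected ops entry. */
	demo_cons_ops[demo_ops_index].putc('A');
	demo_cons_ops[demo_ops_index].putc('\n');
	return 0;
}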
* take the panic when it reenables interrupts.
* Hopefully one day this is fixed so that this workaround is unnecessary.
*/
/* select the next slot from the per cpu buffers at end of console_ring.buffer */
for (i = 0; i < MAX_CPU_SLOTS; i++) {
p = (uint32_t *)((uintptr_t)console_ring.buffer + console_ring.len + (i * sizeof(console_buf_t)));
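/*
 * Illustrative sketch (not xnu source): the per-CPU slots live in the
 * same allocation as the ring, starting at offset console_ring.len and
 * spaced sizeof(console_buf_t) apart. Sizes and names here are
 * hypothetical stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DEMO_RING_LEN  1024
#define DEMO_CPU_SLOTS 4
#define DEMO_SLOT_SIZE 256 /* stand-in for sizeof(console_buf_t) */

int main(void)
{
	/* One allocation: ring storage, then one fixed slot per CPU. */
	char *buffer = malloc(DEMO_RING_LEN + DEMO_CPU_SLOTS * DEMO_SLOT_SIZE);
	if (buffer == NULL) {
		return 1;
	}
	for (int i = 0; i < DEMO_CPU_SLOTS; i++) {
		uintptr_t slot = (uintptr_t)buffer + DEMO_RING_LEN +
		    (uintptr_t)i * DEMO_SLOT_SIZE;
		printf("cpu %d slot at offset %ld\n", i,
		    (long)(slot - (uintptr_t)buffer));
	}
	free(buffer);
	return 0;
}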
{
assert((uintptr_t)buf > (uintptr_t)console_ring.buffer);
assert((uintptr_t)buf < (uintptr_t)console_ring.buffer + KERN_CONSOLE_BUF_SIZE);
/* If we timed out on the lock, and we're in the debugger,
* copy lock data for debugging and break the lock.
*/
} else {
panic("Lock acquire timeout in _cnputs() lock=%p, lock owner thread=0x%lx, current_thread: %p\n", &_shadow_lock,
#endif /* __x86_64__ */
/*
* Try to get the read lock on the ring buffer to empty it.
* If this fails someone else is already emptying...
*/
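/*
 * Illustrative sketch (not xnu source): a try-lock makes one CPU the
 * drainer while the rest keep appending; losing the race is not an
 * error. Names are hypothetical.
 */
#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag demo_read_lock = ATOMIC_FLAG_INIT;

static void demo_drain_ring(void)
{
	/* ... copy bytes out of the ring to the hardware console ... */
}

static bool demo_try_drain(void)
{
	if (atomic_flag_test_and_set_explicit(&demo_read_lock,
	    memory_order_acquire)) {
		return false; /* someone else is already emptying */
	}
	demo_drain_ring();
	atomic_flag_clear_explicit(&demo_read_lock, memory_order_release);
	return true;
}

int main(void)
{
	return demo_try_drain() ? 0 : 1;
}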
/*
* If multiple cores are spinning trying to empty the buffer,
* we may suffer lock starvation (get the read lock, but
/* Indicate that we're in the process of writing a block of data to the console. */
(void)hw_atomic_add(&console_output, 1);
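/*
 * Illustrative sketch (not xnu source): an atomic counter advertises
 * in-progress console output so other paths (e.g. a panic handler) can
 * wait for it to drain. C11 atomics stand in for hw_atomic_add(); names
 * are hypothetical.
 */
#include <stdatomic.h>

static atomic_uint demo_console_output;

static void demo_write_block(const char *s, int len)
{
	atomic_fetch_add(&demo_console_output, 1); /* entering output */
	/* ... push bytes to the hardware console ... */
	(void)s; (void)len;
	atomic_fetch_sub(&demo_console_output, 1); /* done */
}

int main(void)
{
	demo_write_block("hello\n", 6);
	/* Elsewhere, a waiter would spin until demo_console_output == 0. */
	return 0;
}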
/* try a small chunk at a time, so we allow writes from other CPUs into the buffer */
nchars_out = MIN(console_ring.used, MAX_INT_DISABLED_FLUSH_SIZE);
/* account for data to be read before wrap around */
size_before_wrap = (int)((console_ring.buffer + console_ring.len) - console_ring.read_ptr);
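/*
 * Illustrative sketch (not xnu source): bound each flush to a small
 * chunk, and clamp it to the bytes remaining before the wrap point so a
 * single contiguous read never runs off the end of the ring. Constants
 * and names are hypothetical.
 */
#include <stdio.h>

#define DEMO_RING_LEN  16
#define DEMO_MAX_FLUSH 4

static char demo_ring[DEMO_RING_LEN];
static char *demo_read_ptr = demo_ring;
static int demo_used = 10;

int main(void)
{
	while (demo_used > 0) {
		int nchars_out = demo_used < DEMO_MAX_FLUSH ?
		    demo_used : DEMO_MAX_FLUSH;
		int size_before_wrap =
		    (int)((demo_ring + DEMO_RING_LEN) - demo_read_ptr);
		if (nchars_out > size_before_wrap) {
			nchars_out = size_before_wrap; /* stop at the wrap */
		}
		/* ... emit nchars_out bytes starting at demo_read_ptr ... */
		demo_read_ptr += nchars_out;
		if (demo_read_ptr == demo_ring + DEMO_RING_LEN) {
			demo_read_ptr = demo_ring; /* wrap to the start */
		}
		demo_used -= nchars_out;
		printf("flushed %d, %d left\n", nchars_out, demo_used);
	}
	return 0;
}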
* for far too long, break out. Except in panic/suspend cases
* where we should clear out full buffer.
*/
while (chunk_size > console_ring_space()) {
simple_unlock(&console_ring.write_lock);
console_restore_interrupts_state(state);
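/*
 * Illustrative sketch (not xnu source): when the ring lacks space for
 * the chunk, drop the write lock before spinning so the drainer can
 * make progress, then retake it and recheck. C11 atomics stand in for
 * the kernel's simple locks; names are hypothetical.
 */
#include <stdatomic.h>

static atomic_flag demo_write_lock = ATOMIC_FLAG_INIT;
static atomic_int demo_space = 8; /* free bytes in the ring */

static void demo_lock(atomic_flag *l)
{
	while (atomic_flag_test_and_set_explicit(l, memory_order_acquire)) {
	}
}

static void demo_unlock(atomic_flag *l)
{
	atomic_flag_clear_explicit(l, memory_order_release);
}

static void demo_reserve(int chunk_size)
{
	demo_lock(&demo_write_lock);
	while (chunk_size > atomic_load(&demo_space)) {
		/* Release, give the drainer a chance, then retry. */
		demo_unlock(&demo_write_lock);
		demo_lock(&demo_write_lock);
	}
	atomic_fetch_sub(&demo_space, chunk_size);
	demo_unlock(&demo_write_lock);
}

int main(void)
{
	demo_reserve(4);
	return 0;
}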
T_LOG("Using console_write call repeatedly for 100 iterations");
for (i = 0; i < 100; i++) {
console_write(&buffer[0], 14);
T_LOG("Using console_write call repeatedly for 100 iterations");
for (i = 0; i < 100; i++) {
console_write(&buffer[0], 14);