/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995 NeXT Computer, Inc. All Rights Reserved */
/*
 * Copyright (c) 1982, 1986, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)subr_log.c	8.3 (Berkeley) 2/14/95
 */
/*
 * Error log buffer for kernel printf's.
 */
#include <machine/atomic.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc_internal.h>
#include <sys/vnode.h>

#include <firehose/tracepoint_private.h>
#include <firehose/chunk_private.h>
#include <firehose/ioctl_private.h>
#include <os/firehose_buffer_private.h>

#include <os/log_private.h>
#include <sys/ioctl.h>
#include <sys/msgbuf.h>
#include <sys/file_internal.h>
#include <sys/errno.h>
#include <sys/select.h>
#include <sys/kernel.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/simple_lock.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <kern/kalloc.h>
#include <pexpert/pexpert.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <vm/vm_kern.h>
#include <kern/task.h>
#include <kern/locks.h>
/* XXX should be in a common header somewhere */
extern void logwakeup(struct msgbuf *);
extern void oslogwakeup(void);
extern void oslog_streamwakeup(void);
static void oslog_streamwakeup_locked(void);
SECURITY_READ_ONLY_LATE(vm_offset_t) kernel_firehose_addr = 0;
SECURITY_READ_ONLY_LATE(uint8_t) __firehose_buffer_kernel_chunk_count =
    FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT;
SECURITY_READ_ONLY_LATE(uint8_t) __firehose_num_kernel_io_pages =
    FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES;
/* log message counters for streaming mode */
uint32_t oslog_s_streamed_msgcount = 0;
uint32_t oslog_s_dropped_msgcount = 0;
extern uint32_t oslog_s_error_count;

uint32_t oslog_msgbuf_dropped_charcount = 0;
#define LOG_RDPRI   (PZERO + 1)

#define LOG_NBIO    0x02
#define LOG_ASYNC   0x04
#define LOG_RDWAIT  0x08
/* All globals should be accessed under bsd_log_lock() or bsd_log_lock_safe() */
static char amsg_bufc[1024];
static struct msgbuf aslbuf = {.msg_magic = MSG_MAGIC, .msg_size = sizeof(amsg_bufc), .msg_bufx = 0, .msg_bufr = 0, .msg_bufc = amsg_bufc};
struct msgbuf *aslbufp __attribute__((used)) = &aslbuf;
/* logsoftc only valid while log_open=1 */
struct logsoftc {
    int             sc_state;   /* see above for possibilities */
    struct selinfo  sc_selp;    /* thread waiting for select */
    int             sc_pgid;    /* process/group for async I/O */
    struct msgbuf   *sc_mbp;
} logsoftc;
char smsg_bufc[CONFIG_MSG_BSIZE]; /* static buffer */
char oslog_stream_bufc[FIREHOSE_CHUNK_SIZE]; /* static buffer */
struct firehose_chunk_s oslog_boot_buf = {
    .fc_pos = {
        .fcp_next_entry_offs = offsetof(struct firehose_chunk_s, fc_data),
        .fcp_private_offs = FIREHOSE_CHUNK_SIZE,
        .fcp_refcnt = 1, // indicate that there is a writer to this chunk
        .fcp_stream = firehose_stream_persist,
        .fcp_flag_io = 1, // for now, let's assume this is coming from the io bank
    },
}; /* static buffer */
firehose_chunk_t firehose_boot_chunk = &oslog_boot_buf;
struct msgbuf msgbuf = {.msg_magic = MSG_MAGIC, .msg_size = sizeof(smsg_bufc), .msg_bufx = 0, .msg_bufr = 0, .msg_bufc = smsg_bufc};
struct msgbuf oslog_stream_buf = {.msg_magic = MSG_MAGIC, .msg_size = 0, .msg_bufx = 0, .msg_bufr = 0, .msg_bufc = NULL};
struct msgbuf *msgbufp __attribute__((used)) = &msgbuf;
struct msgbuf *oslog_streambufp __attribute__((used)) = &oslog_stream_buf;
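
/*
 * Each struct msgbuf above is a simple ring buffer: msg_bufx is the next
 * write index, msg_bufr the next read index, and both wrap at msg_size.
 * The buffer is considered empty when msg_bufr == msg_bufx (see logread()
 * and log_putc_locked() below).
 */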
// List entries for keeping track of the streaming buffer
static oslog_stream_buf_entry_t oslog_stream_buf_entries;
#define OSLOG_NUM_STREAM_ENTRIES    64
#define OSLOG_STREAM_BUF_SIZE       4096
int os_log_wakeup = 0;
int oslog_stream_open = 0;
int oslog_stream_buf_bytesavail = 0;
int oslog_stream_buf_size = OSLOG_STREAM_BUF_SIZE;
int oslog_stream_num_entries = OSLOG_NUM_STREAM_ENTRIES;
/* oslogsoftc only valid while oslog_open=1 */
struct oslogsoftc {
    int             sc_state;   /* see above for possibilities */
    struct selinfo  sc_selp;    /* thread waiting for select */
    int             sc_pgid;    /* process/group for async I/O */
} oslogsoftc;
struct oslog_streamsoftc {
    int             sc_state;   /* see above for possibilities */
    struct selinfo  sc_selp;    /* thread waiting for select */
    int             sc_pgid;    /* process/group for async I/O */
} oslog_streamsoftc;
STAILQ_HEAD(, oslog_stream_buf_entry_s) oslog_stream_free_head =
    STAILQ_HEAD_INITIALIZER(oslog_stream_free_head);
STAILQ_HEAD(, oslog_stream_buf_entry_s) oslog_stream_buf_head =
    STAILQ_HEAD_INITIALIZER(oslog_stream_buf_head);
/* defined in osfmk/kern/printf.c */
extern bool bsd_log_lock(bool);
extern void bsd_log_lock_safe(void);
extern void bsd_log_unlock(void);
LCK_GRP_DECLARE(oslog_stream_lock_grp, "oslog streaming");
LCK_SPIN_DECLARE(oslog_stream_lock, &oslog_stream_lock_grp);
#define stream_lock()   lck_spin_lock(&oslog_stream_lock)
#define stream_unlock() lck_spin_unlock(&oslog_stream_lock)
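
/*
 * The spin lock above guards the two STAILQ lists, the stream msgbuf indices
 * and oslog_stream_buf_bytesavail. The *_locked routines below assert lock
 * ownership rather than taking the lock themselves, so a caller brackets
 * them roughly like this (illustrative sketch only; the real call sites live
 * outside this file):
 *
 *	stream_lock();
 *	oslog_streamwrite_locked(ftid, stamp, pubdata, publen);
 *	stream_unlock();
 *	oslog_streamwakeup();
 */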
/* XXX wants a linker set so these can be static */
extern d_open_t     logopen;
extern d_close_t    logclose;
extern d_read_t     logread;
extern d_ioctl_t    logioctl;
extern d_select_t   logselect;

/* XXX wants a linker set so these can be static */
extern d_open_t     oslogopen;
extern d_close_t    oslogclose;
extern d_select_t   oslogselect;
extern d_ioctl_t    oslogioctl;

/* XXX wants a linker set so these can be static */
extern d_open_t     oslog_streamopen;
extern d_close_t    oslog_streamclose;
extern d_read_t     oslog_streamread;
extern d_ioctl_t    oslog_streamioctl;
extern d_select_t   oslog_streamselect;
void oslog_setsize(int size);
void oslog_streamwrite_locked(firehose_tracepoint_id_u ftid,
    uint64_t stamp, const void *pubdata, size_t publen);
void oslog_streamwrite_metadata_locked(oslog_stream_buf_entry_t m_entry);
static oslog_stream_buf_entry_t oslog_stream_find_free_buf_entry_locked(void);
static void oslog_streamwrite_append_bytes(const char *buffer, int buflen);
/*
 * Serialize log access.  Note that the log can be written at interrupt level,
 * so any log manipulations that can be done from, or affect, another processor
 * at interrupt level must be guarded with a spin lock.
 */

#if DEBUG
#define LOG_SETSIZE_DEBUG(x...) kprintf(x)
#else
#define LOG_SETSIZE_DEBUG(x...) do { } while(0)
#endif
static int sysctl_kern_msgbuf(struct sysctl_oid *oidp,
    void *arg1, int arg2, struct sysctl_req *req);
int
logopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc *p)
{
    if (atm_get_diagnostic_config() & ATM_ENABLE_LEGACY_LOGGING) {
        logsoftc.sc_mbp = msgbufp;
    } else {
        /*
         * Support for messagetracer (kern_asl_msg())
         * In this mode, /dev/klog exports only ASL-formatted messages
         * written into aslbufp via vaddlog().
         */
        logsoftc.sc_mbp = aslbufp;
    }
    logsoftc.sc_pgid = p->p_pid; /* signal process only */
    return 0;
}

int
logclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused struct proc *p)
{
    logsoftc.sc_state &= ~(LOG_NBIO | LOG_ASYNC);
    selwakeup(&logsoftc.sc_selp);
    selthreadclear(&logsoftc.sc_selp);
    return 0;
}

int
oslogopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc *p)
{
    oslogsoftc.sc_pgid = p->p_pid; /* signal process only */
    return 0;
}

int
oslogclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused struct proc *p)
{
    oslogsoftc.sc_state &= ~(LOG_NBIO | LOG_ASYNC);
    selwakeup(&oslogsoftc.sc_selp);
    selthreadclear(&oslogsoftc.sc_selp);
    return 0;
}

int
oslog_streamopen(__unused dev_t dev, __unused int flags, __unused int mode, struct proc *p)
{
    char *oslog_stream_msg_bufc = NULL;
    oslog_stream_buf_entry_t entries = NULL;

    stream_lock();
    if (oslog_stream_open) {
        stream_unlock();
        return EBUSY;
    }
    stream_unlock();

    // Allocate the stream buffer
    oslog_stream_msg_bufc = kheap_alloc(KHEAP_DATA_BUFFERS,
        oslog_stream_buf_size, Z_WAITOK | Z_ZERO);
    if (!oslog_stream_msg_bufc) {
        return ENOMEM;
    }

    /* entries to support kernel logging in stream mode */
    size_t entries_size = oslog_stream_num_entries * sizeof(struct oslog_stream_buf_entry_s);
    entries = kalloc(entries_size);
    if (!entries) {
        kheap_free(KHEAP_DATA_BUFFERS,
            oslog_stream_msg_bufc, oslog_stream_buf_size);
        return ENOMEM;
    }

    /* Zeroing to avoid copying uninitialized struct padding to userspace. */
    bzero(entries, entries_size);

    stream_lock();
    if (oslog_stream_open) {
        stream_unlock();
        kheap_free(KHEAP_DATA_BUFFERS,
            oslog_stream_msg_bufc, oslog_stream_buf_size);
        kfree(entries, entries_size);
        return EBUSY;
    }

    assert(oslog_streambufp->msg_bufc == NULL);
    oslog_streambufp->msg_bufc = oslog_stream_msg_bufc;
    oslog_streambufp->msg_size = oslog_stream_buf_size;

    oslog_stream_buf_entries = entries;

    STAILQ_INIT(&oslog_stream_free_head);
    STAILQ_INIT(&oslog_stream_buf_head);

    for (int i = 0; i < oslog_stream_num_entries; i++) {
        oslog_stream_buf_entries[i].type = oslog_stream_link_type_log;
        STAILQ_INSERT_TAIL(&oslog_stream_free_head, &oslog_stream_buf_entries[i], buf_entries);
    }

    /* there should be no pending entries in the stream */
    assert(STAILQ_EMPTY(&oslog_stream_buf_head));
    assert(oslog_streambufp->msg_bufx == 0);
    assert(oslog_streambufp->msg_bufr == 0);

    oslog_streambufp->msg_bufx = 0;
    oslog_streambufp->msg_bufr = 0;
    oslog_streamsoftc.sc_pgid = p->p_pid; /* signal process only */
    oslog_stream_open = 1;
    oslog_stream_buf_bytesavail = oslog_stream_buf_size;
    stream_unlock();

    return 0;
}
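
/*
 * Note on the double oslog_stream_open check above: the flag is tested once
 * before the blocking allocations and again after re-taking the lock, so an
 * open that raced ahead while this thread slept in kheap_alloc()/kalloc()
 * causes the loser to back out and free its buffers instead of clobbering
 * the winner's state.
 */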
int
oslog_streamclose(__unused dev_t dev, __unused int flag, __unused int devtype, __unused struct proc *p)
{
    oslog_stream_buf_entry_t next_entry = NULL;
    char *oslog_stream_msg_bufc = NULL;
    oslog_stream_buf_entry_t entries = NULL;

    stream_lock();

    if (oslog_stream_open == 0) {
        stream_unlock();
        return EBADF;
    }

    // Consume all log lines
    while (!STAILQ_EMPTY(&oslog_stream_buf_head)) {
        next_entry = STAILQ_FIRST(&oslog_stream_buf_head);
        STAILQ_REMOVE_HEAD(&oslog_stream_buf_head, buf_entries);
    }
    oslog_streamwakeup_locked();
    oslog_streamsoftc.sc_state &= ~(LOG_NBIO | LOG_ASYNC);
    selwakeup(&oslog_streamsoftc.sc_selp);
    selthreadclear(&oslog_streamsoftc.sc_selp);
    oslog_stream_open = 0;
    oslog_streambufp->msg_bufr = 0;
    oslog_streambufp->msg_bufx = 0;
    oslog_stream_msg_bufc = oslog_streambufp->msg_bufc;
    oslog_streambufp->msg_bufc = NULL;
    entries = oslog_stream_buf_entries;
    oslog_stream_buf_entries = NULL;
    oslog_streambufp->msg_size = 0;

    stream_unlock();

    // Free the stream buffer
    kheap_free(KHEAP_DATA_BUFFERS, oslog_stream_msg_bufc,
        oslog_stream_buf_size);
    // Free the list entries
    kfree(entries, oslog_stream_num_entries * sizeof(struct oslog_stream_buf_entry_s));

    return 0;
}

int
logread(__unused dev_t dev, struct uio *uio, int flag)
{
    int error = 0;
    struct msgbuf *mbp = logsoftc.sc_mbp;
    ssize_t resid;

    bsd_log_lock_safe();
    while (mbp->msg_bufr == mbp->msg_bufx) {
        if (flag & IO_NDELAY) {
            error = EWOULDBLOCK;
            goto out;
        }
        if (logsoftc.sc_state & LOG_NBIO) {
            error = EWOULDBLOCK;
            goto out;
        }
        logsoftc.sc_state |= LOG_RDWAIT;
        bsd_log_unlock();
        /*
         * If the wakeup is missed
         * then wait for 5 sec and reevaluate
         */
        if ((error = tsleep((caddr_t)mbp, LOG_RDPRI | PCATCH,
            "klog", 5 * hz)) != 0) {
            /* if it times out; ignore */
            if (error != EWOULDBLOCK) {
                return error;
            }
        }
        bsd_log_lock_safe();
    }
    logsoftc.sc_state &= ~LOG_RDWAIT;

    while ((resid = uio_resid(uio)) > 0) {
        size_t l;

        if (mbp->msg_bufx >= mbp->msg_bufr) {
            l = mbp->msg_bufx - mbp->msg_bufr;
        } else {
            l = mbp->msg_size - mbp->msg_bufr;
        }
        if ((l = MIN(l, (size_t)resid)) == 0) {
            break;
        }

        const size_t readpos = mbp->msg_bufr;

        bsd_log_unlock();
        error = uiomove((caddr_t)&mbp->msg_bufc[readpos], (int)l, uio);
        bsd_log_lock_safe();
        if (error) {
            break;
        }

        mbp->msg_bufr = (int)(readpos + l);
        if (mbp->msg_bufr >= mbp->msg_size) {
            mbp->msg_bufr = 0;
        }
    }
out:
    bsd_log_unlock();
    return error;
}
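
/*
 * Reader side of the ring buffer: each pass of the copy loop above moves at
 * most one contiguous run, either msg_bufr..msg_bufx when the data does not
 * wrap, or msg_bufr..msg_size when it does; the wrapped remainder is picked
 * up on the next iteration after msg_bufr resets to 0.
 */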
int
oslog_streamread(__unused dev_t dev, struct uio *uio, int flag)
{
    int error = 0;
    int copy_size = 0;
    static char logline[FIREHOSE_CHUNK_SIZE];

    stream_lock();

    if (!oslog_stream_open) {
        stream_unlock();
        return EBADF;
    }

    while (STAILQ_EMPTY(&oslog_stream_buf_head)) {
        assert(oslog_stream_buf_bytesavail == oslog_stream_buf_size);

        if (flag & IO_NDELAY || oslog_streamsoftc.sc_state & LOG_NBIO) {
            stream_unlock();
            return EWOULDBLOCK;
        }

        oslog_streamsoftc.sc_state |= LOG_RDWAIT;
        wait_result_t wr = assert_wait((event_t)oslog_streambufp,
            THREAD_INTERRUPTIBLE);
        if (wr == THREAD_WAITING) {
            stream_unlock();
            wr = thread_block(THREAD_CONTINUE_NULL);
            stream_lock();
        }

        switch (wr) {
        case THREAD_AWAKENED:
        case THREAD_TIMED_OUT:
            break;
        default:
            stream_unlock();
            return EINTR;
        }

        if (!oslog_stream_open) {
            stream_unlock();
            return EBADF;
        }
    }

    int logpos = 0;
    oslog_stream_buf_entry_t read_entry = NULL;
    uint16_t rec_length;

    read_entry = STAILQ_FIRST(&oslog_stream_buf_head);
    assert(read_entry != NULL);
    STAILQ_REMOVE_HEAD(&oslog_stream_buf_head, buf_entries);

    // Copy the timestamp first
    memcpy(logline + logpos, &read_entry->timestamp, sizeof(uint64_t));
    logpos += sizeof(uint64_t);

    switch (read_entry->type) {
    /* Handle metadata messages */
    case oslog_stream_link_type_metadata:
        memcpy(logline + logpos,
            (read_entry->metadata), read_entry->size);
        logpos += read_entry->size;

        stream_unlock();

        // Free the list entry
        kfree(read_entry, sizeof(struct oslog_stream_buf_entry_s) + read_entry->size);
        break;

    /* Handle log messages */
    case oslog_stream_link_type_log:
        /* ensure that the correct read entry was dequeued */
        assert(read_entry->offset == oslog_streambufp->msg_bufr);
        rec_length = read_entry->size;

        // If the next log line is contiguous in the buffer, copy it out.
        if (read_entry->offset + rec_length <= oslog_streambufp->msg_size) {
            memcpy(logline + logpos,
                oslog_streambufp->msg_bufc + read_entry->offset, rec_length);

            oslog_streambufp->msg_bufr += rec_length;
            if (oslog_streambufp->msg_bufr == oslog_streambufp->msg_size) {
                oslog_streambufp->msg_bufr = 0;
            }
            logpos += rec_length;
        } else {
            // Otherwise, copy until the end of the buffer, and
            // copy the remaining bytes starting at index 0.
            int bytes_left = oslog_streambufp->msg_size - read_entry->offset;
            memcpy(logline + logpos,
                oslog_streambufp->msg_bufc + read_entry->offset, bytes_left);
            logpos += bytes_left;
            rec_length -= bytes_left;

            memcpy(logline + logpos, (const void *)oslog_streambufp->msg_bufc,
                rec_length);
            oslog_streambufp->msg_bufr = rec_length;
            logpos += rec_length;
        }

        oslog_stream_buf_bytesavail += read_entry->size;
        assert(oslog_stream_buf_bytesavail <= oslog_stream_buf_size);

        assert(oslog_streambufp->msg_bufr < oslog_streambufp->msg_size);
        STAILQ_INSERT_TAIL(&oslog_stream_free_head, read_entry, buf_entries);

        stream_unlock();
        break;

    default:
        panic("Got unexpected log entry type: %hhu\n", read_entry->type);
        break;
    }

    copy_size = min(logpos, (int) MIN(uio_resid(uio), INT_MAX));
    if (copy_size != 0) {
        error = uiomove((caddr_t)logline, copy_size, uio);
    }
    os_atomic_inc(&oslog_s_streamed_msgcount, relaxed);

    return error;
}
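
/*
 * Wire format handed to userspace by oslog_streamread(): an 8-byte timestamp
 * followed by the entry payload. Metadata entries carry their payload in the
 * side allocation hanging off the list entry, while log entries are copied
 * out of the circular stream buffer, in two pieces when the record wraps.
 */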
int
logselect(__unused dev_t dev, int rw, void * wql, struct proc *p)
{
    const struct msgbuf *mbp = logsoftc.sc_mbp;

    switch (rw) {
    case FREAD:
        bsd_log_lock_safe();
        if (mbp->msg_bufr != mbp->msg_bufx) {
            bsd_log_unlock();
            return 1;
        }
        selrecord(p, &logsoftc.sc_selp, wql);
        bsd_log_unlock();
        break;
    }
    return 0;
}

int
oslogselect(__unused dev_t dev, int rw, void * wql, struct proc *p)
{
    switch (rw) {
    case FREAD:
        bsd_log_lock_safe();
        if (os_log_wakeup) {
            bsd_log_unlock();
            return 1;
        }
        selrecord(p, &oslogsoftc.sc_selp, wql);
        bsd_log_unlock();
        break;
    }
    return 0;
}

int
oslog_streamselect(__unused dev_t dev, int rw, void * wql, struct proc *p)
{
    int ret = 0;

    switch (rw) {
    case FREAD:
        stream_lock();
        if (STAILQ_EMPTY(&oslog_stream_buf_head)) {
            selrecord(p, &oslog_streamsoftc.sc_selp, wql);
        } else {
            ret = 1;
        }
        stream_unlock();
        break;
    }
    return ret;
}

void
logwakeup(struct msgbuf *mbp)
{
    /* cf. r24974766 & r25201228 */
    if (oslog_is_safe() == FALSE) {
        return;
    }

    bsd_log_lock_safe();
    if (NULL == mbp) {
        mbp = logsoftc.sc_mbp;
    }
    if (mbp != logsoftc.sc_mbp) {
        bsd_log_unlock();
        return;
    }
    selwakeup(&logsoftc.sc_selp);
    if (logsoftc.sc_state & LOG_ASYNC) {
        int pgid = logsoftc.sc_pgid;
        bsd_log_unlock();
        if (pgid < 0) {
            gsignal(-pgid, SIGIO);
        } else {
            proc_signal(pgid, SIGIO);
        }
        bsd_log_lock_safe();
    }
    if (logsoftc.sc_state & LOG_RDWAIT) {
        wakeup((caddr_t)mbp);
        logsoftc.sc_state &= ~LOG_RDWAIT;
    }
    bsd_log_unlock();
}
void
oslogwakeup(void)
{
    if (!oslog_is_safe()) {
        return;
    }

    bsd_log_lock_safe();
    os_log_wakeup = 1;
    selwakeup(&oslogsoftc.sc_selp);
    bsd_log_unlock();
}

static void
oslog_streamwakeup_locked(void)
{
    LCK_SPIN_ASSERT(&oslog_stream_lock, LCK_ASSERT_OWNED);
    if (!oslog_stream_open) {
        return;
    }
    selwakeup(&oslog_streamsoftc.sc_selp);
    if (oslog_streamsoftc.sc_state & LOG_RDWAIT) {
        wakeup((caddr_t)oslog_streambufp);
        oslog_streamsoftc.sc_state &= ~LOG_RDWAIT;
    }
}

void
oslog_streamwakeup(void)
{
    /* cf. r24974766 & r25201228 */
    if (oslog_is_safe() == FALSE) {
        return;
    }

    stream_lock();
    oslog_streamwakeup_locked();
    stream_unlock();
}

int
logioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unused struct proc *p)
{
    int l;
    const struct msgbuf *mbp = logsoftc.sc_mbp;

    bsd_log_lock_safe();
    switch (com) {
    /* return number of characters immediately available */
    case FIONREAD:
        l = mbp->msg_bufx - mbp->msg_bufr;
        if (l < 0) {
            l += mbp->msg_size;
        }
        *(off_t *)data = l;
        break;

    case FIONBIO:
        if (*(int *)data) {
            logsoftc.sc_state |= LOG_NBIO;
        } else {
            logsoftc.sc_state &= ~LOG_NBIO;
        }
        break;

    case FIOASYNC:
        if (*(int *)data) {
            logsoftc.sc_state |= LOG_ASYNC;
        } else {
            logsoftc.sc_state &= ~LOG_ASYNC;
        }
        break;

    case TIOCSPGRP:
        logsoftc.sc_pgid = *(int *)data;
        break;

    case TIOCGPGRP:
        *(int *)data = logsoftc.sc_pgid;
        break;

    default:
        bsd_log_unlock();
        return -1;
    }
    bsd_log_unlock();
    return 0;
}

int
oslogioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unused struct proc *p)
{
    int ret = 0;
    mach_vm_size_t buffer_size = (__firehose_buffer_kernel_chunk_count * FIREHOSE_CHUNK_SIZE);
    firehose_buffer_map_info_t map_info = {0, 0};
    firehose_buffer_t kernel_firehose_buffer = NULL;
    mach_vm_address_t user_addr = 0;
    mach_port_t mem_entry_ptr = MACH_PORT_NULL;

    switch (com) {
    /* map the kernel firehose buffer into the caller's address space */
    case LOGBUFFERMAP:
        kernel_firehose_buffer = (firehose_buffer_t)kernel_firehose_addr;

        ret = mach_make_memory_entry_64(kernel_map,
            &buffer_size,
            (mach_vm_offset_t) kernel_firehose_buffer,
            (MAP_MEM_VM_SHARE | VM_PROT_READ),
            &mem_entry_ptr,
            MACH_PORT_NULL);
        if (ret == KERN_SUCCESS) {
            ret = mach_vm_map_kernel(get_task_map(current_task()),
                &user_addr,
                buffer_size,
                0, /* mask */
                VM_FLAGS_ANYWHERE,
                VM_MAP_KERNEL_FLAGS_NONE,
                VM_KERN_MEMORY_NONE,
                mem_entry_ptr,
                0, /* offset */
                FALSE, /* copy */
                VM_PROT_READ,
                VM_PROT_READ,
                VM_INHERIT_SHARE);
        }

        if (ret == KERN_SUCCESS) {
            map_info.fbmi_addr = (uint64_t) (user_addr);
            map_info.fbmi_size = buffer_size;
            bcopy(&map_info, data, sizeof(firehose_buffer_map_info_t));
        }
        break;

    case LOGFLUSHED:
        __firehose_merge_updates(*(firehose_push_reply_t *)(data));
        break;

    default:
        return -1;
    }
    return 0;
}

int
oslog_streamioctl(__unused dev_t dev, u_long com, caddr_t data, __unused int flag, __unused struct proc *p)
{
    int err = 0;

    stream_lock();

    switch (com) {
    case FIONBIO:
        if (data && *(int *)data) {
            oslog_streamsoftc.sc_state |= LOG_NBIO;
        } else {
            oslog_streamsoftc.sc_state &= ~LOG_NBIO;
        }
        break;
    case FIOASYNC:
        if (data && *(int *)data) {
            oslog_streamsoftc.sc_state |= LOG_ASYNC;
        } else {
            oslog_streamsoftc.sc_state &= ~LOG_ASYNC;
        }
        break;
    default:
        err = -1;
        break;
    }

    stream_unlock();
    return err;
}

static void
oslog_init(void)
{
    kern_return_t kr;

    if (!PE_parse_boot_argn("firehose_chunk_count", &__firehose_buffer_kernel_chunk_count, sizeof(__firehose_buffer_kernel_chunk_count))) {
        __firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT;
    }
    if (!PE_parse_boot_argn("firehose_io_pages", &__firehose_num_kernel_io_pages, sizeof(__firehose_num_kernel_io_pages))) {
        __firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES;
    }
    if (!__firehose_kernel_configuration_valid(__firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages)) {
        printf("illegal firehose configuration %u/%u, using defaults\n", __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages);
        __firehose_buffer_kernel_chunk_count = FIREHOSE_BUFFER_KERNEL_DEFAULT_CHUNK_COUNT;
        __firehose_num_kernel_io_pages = FIREHOSE_BUFFER_KERNEL_DEFAULT_IO_PAGES;
    }
    vm_size_t size = __firehose_buffer_kernel_chunk_count * FIREHOSE_CHUNK_SIZE;

    kr = kmem_alloc_flags(kernel_map, &kernel_firehose_addr,
        size + (2 * PAGE_SIZE), VM_KERN_MEMORY_LOG,
        KMA_GUARD_FIRST | KMA_GUARD_LAST | KMA_ZERO);
    if (kr != KERN_SUCCESS) {
        panic("Failed to allocate memory for firehose logging buffer");
    }
    kernel_firehose_addr += PAGE_SIZE;
    /* register buffer with firehose */
    kernel_firehose_addr = (vm_offset_t)__firehose_buffer_create((size_t *) &size);

    printf("oslog_init completed, %u chunks, %u io pages\n",
        __firehose_buffer_kernel_chunk_count, __firehose_num_kernel_io_pages);
}
STARTUP(OSLOG, STARTUP_RANK_FIRST, oslog_init);
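
/*
 * The extra 2 * PAGE_SIZE requested from kmem_alloc_flags() above pays for
 * the KMA_GUARD_FIRST/KMA_GUARD_LAST guard pages; kernel_firehose_addr is
 * then advanced past the leading guard page before the buffer is handed to
 * __firehose_buffer_create().
 */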
/*
 * Description: Output a character to the log; assumes the bsd_log_lock() or
 *              bsd_log_lock_safe() is held by the caller.
 *
 * Parameters:  c       Character to output
 *
 * Notes:       This function is used for multibyte output to the log; it
 *              should be used preferentially where possible to ensure that
 *              log entries do not end up interspersed due to preemption or
 *              SMP reentrancy.
 */
void
log_putc_locked(struct msgbuf *mbp, char c)
{
    mbp->msg_bufc[mbp->msg_bufx++] = c;
    if (mbp->msg_bufx >= mbp->msg_size) {
        mbp->msg_bufx = 0;
    }
}
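
/*
 * Illustrative sketch (not part of the driver): a caller that wants a whole
 * string to appear contiguously would hold the log lock across the loop,
 * e.g. (hypothetical caller, assuming it may take bsd_log_lock_safe()):
 *
 *	bsd_log_lock_safe();
 *	for (const char *cp = str; *cp != '\0'; cp++) {
 *		log_putc_locked(msgbufp, *cp);
 *	}
 *	bsd_log_unlock();
 */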
static oslog_stream_buf_entry_t
oslog_stream_find_free_buf_entry_locked(void)
{
    struct msgbuf *mbp;
    oslog_stream_buf_entry_t buf_entry = NULL;

    LCK_SPIN_ASSERT(&oslog_stream_lock, LCK_ASSERT_OWNED);

    mbp = oslog_streambufp;

    buf_entry = STAILQ_FIRST(&oslog_stream_free_head);
    if (buf_entry != NULL) {
        STAILQ_REMOVE_HEAD(&oslog_stream_free_head, buf_entries);
    } else {
        // If no list elements are available in the free-list,
        // consume the next log line so we can free up its list element
        oslog_stream_buf_entry_t prev_entry = NULL;

        buf_entry = STAILQ_FIRST(&oslog_stream_buf_head);
        while (buf_entry->type == oslog_stream_link_type_metadata) {
            prev_entry = buf_entry;
            buf_entry = STAILQ_NEXT(buf_entry, buf_entries);
        }

        if (prev_entry == NULL) {
            STAILQ_REMOVE_HEAD(&oslog_stream_buf_head, buf_entries);
        } else {
            STAILQ_REMOVE_AFTER(&oslog_stream_buf_head, prev_entry, buf_entries);
        }

        mbp->msg_bufr += buf_entry->size;
        oslog_s_dropped_msgcount++;
        if (mbp->msg_bufr >= mbp->msg_size) {
            mbp->msg_bufr = (mbp->msg_bufr % mbp->msg_size);
        }
    }

    return buf_entry;
}
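
/*
 * When the free list is empty, the helper above evicts the oldest log entry
 * from the stream (skipping metadata entries, which are not dropped here),
 * advances msg_bufr past its bytes, and counts the loss in
 * oslog_s_dropped_msgcount before recycling the list element.
 */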
void
oslog_streamwrite_metadata_locked(oslog_stream_buf_entry_t m_entry)
{
    LCK_SPIN_ASSERT(&oslog_stream_lock, LCK_ASSERT_OWNED);
    STAILQ_INSERT_TAIL(&oslog_stream_buf_head, m_entry, buf_entries);
}

static void
oslog_streamwrite_append_bytes(const char *buffer, int buflen)
{
    struct msgbuf *mbp;

    LCK_SPIN_ASSERT(&oslog_stream_lock, LCK_ASSERT_OWNED);

    assert(oslog_stream_buf_bytesavail >= buflen);
    oslog_stream_buf_bytesavail -= buflen;
    assert(oslog_stream_buf_bytesavail >= 0);

    mbp = oslog_streambufp;
    if (mbp->msg_bufx + buflen <= mbp->msg_size) {
        /*
         * If this will fit without needing to be split across the end
         * of the buffer, copy it directly in one go.
         */
        memcpy((void *)(mbp->msg_bufc + mbp->msg_bufx), buffer, buflen);

        mbp->msg_bufx += buflen;
        if (mbp->msg_bufx == mbp->msg_size) {
            mbp->msg_bufx = 0;
        }
    } else {
        /*
         * Copy up to the end of the stream buffer, and then put what remains
         * at the beginning, wrapping around.
         */
        int bytes_left = mbp->msg_size - mbp->msg_bufx;
        memcpy((void *)(mbp->msg_bufc + mbp->msg_bufx), buffer, bytes_left);

        buflen -= bytes_left;
        buffer += bytes_left;

        // Copy the remainder of the data from the beginning of stream
        memcpy((void *)mbp->msg_bufc, buffer, buflen);
        mbp->msg_bufx = buflen;
    }
}
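
/*
 * Worked example of the wrap-around above (illustrative numbers only):
 * with msg_size = 4096, msg_bufx = 4000 and buflen = 200, the first memcpy
 * writes bytes_left = 96 bytes at offset 4000, the second memcpy writes the
 * remaining 104 bytes at offset 0, and msg_bufx ends up at 104.
 */

void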
oslog_streamwrite_locked(firehose_tracepoint_id_u ftid,
    uint64_t stamp, const void *pubdata, size_t publen)
{
    struct msgbuf *mbp;
    oslog_stream_buf_entry_t buf_entry = NULL;
    oslog_stream_buf_entry_t next_entry = NULL;

    LCK_SPIN_ASSERT(&oslog_stream_lock, LCK_ASSERT_OWNED);

    assert(publen <= UINT16_MAX);
    const ssize_t ft_length = offsetof(struct firehose_tracepoint_s, ft_data) + publen;

    mbp = oslog_streambufp;
    if (ft_length > mbp->msg_size) {
        os_atomic_inc(&oslog_s_error_count, relaxed);
        return;
    }

    // Ensure that we have a list element for this record
    buf_entry = oslog_stream_find_free_buf_entry_locked();

    assert(buf_entry != NULL);

    while (ft_length > oslog_stream_buf_bytesavail) {
        oslog_stream_buf_entry_t prev_entry = NULL;

        next_entry = STAILQ_FIRST(&oslog_stream_buf_head);
        assert(next_entry != NULL);
        while (next_entry->type == oslog_stream_link_type_metadata) {
            prev_entry = next_entry;
            next_entry = STAILQ_NEXT(next_entry, buf_entries);
        }

        if (prev_entry == NULL) {
            STAILQ_REMOVE_HEAD(&oslog_stream_buf_head, buf_entries);
        } else {
            STAILQ_REMOVE_AFTER(&oslog_stream_buf_head, prev_entry, buf_entries);
        }

        mbp->msg_bufr += next_entry->size;
        if (mbp->msg_bufr >= mbp->msg_size) {
            mbp->msg_bufr = (mbp->msg_bufr % mbp->msg_size);
        }

        oslog_s_dropped_msgcount++;
        oslog_stream_buf_bytesavail += next_entry->size;
        assert(oslog_stream_buf_bytesavail <= oslog_stream_buf_size);

        STAILQ_INSERT_TAIL(&oslog_stream_free_head, next_entry, buf_entries);
    }

    assert(ft_length <= oslog_stream_buf_bytesavail);

    // Write the log line and update the list entry for this record
    buf_entry->offset = mbp->msg_bufx;
    buf_entry->size = (uint16_t)ft_length;
    buf_entry->timestamp = stamp;
    buf_entry->type = oslog_stream_link_type_log;

    // Construct a tracepoint
    struct firehose_tracepoint_s fs = {
        .ft_thread = thread_tid(current_thread()),
        .ft_id.ftid_value = ftid.ftid_value,
        .ft_length = publen
    };

    oslog_streamwrite_append_bytes((char *)&fs, sizeof(fs));
    oslog_streamwrite_append_bytes(pubdata, (int)publen);

    assert(mbp->msg_bufr < mbp->msg_size);
    // Insert the element to the buffer data list
    STAILQ_INSERT_TAIL(&oslog_stream_buf_head, buf_entry, buf_entries);
}
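
/*
 * Record layout produced above: a struct firehose_tracepoint_s header
 * (thread id, tracepoint id, payload length) immediately followed by publen
 * bytes of payload, ft_length bytes in total. The matching list entry records
 * the buffer offset, total size, timestamp and type so oslog_streamread()
 * can find and frame the record later.
 */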
/*
 * Description: Output a character to the log; assumes the bsd_log_lock() or
 *              bsd_log_lock_safe() is NOT held by the caller.
 *
 * Parameters:  c       Character to output
 *
 * Notes:       This function is used for single byte output to the log. It
 *              primarily exists to maintain binary backward compatibility.
 */
void
log_putc(char c)
{
    if (!bsd_log_lock(oslog_is_safe())) {
        os_atomic_inc(&oslog_msgbuf_dropped_charcount, relaxed);
        return;
    }

    log_putc_locked(msgbufp, c);
    int unread_count = msgbufp->msg_bufx - msgbufp->msg_bufr;

    bsd_log_unlock();

    if (unread_count < 0) {
        unread_count = 0 - unread_count;
    }
    if (c == '\n' || unread_count >= (msgbufp->msg_size / 2)) {
        logwakeup(msgbufp);
    }
}

/*
 * It is possible to increase the kernel log buffer size by adding a boot
 * parameter to the kernel command line, and to read the current size using
 *    sysctl kern.msgbuf
 * If there is no parameter on the kernel command line, the buffer is
 * allocated statically and is CONFIG_MSG_BSIZE characters in size; otherwise
 * memory is dynamically allocated. Memory management must already be up.
 */
int
log_setsize(int size)
{
    char *new_logdata;
    int new_logsize, new_bufr, new_bufx;
    char *old_logdata;
    int old_logsize, old_bufr, old_bufx;
    int i, count;
    char *p, ch;

    if (size > MAX_MSG_BSIZE) {
        return EINVAL;
    }

    new_logsize = size;
    new_logdata = kheap_alloc(KHEAP_DATA_BUFFERS, size, Z_WAITOK | Z_ZERO);
    if (!new_logdata) {
        printf("log_setsize: unable to allocate memory\n");
        return ENOMEM;
    }

    bsd_log_lock_safe();

    old_logsize = msgbufp->msg_size;
    old_logdata = msgbufp->msg_bufc;
    old_bufr = msgbufp->msg_bufr;
    old_bufx = msgbufp->msg_bufx;

    LOG_SETSIZE_DEBUG("log_setsize(%d): old_logdata %p old_logsize %d old_bufr %d old_bufx %d\n",
        size, old_logdata, old_logsize, old_bufr, old_bufx);

    /* start "new_logsize" bytes before the write pointer */
    if (new_logsize <= old_bufx) {
        count = new_logsize;
        p = old_logdata + old_bufx - count;
    } else {
        /*
         * if new buffer is bigger, copy what we have and let the
         * bzero above handle the difference
         */
        count = MIN(new_logsize, old_logsize);
        p = old_logdata + old_logsize - (count - old_bufx);
    }
    for (i = 0; i < count; i++) {
        if (p >= old_logdata + old_logsize) {
            p = old_logdata;
        }

        ch = *p++;
        new_logdata[i] = ch;
    }

    new_bufx = i;
    if (new_bufx >= new_logsize) {
        new_bufx = 0;
    }
    msgbufp->msg_bufx = new_bufx;

    new_bufr = old_bufx - old_bufr; /* how much were we trailing bufx by? */
    if (new_bufr < 0) {
        new_bufr += old_logsize;
    }
    new_bufr = new_bufx - new_bufr; /* now relative to oldest data in new buffer */
    if (new_bufr < 0) {
        new_bufr += new_logsize;
    }
    msgbufp->msg_bufr = new_bufr;

    msgbufp->msg_size = new_logsize;
    msgbufp->msg_bufc = new_logdata;

    LOG_SETSIZE_DEBUG("log_setsize(%d): new_logdata %p new_logsize %d new_bufr %d new_bufx %d\n",
        size, new_logdata, new_logsize, new_bufr, new_bufx);

    bsd_log_unlock();

    /* this memory is now dead - clear it so that it compresses better
     * in case of suspend to disk etc. */
    bzero(old_logdata, old_logsize);
    if (old_logdata != smsg_bufc) {
        /* dynamic memory that must be freed */
        kheap_free(KHEAP_DATA_BUFFERS, old_logdata, old_logsize);
    }

    printf("set system log size to %d bytes\n", new_logsize);

    return 0;
}

void
oslog_setsize(int size)
{
    uint16_t scale = 0;
    // If the size is less than the default stream buffer
    // size, do nothing.
    if (size <= OSLOG_STREAM_BUF_SIZE) {
        return;
    }

    scale = (uint16_t) (size / OSLOG_STREAM_BUF_SIZE);

    oslog_stream_buf_size = size;
    oslog_stream_num_entries = scale * OSLOG_NUM_STREAM_ENTRIES;
    printf("oslog_setsize: new buffer size = %d, new num entries = %d\n", oslog_stream_buf_size, oslog_stream_num_entries);
}

SYSCTL_PROC(_kern, OID_AUTO, msgbuf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, 0, 0,
    sysctl_kern_msgbuf, "I", "");

static int
sysctl_kern_msgbuf(struct sysctl_oid *oidp __unused,
    void *arg1 __unused, int arg2 __unused, struct sysctl_req *req)
{
    int old_bufsize, bufsize;
    int error;

    bsd_log_lock_safe();
    old_bufsize = bufsize = msgbufp->msg_size;
    bsd_log_unlock();

    error = sysctl_io_number(req, bufsize, sizeof(bufsize), &bufsize, NULL);
    if (error) {
        return error;
    }

    if (bufsize != old_bufsize) {
        error = log_setsize(bufsize);
    }

    return error;
}
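
/*
 * Usage sketch (assuming the stock sysctl(8) tooling): `sysctl kern.msgbuf`
 * reports the current buffer size and `sysctl kern.msgbuf=<bytes>` resizes
 * it; both paths go through sysctl_kern_msgbuf(), and a changed value is
 * applied with log_setsize().
 */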
/*
 * This should be called by /sbin/dmesg only via libproc.
 * It returns as much data still in the buffer as possible.
 */
int
log_dmesg(user_addr_t buffer, uint32_t buffersize, int32_t *retval)
{
    uint32_t i;
    uint32_t localbuff_size;
    int error = 0, newl, skip;
    char *localbuff, *p, *copystart, ch;
    size_t copysize;

    bsd_log_lock_safe();
    localbuff_size = (msgbufp->msg_size + 2); /* + '\n' + '\0' */
    bsd_log_unlock();

    /* Allocate a temporary non-circular buffer for copyout */
    localbuff = kheap_alloc(KHEAP_DATA_BUFFERS, localbuff_size, Z_WAITOK);
    if (!localbuff) {
        printf("log_dmesg: unable to allocate memory\n");
        return ENOMEM;
    }

    /* in between here, the log could become bigger, but that's fine */
    bsd_log_lock_safe();

    /*
     * The message buffer is circular; start at the write pointer, and
     * make one loop up to write pointer - 1.
     */
    p = msgbufp->msg_bufc + msgbufp->msg_bufx;
    for (i = newl = skip = 0; p != msgbufp->msg_bufc + msgbufp->msg_bufx - 1; ++p) {
        if (p >= msgbufp->msg_bufc + msgbufp->msg_size) {
            p = msgbufp->msg_bufc;
        }
        ch = *p;
        /* Skip "\n<.*>" syslog sequences. */
        if (skip) {
            if (ch == '>') {
                newl = skip = 0;
            }
            continue;
        }
        if (newl && ch == '<') {
            skip = 1;
            continue;
        }
        if (ch == '\0') {
            continue;
        }
        newl = (ch == '\n');
        localbuff[i++] = ch;
        /* The original version of this routine contained a buffer
         * overflow. At the time, a "small" targeted fix was desired
         * so the change below to check the buffer bounds was made.
         * TODO: rewrite this needlessly convoluted routine.
         */
        if (i == (localbuff_size - 2)) {
            break;
        }
    }
    if (!newl) {
        localbuff[i++] = '\n';
    }
    localbuff[i++] = 0;

    if (buffersize >= i) {
        copystart = localbuff;
        copysize = i;
    } else {
        copystart = localbuff + i - buffersize;
        copysize = buffersize;
    }

    bsd_log_unlock();

    error = copyout(copystart, buffer, copysize);
    if (!error) {
        *retval = (int32_t)copysize;
    }

    kheap_free(KHEAP_DATA_BUFFERS, localbuff, localbuff_size);
    return error;
}
#ifdef CONFIG_XNUPOST

size_t find_pattern_in_buffer(const char *, size_t, size_t);

/*
 * Returns count of pattern found in the system log buffer.
 * Stops searching further if count reaches expected_count.
 */
size_t
find_pattern_in_buffer(const char *pattern, size_t len, size_t expected_count)
{
    if (pattern == NULL || len == 0 || expected_count == 0) {
        return 0;
    }

    size_t msg_bufx = msgbufp->msg_bufx;
    size_t msg_size = msgbufp->msg_size;
    size_t match_count = 0;

    for (size_t i = 0; i < msg_size; i++) {
        boolean_t match = TRUE;
        for (size_t j = 0; j < len; j++) {
            size_t pos = (msg_bufx + i + j) % msg_size;
            if (msgbufp->msg_bufc[pos] != pattern[j]) {
                match = FALSE;
                break;
            }
        }
        if (match && ++match_count >= expected_count) {
            break;
        }
    }

    return match_count;
}

#endif /* CONFIG_XNUPOST */