2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
23 #include <machine/spl.h>
26 #include <mach/clock_types.h>
27 #include <mach/mach_types.h>
28 #include <machine/machine_routines.h>
30 #include <sys/kdebug.h>
31 #include <sys/errno.h>
32 #include <sys/param.h>
35 #include <sys/sysctl.h>
37 #include <kern/thread.h>
38 #include <kern/task.h>
39 #include <vm/vm_kern.h>
/*
 * NOTE(review): file-scope state for the kdebug trace facility.
 * This source is extraction-garbled: statements are split across lines and
 * the fused leading numbers are the original file's line numbers — TODO
 * restore formatting from a pristine copy before building.
 */
42 /* kd_buf kd_buffer[kd_bufsize/sizeof(kd_buf)]; */
/* kernel address of the trace buffer as returned by kmem_alloc */
44 unsigned int kd_buftomem
=0;
/* number of kd_buf entries to allocate (default 8192) */
48 unsigned int nkdbufs
= 8192;
/* size in bytes of the allocated trace buffer */
49 unsigned int kd_bufsize
= 0;
/* KDBG_* state bits: INIT/BUFINIT/MAPINIT/NOWRAP/WRAPPED/filter modes */
50 unsigned int kdebug_flags
= 0;
/* 1 while tracing is enabled (set via KERN_KDENABLE) */
51 unsigned int kdebug_enable
=0;
/* inverse convenience flag: 1 when logging is suppressed */
52 unsigned int kdebug_nolog
=1;
/* debugid range filter bounds, used when KDBG_RANGECHECK is set */
53 unsigned int kdlog_beg
=0;
54 unsigned int kdlog_end
=0;
/* up to four specific debugid values, used when KDBG_VALCHECK is set */
55 unsigned int kdlog_value1
=0;
56 unsigned int kdlog_value2
=0;
57 unsigned int kdlog_value3
=0;
58 unsigned int kdlog_value4
=0;
/* last timestamp written, for detecting out-of-order timebase reads */
60 unsigned long long kd_prev_timebase
= 0LL;
/* spinlock guarding the trace buffer pointers and flags */
61 decl_simple_lock_data(,kd_trace_lock
);
/* thread-name map buffer (one kd_threadmap per thread) and its bookkeeping */
63 kd_threadmap
*kd_mapptr
= 0;
64 unsigned int kd_mapsize
= 0;
65 unsigned int kd_mapcount
= 0;
66 unsigned int kd_maptomem
= 0;
68 pid_t global_state_pid
= -1; /* Used to control exclusive use of kd_buffer */
/* mask that strips the DBG_FUNC_START/END bits from a debugid */
70 #define DBG_FUNC_MASK 0xfffffffc
73 extern natural_t rtclock_decrementer_min
;
/* NOTE(review): fragment of struct krt — the struct header and its other
 * members (p, count, maxcount) were dropped by the extraction. */
87 kd_threadmap
*map
; /* pointer to the map buffer */
93 typedef struct krt krt_t
;
95 /* Support syscall SYS_kdebug_trace */
/*
 * kdebug_trace — user-visible syscall entry; forwards the five user-supplied
 * words (code + arg1..arg4) to kernel_debug with arg5 = 0.
 * NOTE(review): K&R-style definition; the `struct proc *p;`, `retval`
 * declarations, braces and return were dropped by the extraction.
 */
96 kdebug_trace(p
, uap
, retval
)
98 struct kdebug_args
*uap
;
104 kernel_debug(uap
->code
, uap
->arg1
, uap
->arg2
, uap
->arg3
, uap
->arg4
, 0);
/*
 * kernel_debug — record one trace entry, substituting the current thread id
 * for arg5. Disables interrupts and takes kd_trace_lock, applies the active
 * filters (pid include/exclude, debugid range, specific-value), then fills
 * the next kd_buf slot and advances the write pointer with wraparound.
 * NOTE(review): several lines (braces, `int s;`, `kd = kd_bufptr; kd_bufptr++;`,
 * returns, the early-out when logging is disabled) were dropped by the
 * extraction; recover them from a pristine copy.
 */
110 kernel_debug(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
)
111 unsigned int debugid
, arg1
, arg2
, arg3
, arg4
, arg5
;
114 struct proc
*curproc
;
116 unsigned long long now
;
117 mach_timespec_t
*tsp
;
/* interrupts off for the whole buffer update; s holds the previous state */
119 s
= ml_set_interrupts_enabled(FALSE
);
123 ml_set_interrupts_enabled(s
);
127 usimple_lock(&kd_trace_lock
);
128 if (kdebug_flags
& KDBG_PIDCHECK
)
130 /* If kdebug flag is not set for current proc, return */
131 curproc
= current_proc();
/* scheduler events (DBG_MACH_SCHED class) are always traced regardless
 * of the per-process P_KDEBUG filter */
132 if ((curproc
&& !(curproc
->p_flag
& P_KDEBUG
)) &&
133 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
135 usimple_unlock(&kd_trace_lock
);
136 ml_set_interrupts_enabled(s
);
140 else if (kdebug_flags
& KDBG_PIDEXCLUDE
)
142 /* If kdebug flag is set for current proc, return */
143 curproc
= current_proc();
144 if ((curproc
&& (curproc
->p_flag
& P_KDEBUG
)) &&
145 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
147 usimple_unlock(&kd_trace_lock
);
148 ml_set_interrupts_enabled(s
);
153 if (kdebug_flags
& KDBG_RANGECHECK
)
/* NOTE(review): && binds tighter than ||, so this parses as
 *   (debugid < kdlog_beg) || ((debugid > kdlog_end) && (debugid>>24 != DBG_TRACE))
 * i.e. the DBG_TRACE exemption does NOT apply to the lower-bound test.
 * Probable intent: ((below || above) && not-DBG_TRACE). Confirm and fix
 * upstream. */
155 if ((debugid
< kdlog_beg
) || (debugid
> kdlog_end
)
156 && (debugid
>> 24 != DBG_TRACE
))
158 usimple_unlock(&kd_trace_lock
);
159 ml_set_interrupts_enabled(s
);
163 else if (kdebug_flags
& KDBG_VALCHECK
)
/* drop entries whose masked debugid matches none of the four watched
 * values, except DBG_TRACE-class entries which always pass */
165 if ((debugid
& DBG_FUNC_MASK
) != kdlog_value1
&&
166 (debugid
& DBG_FUNC_MASK
) != kdlog_value2
&&
167 (debugid
& DBG_FUNC_MASK
) != kdlog_value3
&&
168 (debugid
& DBG_FUNC_MASK
) != kdlog_value4
&&
169 (debugid
>> 24 != DBG_TRACE
))
171 usimple_unlock(&kd_trace_lock
);
172 ml_set_interrupts_enabled(s
);
/* fill the current slot (kd presumably points at kd_bufptr — the
 * assignment line was dropped by the extraction) */
177 kd
->debugid
= debugid
;
/* arg5 is repurposed to hold the current thread pointer as an int */
182 kd
->arg5
= (int)current_thread();
184 kd
->arg5
|= KDBG_CPU_MASK
;
186 ml_get_timebase((unsigned long long *)&kd
->timestamp
);
188 /* Watch for out of order timestamps */
/* pack sec/nsec into one 64-bit value for a single monotonic compare */
189 now
= (((unsigned long long)kd
->timestamp
.tv_sec
) << 32) |
190 (unsigned long long)((unsigned int)(kd
->timestamp
.tv_nsec
));
192 if (now
< kd_prev_timebase
)
194 /* timestamps are out of order -- adjust */
/* reuse the previous (larger) timestamp so readers see monotonic time */
196 tsp
= (mach_timespec_t
*)&kd_prev_timebase
;
197 kd
->timestamp
.tv_sec
= tsp
->tv_sec
;
198 kd
->timestamp
.tv_nsec
= tsp
->tv_nsec
;
202 /* Then just store the previous timestamp */
203 kd_prev_timebase
= now
;
/* advance the write pointer, wrapping to the start of the ring */
209 if (kd_bufptr
>= kd_buflast
)
210 kd_bufptr
= kd_buffer
;
211 if (kd_bufptr
== kd_readlast
) {
/* writer caught up with the reader: either stop (NOWRAP) or mark wrapped */
212 if (kdebug_flags
& KDBG_NOWRAP
)
214 kdebug_flags
|= KDBG_WRAPPED
;
216 usimple_unlock(&kd_trace_lock
);
217 ml_set_interrupts_enabled(s
);
/*
 * kernel_debug1 — identical to kernel_debug except that the caller supplies
 * arg5 verbatim (no thread-id substitution, no KDBG_CPU_MASK). The two
 * functions should be kept in lockstep; consider factoring the shared body
 * into a helper once the file is restored.
 * NOTE(review): same extraction damage as kernel_debug — braces, `int s;`,
 * the kd_bufptr assignment and returns are missing.
 */
221 kernel_debug1(debugid
, arg1
, arg2
, arg3
, arg4
, arg5
)
222 unsigned int debugid
, arg1
, arg2
, arg3
, arg4
, arg5
;
225 struct proc
*curproc
;
227 unsigned long long now
;
228 mach_timespec_t
*tsp
;
230 s
= ml_set_interrupts_enabled(FALSE
);
234 ml_set_interrupts_enabled(s
);
238 usimple_lock(&kd_trace_lock
);
239 if (kdebug_flags
& KDBG_PIDCHECK
)
241 /* If kdebug flag is not set for current proc, return */
242 curproc
= current_proc();
243 if ((curproc
&& !(curproc
->p_flag
& P_KDEBUG
)) &&
244 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
246 usimple_unlock(&kd_trace_lock
);
247 ml_set_interrupts_enabled(s
);
251 else if (kdebug_flags
& KDBG_PIDEXCLUDE
)
253 /* If kdebug flag is set for current proc, return */
254 curproc
= current_proc();
255 if ((curproc
&& (curproc
->p_flag
& P_KDEBUG
)) &&
256 ((debugid
&0xffff0000) != (MACHDBG_CODE(DBG_MACH_SCHED
, 0) | DBG_FUNC_NONE
)))
258 usimple_unlock(&kd_trace_lock
);
259 ml_set_interrupts_enabled(s
);
264 if (kdebug_flags
& KDBG_RANGECHECK
)
/* NOTE(review): same &&/|| precedence issue as in kernel_debug — the
 * DBG_TRACE exemption only guards the upper-bound comparison. */
266 if ((debugid
< kdlog_beg
) || (debugid
> kdlog_end
)
267 && (debugid
>> 24 != DBG_TRACE
))
269 usimple_unlock(&kd_trace_lock
);
270 ml_set_interrupts_enabled(s
);
274 else if (kdebug_flags
& KDBG_VALCHECK
)
276 if ((debugid
& DBG_FUNC_MASK
) != kdlog_value1
&&
277 (debugid
& DBG_FUNC_MASK
) != kdlog_value2
&&
278 (debugid
& DBG_FUNC_MASK
) != kdlog_value3
&&
279 (debugid
& DBG_FUNC_MASK
) != kdlog_value4
&&
280 (debugid
>> 24 != DBG_TRACE
))
282 usimple_unlock(&kd_trace_lock
);
283 ml_set_interrupts_enabled(s
);
/* fill the slot; unlike kernel_debug, arg5 is stored as given by caller */
289 kd
->debugid
= debugid
;
295 ml_get_timebase((unsigned long long *)&kd
->timestamp
);
297 /* Watch for out of order timestamps */
298 now
= (((unsigned long long)kd
->timestamp
.tv_sec
) << 32) |
299 (unsigned long long)((unsigned int)(kd
->timestamp
.tv_nsec
));
301 if (now
< kd_prev_timebase
)
303 /* timestamps are out of order -- adjust */
305 tsp
= (mach_timespec_t
*)&kd_prev_timebase
;
306 kd
->timestamp
.tv_sec
= tsp
->tv_sec
;
307 kd
->timestamp
.tv_nsec
= tsp
->tv_nsec
;
311 /* Then just store the previous timestamp */
312 kd_prev_timebase
= now
;
/* advance the ring's write pointer with wraparound */
317 if (kd_bufptr
>= kd_buflast
)
318 kd_bufptr
= kd_buffer
;
319 if (kd_bufptr
== kd_readlast
) {
320 if (kdebug_flags
& KDBG_NOWRAP
)
322 kdebug_flags
|= KDBG_WRAPPED
;
324 usimple_unlock(&kd_trace_lock
);
325 ml_set_interrupts_enabled(s
);
/*
 * kdbg_bootstrap (body fragment — the function header was dropped by the
 * extraction): allocate the trace ring buffer of nkdbufs entries, initialize
 * the lock, set INIT|BUFINIT, and reset the read/write pointers. On
 * allocation failure the final lines clear INIT|BUFINIT (error return
 * dropped from view).
 */
331 kd_bufsize
= nkdbufs
* sizeof(kd_buf
);
332 if (kmem_alloc(kernel_map
, &kd_buftomem
,
333 (vm_size_t
)kd_bufsize
) == KERN_SUCCESS
)
334 kd_buffer
= (kd_buf
*) kd_buftomem
;
335 else kd_buffer
= (kd_buf
*) 0;
336 kdebug_flags
&= ~KDBG_WRAPPED
;
338 simple_lock_init(&kd_trace_lock
);
339 kdebug_flags
|= (KDBG_INIT
| KDBG_BUFINIT
);
340 kd_bufptr
= kd_buffer
;
/* one-past-the-end sentinel used by the writers for wraparound */
341 kd_buflast
= &kd_bufptr
[nkdbufs
];
342 kd_readlast
= kd_bufptr
;
343 kd_prev_timebase
= 0LL;
/* allocation-failure path: drop the init flags */
347 kdebug_flags
&= ~(KDBG_INIT
| KDBG_BUFINIT
);
/*
 * kdbg_reinit (body fragment): free any existing trace buffer and thread
 * map, clear MAPINIT, then rebuild the trace buffer via kdbg_bootstrap.
 * NOTE(review): the function header and return were dropped by the
 * extraction; kmem_free's second argument is cast to (char *) rather than
 * vm_offset_t — presumably tolerated by the contemporary prototype.
 */
361 if ((kdebug_flags
& KDBG_INIT
) && (kdebug_flags
& KDBG_BUFINIT
) && kd_bufsize
&& kd_buffer
)
362 kmem_free(kernel_map
, (char *)kd_buffer
, kd_bufsize
);
364 if ((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
366 kmem_free(kernel_map
, (char *)kd_mapptr
, kd_mapsize
);
367 kdebug_flags
&= ~KDBG_MAPINIT
;
369 kd_mapptr
= (kd_threadmap
*) 0;
373 ret
= kdbg_bootstrap();
/*
 * kdbg_trace_string — pack the process's p_comm name into up to four longs
 * (arg1..arg4) for emission as trace arguments.
 * NOTE(review): the declarations of dbg_nameptr/dbg_namelen/dbg_parms/i and
 * the code that copies dbg_parms into *arg1..*arg4 were dropped by the
 * extraction. The visible copy loop reads whole longs from the name buffer;
 * it presumably relies on dbg_parms being pre-zeroed / the name buffer
 * being long-aligned and padded — confirm against a pristine copy.
 */
378 void kdbg_trace_string(struct proc
*proc
, long *arg1
, long *arg2
, long *arg3
, long *arg4
)
394 /* Collect the pathname for tracing */
395 dbg_nameptr
= proc
->p_comm
;
396 dbg_namelen
= strlen(proc
->p_comm
);
/* clamp to the dbg_parms staging buffer (sizeof yields bytes) */
402 if(dbg_namelen
> sizeof(dbg_parms
))
403 dbg_namelen
= sizeof(dbg_parms
);
/* copy the name one long at a time; dbg_namelen counts down in
 * sizeof(long) steps and may go negative to terminate the loop */
405 for(i
=0;dbg_namelen
> 0; i
++)
407 dbg_parms
[i
]=*(long*)dbg_nameptr
;
408 dbg_nameptr
+= sizeof(long);
409 dbg_namelen
-= sizeof(long);
/*
 * kdbg_resolve_map — per-thread callback used by kdbg_mapinit (via
 * task_act_iterate_wth_args): appends one kd_threadmap entry (thread id +
 * process command name) to the map being built in krt_t *t.
 * NOTE(review): the line incrementing t->count and the braces were dropped
 * by the extraction. strncpy does not NUL-terminate on truncation; the
 * explicit terminator store on the last line covers that.
 */
418 kdbg_resolve_map(thread_act_t th_act
, krt_t
*t
)
420 kd_threadmap
*mapptr
;
/* only record while there is room in the preallocated map buffer */
422 if(t
->count
< t
->maxcount
)
424 mapptr
=&t
->map
[t
->count
];
425 mapptr
->thread
= (unsigned int)getshuttle_thread(th_act
);
427 (void) strncpy (mapptr
->command
, t
->p
->p_comm
,
428 sizeof(t
->p
->p_comm
)-1);
429 mapptr
->command
[sizeof(t
->p
->p_comm
)-1] = '\0';
/*
 * kdbg_mapinit (body fragment — header dropped by the extraction): build the
 * thread-name map. Counts activations across all processes, allocates one
 * kd_threadmap per thread, then walks allproc again letting
 * kdbg_resolve_map fill each entry.
 * NOTE(review): the early return when KDBG_MAPINIT is already set, the akrt
 * declaration, akrt.count/akrt.p initialization and the else for the failed
 * kmem_alloc are among the dropped lines. The thread count can change
 * between the sizing pass and the fill pass; the maxcount bound in
 * kdbg_resolve_map guards against overflow.
 */
439 if (kdebug_flags
& KDBG_MAPINIT
)
442 /* Calculate size of thread map buffer */
443 for (p
= allproc
.lh_first
, kd_mapcount
=0; p
;
444 p
= p
->p_list
.le_next
)
446 kd_mapcount
+= get_task_numacts((task_t
)p
->task
);
449 kd_mapsize
= kd_mapcount
* sizeof(kd_threadmap
);
450 if((kmem_alloc(kernel_map
, & kd_maptomem
,
451 (vm_size_t
)kd_mapsize
) == KERN_SUCCESS
))
452 kd_mapptr
= (kd_threadmap
*) kd_maptomem
;
454 kd_mapptr
= (kd_threadmap
*) 0;
458 kdebug_flags
|= KDBG_MAPINIT
;
459 /* Initialize thread map data */
460 akrt
.map
= kd_mapptr
;
462 akrt
.maxcount
= kd_mapcount
;
464 for (p
= allproc
.lh_first
; p
; p
= p
->p_list
.le_next
)
467 task_act_iterate_wth_args((task_t
)p
->task
, kdbg_resolve_map
, &akrt
);
/*
 * kdbg_clear (body fragment — header dropped by the extraction): tear down
 * all tracing state — release the exclusive-owner pid, clear every filter
 * and init flag, and free both the trace ring and the thread map.
 * NOTE(review): kmem_free of kd_buffer/kd_mapptr appears unconditional here;
 * guards (if any) may be on dropped lines — verify before relying on this
 * being safe when buffers were never allocated.
 */
476 /* Clean up the trace buffer */
477 global_state_pid
= -1;
480 kdebug_flags
&= ~KDBG_BUFINIT
;
481 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
482 kdebug_flags
&= ~(KDBG_NOWRAP
| KDBG_RANGECHECK
| KDBG_VALCHECK
);
483 kdebug_flags
&= ~(KDBG_PIDCHECK
| KDBG_PIDEXCLUDE
);
484 kmem_free(kernel_map
, (char *)kd_buffer
, kd_bufsize
);
485 kd_buffer
= (kd_buf
*)0;
487 kd_prev_timebase
= 0LL;
489 /* Clean up the thread map buffer */
490 kdebug_flags
&= ~KDBG_MAPINIT
;
491 kmem_free(kernel_map
, (char *)kd_mapptr
, kd_mapsize
);
492 kd_mapptr
= (kd_threadmap
*) 0;
/*
 * kdbg_setpid — enable (value2==1) or disable per-pid *inclusion* filtering
 * for the pid in value1. Enabling sets KDBG_PIDCHECK (and clears the
 * mutually-exclusive KDBG_PIDEXCLUDE) and marks the target proc P_KDEBUG;
 * disabling only clears the proc's P_KDEBUG bit, leaving global pid
 * checking on for other marked procs.
 * NOTE(review): local declarations, braces, the ESRCH/EINVAL returns and
 * the flag==0 condition were dropped by the extraction.
 */
497 kdbg_setpid(kd_regtype
*kdr
)
503 pid
= (pid_t
)kdr
->value1
;
504 flag
= (int)kdr
->value2
;
/* target process must exist */
508 if ((p
= pfind(pid
)) == NULL
)
512 if (flag
== 1) /* turn on pid check for this and all pids */
514 kdebug_flags
|= KDBG_PIDCHECK
;
515 kdebug_flags
&= ~KDBG_PIDEXCLUDE
;
516 p
->p_flag
|= P_KDEBUG
;
518 else /* turn off pid check for this pid value */
520 /* Don't turn off all pid checking though */
521 /* kdebug_flags &= ~KDBG_PIDCHECK;*/
522 p
->p_flag
&= ~P_KDEBUG
;
531 /* This is for pid exclusion in the trace buffer */
/*
 * kdbg_setpidex — mirror image of kdbg_setpid: enable (value2==1) or disable
 * per-pid *exclusion* for the pid in value1. P_KDEBUG here marks a process
 * whose events are dropped; KDBG_PIDEXCLUDE and KDBG_PIDCHECK are mutually
 * exclusive modes.
 * NOTE(review): declarations, braces and error returns dropped by the
 * extraction, as in kdbg_setpid.
 */
532 kdbg_setpidex(kd_regtype
*kdr
)
538 pid
= (pid_t
)kdr
->value1
;
539 flag
= (int)kdr
->value2
;
543 if ((p
= pfind(pid
)) == NULL
)
547 if (flag
== 1) /* turn on pid exclusion */
549 kdebug_flags
|= KDBG_PIDEXCLUDE
;
550 kdebug_flags
&= ~KDBG_PIDCHECK
;
551 p
->p_flag
|= P_KDEBUG
;
553 else /* turn off pid exclusion for this pid value */
555 /* Don't turn off all pid exclusion though */
556 /* kdebug_flags &= ~KDBG_PIDEXCLUDE;*/
557 p
->p_flag
&= ~P_KDEBUG
;
566 /* This is for setting a minimum decrementer value */
/*
 * kdbg_setrtcdec — validate value1 against KDBG_MINRTCDEC and, if
 * acceptable (0 = reset, or >= minimum), store it into the global
 * rtclock_decrementer_min consumed by the platform clock code.
 * NOTE(review): the declarations, the EINVAL branch taken when the check
 * fails, and the return were dropped by the extraction.
 */
567 kdbg_setrtcdec(kd_regtype
*kdr
)
572 decval
= (natural_t
)kdr
->value1
;
/* nonzero values below the floor are rejected (error path not visible) */
574 if (decval
&& decval
< KDBG_MINRTCDEC
)
578 rtclock_decrementer_min
= decval
;
/*
 * kdbg_setreg — program the debugid filter from a kd_regtype, dispatching on
 * its type (switch header dropped by the extraction):
 *   KDBG_CLASSTYPE  — range [class1<<24, class2<<24)
 *   KDBG_SUBCLSTYPE — range within one class, by subclass
 *   KDBG_RANGETYPE  — raw [value1, value2] range
 *   (fourth case, KDBG_VALCHECK) — four exact debugid values
 * Each case resets KDBG_CKTYPES and the competing check mode before setting
 * its own flags. The final line is the default case clearing all modes.
 */
587 kdbg_setreg(kd_regtype
* kdr
)
590 unsigned int val_1
, val_2
, val
;
593 case KDBG_CLASSTYPE
:
594 val_1
= (kdr
->value1
& 0xff);
595 val_2
= (kdr
->value2
& 0xff);
596 kdlog_beg
= (val_1
<<24);
597 kdlog_end
= (val_2
<<24);
598 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
599 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
600 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
602 case KDBG_SUBCLSTYPE
:
603 val_1
= (kdr
->value1
& 0xff);
604 val_2
= (kdr
->value2
& 0xff);
/* NOTE(review): `val` is used below with no visible assignment — the line
 * between 604 and 606 (presumably `val = val_2 + 1;`, making the range
 * end-exclusive of the next subclass) was dropped by the extraction.
 * Confirm before treating this as a use of an uninitialized variable. */
606 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
607 kdlog_end
= ((val_1
<<24) | (val
<< 16));
608 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
609 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
610 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
612 case KDBG_RANGETYPE
:
613 kdlog_beg
= (kdr
->value1
);
614 kdlog_end
= (kdr
->value2
);
615 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
616 kdebug_flags
&= ~KDBG_VALCHECK
; /* Turn off specific value check */
617 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
/* (case label dropped) — KDBG_VALCHECK mode: watch four exact debugids */
620 kdlog_value1
= (kdr
->value1
);
621 kdlog_value2
= (kdr
->value2
);
622 kdlog_value3
= (kdr
->value3
);
623 kdlog_value4
= (kdr
->value4
);
624 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
625 kdebug_flags
&= ~KDBG_RANGECHECK
; /* Turn off range check */
626 kdebug_flags
|= KDBG_VALCHECK
; /* Turn on specific value check */
/* (default case dropped) — typefilter off: clear all check-type bits */
629 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
/*
 * kdbg_getreg — read back the current filter registration into *kdr.
 * NOTE(review): despite the "get" name, the visible statements WRITE
 * kdlog_beg/kdlog_end and kdebug_flags exactly as kdbg_setreg does; the
 * dropped lines presumably assigned kdr->values. Verify against a pristine
 * copy — as shown this looks like a copy of the setter, a known oddity of
 * this vintage of the file.
 */
640 kdbg_getreg(kd_regtype
* kdr
)
643 unsigned int val_1
, val_2
, val
;
646 case KDBG_CLASSTYPE
:
647 val_1
= (kdr
->value1
& 0xff);
/* NOTE(review): val_2 used below with no visible assignment — the dropped
 * line 648 presumably set val_2 (likely val_1 + 1). */
649 kdlog_beg
= (val_1
<<24);
650 kdlog_end
= (val_2
<<24);
651 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
652 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_CLASSTYPE
);
654 case KDBG_SUBCLSTYPE
:
655 val_1
= (kdr
->value1
& 0xff);
656 val_2
= (kdr
->value2
& 0xff);
/* NOTE(review): as in kdbg_setreg, `val` has no visible assignment
 * (dropped line, presumably val = val_2 + 1). */
658 kdlog_beg
= ((val_1
<<24) | (val_2
<< 16));
659 kdlog_end
= ((val_1
<<24) | (val
<< 16));
660 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
661 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_SUBCLSTYPE
);
663 case KDBG_RANGETYPE
:
664 kdlog_beg
= (kdr
->value1
);
665 kdlog_end
= (kdr
->value2
);
666 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
667 kdebug_flags
|= (KDBG_RANGECHECK
| KDBG_RANGETYPE
);
/* (default case dropped) — clear all check-type bits */
670 kdebug_flags
&= (unsigned int)~KDBG_CKTYPES
;
/*
 * kdbg_readmap — copy the thread-name map out to user space, then free it.
 * *number carries the user buffer size in on entry (as `avail`) and,
 * per the dropped lines, presumably the byte count copied on exit.
 * The map is freed and MAPINIT cleared regardless of whether the copyout
 * happened — reading the map is a one-shot, destructive operation.
 * NOTE(review): declarations of count/avail/ret, braces and returns were
 * dropped by the extraction.
 */
684 kdbg_readmap(kd_threadmap
*buffer
, size_t *number
)
690 count
= avail
/sizeof (kd_threadmap
);
/* only attempt the copy if the caller's buffer holds >=1 entry and no
 * more than we actually have */
692 if (count
&& (count
<= kd_mapcount
))
694 if((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
696 if (*number
< kd_mapsize
)
700 if (copyout(kd_mapptr
, buffer
, kd_mapsize
))
/* unconditional teardown of the map, even on the error paths above */
710 if ((kdebug_flags
& KDBG_MAPINIT
) && kd_mapsize
&& kd_mapptr
)
712 kmem_free(kernel_map
, (char *)kd_mapptr
, kd_mapsize
);
713 kdebug_flags
&= ~KDBG_MAPINIT
;
715 kd_mapptr
= (kd_threadmap
*) 0;
/*
 * kdbg_control — sysctl-style dispatcher for the KERN_KDEBUG interface.
 * name[0] selects the operation (case labels mostly dropped by the
 * extraction); name[1] is an operation-specific value. Enforces single-owner
 * access via global_state_pid before dispatching.
 * NOTE(review): the switch statement, most case labels, `size`, `curpid`,
 * `ret`, `kd_Reg` declarations, error returns (EBUSY/EINVAL/EINVAL) and
 * braces were dropped by the extraction.
 */
723 kdbg_control(name
, namelen
, where
, sizep
)
732 unsigned int value
= name
[1];
734 kbufinfo_t kd_bufinfo
;
737 struct proc
*p
, *curproc
;
/* assignment-in-condition is intentional: skip if no current proc */
739 if(curproc
= current_proc())
740 curpid
= curproc
->p_pid
;
/* claim or verify exclusive ownership of the trace facility */
744 if (global_state_pid
== -1)
745 global_state_pid
= curpid
;
746 else if (global_state_pid
!= curpid
)
748 if((p
= pfind(global_state_pid
)) == NULL
)
750 /* The global pid no longer exists */
751 global_state_pid
= curpid
;
755 /* The global pid exists, deny this request */
/* KERN_KDSETBUF-style user flag set/clear (case labels dropped) */
762 value
&= KDBG_USERFLAGS
;
763 kdebug_flags
|= value
;
766 value
&= KDBG_USERFLAGS
;
767 kdebug_flags
&= ~value
;
769 case KERN_KDENABLE
: /* used to enable or disable */
772 /* enable only if buffer is initialized */
773 if (!(kdebug_flags
& KDBG_BUFINIT
))
/* kdebug_enable and kdebug_nolog are kept as logical complements */
779 kdebug_enable
=(value
)?1:0;
780 kdebug_nolog
= (value
)?0:1;
785 /* We allow a maximum buffer size of 25% of memory */
786 /* 'value' is the desired number of trace entries */
787 max_entries
= (mem_size
/4) / sizeof(kd_buf
);
788 if (value
<= max_entries
)
/* requests above the cap are clamped, not rejected */
791 nkdbufs
= max_entries
;
/* KERN_KDGETBUF: report buffer/map/enable state to user space */
794 if(size
< sizeof(kbufinfo_t
)) {
798 kd_bufinfo
.nkdbufs
= nkdbufs
;
799 kd_bufinfo
.nkdthreads
= kd_mapsize
/ sizeof(kd_threadmap
);
800 kd_bufinfo
.nolog
= kdebug_nolog
;
801 kd_bufinfo
.flags
= kdebug_flags
;
802 if(copyout (&kd_bufinfo
, where
, sizeof(kbufinfo_t
))) {
/* KERN_KDSETREG: copy in a kd_regtype and program the filter */
813 if(size
< sizeof(kd_regtype
)) {
817 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
821 ret
= kdbg_setreg(&kd_Reg
);
/* KERN_KDGETREG: read back the filter registration */
824 if(size
< sizeof(kd_regtype
)) {
828 ret
= kdbg_getreg(&kd_Reg
);
829 if (copyout(&kd_Reg
, where
, sizeof(kd_regtype
))){
/* KERN_KDREADTR: drain trace entries to user space */
834 ret
= kdbg_read(where
, sizep
);
/* KERN_KDPIDTR: per-pid include filter */
837 if (size
< sizeof(kd_regtype
)) {
841 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
845 ret
= kdbg_setpid(&kd_Reg
);
/* KERN_KDPIDEX: per-pid exclude filter */
848 if (size
< sizeof(kd_regtype
)) {
852 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
856 ret
= kdbg_setpidex(&kd_Reg
);
/* KERN_KDTHRMAP: read (and destroy) the thread-name map */
859 ret
= kdbg_readmap((kd_threadmap
*)where
, sizep
);
861 case KERN_KDSETRTCDEC
:
862 if (size
< sizeof(kd_regtype
)) {
866 if (copyin(where
, &kd_Reg
, sizeof(kd_regtype
))) {
870 ret
= kdbg_setrtcdec(&kd_Reg
);
/*
 * kdbg_read — drain trace entries from the ring buffer to user space.
 * Snapshots kdebug_flags and the write pointer under the lock, then does
 * the copyouts WITHOUT the lock or interrupt protection — concurrent
 * writers can still advance kd_bufptr, so the snapshot bounds what is read.
 * Three cases: unwrapped with data ahead of the reader (single copyout);
 * unwrapped and empty; wrapped (two copyouts: readlast..end of ring, then
 * start of ring..write pointer). *number returns entries copied.
 * NOTE(review): declarations (s, count, avail, copycount, totalcount),
 * `avail = *number`, braces, clamps (`copycount = count;`-style lines) and
 * returns were dropped by the extraction.
 */
879 kdbg_read(kd_buf
* buffer
, size_t *number
)
886 unsigned int my_kdebug_flags
;
887 kd_buf
* my_kd_bufptr
;
/* consistent snapshot of flags + write pointer */
889 s
= ml_set_interrupts_enabled(FALSE
);
890 usimple_lock(&kd_trace_lock
);
891 my_kdebug_flags
= kdebug_flags
;
892 my_kd_bufptr
= kd_bufptr
;
893 usimple_unlock(&kd_trace_lock
);
894 ml_set_interrupts_enabled(s
);
896 count
= avail
/sizeof(kd_buf
);
898 if ((my_kdebug_flags
& KDBG_BUFINIT
) && kd_bufsize
&& kd_buffer
) {
/* case 1: no wrap and writer is ahead of reader — one contiguous span */
901 if (!(my_kdebug_flags
& KDBG_WRAPPED
) && (my_kd_bufptr
> kd_readlast
))
903 copycount
= my_kd_bufptr
-kd_readlast
;
904 if (copycount
> count
)
907 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
912 kd_readlast
+= copycount
;
/* case 2: no wrap and buffer empty (pointers equal) — nothing to copy */
916 else if (!(my_kdebug_flags
& KDBG_WRAPPED
) && (my_kd_bufptr
== kd_readlast
))
/* case 3: buffer wrapped — discard overwritten data by jumping the
 * reader to the write pointer, then clear the wrapped flag
 * (NOTE(review): flag update is outside the lock here) */
923 if (my_kdebug_flags
& KDBG_WRAPPED
)
925 kd_readlast
= my_kd_bufptr
;
926 kdebug_flags
&= ~KDBG_WRAPPED
;
929 /* Note that by setting kd_readlast equal to my_kd_bufptr,
930 we now treat the kd_buffer read the same as if we weren't
931 wrapped and my_kd_bufptr was less than kd_readlast.
934 /* first copyout from readlast to end of kd_buffer */
935 copycount
= kd_buflast
- kd_readlast
;
936 if (copycount
> count
)
938 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
945 totalcount
= copycount
;
946 kd_readlast
+= copycount
;
947 if (kd_readlast
== kd_buflast
)
948 kd_readlast
= kd_buffer
;
/* if the user's buffer was exhausted by the first span, report & return */
951 *number
= totalcount
;
955 /* second copyout from top of kd_buffer to bufptr */
956 copycount
= my_kd_bufptr
- kd_readlast
;
957 if (copycount
> count
)
961 *number
= totalcount
;
964 if (copyout(kd_readlast
, buffer
, copycount
* sizeof(kd_buf
)))
968 kd_readlast
+= copycount
;
969 totalcount
+= copycount
;
970 *number
= totalcount
;
973 } /* end if KDBG_BUFINIT */