2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
20 * @APPLE_LICENSE_HEADER_END@
28 * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez
29 * Import of Mac OS X kernel (~semeria)
31 * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez
32 * Import of OSF Mach kernel (~mburg)
34 * Revision 1.1.5.1 1995/01/06 19:53:45 devrcs
35 * mk6 CR668 - 1.3b26 merge
37 * [1994/10/12 22:25:24 dwm]
39 * Revision 1.1.2.2 1994/05/16 19:19:22 meissner
40 * Protect against hash_ptr being null in _profile_update_stats.
41 * [1994/05/16 17:23:53 meissner]
43 * Remove _profile_cnt_to_hex, _profile_strbuffer.
44 * _profile_print_stats now takes const pointers.
45 * Use the new 64-bit arithmetic support instead of converting to double.
46 * Add _profile_merge_stats to merge statistics.
47 * [1994/04/28 21:45:04 meissner]
49 * If MACH_ASSERT is on in server or kernel, turn on profiling printfs.
50 * Print out fractional digits for average # of hash searches in stats.
51 * Update overflow_ticks for # times the lprofil counter overflows into high word.
52 * Don't make sizes of C/asm structures a const array, since it has pointers in it.
53 * Add support for converting 64 bit ints to a string.
54 * Use PROF_CNT_TO_DECIMAL where possible instead of PROF_CNT_TO_LDOUBLE.
55 * [1994/04/20 15:47:02 meissner]
57 * Revision 1.1.2.1 1994/04/08 17:51:51 meissner
59 * [1994/04/08 02:11:40 meissner]
61 * Make most stats 64 bits, except for things like memory allocation.
62 * [1994/04/02 14:58:28 meissner]
64 * Add some printfs under #idef DEBUG_PROFILE.
65 * [1994/03/29 21:00:11 meissner]
67 * Further changes for gprof/prof overflow support.
68 * Add overflow support for {gprof,prof,old,dummy}_mcount counters.
69 * [1994/03/17 20:13:31 meissner]
71 * Add gprof/prof overflow support
72 * [1994/03/17 14:56:51 meissner]
74 * Use memset instead of bzero.
75 * [1994/02/28 23:56:10 meissner]
77 * Add size of histogram counters & unused fields to profile_profil struct
78 * [1994/02/17 21:41:50 meissner]
80 * Allocate slop space for server in addition to microkernel.
81 * Add 3rd argument to _profile_print_stats for profil info.
82 * Print # histogram ticks too low/too high for server/mk.
83 * [1994/02/16 22:38:18 meissner]
85 * Calculate percentages for # of hash buckets.
86 * [1994/02/11 16:52:04 meissner]
88 * Print stats as an unsigned number.
89 * [1994/02/07 18:47:05 meissner]
91 * For kernel and server, include <kern/assert.h> not <assert.h>.
92 * Always do assert on comparing asm vs. C structure sizes.
93 * Add _profile_reset to reset profiling information.
94 * Add _profile_update_stats to update the statistics.
95 * Move _gprof_write code that updates hash stats to _profile_update_stats.
96 * Don't allocate space for basic block support just yet.
97 * Add support for range checking the gprof arc {from,self}pc addresses.
98 * _profile_debug now calls _profile_update_stats.
99 * Print how many times the acontext was locked.
100 * If DEBUG_PROFILE is defined, set pv->debug to 1.
102 * [1994/02/07 12:41:03 meissner]
104 * Keep track of the number of times the kernel overflows the HISTCOUNTER counter.
105 * [1994/02/03 20:13:28 meissner]
107 * Add stats for {user,kernel,idle} mode in the kernel.
108 * [1994/02/03 15:17:31 meissner]
110 * Print unused stats in hex as well as decimal.
111 * [1994/02/03 14:52:20 meissner]
113 * _profile_print_stats no longer takes profile_{vars,md} pointer arguments.
114 * If stream is NULL, _profile_print_stats will use stdout.
115 * Separate _profile_update_stats from _gprof_write.
116 * [1994/02/03 00:58:55 meissner]
118 * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars.
119 * [1994/02/01 12:04:01 meissner]
121 * Add allocation flag to _profile_md_init.
122 * Fix core dumps in _profile_print_stats if no profile_vars ptr passed.
123 * Print numbers in 12 columns, not 8.
124 * Print my_cpu/max_cpu if max_cpu != 0.
125 * Make allocations print like other stats.
126 * Use ACONTEXT_FIRST to start loop on, not ACONTEXT_PROF.
127 * [1994/01/28 23:33:26 meissner]
129 * Move callback pointers into separate allocation context.
130 * Add size fields for other structures to profile-vars.
131 * [1994/01/26 20:23:37 meissner]
133 * Allocate initial memory at startup.
134 * Print structure sizes and version number when printing stats.
135 * Initialize size fields and version numbers.
136 * Allocation context pointers moved to _profile_vars.
137 * [1994/01/25 01:46:04 meissner]
139 * Move init code here from assembly language.
140 * [1994/01/22 01:13:21 meissner]
142 * Include <profile/profile-internal.h> instead of "profile-md.h".
143 * [1994/01/20 20:56:49 meissner]
146 * [1994/01/18 23:08:02 meissner]
148 * Rename profile.h -> profile-md.h.
149 * [1994/01/18 19:44:57 meissner]
151 * Write out stats unused fields.
152 * Make _prof_write write out the prof stats gprof collects.
153 * [1994/01/15 18:40:37 meissner]
155 * Remove debug code called from profile-asm.s.
156 * Always print out the # of profil buckets.
157 * [1994/01/15 00:59:06 meissner]
160 * [1994/01/04 16:34:46 meissner]
162 * Move max hash bucket calculation into _gprof_write & put info in stats structure.
163 * [1994/01/04 16:15:17 meissner]
165 * Use _profile_printf to write diagnostics; add diag_stream to hold stream to write to.
166 * [1994/01/04 15:37:46 meissner]
168 * Correctly handle case where more than one allocation context was
169 * allocated due to multiple threads.
170 * Cast stats to long for output.
171 * Print number of profil buckets field in _profile_stats.
172 * Add support for GFUNC allocation context.
173 * [1994/01/04 14:26:00 meissner]
175 * CR 10198 - Initial version.
176 * [1994/01/01 22:44:10 meissner]
181 #include <profiling/profile-internal.h>
185 #if defined(MACH_KERNEL) || defined(_KERNEL)
187 #include <mach_assert.h>
188 #if MACH_ASSERT && !defined(DEBUG_PROFILE)
189 #define DEBUG_PROFILE 1
192 extern int printf(const char *, ...);
193 extern void panic(const char *);
196 #define panic(str) exit(1)
199 #ifndef PROFILE_NUM_FUNCS
200 #define PROFILE_NUM_FUNCS 2000
203 #ifndef PROFILE_NUM_ARCS
204 #define PROFILE_NUM_ARCS 8000
208 * Information passed on from profile-asm.s
211 extern int _profile_do_stats
;
212 extern size_t _profile_size
;
213 extern size_t _profile_stats_size
;
214 extern size_t _profile_md_size
;
215 extern size_t _profile_profil_size
;
216 extern size_t _profile_hash_size
;
219 * All profiling variables, and a dummy gprof record.
222 struct profile_vars _profile_vars
= { 0 };
223 struct hasharc _gprof_dummy
= { 0 };
226 * Forward references.
229 static void *_profile_md_acontext(struct profile_vars
*pv
,
232 acontext_type_t type
);
234 static void _profile_reset_alloc(struct profile_vars
*,
237 extern void _bogus_function(void);
240 * Function to set up the initial allocation for a context block.
244 _profile_md_acontext(struct profile_vars
*pv
,
247 acontext_type_t type
)
250 struct alloc_context context
;
251 struct page_list plist
;
255 struct memory
*mptr
= (struct memory
*)ptr
;
256 struct alloc_context
*context
= &mptr
->context
;
257 struct page_list
*plist
= &mptr
->plist
;
260 _profile_printf("_profile_md_acontext: pv= 0x%lx, ptr= 0x%lx, len= %6ld, type= %d\n",
267 /* Fill in context block header */
268 context
->next
= pv
->acontext
[type
];
269 context
->plist
= plist
;
272 /* Fill in first page list information */
273 plist
->ptr
= plist
->first
= (void *)&mptr
->data
[0];
274 plist
->next
= (struct page_list
*)0;
275 plist
->bytes_free
= len
- ((char *)plist
->ptr
- (char *)ptr
);
276 plist
->bytes_allocated
= 0;
277 plist
->num_allocations
= 0;
279 /* Update statistics */
280 pv
->stats
.num_context
[type
]++;
281 pv
->stats
.wasted
[type
] += plist
->bytes_free
;
282 pv
->stats
.overhead
[type
] += len
- plist
->bytes_free
;
284 /* And setup context block */
285 pv
->acontext
[type
] = context
;
287 return (void *)((char *)ptr
+len
);
292 * Machine dependent function to initialize things.
296 _profile_md_init(struct profile_vars
*pv
,
298 profile_alloc_mem_t alloc_mem
)
300 size_t page_size
= pv
->page_size
;
305 size_t extra_arc_size
;
306 size_t extra_func_size
;
307 size_t callback_size
= page_size
;
312 size_t c_size
; /* size C thinks structure is */
313 size_t *asm_size_ptr
; /* pointer to size asm thinks struct is */
314 const char *name
; /* structure name */
316 { sizeof(struct profile_profil
), &_profile_profil_size
, "profile_profil" },
317 { sizeof(struct profile_stats
), &_profile_stats_size
, "profile_stats" },
318 { sizeof(struct profile_md
), &_profile_md_size
, "profile_md" },
319 { sizeof(struct profile_vars
), &_profile_size
, "profile_vars" }};
322 _profile_printf("_profile_md_init: pv = 0x%lx, type = %d, alloc = %d\n",
328 for (i
= 0; i
< sizeof (sizes
) / sizeof(sizes
[0]); i
++) {
329 if (sizes
[i
].c_size
!= *sizes
[i
].asm_size_ptr
) {
330 _profile_printf("C thinks struct %s is %ld bytes, asm thinks it is %ld bytes\n",
332 (long)sizes
[i
].c_size
,
333 (long)*sizes
[i
].asm_size_ptr
);
335 panic(sizes
[i
].name
);
339 /* Figure out which function will handle compiler generated profiling */
340 if (type
== PROFILE_GPROF
) {
341 pv
->md
.save_mcount_ptr
= _gprof_mcount
;
343 } else if (type
== PROFILE_PROF
) {
344 pv
->md
.save_mcount_ptr
= _prof_mcount
;
347 pv
->md
.save_mcount_ptr
= _dummy_mcount
;
350 pv
->vars_size
= sizeof(struct profile_vars
);
351 pv
->plist_size
= sizeof(struct page_list
);
352 pv
->acontext_size
= sizeof(struct alloc_context
);
353 pv
->callback_size
= sizeof(struct callback
);
354 pv
->major_version
= PROFILE_MAJOR_VERSION
;
355 pv
->minor_version
= PROFILE_MINOR_VERSION
;
360 pv
->output_uarea
= 1;
361 pv
->output_stats
= (prof_flag_t
) _profile_do_stats
;
362 pv
->output_clock
= 1;
363 pv
->multiple_sections
= 1;
365 pv
->bogus_func
= _bogus_function
;
371 if (!pv
->error_msg
) {
372 pv
->error_msg
= "error in profiling";
375 if (!pv
->page_size
) {
376 pv
->page_size
= 4096;
379 pv
->stats
.stats_size
= sizeof(struct profile_stats
);
380 pv
->stats
.major_version
= PROFILE_MAJOR_VERSION
;
381 pv
->stats
.minor_version
= PROFILE_MINOR_VERSION
;
383 pv
->md
.md_size
= sizeof(struct profile_md
);
384 pv
->md
.major_version
= PROFILE_MAJOR_VERSION
;
385 pv
->md
.minor_version
= PROFILE_MINOR_VERSION
;
386 pv
->md
.hash_size
= _profile_hash_size
;
387 pv
->md
.num_cache
= MAX_CACHE
;
388 pv
->md
.mcount_ptr_ptr
= &_mcount_ptr
;
389 pv
->md
.dummy_ptr
= &_gprof_dummy
;
390 pv
->md
.alloc_pages
= _profile_alloc_pages
;
392 /* zero out all allocation context blocks */
393 for (ac
= ACONTEXT_FIRST
; ac
< ACONTEXT_MAX
; ac
++) {
394 pv
->acontext
[ac
] = (struct alloc_context
*)0;
397 /* Don't allocate memory if not desired */
402 /* Allocate some space for the initial allocations */
405 misc_size
= page_size
;
406 ptr
= _profile_alloc_pages(misc_size
+ callback_size
);
407 ptr
= _profile_md_acontext(pv
, ptr
, misc_size
, ACONTEXT_MISC
);
408 ptr
= _profile_md_acontext(pv
, ptr
, callback_size
, ACONTEXT_CALLBACK
);
413 #if defined(MACH_KERNEL) || defined(_KERNEL)
415 * For the MK & server allocate some slop space now for the
416 * secondary context blocks in case allocations are done at
417 * interrupt level when another allocation is being done. This
418 * is done before the main allocation blocks and will be pushed
419 * so that it will only be used when the main allocation block
422 extra_arc_size
= 4*page_size
;
423 extra_func_size
= 2*page_size
;
425 extra_arc_size
= extra_func_size
= 0;
428 /* Set up allocation areas */
429 arc_size
= ROUNDUP(PROFILE_NUM_ARCS
* sizeof(struct hasharc
), page_size
);
430 func_size
= ROUNDUP(PROFILE_NUM_FUNCS
* sizeof(struct gfuncs
), page_size
);
431 hash_size
= _profile_hash_size
* sizeof (struct hasharc
*);
432 misc_size
= ROUNDUP(hash_size
+ page_size
, page_size
);
434 ptr
= _profile_alloc_pages(arc_size
441 #if defined(MACH_KERNEL) || defined(_KERNEL)
442 ptr
= _profile_md_acontext(pv
, ptr
, extra_arc_size
, ACONTEXT_GPROF
);
443 ptr
= _profile_md_acontext(pv
, ptr
, extra_func_size
, ACONTEXT_GFUNC
);
445 ptr
= _profile_md_acontext(pv
, ptr
, arc_size
, ACONTEXT_GPROF
);
446 ptr
= _profile_md_acontext(pv
, ptr
, func_size
, ACONTEXT_GFUNC
);
447 ptr
= _profile_md_acontext(pv
, ptr
, misc_size
, ACONTEXT_MISC
);
448 ptr
= _profile_md_acontext(pv
, ptr
, callback_size
, ACONTEXT_CALLBACK
);
450 /* Allocate hash table */
451 pv
->md
.hash_ptr
= (struct hasharc
**) _profile_alloc(pv
, hash_size
, ACONTEXT_MISC
);
455 /* Set up allocation areas */
456 func_size
= ROUNDUP(PROFILE_NUM_FUNCS
* sizeof(struct prof_ext
), page_size
);
457 misc_size
= page_size
;
459 ptr
= _profile_alloc_pages(func_size
463 ptr
= _profile_md_acontext(pv
, ptr
, func_size
, ACONTEXT_PROF
);
464 ptr
= _profile_md_acontext(pv
, ptr
, misc_size
, ACONTEXT_MISC
);
465 ptr
= _profile_md_acontext(pv
, ptr
, callback_size
, ACONTEXT_CALLBACK
);
472 * Machine dependent functions to start and stop profiling.
476 _profile_md_start(void)
478 _mcount_ptr
= _profile_vars
.md
.save_mcount_ptr
;
483 _profile_md_stop(void)
485 _mcount_ptr
= _dummy_mcount
;
491 * Free up all memory in a memory context block.
495 _profile_reset_alloc(struct profile_vars
*pv
, acontext_type_t ac
)
497 struct alloc_context
*aptr
;
498 struct page_list
*plist
;
500 for (aptr
= pv
->acontext
[ac
];
501 aptr
!= (struct alloc_context
*)0;
504 for (plist
= aptr
->plist
;
505 plist
!= (struct page_list
*)0;
506 plist
= plist
->next
) {
508 plist
->ptr
= plist
->first
;
509 plist
->bytes_free
+= plist
->bytes_allocated
;
510 plist
->bytes_allocated
= 0;
511 plist
->num_allocations
= 0;
512 memset(plist
->first
, '\0', plist
->bytes_allocated
);
519 * Reset profiling. Since the only user of this function is the kernel
520 * and the server, we don't have to worry about other stuff than gprof.
524 _profile_reset(struct profile_vars
*pv
)
526 struct alloc_context
*aptr
;
527 struct page_list
*plist
;
528 struct gfuncs
*gfunc
;
534 /* Reset all function unique pointers back to 0 */
535 for (aptr
= pv
->acontext
[ACONTEXT_GFUNC
];
536 aptr
!= (struct alloc_context
*)0;
539 for (plist
= aptr
->plist
;
540 plist
!= (struct page_list
*)0;
541 plist
= plist
->next
) {
543 for (gfunc
= (struct gfuncs
*)plist
->first
;
544 gfunc
< (struct gfuncs
*)plist
->ptr
;
547 *(gfunc
->unique_ptr
) = (struct hasharc
*)0;
553 _profile_reset_alloc(pv
, ACONTEXT_GPROF
);
554 _profile_reset_alloc(pv
, ACONTEXT_GFUNC
);
555 _profile_reset_alloc(pv
, ACONTEXT_PROF
);
557 memset((void *)pv
->profil_buf
, '\0', pv
->profil_info
.profil_len
);
558 memset((void *)pv
->md
.hash_ptr
, '\0', pv
->md
.hash_size
* sizeof(struct hasharc
*));
559 memset((void *)&pv
->stats
, '\0', sizeof(pv
->stats
));
561 pv
->stats
.stats_size
= sizeof(struct profile_stats
);
562 pv
->stats
.major_version
= PROFILE_MAJOR_VERSION
;
563 pv
->stats
.minor_version
= PROFILE_MINOR_VERSION
;
572 * Machine dependent function to write out gprof records.
576 _gprof_write(struct profile_vars
*pv
, struct callback
*callback_ptr
)
578 struct alloc_context
*aptr
;
579 struct page_list
*plist
;
581 struct hasharc
*hptr
;
584 for (aptr
= pv
->acontext
[ACONTEXT_GPROF
];
585 aptr
!= (struct alloc_context
*)0;
588 for (plist
= aptr
->plist
; plist
!= (struct page_list
*)0; plist
= plist
->next
) {
589 hptr
= (struct hasharc
*)plist
->first
;
590 for (i
= 0; i
< plist
->num_allocations
; (i
++, hptr
++)) {
592 struct gprof_arc arc
= hptr
->arc
;
593 int nrecs
= 1 + (hptr
->overflow
* 2);
596 if (pv
->check_funcs
) {
597 if (arc
.frompc
< pv
->profil_info
.lowpc
||
598 arc
.frompc
> pv
->profil_info
.highpc
) {
600 arc
.frompc
= (prof_uptrint_t
)pv
->bogus_func
;
603 if (arc
.selfpc
< pv
->profil_info
.lowpc
||
604 arc
.selfpc
> pv
->profil_info
.highpc
) {
606 arc
.selfpc
= (prof_uptrint_t
)pv
->bogus_func
;
610 /* For each overflow, emit 2 extra records with the count
612 for (j
= 0; j
< nrecs
; j
++) {
613 bytes
+= sizeof (arc
);
614 if ((*pv
->fwrite_func
)((void *)&arc
,
622 arc
.count
= 0x80000000;
633 * Machine dependent function to write out prof records.
637 _prof_write(struct profile_vars
*pv
, struct callback
*callback_ptr
)
639 struct alloc_context
*aptr
;
640 struct page_list
*plist
;
642 struct prof_ext prof_st
;
643 struct prof_int
*pptr
;
648 /* Write out information prof_mcount collects */
649 for (aptr
= pv
->acontext
[ACONTEXT_PROF
];
650 aptr
!= (struct alloc_context
*)0;
653 for (plist
= aptr
->plist
; plist
!= (struct page_list
*)0; plist
= plist
->next
) {
654 pptr
= (struct prof_int
*)plist
->first
;
656 for (i
= 0; i
< plist
->num_allocations
; (i
++, pptr
++)) {
658 /* Write out 2 records for each overflow, each with a
659 count of 0x80000000 + the normal record */
660 prof_st
= pptr
->prof
;
661 nrecs
= 1 + (pptr
->overflow
* 2);
663 for (j
= 0; j
< nrecs
; j
++) {
664 bytes
+= sizeof (struct prof_ext
);
665 if ((*pv
->fwrite_func
)((void *)&prof_st
,
673 prof_st
.cncall
= 0x80000000;
679 /* Now write out the prof information that gprof collects */
680 for (aptr
= pv
->acontext
[ACONTEXT_GFUNC
];
681 aptr
!= (struct alloc_context
*)0;
684 for (plist
= aptr
->plist
; plist
!= (struct page_list
*)0; plist
= plist
->next
) {
685 gptr
= (struct gfuncs
*)plist
->first
;
687 for (i
= 0; i
< plist
->num_allocations
; (i
++, gptr
++)) {
689 /* Write out 2 records for each overflow, each with a
690 count of 0x80000000 + the normal record */
691 prof_st
= gptr
->prof
.prof
;
692 nrecs
= 1 + (gptr
->prof
.overflow
* 2);
694 for (j
= 0; j
< nrecs
; j
++) {
695 bytes
+= sizeof (struct prof_ext
);
696 if ((*pv
->fwrite_func
)((void *)&prof_st
,
704 prof_st
.cncall
= 0x80000000;
715 * Update any statistics. For the 386, calculate the hash table loading factor.
716 * Also figure out how many overflows occured.
720 _profile_update_stats(struct profile_vars
*pv
)
722 struct alloc_context
*aptr
;
723 struct page_list
*plist
;
724 struct hasharc
*hptr
;
725 struct prof_int
*pptr
;
730 for(i
= 0; i
< MAX_BUCKETS
+1; i
++) {
731 pv
->stats
.buckets
[i
] = 0;
734 pv
->stats
.hash_buckets
= 0;
736 if (pv
->md
.hash_ptr
) {
737 for (i
= 0; i
< pv
->md
.hash_size
; i
++) {
739 struct hasharc
*hptr
;
741 for (hptr
= pv
->md
.hash_ptr
[i
]; hptr
; hptr
= hptr
->next
) {
745 pv
->stats
.buckets
[ (nbuckets
< MAX_BUCKETS
) ? nbuckets
: MAX_BUCKETS
]++;
746 if (pv
->stats
.hash_buckets
< nbuckets
) {
747 pv
->stats
.hash_buckets
= nbuckets
;
752 /* Count how many times functions are out of bounds */
753 if (pv
->check_funcs
) {
754 pv
->stats
.bogus_count
= 0;
756 for (aptr
= pv
->acontext
[ACONTEXT_GPROF
];
757 aptr
!= (struct alloc_context
*)0;
760 for (plist
= aptr
->plist
;
761 plist
!= (struct page_list
*)0;
762 plist
= plist
->next
) {
764 hptr
= (struct hasharc
*)plist
->first
;
765 for (i
= 0; i
< plist
->num_allocations
; (i
++, hptr
++)) {
767 if (hptr
->arc
.frompc
< pv
->profil_info
.lowpc
||
768 hptr
->arc
.frompc
> pv
->profil_info
.highpc
) {
769 pv
->stats
.bogus_count
++;
772 if (hptr
->arc
.selfpc
< pv
->profil_info
.lowpc
||
773 hptr
->arc
.selfpc
> pv
->profil_info
.highpc
) {
774 pv
->stats
.bogus_count
++;
781 /* Figure out how many overflows occurred */
782 PROF_ULONG_TO_CNT(pv
->stats
.prof_overflow
, 0);
783 PROF_ULONG_TO_CNT(pv
->stats
.gprof_overflow
, 0);
785 for (aptr
= pv
->acontext
[ACONTEXT_GPROF
];
786 aptr
!= (struct alloc_context
*)0;
789 for (plist
= aptr
->plist
;
790 plist
!= (struct page_list
*)0;
791 plist
= plist
->next
) {
793 hptr
= (struct hasharc
*)plist
->first
;
794 for (i
= 0; i
< plist
->num_allocations
; (i
++, hptr
++)) {
795 PROF_CNT_ADD(pv
->stats
.gprof_overflow
, hptr
->overflow
);
800 for (aptr
= pv
->acontext
[ACONTEXT_PROF
];
801 aptr
!= (struct alloc_context
*)0;
804 for (plist
= aptr
->plist
;
805 plist
!= (struct page_list
*)0;
806 plist
= plist
->next
) {
808 pptr
= (struct prof_int
*)plist
->first
;
809 for (i
= 0; i
< plist
->num_allocations
; (i
++, pptr
++)) {
810 PROF_CNT_ADD(pv
->stats
.prof_overflow
, pptr
->overflow
);
815 for (aptr
= pv
->acontext
[ACONTEXT_GFUNC
];
816 aptr
!= (struct alloc_context
*)0;
819 for (plist
= aptr
->plist
;
820 plist
!= (struct page_list
*)0;
821 plist
= plist
->next
) {
823 fptr
= (struct gfuncs
*)plist
->first
;
824 for (i
= 0; i
< plist
->num_allocations
; (i
++, fptr
++)) {
825 PROF_CNT_ADD(pv
->stats
.prof_overflow
, fptr
->prof
.overflow
);
830 /* Now go through & count how many times the LHISTCOUNTER overflowed into a 2nd word */
831 lptr
= (LHISTCOUNTER
*)pv
->profil_buf
;
833 if (pv
->use_profil
&&
834 pv
->profil_info
.counter_size
== sizeof(LHISTCOUNTER
) &&
835 lptr
!= (LHISTCOUNTER
*)0) {
837 PROF_ULONG_TO_CNT(pv
->stats
.overflow_ticks
, 0);
838 for (i
= 0; i
< pv
->stats
.profil_buckets
; i
++) {
839 PROF_CNT_ADD(pv
->stats
.overflow_ticks
, lptr
[i
].high
);
844 #if !defined(_KERNEL) && !defined(MACH_KERNEL)
847 * Routine callable from the debugger that prints the statistics.
850 int _profile_debug(void)
852 _profile_update_stats(&_profile_vars
);
853 _profile_print_stats(stderr
, &_profile_vars
.stats
, &_profile_vars
.profil_info
);
858 * Print the statistics structure in a meaningful way.
861 void _profile_print_stats(FILE *stream
,
862 const struct profile_stats
*stats
,
863 const struct profile_profil
*pinfo
)
866 prof_cnt_t total_hits
;
870 int width_wasted
= 0;
871 int width_overhead
= 0;
872 int width_context
= 0;
873 static const char *cname
[ACONTEXT_MAX
] = ACONTEXT_NAMES
;
884 sprintf(buf
, "%ld.%ld", (long)stats
->major_version
, (long)stats
->minor_version
);
885 fprintf(stream
, "%12s profiling version number\n", buf
);
886 fprintf(stream
, "%12lu size of profile_vars\n", (long unsigned)sizeof(struct profile_vars
));
887 fprintf(stream
, "%12lu size of profile_stats\n", (long unsigned)sizeof(struct profile_stats
));
888 fprintf(stream
, "%12lu size of profile_md\n", (long unsigned)sizeof(struct profile_md
));
889 fprintf(stream
, "%12s calls to _{,g}prof_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->cnt
));
890 fprintf(stream
, "%12s calls to old mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->old_mcount
));
891 fprintf(stream
, "%12s calls to _dummy_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->dummy
));
892 fprintf(stream
, "%12lu functions profiled\n", (long unsigned)stats
->prof_records
);
893 fprintf(stream
, "%12lu gprof arcs\n", (long unsigned)stats
->gprof_records
);
896 fprintf(stream
, "%12lu profil buckets\n", (long unsigned)stats
->profil_buckets
);
897 fprintf(stream
, "%12lu profil lowpc [0x%lx]\n",
898 (long unsigned)pinfo
->lowpc
,
899 (long unsigned)pinfo
->lowpc
);
901 fprintf(stream
, "%12lu profil highpc [0x%lx]\n",
902 (long unsigned)pinfo
->highpc
,
903 (long unsigned)pinfo
->highpc
);
905 fprintf(stream
, "%12lu profil highpc-lowpc\n", (long unsigned)(pinfo
->highpc
- pinfo
->lowpc
));
906 fprintf(stream
, "%12lu profil buffer length\n", (long unsigned)pinfo
->profil_len
);
907 fprintf(stream
, "%12lu profil sizeof counters\n", (long unsigned)pinfo
->counter_size
);
908 fprintf(stream
, "%12lu profil scale (%g)\n",
909 (long unsigned)pinfo
->scale
,
910 ((double)pinfo
->scale
) / ((double) 0x10000));
913 for (i
= 0; i
< sizeof (pinfo
->profil_unused
) / sizeof (pinfo
->profil_unused
[0]); i
++) {
914 if (pinfo
->profil_unused
[i
]) {
915 fprintf(stream
, "%12lu profil unused[%2d] {0x%.8lx}\n",
916 (long unsigned)pinfo
->profil_unused
[i
],
918 (long unsigned)pinfo
->profil_unused
[i
]);
923 if (stats
->max_cpu
) {
924 fprintf(stream
, "%12lu current cpu/thread\n", (long unsigned)stats
->my_cpu
);
925 fprintf(stream
, "%12lu max cpu/thread+1\n", (long unsigned)stats
->max_cpu
);
928 if (stats
->bogus_count
!= 0) {
930 "%12lu gprof functions found outside of range\n",
931 (long unsigned)stats
->bogus_count
);
934 if (PROF_CNT_NE_0(stats
->too_low
)) {
936 "%12s histogram ticks were too low\n",
937 PROF_CNT_TO_DECIMAL((char *)0, stats
->too_low
));
940 if (PROF_CNT_NE_0(stats
->too_high
)) {
942 "%12s histogram ticks were too high\n",
943 PROF_CNT_TO_DECIMAL((char *)0, stats
->too_high
));
946 if (PROF_CNT_NE_0(stats
->acontext_locked
)) {
948 "%12s times an allocation context was locked\n",
949 PROF_CNT_TO_DECIMAL((char *)0, stats
->acontext_locked
));
952 if (PROF_CNT_NE_0(stats
->kernel_ticks
)
953 || PROF_CNT_NE_0(stats
->user_ticks
)
954 || PROF_CNT_NE_0(stats
->idle_ticks
)) {
956 prof_cnt_t total_ticks
;
957 long double total_ticks_dbl
;
959 total_ticks
= stats
->kernel_ticks
;
960 PROF_CNT_LADD(total_ticks
, stats
->user_ticks
);
961 PROF_CNT_LADD(total_ticks
, stats
->idle_ticks
);
962 total_ticks_dbl
= PROF_CNT_TO_LDOUBLE(total_ticks
);
965 "%12s total ticks\n",
966 PROF_CNT_TO_DECIMAL((char *)0, total_ticks
));
969 "%12s ticks within the kernel (%5.2Lf%%)\n",
970 PROF_CNT_TO_DECIMAL((char *)0, stats
->kernel_ticks
),
971 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->kernel_ticks
) / total_ticks_dbl
));
974 "%12s ticks within user space (%5.2Lf%%)\n",
975 PROF_CNT_TO_DECIMAL((char *)0, stats
->user_ticks
),
976 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->user_ticks
) / total_ticks_dbl
));
979 "%12s ticks idle (%5.2Lf%%)\n",
980 PROF_CNT_TO_DECIMAL((char *)0, stats
->idle_ticks
),
981 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->idle_ticks
) / total_ticks_dbl
));
984 if (PROF_CNT_NE_0(stats
->overflow_ticks
)) {
985 fprintf(stream
, "%12s times a HISTCOUNTER counter would have overflowed\n",
986 PROF_CNT_TO_DECIMAL((char *)0, stats
->overflow_ticks
));
989 if (PROF_CNT_NE_0(stats
->hash_num
)) {
990 long double total_buckets
= 0.0L;
992 for (i
= 0; i
<= MAX_BUCKETS
; i
++) {
993 total_buckets
+= (long double)stats
->buckets
[i
];
996 fprintf(stream
, "%12lu max bucket(s) on hash chain.\n", (long unsigned)stats
->hash_buckets
);
997 for (i
= 0; i
< MAX_BUCKETS
; i
++) {
998 if (stats
->buckets
[i
] != 0) {
999 fprintf(stream
, "%12lu bucket(s) had %d entries (%5.2Lf%%)\n",
1000 (long unsigned)stats
->buckets
[i
], i
,
1001 100.0L * ((long double)stats
->buckets
[i
] / total_buckets
));
1005 if (stats
->buckets
[MAX_BUCKETS
] != 0) {
1006 fprintf(stream
, "%12lu bucket(s) had more than %d entries (%5.2Lf%%)\n",
1007 (long unsigned)stats
->buckets
[MAX_BUCKETS
], MAX_BUCKETS
,
1008 100.0L * ((long double)stats
->buckets
[MAX_BUCKETS
] / total_buckets
));
1012 PROF_ULONG_TO_CNT(total_hits
, 0);
1013 for (i
= 0; i
< MAX_CACHE
; i
++) {
1014 PROF_CNT_LADD(total_hits
, stats
->cache_hits
[i
]);
1017 if (PROF_CNT_NE_0(total_hits
)) {
1018 long double total
= PROF_CNT_TO_LDOUBLE(stats
->cnt
);
1019 long double total_hits_dbl
= PROF_CNT_TO_LDOUBLE(total_hits
);
1022 "%12s cache hits (%.2Lf%%)\n",
1023 PROF_CNT_TO_DECIMAL((char *)0, total_hits
),
1024 100.0L * (total_hits_dbl
/ total
));
1026 for (i
= 0; i
< MAX_CACHE
; i
++) {
1027 if (PROF_CNT_NE_0(stats
->cache_hits
[i
])) {
1029 "%12s times cache#%d matched (%5.2Lf%% of cache hits, %5.2Lf%% total)\n",
1030 PROF_CNT_TO_DECIMAL((char *)0, stats
->cache_hits
[i
]),
1032 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->cache_hits
[i
]) / total_hits_dbl
),
1033 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->cache_hits
[i
]) / total
));
1037 if (PROF_CNT_NE_0(stats
->hash_num
)) {
1038 fprintf(stream
, "%12s times hash table searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->hash_num
));
1039 fprintf(stream
, "%12s hash buckets searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->hash_search
));
1040 fprintf(stream
, "%12.4Lf average buckets searched\n",
1041 PROF_CNT_TO_LDOUBLE(stats
->hash_search
) / PROF_CNT_TO_LDOUBLE(stats
->hash_num
));
1045 for (i
= 0; i
< sizeof (stats
->stats_unused
) / sizeof (stats
->stats_unused
[0]); i
++) {
1046 if (PROF_CNT_NE_0(stats
->stats_unused
[i
])) {
1047 fprintf(stream
, "%12s unused[%2d] {0x%.8lx 0x%.8lx}\n",
1048 PROF_CNT_TO_DECIMAL((char *)0, stats
->stats_unused
[i
]),
1050 (unsigned long)stats
->stats_unused
[i
].high
,
1051 (unsigned long)stats
->stats_unused
[i
].low
);
1055 /* Get the width for the allocation contexts */
1056 for (ac
= ACONTEXT_FIRST
; ac
< ACONTEXT_MAX
; ac
++) {
1059 if (stats
->num_context
[ac
] == 0) {
1063 len
= strlen (cname
[ac
]);
1064 if (len
> width_cname
)
1067 len
= sprintf (buf
, "%lu", (long unsigned)stats
->num_alloc
[ac
]);
1068 if (len
> width_alloc
)
1071 len
= sprintf (buf
, "%lu", (long unsigned)stats
->wasted
[ac
]);
1072 if (len
> width_wasted
)
1075 len
= sprintf (buf
, "%lu", (long unsigned)stats
->overhead
[ac
]);
1076 if (len
> width_overhead
)
1077 width_overhead
= len
;
1079 len
= sprintf (buf
, "%lu", (long unsigned)stats
->num_context
[ac
]);
1080 if (len
> width_context
)
1081 width_context
= len
;
1084 /* Print info about allocation contexts */
1085 for (ac
= ACONTEXT_FIRST
; ac
< ACONTEXT_MAX
; ac
++) {
1086 if (stats
->num_context
[ac
] == 0) {
1091 "%12lu bytes in %-*s %*lu alloc, %*lu unused, %*lu over, %*lu context\n",
1092 (long unsigned)stats
->bytes_alloc
[ac
],
1093 width_cname
, cname
[ac
],
1094 width_alloc
, (long unsigned)stats
->num_alloc
[ac
],
1095 width_wasted
, (long unsigned)stats
->wasted
[ac
],
1096 width_overhead
, (long unsigned)stats
->overhead
[ac
],
1097 width_context
, (long unsigned)stats
->num_context
[ac
]);
1103 * Merge a new statistics field into an old one.
1106 void _profile_merge_stats(struct profile_stats
*old_stats
, const struct profile_stats
*new_stats
)
1110 /* If nothing passed, just return */
1111 if (!old_stats
|| !new_stats
)
1114 /* If the old_stats has not been initialized, just copy in the new stats */
1115 if (old_stats
->major_version
== 0) {
1116 *old_stats
= *new_stats
;
1118 /* Otherwise, update stats, field by field */
1120 if (old_stats
->prof_records
< new_stats
->prof_records
)
1121 old_stats
->prof_records
= new_stats
->prof_records
;
1123 if (old_stats
->gprof_records
< new_stats
->gprof_records
)
1124 old_stats
->gprof_records
= new_stats
->gprof_records
;
1126 if (old_stats
->hash_buckets
< new_stats
->hash_buckets
)
1127 old_stats
->hash_buckets
= new_stats
->hash_buckets
;
1129 if (old_stats
->bogus_count
< new_stats
->bogus_count
)
1130 old_stats
->bogus_count
= new_stats
->bogus_count
;
1132 PROF_CNT_LADD(old_stats
->cnt
, new_stats
->cnt
);
1133 PROF_CNT_LADD(old_stats
->dummy
, new_stats
->dummy
);
1134 PROF_CNT_LADD(old_stats
->old_mcount
, new_stats
->old_mcount
);
1135 PROF_CNT_LADD(old_stats
->hash_search
, new_stats
->hash_search
);
1136 PROF_CNT_LADD(old_stats
->hash_num
, new_stats
->hash_num
);
1137 PROF_CNT_LADD(old_stats
->user_ticks
, new_stats
->user_ticks
);
1138 PROF_CNT_LADD(old_stats
->kernel_ticks
, new_stats
->kernel_ticks
);
1139 PROF_CNT_LADD(old_stats
->idle_ticks
, new_stats
->idle_ticks
);
1140 PROF_CNT_LADD(old_stats
->overflow_ticks
, new_stats
->overflow_ticks
);
1141 PROF_CNT_LADD(old_stats
->acontext_locked
, new_stats
->acontext_locked
);
1142 PROF_CNT_LADD(old_stats
->too_low
, new_stats
->too_low
);
1143 PROF_CNT_LADD(old_stats
->too_high
, new_stats
->too_high
);
1144 PROF_CNT_LADD(old_stats
->prof_overflow
, new_stats
->prof_overflow
);
1145 PROF_CNT_LADD(old_stats
->gprof_overflow
, new_stats
->gprof_overflow
);
1147 for (i
= 0; i
< (int)ACONTEXT_MAX
; i
++) {
1148 if (old_stats
->num_alloc
[i
] < new_stats
->num_alloc
[i
])
1149 old_stats
->num_alloc
[i
] = new_stats
->num_alloc
[i
];
1151 if (old_stats
->bytes_alloc
[i
] < new_stats
->bytes_alloc
[i
])
1152 old_stats
->bytes_alloc
[i
] = new_stats
->bytes_alloc
[i
];
1154 if (old_stats
->num_context
[i
] < new_stats
->num_context
[i
])
1155 old_stats
->num_context
[i
] = new_stats
->num_context
[i
];
1157 if (old_stats
->wasted
[i
] < new_stats
->wasted
[i
])
1158 old_stats
->wasted
[i
] = new_stats
->wasted
[i
];
1160 if (old_stats
->overhead
[i
] < new_stats
->overhead
[i
])
1161 old_stats
->overhead
[i
] = new_stats
->overhead
[i
];
1165 for (i
= 0; i
< MAX_BUCKETS
+1; i
++) {
1166 if (old_stats
->buckets
[i
] < new_stats
->buckets
[i
])
1167 old_stats
->buckets
[i
] = new_stats
->buckets
[i
];
1170 for (i
= 0; i
< MAX_CACHE
; i
++) {
1171 PROF_CNT_LADD(old_stats
->cache_hits
[i
], new_stats
->cache_hits
[i
]);
1174 for (i
= 0; i
< sizeof(old_stats
->stats_unused
) / sizeof(old_stats
->stats_unused
[0]); i
++) {
1175 PROF_CNT_LADD(old_stats
->stats_unused
[i
], new_stats
->stats_unused
[i
]);
/*
 * Invalid function address used when checking of function addresses is
 * desired for gprof arcs, and we discover an address out of bounds.
 * There should be no callers of this function.
 */

void
_bogus_function(void)
{
	/* deliberately empty: exists only so its address can mark bogus arcs */
}