2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
4 * @APPLE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. Please obtain a copy of the License at
10 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * The Original Code and all software distributed under the License are
14 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
15 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
16 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
18 * Please see the License for the specific language governing rights and
19 * limitations under the License.
21 * @APPLE_LICENSE_HEADER_END@
29 * Revision 1.1.1.1 1998/09/22 21:05:49 wsanchez
30 * Import of Mac OS X kernel (~semeria)
32 * Revision 1.1.1.1 1998/03/07 02:26:08 wsanchez
33 * Import of OSF Mach kernel (~mburg)
35 * Revision 1.1.5.1 1995/01/06 19:53:45 devrcs
36 * mk6 CR668 - 1.3b26 merge
38 * [1994/10/12 22:25:24 dwm]
40 * Revision 1.1.2.2 1994/05/16 19:19:22 meissner
41 * Protect against hash_ptr being null in _profile_update_stats.
42 * [1994/05/16 17:23:53 meissner]
44 * Remove _profile_cnt_to_hex, _profile_strbuffer.
45 * _profile_print_stats now takes const pointers.
46 * Use the new 64-bit arithmetic support instead of converting to double.
47 * Add _profile_merge_stats to merge statistics.
48 * [1994/04/28 21:45:04 meissner]
50 * If MACH_ASSERT is on in server or kernel, turn on profiling printfs.
51 * Print out fractional digits for average # of hash searches in stats.
52 * Update overflow_ticks for # times the lprofil counter overflows into high word.
53 * Don't make sizes of C/asm structures a const array, since it has pointers in it.
54 * Add support for converting 64 bit ints to a string.
55 * Use PROF_CNT_TO_DECIMAL where possible instead of PROF_CNT_TO_LDOUBLE.
56 * [1994/04/20 15:47:02 meissner]
58 * Revision 1.1.2.1 1994/04/08 17:51:51 meissner
60 * [1994/04/08 02:11:40 meissner]
62 * Make most stats 64 bits, except for things like memory allocation.
63 * [1994/04/02 14:58:28 meissner]
65 * Add some printfs under #idef DEBUG_PROFILE.
66 * [1994/03/29 21:00:11 meissner]
68 * Further changes for gprof/prof overflow support.
69 * Add overflow support for {gprof,prof,old,dummy}_mcount counters.
70 * [1994/03/17 20:13:31 meissner]
72 * Add gprof/prof overflow support
73 * [1994/03/17 14:56:51 meissner]
75 * Use memset instead of bzero.
76 * [1994/02/28 23:56:10 meissner]
78 * Add size of histogram counters & unused fields to profile_profil struct
79 * [1994/02/17 21:41:50 meissner]
81 * Allocate slop space for server in addition to microkernel.
82 * Add 3rd argument to _profile_print_stats for profil info.
83 * Print # histogram ticks too low/too high for server/mk.
84 * [1994/02/16 22:38:18 meissner]
86 * Calculate percentages for # of hash buckets.
87 * [1994/02/11 16:52:04 meissner]
89 * Print stats as an unsigned number.
90 * [1994/02/07 18:47:05 meissner]
92 * For kernel and server, include <kern/assert.h> not <assert.h>.
93 * Always do assert on comparing asm vs. C structure sizes.
94 * Add _profile_reset to reset profiling information.
95 * Add _profile_update_stats to update the statistics.
96 * Move _gprof_write code that updates hash stats to _profile_update_stats.
97 * Don't allocate space for basic block support just yet.
98 * Add support for range checking the gprof arc {from,self}pc addresses.
99 * _profile_debug now calls _profile_update_stats.
100 * Print how many times the acontext was locked.
101 * If DEBUG_PROFILE is defined, set pv->debug to 1.
103 * [1994/02/07 12:41:03 meissner]
105 * Keep track of the number of times the kernel overflows the HISTCOUNTER counter.
106 * [1994/02/03 20:13:28 meissner]
108 * Add stats for {user,kernel,idle} mode in the kernel.
109 * [1994/02/03 15:17:31 meissner]
111 * Print unused stats in hex as well as decimal.
112 * [1994/02/03 14:52:20 meissner]
114 * _profile_print_stats no longer takes profile_{vars,md} pointer arguments.
115 * If stream is NULL, _profile_print_stats will use stdout.
116 * Separate _profile_update_stats from _gprof_write.
117 * [1994/02/03 00:58:55 meissner]
119 * Combine _profile_{vars,stats,md}; Allow more than one _profile_vars.
120 * [1994/02/01 12:04:01 meissner]
122 * Add allocation flag to _profile_md_init.
123 * Fix core dumps in _profile_print_stats if no profile_vars ptr passed.
124 * Print numbers in 12 columns, not 8.
125 * Print my_cpu/max_cpu if max_cpu != 0.
126 * Make allocations print like other stats.
127 * Use ACONTEXT_FIRST to start loop on, not ACONTEXT_PROF.
128 * [1994/01/28 23:33:26 meissner]
130 * Move callback pointers into separate allocation context.
131 * Add size fields for other structures to profile-vars.
132 * [1994/01/26 20:23:37 meissner]
134 * Allocate initial memory at startup.
135 * Print structure sizes and version number when printing stats.
136 * Initialize size fields and version numbers.
137 * Allocation context pointers moved to _profile_vars.
138 * [1994/01/25 01:46:04 meissner]
140 * Move init code here from assembly language.
141 * [1994/01/22 01:13:21 meissner]
143 * Include <profile/profile-internal.h> instead of "profile-md.h".
144 * [1994/01/20 20:56:49 meissner]
147 * [1994/01/18 23:08:02 meissner]
149 * Rename profile.h -> profile-md.h.
150 * [1994/01/18 19:44:57 meissner]
152 * Write out stats unused fields.
153 * Make _prof_write write out the prof stats gprof collects.
154 * [1994/01/15 18:40:37 meissner]
156 * Remove debug code called from profile-asm.s.
157 * Always print out the # of profil buckets.
158 * [1994/01/15 00:59:06 meissner]
161 * [1994/01/04 16:34:46 meissner]
163 * Move max hash bucket calculation into _gprof_write & put info in stats structure.
164 * [1994/01/04 16:15:17 meissner]
166 * Use _profile_printf to write diagnostics; add diag_stream to hold stream to write to.
167 * [1994/01/04 15:37:46 meissner]
169 * Correctly handle case where more than one allocation context was
170 * allocated due to multiple threads.
171 * Cast stats to long for output.
172 * Print number of profil buckets field in _profile_stats.
173 * Add support for GFUNC allocation context.
174 * [1994/01/04 14:26:00 meissner]
176 * CR 10198 - Initial version.
177 * [1994/01/01 22:44:10 meissne
182 #include <profiling/profile-internal.h>
183 #include <vm/vm_kern.h>
187 #if defined(MACH_KERNEL) || defined(_KERNEL)
189 #include <mach_assert.h>
190 #if MACH_ASSERT && !defined(DEBUG_PROFILE)
191 #define DEBUG_PROFILE 1
196 #define panic(str) exit(1)
199 #ifndef PROFILE_NUM_FUNCS
200 #define PROFILE_NUM_FUNCS 2000
203 #ifndef PROFILE_NUM_ARCS
204 #define PROFILE_NUM_ARCS 8000
208 * Information passed on from profile-asm.s
211 extern int _profile_do_stats
;
212 extern size_t _profile_size
;
213 extern size_t _profile_stats_size
;
214 extern size_t _profile_md_size
;
215 extern size_t _profile_profil_size
;
216 extern size_t _profile_hash_size
;
219 * All profiling variables, and a dummy gprof record.
222 struct profile_vars _profile_vars
= { 0 };
223 struct hasharc _gprof_dummy
= { 0 };
226 * Forward references.
229 static void *_profile_md_acontext(struct profile_vars
*pv
,
232 acontext_type_t type
);
234 static void _profile_reset_alloc(struct profile_vars
*,
237 extern void _bogus_function(void);
241 struct profile_vars
*_profile_vars_cpus
[NCPUS
] = { &_profile_vars
};
242 struct profile_vars _profile_vars_aux
[NCPUS
-1];
243 #define PROFILE_VARS(cpu) (_profile_vars_cpus[(cpu)])
245 #define PROFILE_VARS(cpu) (&_profile_vars)
249 _profile_alloc_pages (size_t size
)
254 * For the MK, we can't support allocating pages at runtime, because we
255 * might be at interrupt level, so abort if we didn't size the table
259 if (PROFILE_VARS(0)->active
) {
260 panic("Call to _profile_alloc_pages while profiling is running.");
263 if (kmem_alloc(kernel_map
, &addr
, size
)) {
264 panic("Could not allocate memory for profiling");
267 memset((void *)addr
, '\0', size
);
268 if (PROFILE_VARS(0)->debug
) {
269 printf("Allocated %d bytes for profiling, address 0x%x\n", (int)size
, (int)addr
);
272 return((caddr_t
)addr
);
276 _profile_free_pages(void *addr
, size_t size
)
278 if (PROFILE_VARS(0)->debug
) {
279 printf("Freed %d bytes for profiling, address 0x%x\n", (int)size
, (int)addr
);
282 kmem_free(kernel_map
, (vm_offset_t
)addr
, size
);
/*
 * Fatal-error hook invoked when a profiling write fails; never returns.
 */
void _profile_error(struct profile_vars *pv)
{
	panic("Fatal error in profiling");
}
293 * Function to set up the initial allocation for a context block.
297 _profile_md_acontext(struct profile_vars
*pv
,
300 acontext_type_t type
)
303 struct alloc_context context
;
304 struct page_list plist
;
308 struct memory
*mptr
= (struct memory
*)ptr
;
309 struct alloc_context
*context
= &mptr
->context
;
310 struct page_list
*plist
= &mptr
->plist
;
313 _profile_printf("_profile_md_acontext: pv= 0x%lx, ptr= 0x%lx, len= %6ld, type= %d\n",
320 /* Fill in context block header */
321 context
->next
= pv
->acontext
[type
];
322 context
->plist
= plist
;
325 /* Fill in first page list information */
326 plist
->ptr
= plist
->first
= (void *)&mptr
->data
[0];
327 plist
->next
= (struct page_list
*)0;
328 plist
->bytes_free
= len
- ((char *)plist
->ptr
- (char *)ptr
);
329 plist
->bytes_allocated
= 0;
330 plist
->num_allocations
= 0;
332 /* Update statistics */
333 pv
->stats
.num_context
[type
]++;
334 pv
->stats
.wasted
[type
] += plist
->bytes_free
;
335 pv
->stats
.overhead
[type
] += len
- plist
->bytes_free
;
337 /* And setup context block */
338 pv
->acontext
[type
] = context
;
340 return (void *)((char *)ptr
+len
);
345 * Machine dependent function to initialize things.
349 _profile_md_init(struct profile_vars
*pv
,
351 profile_alloc_mem_t alloc_mem
)
353 size_t page_size
= pv
->page_size
;
358 size_t extra_arc_size
;
359 size_t extra_func_size
;
360 size_t callback_size
= page_size
;
365 size_t c_size
; /* size C thinks structure is */
366 size_t *asm_size_ptr
; /* pointer to size asm thinks struct is */
367 const char *name
; /* structure name */
369 { sizeof(struct profile_profil
), &_profile_profil_size
, "profile_profil" },
370 { sizeof(struct profile_stats
), &_profile_stats_size
, "profile_stats" },
371 { sizeof(struct profile_md
), &_profile_md_size
, "profile_md" },
372 { sizeof(struct profile_vars
), &_profile_size
, "profile_vars" }};
375 _profile_printf("_profile_md_init: pv = 0x%lx, type = %d, alloc = %d\n",
381 for (i
= 0; i
< sizeof (sizes
) / sizeof(sizes
[0]); i
++) {
382 if (sizes
[i
].c_size
!= *sizes
[i
].asm_size_ptr
) {
383 _profile_printf("C thinks struct %s is %ld bytes, asm thinks it is %ld bytes\n",
385 (long)sizes
[i
].c_size
,
386 (long)*sizes
[i
].asm_size_ptr
);
388 panic(sizes
[i
].name
);
392 /* Figure out which function will handle compiler generated profiling */
393 if (type
== PROFILE_GPROF
) {
394 pv
->md
.save_mcount_ptr
= _gprof_mcount
;
396 } else if (type
== PROFILE_PROF
) {
397 pv
->md
.save_mcount_ptr
= _prof_mcount
;
400 pv
->md
.save_mcount_ptr
= _dummy_mcount
;
403 pv
->vars_size
= sizeof(struct profile_vars
);
404 pv
->plist_size
= sizeof(struct page_list
);
405 pv
->acontext_size
= sizeof(struct alloc_context
);
406 pv
->callback_size
= sizeof(struct callback
);
407 pv
->major_version
= PROFILE_MAJOR_VERSION
;
408 pv
->minor_version
= PROFILE_MINOR_VERSION
;
413 pv
->output_uarea
= 1;
414 pv
->output_stats
= (prof_flag_t
) _profile_do_stats
;
415 pv
->output_clock
= 1;
416 pv
->multiple_sections
= 1;
418 pv
->bogus_func
= _bogus_function
;
424 if (!pv
->error_msg
) {
425 pv
->error_msg
= "error in profiling";
428 if (!pv
->page_size
) {
429 pv
->page_size
= 4096;
432 pv
->stats
.stats_size
= sizeof(struct profile_stats
);
433 pv
->stats
.major_version
= PROFILE_MAJOR_VERSION
;
434 pv
->stats
.minor_version
= PROFILE_MINOR_VERSION
;
436 pv
->md
.md_size
= sizeof(struct profile_md
);
437 pv
->md
.major_version
= PROFILE_MAJOR_VERSION
;
438 pv
->md
.minor_version
= PROFILE_MINOR_VERSION
;
439 pv
->md
.hash_size
= _profile_hash_size
;
440 pv
->md
.num_cache
= MAX_CACHE
;
441 pv
->md
.mcount_ptr_ptr
= &_mcount_ptr
;
442 pv
->md
.dummy_ptr
= &_gprof_dummy
;
443 pv
->md
.alloc_pages
= _profile_alloc_pages
;
445 /* zero out all allocation context blocks */
446 for (ac
= ACONTEXT_FIRST
; ac
< ACONTEXT_MAX
; ac
++) {
447 pv
->acontext
[ac
] = (struct alloc_context
*)0;
450 /* Don't allocate memory if not desired */
455 /* Allocate some space for the initial allocations */
458 misc_size
= page_size
;
459 ptr
= _profile_alloc_pages(misc_size
+ callback_size
);
460 ptr
= _profile_md_acontext(pv
, ptr
, misc_size
, ACONTEXT_MISC
);
461 ptr
= _profile_md_acontext(pv
, ptr
, callback_size
, ACONTEXT_CALLBACK
);
466 #if defined(MACH_KERNEL) || defined(_KERNEL)
468 * For the MK & server allocate some slop space now for the
469 * secondary context blocks in case allocations are done at
470 * interrupt level when another allocation is being done. This
471 * is done before the main allocation blocks and will be pushed
472 * so that it will only be used when the main allocation block
475 extra_arc_size
= 4*page_size
;
476 extra_func_size
= 2*page_size
;
478 extra_arc_size
= extra_func_size
= 0;
481 /* Set up allocation areas */
482 arc_size
= ROUNDUP(PROFILE_NUM_ARCS
* sizeof(struct hasharc
), page_size
);
483 func_size
= ROUNDUP(PROFILE_NUM_FUNCS
* sizeof(struct gfuncs
), page_size
);
484 hash_size
= _profile_hash_size
* sizeof (struct hasharc
*);
485 misc_size
= ROUNDUP(hash_size
+ page_size
, page_size
);
487 ptr
= _profile_alloc_pages(arc_size
494 #if defined(MACH_KERNEL) || defined(_KERNEL)
495 ptr
= _profile_md_acontext(pv
, ptr
, extra_arc_size
, ACONTEXT_GPROF
);
496 ptr
= _profile_md_acontext(pv
, ptr
, extra_func_size
, ACONTEXT_GFUNC
);
498 ptr
= _profile_md_acontext(pv
, ptr
, arc_size
, ACONTEXT_GPROF
);
499 ptr
= _profile_md_acontext(pv
, ptr
, func_size
, ACONTEXT_GFUNC
);
500 ptr
= _profile_md_acontext(pv
, ptr
, misc_size
, ACONTEXT_MISC
);
501 ptr
= _profile_md_acontext(pv
, ptr
, callback_size
, ACONTEXT_CALLBACK
);
503 /* Allocate hash table */
504 pv
->md
.hash_ptr
= (struct hasharc
**) _profile_alloc(pv
, hash_size
, ACONTEXT_MISC
);
508 /* Set up allocation areas */
509 func_size
= ROUNDUP(PROFILE_NUM_FUNCS
* sizeof(struct prof_ext
), page_size
);
510 misc_size
= page_size
;
512 ptr
= _profile_alloc_pages(func_size
516 ptr
= _profile_md_acontext(pv
, ptr
, func_size
, ACONTEXT_PROF
);
517 ptr
= _profile_md_acontext(pv
, ptr
, misc_size
, ACONTEXT_MISC
);
518 ptr
= _profile_md_acontext(pv
, ptr
, callback_size
, ACONTEXT_CALLBACK
);
525 * Machine dependent functions to start and stop profiling.
529 _profile_md_start(void)
531 _mcount_ptr
= _profile_vars
.md
.save_mcount_ptr
;
536 _profile_md_stop(void)
538 _mcount_ptr
= _dummy_mcount
;
544 * Free up all memory in a memory context block.
548 _profile_reset_alloc(struct profile_vars
*pv
, acontext_type_t ac
)
550 struct alloc_context
*aptr
;
551 struct page_list
*plist
;
553 for (aptr
= pv
->acontext
[ac
];
554 aptr
!= (struct alloc_context
*)0;
557 for (plist
= aptr
->plist
;
558 plist
!= (struct page_list
*)0;
559 plist
= plist
->next
) {
561 plist
->ptr
= plist
->first
;
562 plist
->bytes_free
+= plist
->bytes_allocated
;
563 plist
->bytes_allocated
= 0;
564 plist
->num_allocations
= 0;
565 memset(plist
->first
, '\0', plist
->bytes_allocated
);
572 * Reset profiling. Since the only user of this function is the kernel
573 * and the server, we don't have to worry about other stuff than gprof.
577 _profile_reset(struct profile_vars
*pv
)
579 struct alloc_context
*aptr
;
580 struct page_list
*plist
;
581 struct gfuncs
*gfunc
;
587 /* Reset all function unique pointers back to 0 */
588 for (aptr
= pv
->acontext
[ACONTEXT_GFUNC
];
589 aptr
!= (struct alloc_context
*)0;
592 for (plist
= aptr
->plist
;
593 plist
!= (struct page_list
*)0;
594 plist
= plist
->next
) {
596 for (gfunc
= (struct gfuncs
*)plist
->first
;
597 gfunc
< (struct gfuncs
*)plist
->ptr
;
600 *(gfunc
->unique_ptr
) = (struct hasharc
*)0;
606 _profile_reset_alloc(pv
, ACONTEXT_GPROF
);
607 _profile_reset_alloc(pv
, ACONTEXT_GFUNC
);
608 _profile_reset_alloc(pv
, ACONTEXT_PROF
);
610 memset((void *)pv
->profil_buf
, '\0', pv
->profil_info
.profil_len
);
611 memset((void *)pv
->md
.hash_ptr
, '\0', pv
->md
.hash_size
* sizeof(struct hasharc
*));
612 memset((void *)&pv
->stats
, '\0', sizeof(pv
->stats
));
614 pv
->stats
.stats_size
= sizeof(struct profile_stats
);
615 pv
->stats
.major_version
= PROFILE_MAJOR_VERSION
;
616 pv
->stats
.minor_version
= PROFILE_MINOR_VERSION
;
625 * Machine dependent function to write out gprof records.
629 _gprof_write(struct profile_vars
*pv
, struct callback
*callback_ptr
)
631 struct alloc_context
*aptr
;
632 struct page_list
*plist
;
634 struct hasharc
*hptr
;
637 for (aptr
= pv
->acontext
[ACONTEXT_GPROF
];
638 aptr
!= (struct alloc_context
*)0;
641 for (plist
= aptr
->plist
; plist
!= (struct page_list
*)0; plist
= plist
->next
) {
642 hptr
= (struct hasharc
*)plist
->first
;
643 for (i
= 0; i
< plist
->num_allocations
; (i
++, hptr
++)) {
645 struct gprof_arc arc
= hptr
->arc
;
646 int nrecs
= 1 + (hptr
->overflow
* 2);
649 if (pv
->check_funcs
) {
650 if (arc
.frompc
< pv
->profil_info
.lowpc
||
651 arc
.frompc
> pv
->profil_info
.highpc
) {
653 arc
.frompc
= (prof_uptrint_t
)pv
->bogus_func
;
656 if (arc
.selfpc
< pv
->profil_info
.lowpc
||
657 arc
.selfpc
> pv
->profil_info
.highpc
) {
659 arc
.selfpc
= (prof_uptrint_t
)pv
->bogus_func
;
663 /* For each overflow, emit 2 extra records with the count
665 for (j
= 0; j
< nrecs
; j
++) {
666 bytes
+= sizeof (arc
);
667 if ((*pv
->fwrite_func
)((void *)&arc
,
675 arc
.count
= 0x80000000;
686 * Machine dependent function to write out prof records.
690 _prof_write(struct profile_vars
*pv
, struct callback
*callback_ptr
)
692 struct alloc_context
*aptr
;
693 struct page_list
*plist
;
695 struct prof_ext prof_st
;
696 struct prof_int
*pptr
;
701 /* Write out information prof_mcount collects */
702 for (aptr
= pv
->acontext
[ACONTEXT_PROF
];
703 aptr
!= (struct alloc_context
*)0;
706 for (plist
= aptr
->plist
; plist
!= (struct page_list
*)0; plist
= plist
->next
) {
707 pptr
= (struct prof_int
*)plist
->first
;
709 for (i
= 0; i
< plist
->num_allocations
; (i
++, pptr
++)) {
711 /* Write out 2 records for each overflow, each with a
712 count of 0x80000000 + the normal record */
713 prof_st
= pptr
->prof
;
714 nrecs
= 1 + (pptr
->overflow
* 2);
716 for (j
= 0; j
< nrecs
; j
++) {
717 bytes
+= sizeof (struct prof_ext
);
718 if ((*pv
->fwrite_func
)((void *)&prof_st
,
726 prof_st
.cncall
= 0x80000000;
732 /* Now write out the prof information that gprof collects */
733 for (aptr
= pv
->acontext
[ACONTEXT_GFUNC
];
734 aptr
!= (struct alloc_context
*)0;
737 for (plist
= aptr
->plist
; plist
!= (struct page_list
*)0; plist
= plist
->next
) {
738 gptr
= (struct gfuncs
*)plist
->first
;
740 for (i
= 0; i
< plist
->num_allocations
; (i
++, gptr
++)) {
742 /* Write out 2 records for each overflow, each with a
743 count of 0x80000000 + the normal record */
744 prof_st
= gptr
->prof
.prof
;
745 nrecs
= 1 + (gptr
->prof
.overflow
* 2);
747 for (j
= 0; j
< nrecs
; j
++) {
748 bytes
+= sizeof (struct prof_ext
);
749 if ((*pv
->fwrite_func
)((void *)&prof_st
,
757 prof_st
.cncall
= 0x80000000;
768 * Update any statistics. For the 386, calculate the hash table loading factor.
769 * Also figure out how many overflows occurred.
773 _profile_update_stats(struct profile_vars
*pv
)
775 struct alloc_context
*aptr
;
776 struct page_list
*plist
;
777 struct hasharc
*hptr
;
778 struct prof_int
*pptr
;
783 for(i
= 0; i
< MAX_BUCKETS
+1; i
++) {
784 pv
->stats
.buckets
[i
] = 0;
787 pv
->stats
.hash_buckets
= 0;
789 if (pv
->md
.hash_ptr
) {
790 for (i
= 0; i
< pv
->md
.hash_size
; i
++) {
792 struct hasharc
*hptr
;
794 for (hptr
= pv
->md
.hash_ptr
[i
]; hptr
; hptr
= hptr
->next
) {
798 pv
->stats
.buckets
[ (nbuckets
< MAX_BUCKETS
) ? nbuckets
: MAX_BUCKETS
]++;
799 if (pv
->stats
.hash_buckets
< nbuckets
) {
800 pv
->stats
.hash_buckets
= nbuckets
;
805 /* Count how many times functions are out of bounds */
806 if (pv
->check_funcs
) {
807 pv
->stats
.bogus_count
= 0;
809 for (aptr
= pv
->acontext
[ACONTEXT_GPROF
];
810 aptr
!= (struct alloc_context
*)0;
813 for (plist
= aptr
->plist
;
814 plist
!= (struct page_list
*)0;
815 plist
= plist
->next
) {
817 hptr
= (struct hasharc
*)plist
->first
;
818 for (i
= 0; i
< plist
->num_allocations
; (i
++, hptr
++)) {
820 if (hptr
->arc
.frompc
< pv
->profil_info
.lowpc
||
821 hptr
->arc
.frompc
> pv
->profil_info
.highpc
) {
822 pv
->stats
.bogus_count
++;
825 if (hptr
->arc
.selfpc
< pv
->profil_info
.lowpc
||
826 hptr
->arc
.selfpc
> pv
->profil_info
.highpc
) {
827 pv
->stats
.bogus_count
++;
834 /* Figure out how many overflows occurred */
835 PROF_ULONG_TO_CNT(pv
->stats
.prof_overflow
, 0);
836 PROF_ULONG_TO_CNT(pv
->stats
.gprof_overflow
, 0);
838 for (aptr
= pv
->acontext
[ACONTEXT_GPROF
];
839 aptr
!= (struct alloc_context
*)0;
842 for (plist
= aptr
->plist
;
843 plist
!= (struct page_list
*)0;
844 plist
= plist
->next
) {
846 hptr
= (struct hasharc
*)plist
->first
;
847 for (i
= 0; i
< plist
->num_allocations
; (i
++, hptr
++)) {
848 PROF_CNT_ADD(pv
->stats
.gprof_overflow
, hptr
->overflow
);
853 for (aptr
= pv
->acontext
[ACONTEXT_PROF
];
854 aptr
!= (struct alloc_context
*)0;
857 for (plist
= aptr
->plist
;
858 plist
!= (struct page_list
*)0;
859 plist
= plist
->next
) {
861 pptr
= (struct prof_int
*)plist
->first
;
862 for (i
= 0; i
< plist
->num_allocations
; (i
++, pptr
++)) {
863 PROF_CNT_ADD(pv
->stats
.prof_overflow
, pptr
->overflow
);
868 for (aptr
= pv
->acontext
[ACONTEXT_GFUNC
];
869 aptr
!= (struct alloc_context
*)0;
872 for (plist
= aptr
->plist
;
873 plist
!= (struct page_list
*)0;
874 plist
= plist
->next
) {
876 fptr
= (struct gfuncs
*)plist
->first
;
877 for (i
= 0; i
< plist
->num_allocations
; (i
++, fptr
++)) {
878 PROF_CNT_ADD(pv
->stats
.prof_overflow
, fptr
->prof
.overflow
);
883 /* Now go through & count how many times the LHISTCOUNTER overflowed into a 2nd word */
884 lptr
= (LHISTCOUNTER
*)pv
->profil_buf
;
886 if (pv
->use_profil
&&
887 pv
->profil_info
.counter_size
== sizeof(LHISTCOUNTER
) &&
888 lptr
!= (LHISTCOUNTER
*)0) {
890 PROF_ULONG_TO_CNT(pv
->stats
.overflow_ticks
, 0);
891 for (i
= 0; i
< pv
->stats
.profil_buckets
; i
++) {
892 PROF_CNT_ADD(pv
->stats
.overflow_ticks
, lptr
[i
].high
);
897 #if !defined(_KERNEL) && !defined(MACH_KERNEL)
900 * Routine callable from the debugger that prints the statistics.
903 int _profile_debug(void)
905 _profile_update_stats(&_profile_vars
);
906 _profile_print_stats(stderr
, &_profile_vars
.stats
, &_profile_vars
.profil_info
);
911 * Print the statistics structure in a meaningful way.
914 void _profile_print_stats(FILE *stream
,
915 const struct profile_stats
*stats
,
916 const struct profile_profil
*pinfo
)
919 prof_cnt_t total_hits
;
923 int width_wasted
= 0;
924 int width_overhead
= 0;
925 int width_context
= 0;
926 static const char *cname
[ACONTEXT_MAX
] = ACONTEXT_NAMES
;
937 sprintf(buf
, "%ld.%ld", (long)stats
->major_version
, (long)stats
->minor_version
);
938 fprintf(stream
, "%12s profiling version number\n", buf
);
939 fprintf(stream
, "%12lu size of profile_vars\n", (long unsigned)sizeof(struct profile_vars
));
940 fprintf(stream
, "%12lu size of profile_stats\n", (long unsigned)sizeof(struct profile_stats
));
941 fprintf(stream
, "%12lu size of profile_md\n", (long unsigned)sizeof(struct profile_md
));
942 fprintf(stream
, "%12s calls to _{,g}prof_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->cnt
));
943 fprintf(stream
, "%12s calls to old mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->old_mcount
));
944 fprintf(stream
, "%12s calls to _dummy_mcount\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->dummy
));
945 fprintf(stream
, "%12lu functions profiled\n", (long unsigned)stats
->prof_records
);
946 fprintf(stream
, "%12lu gprof arcs\n", (long unsigned)stats
->gprof_records
);
949 fprintf(stream
, "%12lu profil buckets\n", (long unsigned)stats
->profil_buckets
);
950 fprintf(stream
, "%12lu profil lowpc [0x%lx]\n",
951 (long unsigned)pinfo
->lowpc
,
952 (long unsigned)pinfo
->lowpc
);
954 fprintf(stream
, "%12lu profil highpc [0x%lx]\n",
955 (long unsigned)pinfo
->highpc
,
956 (long unsigned)pinfo
->highpc
);
958 fprintf(stream
, "%12lu profil highpc-lowpc\n", (long unsigned)(pinfo
->highpc
- pinfo
->lowpc
));
959 fprintf(stream
, "%12lu profil buffer length\n", (long unsigned)pinfo
->profil_len
);
960 fprintf(stream
, "%12lu profil sizeof counters\n", (long unsigned)pinfo
->counter_size
);
961 fprintf(stream
, "%12lu profil scale (%g)\n",
962 (long unsigned)pinfo
->scale
,
963 ((double)pinfo
->scale
) / ((double) 0x10000));
966 for (i
= 0; i
< sizeof (pinfo
->profil_unused
) / sizeof (pinfo
->profil_unused
[0]); i
++) {
967 if (pinfo
->profil_unused
[i
]) {
968 fprintf(stream
, "%12lu profil unused[%2d] {0x%.8lx}\n",
969 (long unsigned)pinfo
->profil_unused
[i
],
971 (long unsigned)pinfo
->profil_unused
[i
]);
976 if (stats
->max_cpu
) {
977 fprintf(stream
, "%12lu current cpu/thread\n", (long unsigned)stats
->my_cpu
);
978 fprintf(stream
, "%12lu max cpu/thread+1\n", (long unsigned)stats
->max_cpu
);
981 if (stats
->bogus_count
!= 0) {
983 "%12lu gprof functions found outside of range\n",
984 (long unsigned)stats
->bogus_count
);
987 if (PROF_CNT_NE_0(stats
->too_low
)) {
989 "%12s histogram ticks were too low\n",
990 PROF_CNT_TO_DECIMAL((char *)0, stats
->too_low
));
993 if (PROF_CNT_NE_0(stats
->too_high
)) {
995 "%12s histogram ticks were too high\n",
996 PROF_CNT_TO_DECIMAL((char *)0, stats
->too_high
));
999 if (PROF_CNT_NE_0(stats
->acontext_locked
)) {
1001 "%12s times an allocation context was locked\n",
1002 PROF_CNT_TO_DECIMAL((char *)0, stats
->acontext_locked
));
1005 if (PROF_CNT_NE_0(stats
->kernel_ticks
)
1006 || PROF_CNT_NE_0(stats
->user_ticks
)
1007 || PROF_CNT_NE_0(stats
->idle_ticks
)) {
1009 prof_cnt_t total_ticks
;
1010 long double total_ticks_dbl
;
1012 total_ticks
= stats
->kernel_ticks
;
1013 PROF_CNT_LADD(total_ticks
, stats
->user_ticks
);
1014 PROF_CNT_LADD(total_ticks
, stats
->idle_ticks
);
1015 total_ticks_dbl
= PROF_CNT_TO_LDOUBLE(total_ticks
);
1018 "%12s total ticks\n",
1019 PROF_CNT_TO_DECIMAL((char *)0, total_ticks
));
1022 "%12s ticks within the kernel (%5.2Lf%%)\n",
1023 PROF_CNT_TO_DECIMAL((char *)0, stats
->kernel_ticks
),
1024 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->kernel_ticks
) / total_ticks_dbl
));
1027 "%12s ticks within user space (%5.2Lf%%)\n",
1028 PROF_CNT_TO_DECIMAL((char *)0, stats
->user_ticks
),
1029 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->user_ticks
) / total_ticks_dbl
));
1032 "%12s ticks idle (%5.2Lf%%)\n",
1033 PROF_CNT_TO_DECIMAL((char *)0, stats
->idle_ticks
),
1034 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->idle_ticks
) / total_ticks_dbl
));
1037 if (PROF_CNT_NE_0(stats
->overflow_ticks
)) {
1038 fprintf(stream
, "%12s times a HISTCOUNTER counter would have overflowed\n",
1039 PROF_CNT_TO_DECIMAL((char *)0, stats
->overflow_ticks
));
1042 if (PROF_CNT_NE_0(stats
->hash_num
)) {
1043 long double total_buckets
= 0.0L;
1045 for (i
= 0; i
<= MAX_BUCKETS
; i
++) {
1046 total_buckets
+= (long double)stats
->buckets
[i
];
1049 fprintf(stream
, "%12lu max bucket(s) on hash chain.\n", (long unsigned)stats
->hash_buckets
);
1050 for (i
= 0; i
< MAX_BUCKETS
; i
++) {
1051 if (stats
->buckets
[i
] != 0) {
1052 fprintf(stream
, "%12lu bucket(s) had %d entries (%5.2Lf%%)\n",
1053 (long unsigned)stats
->buckets
[i
], i
,
1054 100.0L * ((long double)stats
->buckets
[i
] / total_buckets
));
1058 if (stats
->buckets
[MAX_BUCKETS
] != 0) {
1059 fprintf(stream
, "%12lu bucket(s) had more than %d entries (%5.2Lf%%)\n",
1060 (long unsigned)stats
->buckets
[MAX_BUCKETS
], MAX_BUCKETS
,
1061 100.0L * ((long double)stats
->buckets
[MAX_BUCKETS
] / total_buckets
));
1065 PROF_ULONG_TO_CNT(total_hits
, 0);
1066 for (i
= 0; i
< MAX_CACHE
; i
++) {
1067 PROF_CNT_LADD(total_hits
, stats
->cache_hits
[i
]);
1070 if (PROF_CNT_NE_0(total_hits
)) {
1071 long double total
= PROF_CNT_TO_LDOUBLE(stats
->cnt
);
1072 long double total_hits_dbl
= PROF_CNT_TO_LDOUBLE(total_hits
);
1075 "%12s cache hits (%.2Lf%%)\n",
1076 PROF_CNT_TO_DECIMAL((char *)0, total_hits
),
1077 100.0L * (total_hits_dbl
/ total
));
1079 for (i
= 0; i
< MAX_CACHE
; i
++) {
1080 if (PROF_CNT_NE_0(stats
->cache_hits
[i
])) {
1082 "%12s times cache#%d matched (%5.2Lf%% of cache hits, %5.2Lf%% total)\n",
1083 PROF_CNT_TO_DECIMAL((char *)0, stats
->cache_hits
[i
]),
1085 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->cache_hits
[i
]) / total_hits_dbl
),
1086 100.0L * (PROF_CNT_TO_LDOUBLE(stats
->cache_hits
[i
]) / total
));
1090 if (PROF_CNT_NE_0(stats
->hash_num
)) {
1091 fprintf(stream
, "%12s times hash table searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->hash_num
));
1092 fprintf(stream
, "%12s hash buckets searched\n", PROF_CNT_TO_DECIMAL((char *)0, stats
->hash_search
));
1093 fprintf(stream
, "%12.4Lf average buckets searched\n",
1094 PROF_CNT_TO_LDOUBLE(stats
->hash_search
) / PROF_CNT_TO_LDOUBLE(stats
->hash_num
));
1098 for (i
= 0; i
< sizeof (stats
->stats_unused
) / sizeof (stats
->stats_unused
[0]); i
++) {
1099 if (PROF_CNT_NE_0(stats
->stats_unused
[i
])) {
1100 fprintf(stream
, "%12s unused[%2d] {0x%.8lx 0x%.8lx}\n",
1101 PROF_CNT_TO_DECIMAL((char *)0, stats
->stats_unused
[i
]),
1103 (unsigned long)stats
->stats_unused
[i
].high
,
1104 (unsigned long)stats
->stats_unused
[i
].low
);
1108 /* Get the width for the allocation contexts */
1109 for (ac
= ACONTEXT_FIRST
; ac
< ACONTEXT_MAX
; ac
++) {
1112 if (stats
->num_context
[ac
] == 0) {
1116 len
= strlen (cname
[ac
]);
1117 if (len
> width_cname
)
1120 len
= sprintf (buf
, "%lu", (long unsigned)stats
->num_alloc
[ac
]);
1121 if (len
> width_alloc
)
1124 len
= sprintf (buf
, "%lu", (long unsigned)stats
->wasted
[ac
]);
1125 if (len
> width_wasted
)
1128 len
= sprintf (buf
, "%lu", (long unsigned)stats
->overhead
[ac
]);
1129 if (len
> width_overhead
)
1130 width_overhead
= len
;
1132 len
= sprintf (buf
, "%lu", (long unsigned)stats
->num_context
[ac
]);
1133 if (len
> width_context
)
1134 width_context
= len
;
1137 /* Print info about allocation contexts */
1138 for (ac
= ACONTEXT_FIRST
; ac
< ACONTEXT_MAX
; ac
++) {
1139 if (stats
->num_context
[ac
] == 0) {
1144 "%12lu bytes in %-*s %*lu alloc, %*lu unused, %*lu over, %*lu context\n",
1145 (long unsigned)stats
->bytes_alloc
[ac
],
1146 width_cname
, cname
[ac
],
1147 width_alloc
, (long unsigned)stats
->num_alloc
[ac
],
1148 width_wasted
, (long unsigned)stats
->wasted
[ac
],
1149 width_overhead
, (long unsigned)stats
->overhead
[ac
],
1150 width_context
, (long unsigned)stats
->num_context
[ac
]);
1156 * Merge a new statistics field into an old one.
1159 void _profile_merge_stats(struct profile_stats
*old_stats
, const struct profile_stats
*new_stats
)
1163 /* If nothing passed, just return */
1164 if (!old_stats
|| !new_stats
)
1167 /* If the old_stats has not been initialized, just copy in the new stats */
1168 if (old_stats
->major_version
== 0) {
1169 *old_stats
= *new_stats
;
1171 /* Otherwise, update stats, field by field */
1173 if (old_stats
->prof_records
< new_stats
->prof_records
)
1174 old_stats
->prof_records
= new_stats
->prof_records
;
1176 if (old_stats
->gprof_records
< new_stats
->gprof_records
)
1177 old_stats
->gprof_records
= new_stats
->gprof_records
;
1179 if (old_stats
->hash_buckets
< new_stats
->hash_buckets
)
1180 old_stats
->hash_buckets
= new_stats
->hash_buckets
;
1182 if (old_stats
->bogus_count
< new_stats
->bogus_count
)
1183 old_stats
->bogus_count
= new_stats
->bogus_count
;
1185 PROF_CNT_LADD(old_stats
->cnt
, new_stats
->cnt
);
1186 PROF_CNT_LADD(old_stats
->dummy
, new_stats
->dummy
);
1187 PROF_CNT_LADD(old_stats
->old_mcount
, new_stats
->old_mcount
);
1188 PROF_CNT_LADD(old_stats
->hash_search
, new_stats
->hash_search
);
1189 PROF_CNT_LADD(old_stats
->hash_num
, new_stats
->hash_num
);
1190 PROF_CNT_LADD(old_stats
->user_ticks
, new_stats
->user_ticks
);
1191 PROF_CNT_LADD(old_stats
->kernel_ticks
, new_stats
->kernel_ticks
);
1192 PROF_CNT_LADD(old_stats
->idle_ticks
, new_stats
->idle_ticks
);
1193 PROF_CNT_LADD(old_stats
->overflow_ticks
, new_stats
->overflow_ticks
);
1194 PROF_CNT_LADD(old_stats
->acontext_locked
, new_stats
->acontext_locked
);
1195 PROF_CNT_LADD(old_stats
->too_low
, new_stats
->too_low
);
1196 PROF_CNT_LADD(old_stats
->too_high
, new_stats
->too_high
);
1197 PROF_CNT_LADD(old_stats
->prof_overflow
, new_stats
->prof_overflow
);
1198 PROF_CNT_LADD(old_stats
->gprof_overflow
, new_stats
->gprof_overflow
);
1200 for (i
= 0; i
< (int)ACONTEXT_MAX
; i
++) {
1201 if (old_stats
->num_alloc
[i
] < new_stats
->num_alloc
[i
])
1202 old_stats
->num_alloc
[i
] = new_stats
->num_alloc
[i
];
1204 if (old_stats
->bytes_alloc
[i
] < new_stats
->bytes_alloc
[i
])
1205 old_stats
->bytes_alloc
[i
] = new_stats
->bytes_alloc
[i
];
1207 if (old_stats
->num_context
[i
] < new_stats
->num_context
[i
])
1208 old_stats
->num_context
[i
] = new_stats
->num_context
[i
];
1210 if (old_stats
->wasted
[i
] < new_stats
->wasted
[i
])
1211 old_stats
->wasted
[i
] = new_stats
->wasted
[i
];
1213 if (old_stats
->overhead
[i
] < new_stats
->overhead
[i
])
1214 old_stats
->overhead
[i
] = new_stats
->overhead
[i
];
1218 for (i
= 0; i
< MAX_BUCKETS
+1; i
++) {
1219 if (old_stats
->buckets
[i
] < new_stats
->buckets
[i
])
1220 old_stats
->buckets
[i
] = new_stats
->buckets
[i
];
1223 for (i
= 0; i
< MAX_CACHE
; i
++) {
1224 PROF_CNT_LADD(old_stats
->cache_hits
[i
], new_stats
->cache_hits
[i
]);
1227 for (i
= 0; i
< sizeof(old_stats
->stats_unused
) / sizeof(old_stats
->stats_unused
[0]); i
++) {
1228 PROF_CNT_LADD(old_stats
->stats_unused
[i
], new_stats
->stats_unused
[i
]);
1237 * Invalid function address used when checking of function addresses is
1238 * desired for gprof arcs, and we discover an address out of bounds.
1239 * There should be no callers of this function.
1243 _bogus_function(void)