/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#include <mach_prof.h>

#include <mach/task_server.h>
#include <mach/thread_act_server.h>

#if MACH_PROF
#include <kern/thread.h>
#include <kern/queue.h>
#include <kern/profile.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <ipc/ipc_space.h>
#include <machine/machparam.h>
#include <mach/prof.h>

thread_t profile_thread_id = THREAD_NULL;
int      profile_sample_count = 0;      /* Provided for looking at from kdb. */
extern kern_return_t task_suspend(task_t task); /* ack */

/* Forwards */
prof_data_t pbuf_alloc(void);
void        pbuf_free(
                prof_data_t pbuf);
void        profile_thread(void);
void        send_last_sample_buf(
                prof_data_t pbuf);
/*
 *****************************************************************************
 * profile_thread is the profile/trace kernel support thread.  It is started
 * by a server/user request through task_sample or thread_sample.  The profile
 * thread dequeues full sample buffers and sends them to the receive_prof
 * thread in the server via the send_samples and send_notices MIG interface
 * functions.  If there are no buffers in the queue, the profile thread blocks
 * until it is awakened by profile (called from mach_clock) or by
 * send_last_sample_buf (called from thread_sample/task_sample).
 */

void
profile_thread(void)
{
    spl_t           s;
    buffer_t        buf_entry;
    queue_entry_t   prof_queue_entry;
    prof_data_t     pbuf;
    kern_return_t   kr;
    int             j;

    /* Initialise the queue header for the prof_queue */
    mpqueue_init(&prof_queue);

    while (TRUE) {

        /* Dequeue the first buffer. */
        s = splsched();
        mpdequeue_head(&prof_queue, &prof_queue_entry);
        splx(s);

        if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) {
            assert_wait((event_t) profile_thread, THREAD_UNINT);
            thread_block(THREAD_CONTINUE_NULL);
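            /*
             * The wait above is normally ended by an explicit
             * thread_wakeup() from profile() or send_last_sample_buf().
             * Any other wait result is treated as the signal for the
             * profile thread to shut down (see the exit path below).
             */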
            if (current_thread()->wait_result != THREAD_AWAKENED)
                break;
        } else {
            int dropped;

            pbuf = buf_entry->p_prof;
            kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
                    (mach_msg_type_number_t)buf_entry->p_index);
            profile_sample_count += buf_entry->p_index;
            if (kr != KERN_SUCCESS)
                printf("send_samples(%x, %x, %d) error %x\n",
                    pbuf->prof_port, buf_entry->p_zone,
                    buf_entry->p_index, kr);
            dropped = buf_entry->p_dropped;
            if (dropped > 0) {
                printf("kernel: profile dropped %d sample%s\n", dropped,
                    dropped == 1 ? "" : "s");
                buf_entry->p_dropped = 0;
            }

            /* Indicate you've finished the dirty job */
            buf_entry->p_full = FALSE;
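            /*
             * p_wakeme is set by send_last_sample_buf(), which blocks on
             * &buf_entry->p_wakeme until its buffer has been handed to the
             * server; wake it now that the send is done.
             */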
            if (buf_entry->p_wakeme)
                thread_wakeup((event_t) &buf_entry->p_wakeme);
        }

    }
    /* The profile thread has been signalled to exit.  Any threads waiting
       for the last buffer of samples to be acknowledged should be woken
       up now. */
    profile_thread_id = THREAD_NULL;
    while (1) {
        s = splsched();
        mpdequeue_head(&prof_queue, &prof_queue_entry);
        splx(s);
        if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF)
            break;
        if (buf_entry->p_wakeme)
            thread_wakeup((event_t) &buf_entry->p_wakeme);
    }
#if 0   /* XXXXX */
    thread_halt_self();
#else
    panic("profile_thread(): halt_self");
#endif  /* XXXXX */
}

/*
 *****************************************************************************
 * send_last_sample_buf is the drain mechanism that allows a partially filled
 * profile buffer to be sent to the receive_prof thread in the server.
 *****************************************************************************
 */
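/*
 * Called from thread_sample/task_sample when sampling is being turned off,
 * so that whatever is in the current (partially filled) buffer still reaches
 * the server before the buffer is freed.
 */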

void
send_last_sample_buf(prof_data_t pbuf)
{
    spl_t    s;
    buffer_t buf_entry;

    if (pbuf == NULLPROFDATA)
        return;

    /* Ask for the sending of the last PC buffer.
     * Make a request to the profile_thread by inserting
     * the buffer in the send queue, and wake it up.
     * The caller then blocks (via p_wakeme) until the
     * profile_thread has sent the buffer.
     */
    buf_entry = pbuf->prof_area + pbuf->prof_index;
    buf_entry->p_prof = pbuf;

    /*
     * Watch out in case the profile thread exits while we are about to
     * queue data for it.
     */
    s = splsched();
    if (profile_thread_id == THREAD_NULL)
        splx(s);
    else {
        buf_entry->p_wakeme = 1;
        mpenqueue_tail(&prof_queue, &buf_entry->p_list);
        thread_wakeup((event_t) profile_thread);
        assert_wait((event_t) &buf_entry->p_wakeme, THREAD_ABORTSAFE);
        splx(s);
        thread_block(THREAD_CONTINUE_NULL);
    }
}


/*
 *****************************************************************************
 * Add clock tick parameters to the profile/trace buffers.  Called from the
 * mach_clock hertz_tick function.  The DCI version stores thread, sp, and pc
 * values into the profile/trace buffers; the MACH_PROF version stores only
 * pc values.
 *****************************************************************************
 */

void
profile(natural_t   pc,
        prof_data_t pbuf)
{
    natural_t inout_val = pc;
    buffer_t  buf_entry;

    if (pbuf == NULLPROFDATA)
        return;

    /* Inserts the PC value in the buffer of the thread */
    set_pbuf_value(pbuf, &inout_val);
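
    /*
     * set_pbuf_value (see kern/profile.h) reports back through inout_val,
     * handled case by case below:
     *   0 - the sample was not stored (the current buffer is still full),
     *   1 - the sample was stored,
     *   2 - the sample was stored and the current buffer is now full.
     */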
    switch ((int)inout_val) {
    case 0:
        if (profile_thread_id == THREAD_NULL) {
            reset_pbuf_area(pbuf);
        }
        break;
    case 1:
        /* Normal case, value successfully inserted */
        break;
    case 2:
        /*
         * The value we have just inserted caused the
         * buffer to be full, and ready to be sent.
         * If profile_thread_id is null, the profile
         * thread has been killed.  Since this generally
         * happens only when the O/S server task of which
         * it is a part is killed, it is not a great loss
         * to throw away the data.
         */
        if (profile_thread_id == THREAD_NULL) {
            reset_pbuf_area(pbuf);
            break;
        }

        buf_entry = (buffer_t) &pbuf->prof_area[pbuf->prof_index];
        buf_entry->p_prof = pbuf;
        mpenqueue_tail(&prof_queue, &buf_entry->p_list);

        /* Switch to another buffer */
        reset_pbuf_area(pbuf);

        /* Wake up the profile thread */
        if (profile_thread_id != THREAD_NULL)
            thread_wakeup((event_t) profile_thread);
        break;

    default:
        printf("profile: unexpected case\n");
    }
}

/*
 *****************************************************************************
 * pbuf_alloc creates a profile/trace buffer and the associated zones used to
 * store profiled items.
 *****************************************************************************
 */
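
/*
 * Each prof_data carries NB_PROF_BUFFER buffer areas (prof_area[]); each
 * area's p_zone holds SIZE_PROF_BUFFER natural_t sample slots, and
 * prof_index names the area currently being filled (see kern/profile.h).
 */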

prof_data_t
pbuf_alloc(void)
{
    register prof_data_t pbuf;
    register int i;
    register natural_t *zone;

    pbuf = (prof_data_t)kalloc(sizeof(struct prof_data));
    if (!pbuf)
        return(NULLPROFDATA);
    pbuf->prof_port = MACH_PORT_NULL;
    for (i = 0; i < NB_PROF_BUFFER; i++) {
        zone = (natural_t *)kalloc(SIZE_PROF_BUFFER*sizeof(natural_t));
        if (!zone) {
            /* Free the zones already allocated before bailing out. */
            while (i--)
                kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
                    SIZE_PROF_BUFFER*sizeof(natural_t));
            kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
            return(NULLPROFDATA);
        }
        pbuf->prof_area[i].p_zone = zone;
        pbuf->prof_area[i].p_full = FALSE;
    }
    pbuf->prof_port = MACH_PORT_NULL;
    return(pbuf);
}

/*
 *****************************************************************************
 * pbuf_free frees the memory allocated for storing profile/trace items.
 * Called when a task is no longer profiled/traced.  pbuf_free tears down the
 * memory set up by pbuf_alloc.  It does not check whether the structures are
 * valid, since it is only called by functions in this file.
 *****************************************************************************
 */
void
pbuf_free(
    prof_data_t pbuf)
{
    register int i;

    if (pbuf->prof_port)
        ipc_port_release_send(pbuf->prof_port);

    for (i = 0; i < NB_PROF_BUFFER; i++)
        kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
            SIZE_PROF_BUFFER*sizeof(natural_t));
    kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
}

#endif /* MACH_PROF */

/*
 *****************************************************************************
 * thread_sample is used by MACH_PROF to profile a single thread; it is only
 * a stub in DCI.
 *****************************************************************************
 */

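/*
 * Under MACH_PROF, passing a valid reply port starts sampling the thread and
 * directs its sample buffers to that port; passing MACH_PORT_NULL stops
 * sampling and, if the thread owns its own buffer, drains the last partial
 * buffer before it is freed.
 */
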
#if !MACH_PROF
kern_return_t
thread_sample(
    __unused thread_t   thread,
    __unused ipc_port_t reply)
{
    return KERN_FAILURE;
}
#else
kern_return_t
thread_sample(
    thread_t    thread,
    ipc_port_t  reply)
{
    /*
     * This routine is called every time that a new thread has made
     * a request for the sampling service.  We must keep track of the
     * correspondence between its identity (thread) and the port we are
     * going to use as a reply port to send out the samples resulting
     * from its execution.
     */
    prof_data_t pbuf;
    vm_offset_t vmpbuf;

    if (reply != MACH_PORT_NULL) {
        if (thread->profiled)   /* yuck! */
            return KERN_INVALID_ARGUMENT;
        /* Start profiling this thread; do the initialization. */
        pbuf = pbuf_alloc();
        if ((thread->profil_buffer = pbuf) == NULLPROFDATA) {
            printf("thread_sample: cannot allocate pbuf\n");
            return KERN_RESOURCE_SHORTAGE;
        } else {
            if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
                printf("thread_sample: cannot set pbuf_nb\n");
                return KERN_FAILURE;
            }
            reset_pbuf_area(pbuf);
        }
        pbuf->prof_port = reply;
        thread->profiled = TRUE;
        thread->profiled_own = TRUE;
        if (profile_thread_id == THREAD_NULL)
            profile_thread_id = kernel_thread(kernel_task, profile_thread);
    } else {
        if (!thread->profiled)
            return(KERN_INVALID_ARGUMENT);

        thread->profiled = FALSE;
        /* do not stop sampling if the thread is not profiled on its own */

        if (!thread->profiled_own)
            return KERN_SUCCESS;
        else
            thread->profiled_own = FALSE;

        send_last_sample_buf(thread->profil_buffer);
        pbuf_free(thread->profil_buffer);
        thread->profil_buffer = NULLPROFDATA;
    }
    return KERN_SUCCESS;
}
#endif /* MACH_PROF */

/*
 *****************************************************************************
 * task_sample is used to profile/trace a task - all threads within the task
 * share a common profile buffer to collect items generated by hertz_tick.
 * For each profiled task a profile buffer is created that associates a reply
 * port (used to send the data to a server thread), the task (used for
 * throttling), and a zone area (used to store profiled/traced items).
 *****************************************************************************
 */

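/*
 * Under MACH_PROF the expected sequence, sketched here, is: the profiling
 * server passes a send right as `reply' to start sampling, services the
 * send_samples messages that profile_thread generates on that port, and
 * later calls task_sample again with MACH_PORT_NULL to stop sampling and
 * drain the remaining partial buffer.
 */
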
#if !MACH_PROF
kern_return_t
task_sample(
    __unused task_t     task,
    __unused ipc_port_t reply)
{
    return KERN_FAILURE;
}
#else
kern_return_t
task_sample(
    task_t      task,
    ipc_port_t  reply)
{
    prof_data_t pbuf = task->profil_buffer;
    vm_offset_t vmpbuf;
    boolean_t   turnon = (reply != MACH_PORT_NULL);

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;
    if (turnon) {               /* Do we want to profile this task? */
        pbuf = pbuf_alloc();    /* allocate a profile buffer */
        task_lock(task);
        if (task->task_profiled) { /* if it is already profiled, return */
            task_unlock(task);
            if (pbuf != NULLPROFDATA)
                pbuf_free(pbuf);
            return(KERN_INVALID_ARGUMENT);
        }
        if (pbuf == NULLPROFDATA) {
            task_unlock(task);
            return KERN_RESOURCE_SHORTAGE; /* can't allocate a buffer, quit */
        }
        task->profil_buffer = pbuf;

        if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
            pbuf_free(pbuf);
            task_unlock(task);
            return KERN_FAILURE;
        }
        reset_pbuf_area(pbuf);
        pbuf->prof_port = reply;    /* assoc. buffer with reply port */
    } else {                        /* We want to stop profiling/tracing */
        task_lock(task);
        if (!task->task_profiled) { /* but this task is not being profiled */
            task_unlock(task);
            return(KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * At this point either turnon is FALSE and task_profiled is TRUE,
     * or turnon is TRUE and task_profiled is FALSE.
     */

    if (turnon != task->task_profiled) {
        int actual, i;
        thread_t thread;

        if (turnon && profile_thread_id == THREAD_NULL) /* 1st time thru? */
            profile_thread_id =       /* then start the profile thread. */
                kernel_thread(kernel_task, profile_thread);
        task->task_profiled = turnon;
        actual = task->thread_count;
        for (i = 0, thread = (thread_t)queue_first(&task->threads);
             i < actual;
             i++, thread = (thread_t)queue_next(&thread->task_threads)) {
            if (!thread->profiled_own) {
                thread->profiled = turnon;
                if (turnon) {
                    thread->profil_buffer = task->profil_buffer;
                    thread->profiled = TRUE;
                } else {
                    thread->profiled = FALSE;
                    thread->profil_buffer = NULLPROFDATA;
                }
            }
        }
        if (!turnon) {  /* drain buffers and clean-up */
            send_last_sample_buf(task->profil_buffer);
            pbuf_free(task->profil_buffer);
            task->profil_buffer = NULLPROFDATA;
        }
    }

    task_unlock(task);
    return KERN_SUCCESS;
}
#endif /* MACH_PROF */