[apple/xnu.git] / osfmk / kern / profile.c
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#include <mach_prof.h>

#include <mach/task_server.h>
#include <mach/thread_act_server.h>

#if MACH_PROF
#include <kern/thread.h>
#include <kern/queue.h>
#include <kern/profile.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <ipc/ipc_space.h>
#include <machine/machparam.h>
#include <mach/prof.h>

thread_t profile_thread_id = THREAD_NULL;
int profile_sample_count = 0;           /* Provided for looking at from kdb. */
extern kern_return_t task_suspend(task_t task); /* ack */

/* Forwards */
prof_data_t pbuf_alloc(void);
void pbuf_free(
        prof_data_t pbuf);
void profile_thread(void);
void send_last_sample_buf(
        prof_data_t pbuf);

/*
 *****************************************************************************
 * profile_thread is the profile/trace kernel support thread.  It is started
 * by a server/user request through task_sample or thread_sample.  The profile
 * thread dequeues messages and sends them to the receive_prof thread, in the
 * server, via the send_samples and send_notices MIG interface functions.  If
 * there are no messages in the queue, the profile thread blocks until it is
 * awakened by profile (called from mach_clock) or by last_sample (called by
 * thread_sample/task_sample).
 */

void
profile_thread(void)
{
        spl_t           s;
        buffer_t        buf_entry;
        queue_entry_t   prof_queue_entry;
        prof_data_t     pbuf;
        kern_return_t   kr;
        int             j;

        /* Initialise the queue header for the prof_queue */
        mpqueue_init(&prof_queue);

        while (TRUE) {

                /* Dequeue the first buffer. */
                s = splsched();
                mpdequeue_head(&prof_queue, &prof_queue_entry);
                splx(s);

                if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) {
                        assert_wait((event_t) profile_thread, THREAD_UNINT);
                        thread_block(THREAD_CONTINUE_NULL);
                        if (current_thread()->wait_result != THREAD_AWAKENED)
                                break;
                } else {
                        int dropped;

                        pbuf = buf_entry->p_prof;
                        kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
                                          (mach_msg_type_number_t)buf_entry->p_index);
                        profile_sample_count += buf_entry->p_index;
                        if (kr != KERN_SUCCESS)
                                printf("send_samples(%x, %x, %d) error %x\n",
                                       pbuf->prof_port, buf_entry->p_zone, buf_entry->p_index, kr);
                        dropped = buf_entry->p_dropped;
                        if (dropped > 0) {
                                printf("kernel: profile dropped %d sample%s\n", dropped,
                                       dropped == 1 ? "" : "s");
                                buf_entry->p_dropped = 0;
                        }

                        /* Indicate you've finished the dirty job */
                        buf_entry->p_full = FALSE;
                        if (buf_entry->p_wakeme)
                                thread_wakeup((event_t) &buf_entry->p_wakeme);
                }

        }
        /* The profile thread has been signalled to exit.  Any threads waiting
           for the last buffer of samples to be acknowledged should be woken
           up now. */
        profile_thread_id = THREAD_NULL;
        while (1) {
                s = splsched();
                mpdequeue_head(&prof_queue, &prof_queue_entry);
                splx(s);
                if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF)
                        break;
                if (buf_entry->p_wakeme)
                        thread_wakeup((event_t) &buf_entry->p_wakeme);
        }
#if 0   /* XXXXX */
        thread_halt_self();
#else
        panic("profile_thread(): halt_self");
#endif  /* XXXXX */
}

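/*
 * There is at most one profile_thread in the system.  It is created lazily
 * by thread_sample()/task_sample() below, via
 * kernel_thread(kernel_task, profile_thread), the first time any thread or
 * task is profiled, and profile_thread_id is how the rest of this file tells
 * whether it is still running.
 */
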
/*
 *****************************************************************************
 * send_last_sample_buf is the drain mechanism that allows partially filled
 * profile buffers to be sent to the receive_prof thread in the server.
 *****************************************************************************
 */

void
send_last_sample_buf(prof_data_t pbuf)
{
        spl_t    s;
        buffer_t buf_entry;

        if (pbuf == NULLPROFDATA)
                return;

        /* Ask for the sending of the last PC buffer.
         * Make a request to the profile_thread by inserting
         * the buffer in the send queue, and wake the thread
         * so the buffer is handled promptly.
         */
        buf_entry = pbuf->prof_area + pbuf->prof_index;
        buf_entry->p_prof = pbuf;

        /*
           Watch out in case profile thread exits while we are about to
           queue data for it.
         */
        s = splsched();
        if (profile_thread_id == THREAD_NULL)
                splx(s);
        else {
                buf_entry->p_wakeme = 1;
                mpenqueue_tail(&prof_queue, &buf_entry->p_list);
                thread_wakeup((event_t) profile_thread);
                assert_wait((event_t) &buf_entry->p_wakeme, THREAD_ABORTSAFE);
                splx(s);
                thread_block(THREAD_CONTINUE_NULL);
        }
}

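/*
 * Synchronization note for send_last_sample_buf(): the wait on
 * &buf_entry->p_wakeme is registered with assert_wait() while still at
 * splsched(), before the spl is dropped and thread_block() is called.
 * That ordering guarantees the wakeup issued by profile_thread() once the
 * buffer has been sent cannot be lost, even if it arrives before the caller
 * actually blocks.
 */
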
/*
 *****************************************************************************
 * Add clock tick parameters to profile/trace buffers.  Called from the
 * mach_clock hertz_tick function.  The DCI version stores thread, sp, and pc
 * values into the profile/trace buffers; the MACH_PROF version stores only
 * pc values.
 *****************************************************************************
 */

void
profile(natural_t   pc,
        prof_data_t pbuf)
{
        natural_t inout_val = pc;
        buffer_t  buf_entry;

        if (pbuf == NULLPROFDATA)
                return;

        /* Inserts the PC value in the buffer of the thread */
        set_pbuf_value(pbuf, &inout_val);
        switch((int)inout_val) {
        case 0:
                /*
                 * The sample was not stored (the buffer being written is
                 * still marked full).  If the profile thread has exited it
                 * will never drain it, so recycle the buffer area here.
                 */
                if (profile_thread_id == THREAD_NULL) {
                        reset_pbuf_area(pbuf);
                }
                break;
        case 1:
                /* Normal case, value successfully inserted */
                break;
        case 2:
                /*
                 * The value we have just inserted caused the
                 * buffer to be full, and ready to be sent.
                 * If profile_thread_id is null, the profile
                 * thread has been killed.  Since this generally
                 * happens only when the O/S server task of which
                 * it is a part is killed, it is not a great loss
                 * to throw away the data.
                 */
                if (profile_thread_id == THREAD_NULL) {
                        reset_pbuf_area(pbuf);
                        break;
                }

                buf_entry = (buffer_t) &pbuf->prof_area[pbuf->prof_index];
                buf_entry->p_prof = pbuf;
                mpenqueue_tail(&prof_queue, &buf_entry->p_list);

                /* Switch to another buffer */
                reset_pbuf_area(pbuf);

                /* Wake up the profile thread */
                if (profile_thread_id != THREAD_NULL)
                        thread_wakeup((event_t) profile_thread);
                break;

        default:
                printf("profile : unexpected case\n");
        }
}

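/*
 * Illustrative call site (a sketch, not taken from this file): the clock
 * interrupt path (hertz_tick in mach_clock) is expected to hand each tick's
 * pc to profile() for a profiled thread, roughly along these lines, using
 * the same per-thread fields (profiled, profil_buffer) that
 * thread_sample()/task_sample() maintain below:
 *
 *      thread_t cur = current_thread();
 *
 *      if (cur->profiled)
 *              profile(pc, cur->profil_buffer);
 */
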
/*
 *****************************************************************************
 * pbuf_alloc creates a profile/trace buffer and the associated zones for
 * storing profiled items.
 *****************************************************************************
 */

prof_data_t
pbuf_alloc(void)
{
        register prof_data_t pbuf;
        register int         i;
        register natural_t  *zone;

        pbuf = (prof_data_t)kalloc(sizeof(struct prof_data));
        if (!pbuf)
                return(NULLPROFDATA);
        pbuf->prof_port = MACH_PORT_NULL;
        for (i=0; i< NB_PROF_BUFFER; i++) {
                zone = (natural_t *)kalloc(SIZE_PROF_BUFFER*sizeof(natural_t));
                if (!zone) {
                        /* free the zones allocated so far, then the header */
                        while (i--)
                                kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
                                      SIZE_PROF_BUFFER*sizeof(natural_t));
                        kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
                        return(NULLPROFDATA);
                }
                pbuf->prof_area[i].p_zone = zone;
                pbuf->prof_area[i].p_full = FALSE;
        }
        pbuf->prof_port = MACH_PORT_NULL;
        return(pbuf);
}

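/*
 * The prof_data returned above carries NB_PROF_BUFFER sample areas, each a
 * kalloc'ed zone of SIZE_PROF_BUFFER natural_t entries.  Having more than
 * one area is what lets profile() keep recording into a fresh zone (via
 * reset_pbuf_area) while a full zone sits on prof_queue waiting for
 * profile_thread() to send it to the server.
 */
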
/*
 *****************************************************************************
 * pbuf_free frees the memory allocated for storing profile/trace items.
 * Called when a task is no longer profiled/traced.  pbuf_free tears down the
 * memory allocated in pbuf_alloc.  It does not check that the structures are
 * valid, since it is only called by functions in this file.
 *****************************************************************************
 */
void
pbuf_free(
        prof_data_t pbuf)
{
        register int i;

        if (pbuf->prof_port)
                ipc_port_release_send(pbuf->prof_port);

        for(i=0; i < NB_PROF_BUFFER ; i++)
                kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
                      SIZE_PROF_BUFFER*sizeof(natural_t));
        kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
}

#endif /* MACH_PROF */

/*
 *****************************************************************************
 * thread_sample is used by MACH_PROF to profile a single thread, and is only
 * a stub in DCI.
 *****************************************************************************
 */

#if !MACH_PROF
kern_return_t
thread_sample(
        __unused thread_t   thread,
        __unused ipc_port_t reply)
{
        return KERN_FAILURE;
}
#else
kern_return_t
thread_sample(
        thread_t   thread,
        ipc_port_t reply)
{
        /*
         * This routine is called every time that a new thread has made
         * a request for the sampling service.  We must keep track of the
         * correspondence between its identity (thread) and the port
         * we are going to use as a reply port to send out the samples
         * resulting from its execution.
         */
        prof_data_t pbuf;
        vm_offset_t vmpbuf;

        if (reply != MACH_PORT_NULL) {
                if (thread->profiled)   /* yuck! */
                        return KERN_INVALID_ARGUMENT;
                /* Start profiling this activation, do the initialization. */
                pbuf = pbuf_alloc();
                if ((thread->profil_buffer = pbuf) == NULLPROFDATA) {
                        printf("thread_sample: cannot allocate pbuf\n");
                        return KERN_RESOURCE_SHORTAGE;
                }
                else {
                        if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
                                printf("mach_sample_thread: cannot set pbuf_nb\n");
                                /* release the buffer we just attached */
                                pbuf_free(pbuf);
                                thread->profil_buffer = NULLPROFDATA;
                                return KERN_FAILURE;
                        }
                        reset_pbuf_area(pbuf);
                }
                pbuf->prof_port = reply;
                thread->profiled = TRUE;
                thread->profiled_own = TRUE;
                if (profile_thread_id == THREAD_NULL)
                        profile_thread_id = kernel_thread(kernel_task, profile_thread);
        } else {
                if (!thread->profiled)
                        return(KERN_INVALID_ARGUMENT);

                thread->profiled = FALSE;
                /* do not stop sampling if the thread is not profiled on its own */

                if (!thread->profiled_own)
                        return KERN_SUCCESS;
                else
                        thread->profiled_own = FALSE;

                send_last_sample_buf(thread->profil_buffer);
                pbuf_free(thread->profil_buffer);
                thread->profil_buffer = NULLPROFDATA;
        }
        return KERN_SUCCESS;
}
#endif /* MACH_PROF */

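/*
 * Usage summary for thread_sample() above: a non-null reply port starts
 * sampling the thread (a profile buffer is allocated, the reply port is
 * attached to it, and the profile_thread is created if it does not exist
 * yet).  Passing MACH_PORT_NULL stops sampling: the partially filled buffer
 * is drained through send_last_sample_buf() and then freed, unless the
 * thread was only being sampled as part of a task-wide task_sample(), in
 * which case the task's shared buffer is left untouched and only the
 * thread's own flag is cleared.
 */
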
/*
 *****************************************************************************
 * task_sample is used to profile/trace tasks - all threads within a task
 * share a common profile buffer to collect items generated by the
 * hertz_tick.  For each profiled task a profile buffer is created that
 * associates a reply port (used to send the data to a server thread), the
 * task (used for throttling), and a zone area (used to store profiled/traced
 * items).
 *****************************************************************************
 */

#if !MACH_PROF
kern_return_t
task_sample(
        __unused task_t     task,
        __unused ipc_port_t reply)
{
        return KERN_FAILURE;
}
#else
kern_return_t
task_sample(
        task_t     task,
        ipc_port_t reply)
{
        prof_data_t pbuf;
        vm_offset_t vmpbuf;
        boolean_t   turnon = (reply != MACH_PORT_NULL);

        if (task == TASK_NULL)
                return KERN_INVALID_ARGUMENT;
        pbuf = task->profil_buffer;
        if (turnon)     /* Do we want to profile this task? */
        {
                pbuf = pbuf_alloc();    /* allocate a profile buffer */
                task_lock(task);
                if (task->task_profiled) {      /* if it is already profiled return so */
                        task_unlock(task);
                        if (pbuf != NULLPROFDATA)
                                pbuf_free(pbuf);
                        return(KERN_INVALID_ARGUMENT);
                }
                if (pbuf == NULLPROFDATA) {
                        task_unlock(task);
                        return KERN_RESOURCE_SHORTAGE;  /* can't allocate a buffer, quit */
                }
                task->profil_buffer = pbuf;

                if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER-1)) {
                        pbuf_free(pbuf);
                        task_unlock(task);
                        return KERN_FAILURE;
                }
                reset_pbuf_area(pbuf);
                pbuf->prof_port = reply;        /* assoc. buffer with reply port */
        } else {        /* We want to stop profiling/tracing */
                task_lock(task);
                if (!task->task_profiled) {     /* but this task is not being profiled */
                        task_unlock(task);
                        return(KERN_INVALID_ARGUMENT);
                }
        }

        /*
         * turnon = FALSE && task_profiled = TRUE ||
         * turnon = TRUE  && task_profiled = FALSE
         */

        if (turnon != task->task_profiled) {
                int      actual, i;
                thread_t thread;

                if (turnon && profile_thread_id == THREAD_NULL)        /* 1st time thru? */
                        profile_thread_id =     /* then start profile thread. */
                                kernel_thread(kernel_task, profile_thread);
                task->task_profiled = turnon;
                actual = task->thread_count;
                for (i = 0, thread = (thread_t)queue_first(&task->threads);
                     i < actual;
                     i++, thread = (thread_t)queue_next(&thread->task_threads)) {
                        if (!thread->profiled_own) {
                                thread->profiled = turnon;
                                if (turnon) {
                                        thread->profil_buffer = task->profil_buffer;
                                        thread->profiled = TRUE;
                                } else {
                                        thread->profiled = FALSE;
                                        thread->profil_buffer = NULLPROFDATA;
                                }
                        }
                }
                if (!turnon) {  /* drain buffers and clean-up */
                        send_last_sample_buf(task->profil_buffer);
                        pbuf_free(task->profil_buffer);
                        task->profil_buffer = NULLPROFDATA;
                }
        }

        task_unlock(task);
        return KERN_SUCCESS;
}
#endif /* MACH_PROF */