/* osfmk/kern/profile.c (xnu-792) */
/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 */
#include <mach_prof.h>

#include <mach/task_server.h>
#include <mach/thread_act_server.h>

#if MACH_PROF
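/*
 * Everything from here to the matching #endif is compiled only when the
 * kernel is configured with MACH_PROF; otherwise only the stub versions of
 * thread_sample() and task_sample() near the bottom of this file are built.
 */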
#include <kern/thread.h>
#include <kern/queue.h>
#include <kern/profile.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <ipc/ipc_space.h>
#include <machine/machparam.h>
#include <mach/prof.h>

thread_t profile_thread_id = THREAD_NULL;
int      profile_sample_count = 0;      /* Provided for inspection from kdb. */
extern kern_return_t task_suspend(task_t task); /* ack */

/* Forwards */
prof_data_t pbuf_alloc(void);
void        pbuf_free(prof_data_t pbuf);
void        profile_thread(void);
void        send_last_sample_buf(prof_data_t pbuf);

/*
 *****************************************************************************
 * profile_thread is the profile/trace kernel support thread.  It is started
 * by a server/user request through task_sample or thread_sample.  The profile
 * thread dequeues messages and sends them to the receive_prof thread, in the
 * server, via the send_samples and send_notices MIG interface functions.  If
 * there are no messages in the queue, the profile thread blocks until it is
 * awakened by profile (called from mach_clock) or by send_last_sample_buf
 * (called from thread_sample/task_sample).
 *****************************************************************************
 */
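/*
 * Data flow (descriptive only):
 *
 *   clock tick -> profile() stores the pc in the current p_zone slot of the
 *   thread's or task's prof_data; when a zone fills, its buffer header is
 *   queued on prof_queue and profile_thread is woken.  profile_thread
 *   dequeues each full buffer, pushes it to the server with send_samples(),
 *   marks the buffer reusable (p_full = FALSE), and wakes any waiter that
 *   set p_wakeme.
 */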

void
profile_thread(void)
{
    spl_t         s;
    buffer_t      buf_entry;
    queue_entry_t prof_queue_entry;
    prof_data_t   pbuf;
    kern_return_t kr;
    int           j;

    /* Initialise the queue header for the prof_queue */
    mpqueue_init(&prof_queue);

    while (TRUE) {

        /* Dequeue the first buffer. */
        s = splsched();
        mpdequeue_head(&prof_queue, &prof_queue_entry);
        splx(s);

        if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF) {
            assert_wait((event_t) profile_thread, THREAD_UNINT);
            thread_block(THREAD_CONTINUE_NULL);
            if (current_thread()->wait_result != THREAD_AWAKENED)
                break;
        } else {
            int dropped;

            pbuf = buf_entry->p_prof;
            kr = send_samples(pbuf->prof_port, (void *)buf_entry->p_zone,
                              (mach_msg_type_number_t)buf_entry->p_index);
            profile_sample_count += buf_entry->p_index;
            if (kr != KERN_SUCCESS)
                printf("send_samples(%x, %x, %d) error %x\n",
                       pbuf->prof_port, buf_entry->p_zone,
                       buf_entry->p_index, kr);
            dropped = buf_entry->p_dropped;
            if (dropped > 0) {
                printf("kernel: profile dropped %d sample%s\n", dropped,
                       dropped == 1 ? "" : "s");
                buf_entry->p_dropped = 0;
            }

            /* Indicate you've finished the dirty job */
            buf_entry->p_full = FALSE;
            if (buf_entry->p_wakeme)
                thread_wakeup((event_t) &buf_entry->p_wakeme);
        }

    }
    /*
     * The profile thread has been signalled to exit.  Any threads waiting
     * for the last buffer of samples to be acknowledged should be woken
     * up now.
     */
    profile_thread_id = THREAD_NULL;
    while (1) {
        s = splsched();
        mpdequeue_head(&prof_queue, &prof_queue_entry);
        splx(s);
        if ((buf_entry = (buffer_t) prof_queue_entry) == NULLPBUF)
            break;
        if (buf_entry->p_wakeme)
            thread_wakeup((event_t) &buf_entry->p_wakeme);
    }
#if 0   /* XXXXX */
    thread_halt_self();
#else
    panic("profile_thread(): halt_self");
#endif  /* XXXXX */
}

/*
 *****************************************************************************
 * send_last_sample_buf is the drain mechanism that allows a partially filled
 * buffer to be sent to the receive_prof thread in the server.
 *****************************************************************************
 */

void
send_last_sample_buf(prof_data_t pbuf)
{
    spl_t    s;
    buffer_t buf_entry;

    if (pbuf == NULLPROFDATA)
        return;

    /*
     * Ask for the sending of the last PC buffer.
     * Make a request to the profile_thread by inserting
     * the buffer in the send queue, and wake it up.
     * The profile_thread drains the send queue promptly, so the
     * partial buffer is handled without further delay.
     */
    buf_entry = pbuf->prof_area + pbuf->prof_index;
    buf_entry->p_prof = pbuf;

    /*
     * Watch out in case the profile thread exits while we are about to
     * queue data for it.
     */
    s = splsched();
    if (profile_thread_id == THREAD_NULL)
        splx(s);
    else {
        buf_entry->p_wakeme = 1;
        mpenqueue_tail(&prof_queue, &buf_entry->p_list);
        thread_wakeup((event_t) profile_thread);
        assert_wait((event_t) &buf_entry->p_wakeme, THREAD_ABORTSAFE);
        splx(s);
        thread_block(THREAD_CONTINUE_NULL);
    }
}
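
/*
 * Note on the handshake above: setting p_wakeme asks profile_thread to do a
 * thread_wakeup() on &buf_entry->p_wakeme once it has pushed the buffer to
 * the server, which is exactly the event this routine blocks on with
 * assert_wait()/thread_block().
 */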


/*
 *****************************************************************************
 * Add clock tick parameters to the profile/trace buffers.  Called from the
 * mach_clock hertz_tick function.  The DCI version stores thread, sp, and pc
 * values into the profile/trace buffers; the MACH_PROF version stores only
 * pc values.
 *****************************************************************************
 */

void
profile(natural_t   pc,
        prof_data_t pbuf)
{
    natural_t inout_val = pc;
    buffer_t  buf_entry;

    if (pbuf == NULLPROFDATA)
        return;

    /* Inserts the PC value in the buffer of the thread */
    set_pbuf_value(pbuf, &inout_val);
    switch ((int)inout_val) {
    case 0:
        if (profile_thread_id == THREAD_NULL) {
            reset_pbuf_area(pbuf);
        }
        break;
    case 1:
        /* Normal case, value successfully inserted */
        break;
    case 2:
        /*
         * The value we have just inserted caused the
         * buffer to be full, and ready to be sent.
         * If profile_thread_id is null, the profile
         * thread has been killed.  Since this generally
         * happens only when the O/S server task of which
         * it is a part is killed, it is not a great loss
         * to throw away the data.
         */
        if (profile_thread_id == THREAD_NULL) {
            reset_pbuf_area(pbuf);
            break;
        }

        buf_entry = (buffer_t) &pbuf->prof_area[pbuf->prof_index];
        buf_entry->p_prof = pbuf;
        mpenqueue_tail(&prof_queue, &buf_entry->p_list);

        /* Switch to another buffer */
        reset_pbuf_area(pbuf);

        /* Wake up the profile thread */
        if (profile_thread_id != THREAD_NULL)
            thread_wakeup((event_t) profile_thread);
        break;

    default:
        printf("profile : unexpected case\n");
    }
}
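
/*
 * For orientation, a minimal sketch of how the clock path might hand a pc
 * sample to this routine.  The hook name and the exact guard are assumptions
 * for illustration only; the real call sites live in the machine-dependent
 * hertz_tick code.
 *
 *      void
 *      hertz_tick_profile_hook(natural_t pc, thread_t thread)
 *      {
 *          if (thread->profiled && thread->profil_buffer != NULLPROFDATA)
 *              profile(pc, thread->profil_buffer);
 *      }
 */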

/*
 *****************************************************************************
 * pbuf_alloc creates a profile/trace buffer and the associated zones used to
 * store profiled items.
 *****************************************************************************
 */

prof_data_t
pbuf_alloc(void)
{
    register prof_data_t pbuf;
    register int         i;
    register natural_t   *zone;

    pbuf = (prof_data_t)kalloc(sizeof(struct prof_data));
    if (!pbuf)
        return(NULLPROFDATA);
    pbuf->prof_port = MACH_PORT_NULL;
    for (i = 0; i < NB_PROF_BUFFER; i++) {
        zone = (natural_t *)kalloc(SIZE_PROF_BUFFER * sizeof(natural_t));
        if (!zone) {
            /* Free only the zones allocated so far, then the header. */
            while (i--)
                kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
                      SIZE_PROF_BUFFER * sizeof(natural_t));
            kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
            return(NULLPROFDATA);
        }
        pbuf->prof_area[i].p_zone = zone;
        pbuf->prof_area[i].p_full = FALSE;
    }
    return(pbuf);
}

/*
 *****************************************************************************
 * pbuf_free frees the memory allocated for storing profile/trace items.
 * Called when a task is no longer profiled/traced.  pbuf_free tears down the
 * memory allocated in pbuf_alloc.  It does not check whether the structures
 * are valid, since it is only called by functions in this file.
 *****************************************************************************
 */
void
pbuf_free(
    prof_data_t pbuf)
{
    register int i;

    if (pbuf->prof_port)
        ipc_port_release_send(pbuf->prof_port);

    for (i = 0; i < NB_PROF_BUFFER; i++)
        kfree((vm_offset_t)pbuf->prof_area[i].p_zone,
              SIZE_PROF_BUFFER * sizeof(natural_t));
    kfree((vm_offset_t)pbuf, sizeof(struct prof_data));
}
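
/*
 * Typical lifetime of a prof_data buffer, as used by thread_sample and
 * task_sample below (a descriptive sketch only; locking and error handling
 * are omitted):
 *
 *      pbuf = pbuf_alloc();                      turn sampling on
 *      set_pbuf_nb(pbuf, NB_PROF_BUFFER - 1);
 *      reset_pbuf_area(pbuf);
 *      pbuf->prof_port = reply;
 *      ...                                       profile() fills it at clock ticks
 *      send_last_sample_buf(pbuf);               turn sampling off: drain
 *      pbuf_free(pbuf);                          then tear down
 */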

#endif  /* MACH_PROF */

/*
 *****************************************************************************
 * thread_sample is used by MACH_PROF to profile a single thread; it is only
 * a stub in DCI.
 *****************************************************************************
 */

#if !MACH_PROF
kern_return_t
thread_sample(
    __unused thread_t   thread,
    __unused ipc_port_t reply)
{
    return KERN_FAILURE;
}
#else
kern_return_t
thread_sample(
    thread_t   thread,
    ipc_port_t reply)
{
    /*
     * This routine is called every time that a new thread has made
     * a request for the sampling service.  We must keep track of the
     * correspondence between its identity (thread) and the port
     * we are going to use as a reply port to send out the samples
     * resulting from its execution.
     */
    prof_data_t pbuf;
    vm_offset_t vmpbuf;

    if (reply != MACH_PORT_NULL) {
        if (thread->profiled)   /* yuck! */
            return KERN_INVALID_ARGUMENT;
        /* Start profiling this activation; do the initialization. */
        pbuf = pbuf_alloc();
        if ((thread->profil_buffer = pbuf) == NULLPROFDATA) {
            printf("thread_sample: cannot allocate pbuf\n");
            return KERN_RESOURCE_SHORTAGE;
        } else {
            if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER - 1)) {
                printf("thread_sample: cannot set pbuf_nb\n");
                return KERN_FAILURE;
            }
            reset_pbuf_area(pbuf);
        }
        pbuf->prof_port = reply;
        thread->profiled = TRUE;
        thread->profiled_own = TRUE;
        if (profile_thread_id == THREAD_NULL)
            profile_thread_id = kernel_thread(kernel_task, profile_thread);
    } else {
        if (!thread->profiled)
            return(KERN_INVALID_ARGUMENT);

        thread->profiled = FALSE;
        /* Do not stop sampling if the thread is not profiled on its own. */
        if (!thread->profiled_own)
            return KERN_SUCCESS;
        else
            thread->profiled_own = FALSE;

        send_last_sample_buf(thread->profil_buffer);
        pbuf_free(thread->profil_buffer);
        thread->profil_buffer = NULLPROFDATA;
    }
    return KERN_SUCCESS;
}
#endif  /* MACH_PROF */
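
/*
 * Calling convention recap (from the code above): passing a valid reply port
 * turns sampling on for the thread and starts profile_thread if it is not
 * already running; passing MACH_PORT_NULL turns sampling off, drains the
 * partial buffer via send_last_sample_buf, and frees the prof_data.
 */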

/*
 *****************************************************************************
 * task_sample is used to profile/trace a task; all threads within the task
 * share a common profile buffer that collects the items generated by
 * hertz_tick.  For each profiled task a profile buffer is created that
 * associates a reply port (used to send the data to a server thread), the
 * task (used for throttling), and a zone area (used to store the
 * profiled/traced items).
 *****************************************************************************
 */

#if !MACH_PROF
kern_return_t
task_sample(
    __unused task_t     task,
    __unused ipc_port_t reply)
{
    return KERN_FAILURE;
}
#else
kern_return_t
task_sample(
    task_t     task,
    ipc_port_t reply)
{
    prof_data_t pbuf = task->profil_buffer;
    vm_offset_t vmpbuf;
    boolean_t   turnon = (reply != MACH_PORT_NULL);

    if (task == TASK_NULL)
        return KERN_INVALID_ARGUMENT;
    if (turnon) {                       /* Do we want to profile this task? */
        pbuf = pbuf_alloc();            /* allocate a profile buffer */
        task_lock(task);
        if (task->task_profiled) {      /* already profiled: return an error */
            task_unlock(task);
            if (pbuf != NULLPROFDATA)
                pbuf_free(pbuf);
            return(KERN_INVALID_ARGUMENT);
        }
        if (pbuf == NULLPROFDATA) {
            task_unlock(task);
            return KERN_RESOURCE_SHORTAGE;  /* can't allocate a buffer, quit */
        }
        task->profil_buffer = pbuf;

        if (!set_pbuf_nb(pbuf, NB_PROF_BUFFER - 1)) {
            pbuf_free(pbuf);
            task_unlock(task);
            return KERN_FAILURE;
        }
        reset_pbuf_area(pbuf);
        pbuf->prof_port = reply;        /* associate buffer with reply port */
    } else {                            /* We want to stop profiling/tracing */
        task_lock(task);
        if (!task->task_profiled) {     /* but this task is not being profiled */
            task_unlock(task);
            return(KERN_INVALID_ARGUMENT);
        }
    }

    /*
     * At this point either
     *     turnon == FALSE && task->task_profiled == TRUE, or
     *     turnon == TRUE  && task->task_profiled == FALSE.
     */

    if (turnon != task->task_profiled) {
        int      actual, i;
        thread_t thread;

        if (turnon && profile_thread_id == THREAD_NULL)    /* 1st time thru? */
            profile_thread_id =                 /* then start profile thread. */
                kernel_thread(kernel_task, profile_thread);
        task->task_profiled = turnon;
        actual = task->thread_count;
        for (i = 0, thread = (thread_t)queue_first(&task->threads);
             i < actual;
             i++, thread = (thread_t)queue_next(&thread->task_threads)) {
            if (!thread->profiled_own) {
                thread->profiled = turnon;
                if (turnon) {
                    thread->profil_buffer = task->profil_buffer;
                    thread->profiled = TRUE;
                } else {
                    thread->profiled = FALSE;
                    thread->profil_buffer = NULLPROFDATA;
                }
            }
        }
        if (!turnon) {          /* drain buffers and clean up */
            send_last_sample_buf(task->profil_buffer);
            pbuf_free(task->profil_buffer);
            task->profil_buffer = NULLPROFDATA;
        }
    }

    task_unlock(task);
    return KERN_SUCCESS;
}
#endif  /* MACH_PROF */