/*
 * Copyright (c) 2012-2013, 2015 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */


/*
 * Corpses Overview
 * ================
 *
 * A corpse is the state of a process past the point of its death. This means the process has
 * completed all of its termination operations, like releasing file descriptors, mach ports,
 * sockets and other constructs used to identify the process. To everything else the process
 * appears to have died and to no longer be available by any means.
 *
 * Why do we need Corpses?
 * -----------------------
 * For crash inspection we need to inspect the state and data associated with the process so that
 * the crash reporting infrastructure can build backtraces, find leaks, etc. For example, a crash
 * reporter needs the dead process's thread states in order to build a backtrace.
 *
 * Corpses functionality in kernel
 * ===============================
 * The corpse functionality is an extension of the existing exception reporting mechanisms. The
 * exception_triage calls will try to deliver the first round of exceptions, allowing the
 * task/debugger/ReportCrash/launchd level exception handlers to respond to the exception. If even
 * after notification the exception is not handled, then the process begins its death operations
 * and, during proc_prepareexit, we decide whether to create a corpse for inspection. Following is
 * a sample run through of the events and data shuffling that happen when corpses are enabled
 * (see the sketch after this list).
 *
 * * A process causes an exception during normal execution of its threads.
 * * The exception generated by either the mach (e.g. GUARDED_MACHPORT) or the bsd (e.g. SIGABRT,
 *   GUARDED_FD etc.) side is passed through the exception_triage() function to follow the
 *   thread -> task -> host level exception handling system. This set of steps is the same as
 *   before and allows existing crash reporting systems (both internal and 3rd party) to catch
 *   and create reports as required.
 * * If the above exception handling fails (nobody handles the notification), then the
 *   proc_prepareexit path has logic to decide whether to create a corpse.
 * * The task_mark_corpse function allocates userspace vm memory and attaches the information
 *   kcdata_descriptor_t to the task->corpse_info field of the task.
 *   - All the task's threads are marked with the "inspection" flag, which signals the termination
 *     daemon not to reap them but to hold them until they have been inspected.
 *   - The task flags t_flags reflect the corpse bit and also a PENDING_CORPSE bit. PENDING_CORPSE
 *     prevents task_terminate from stripping important data from the task.
 *   - It marks all the threads to terminate and return to the AST for termination.
 *   - The allocation logic takes into account the rate limiting policy of allowing only
 *     TOTAL_CORPSES_ALLOWED in flight.
 * * The proc exit thread continues and collects the required information in the allocated vm
 *   region. Once complete it marks itself for termination.
 * * In thread_terminate_self(), the last thread to enter will do a call to proc_exit().
 *   Following this is a check to see if the task is marked for corpse notification, which will
 *   invoke task_deliver_crash_notification().
 * * Once EXC_CORPSE_NOTIFY is delivered, it removes the PENDING_CORPSE flag from the task (and
 *   the inspection flag from all its threads) and allows task_terminate to go ahead and continue
 *   the mach task termination process.
 * * ASIDE: The rest of the threads that reach thread_terminate_daemon() with the inspection
 *   flag set are just bounced to another holding queue (crashed_threads_queue). Only after the
 *   corpse notification are these pulled out of the holding queue and enqueued back onto the
 *   termination queue.
 *
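 * The decision point described above (the sketch referenced in the list) can be pictured
 * roughly as follows. This is a hypothetical sketch for orientation only, not the actual
 * bsd exit-path code; it assumes task_mark_corpse() is reachable from that path as
 * described above, and "exception_was_handled" is just an illustrative placeholder:
 *
 *     // in the proc_prepareexit path, after exception_triage() came back unhandled
 *     if (corpses_enabled() && !exception_was_handled) {
 *             // allocates corpse info memory and flags the task/threads for inspection
 *             (void)task_mark_corpse(current_task());
 *     }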
 *
 * Corpse info format
 * ==================
 * The kernel (task_mark_corpse()) makes a vm allocation in the dead task's vm space (with tag
 * VM_MEMORY_CORPSEINFO (80)). Within this memory all corpse information is saved by various
 * subsystems, for example:
 * * the bsd proc exit path may write down the pid, parent pid, number of file descriptors, etc.
 * * the mach side may append data regarding ledger usage, memory stats, etc.
 * See detailed info about the memory structure and format in the kern_cdata.h documentation.
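 *
 * As a minimal sketch (assuming the kcdata helpers declared in kern_cdata.h; TASK_CRASHINFO_PID
 * and the local pid variable are used purely for illustration), a subsystem contributing a
 * field would do something along the lines of:
 *
 *     mach_vm_address_t uaddr = 0;
 *     if (kcdata_get_memory_addr(corpse_info, TASK_CRASHINFO_PID,
 *         sizeof(pid_t), &uaddr) == KERN_SUCCESS) {
 *             kcdata_memcpy(corpse_info, uaddr, &pid, sizeof(pid));
 *     }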
 *
 * Configuring Corpses functionality
 * =================================
 * boot-arg: -no_corpses disables corpse generation. This can be added/removed without affecting
 * any other subsystem.
 * TOTAL_CORPSES_ALLOWED: (recompilation required) - Changing this number allows for controlling
 * the number of corpse instances to be held for inspection before allowing memory to be reclaimed
 * by the system.
 * CORPSEINFO_ALLOCATION_SIZE: the default size of the vm allocation. If in the future there is
 * much more data to be put in, then please re-tune this parameter.
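 *
 * As an illustration only (the exact mechanism for setting boot-args varies by platform and
 * bootloader; this hypothetical nvram invocation merely shows the argument spellings parsed by
 * corpses_init() below):
 *
 *     nvram boot-args="-no_corpses exc_via_corpse_forking=0 corpse_for_fatal_memkill=0"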
 *
 * Debugging/Visibility
 * ====================
 * * lldbmacros for thread and task summary are updated to show a "C" flag for corpse tasks/threads.
 * * There are macros to see the list of threads in the termination queue (dumpthread_terminate_queue)
 *   and the holding queue (dumpcrashed_thread_queue); see the example below.
 * * In case corpse creation is disabled or ignored, the system log is updated via printf with
 *   the reason.
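 *
 * For example, from a kernel debugging session with the xnu lldbmacros loaded (macro names as
 * listed above; the exact output format depends on the lldbmacros version in use):
 *
 *     (lldb) dumpthread_terminate_queue
 *     (lldb) dumpcrashed_thread_queue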
 *
 * Limitations of Corpses
 * ======================
 * Holding memory for inspection creates vm pressure, which might not be desirable on low memory
 * devices. Hence the number of corpses that can be inspected at a time is limited, as set by
 * TOTAL_CORPSES_ALLOWED.
 *
 */


#include <stdatomic.h>
#include <kern/assert.h>
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/vm_param.h>
#include <kern/kern_types.h>
#include <kern/mach_param.h>
#include <kern/thread.h>
#include <kern/task.h>
#include <corpses/task_corpse.h>
#include <kern/kalloc.h>
#include <kern/kern_cdata.h>
#include <mach/mach_vm.h>
#include <kern/exc_guard.h>

#if CONFIG_MACF
#include <security/mac_mach_internal.h>
#endif

/*
 * Exported interfaces
 */
#include <mach/task_server.h>

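/*
 * corpse_creation_gate packs both in-flight counters into a single 32-bit value so that
 * the corpse and user-fault budgets can be checked and updated together with one atomic
 * compare-and-swap on inflight_corpses (see task_crashinfo_get_ref/release_ref below).
 */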
union corpse_creation_gate {
	struct {
		uint16_t user_faults;
		uint16_t corpses;
	};
	uint32_t value;
};

static _Atomic uint32_t inflight_corpses;
unsigned long total_corpses_created = 0;
boolean_t corpse_enabled_config = TRUE;

/* bootarg to generate corpse with size up to max_footprint_mb */
boolean_t corpse_threshold_system_limit = FALSE;

/* bootarg to turn on corpse forking for EXC_RESOURCE */
int exc_via_corpse_forking = 1;

/* bootarg to generate corpse for fatal high memory watermark violation */
int corpse_for_fatal_memkill = 1;

#ifdef __arm__
static inline int
IS_64BIT_PROCESS(__unused void *p)
{
	return 0;
}
#else
extern int IS_64BIT_PROCESS(void *);
#endif /* __arm__ */
extern void gather_populate_corpse_crashinfo(void *p, task_t task,
    mach_exception_data_type_t code, mach_exception_data_type_t subcode,
    uint64_t *udata_buffer, int num_udata, void *reason);
extern void *proc_find(int pid);
extern int proc_rele(void *p);

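/*
 * Routine: corpses_init
 *          Parse the corpse related boot-args and record the resulting
 *          configuration in the globals above.
 */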
void
corpses_init()
{
	char temp_buf[20];
	int exc_corpse_forking;
	int fatal_memkill;
	if (PE_parse_boot_argn("-no_corpses", temp_buf, sizeof(temp_buf))) {
		corpse_enabled_config = FALSE;
	}
	if (PE_parse_boot_argn("exc_via_corpse_forking", &exc_corpse_forking, sizeof(exc_corpse_forking))) {
		exc_via_corpse_forking = exc_corpse_forking;
	}
	if (PE_parse_boot_argn("corpse_for_fatal_memkill", &fatal_memkill, sizeof(fatal_memkill))) {
		corpse_for_fatal_memkill = fatal_memkill;
	}
#if DEBUG || DEVELOPMENT
	if (PE_parse_boot_argn("-corpse_threshold_system_limit", &corpse_threshold_system_limit, sizeof(corpse_threshold_system_limit))) {
		corpse_threshold_system_limit = TRUE;
	}
#endif /* DEBUG || DEVELOPMENT */
}

/*
 * Routine: corpses_enabled
 * returns FALSE if not enabled
 */
boolean_t
corpses_enabled()
{
	return corpse_enabled_config;
}

unsigned long
total_corpses_count(void)
{
	union corpse_creation_gate gate;

	gate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	return gate.corpses;
}

/*
 * Routine: task_crashinfo_get_ref()
 *          Grab a slot for creating a corpse.
 * Returns: KERN_SUCCESS if the policy allows for creating a corpse.
 */
static kern_return_t
task_crashinfo_get_ref(uint16_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			if (newgate.user_faults++ >= TOTAL_USER_FAULTS_ALLOWED) {
				return KERN_RESOURCE_SHORTAGE;
			}
		}
		if (newgate.corpses++ >= TOTAL_CORPSES_ALLOWED) {
			return KERN_RESOURCE_SHORTAGE;
		}

		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			return KERN_SUCCESS;
		}
	}
}

/*
 * Routine: task_crashinfo_release_ref
 *          Release the slot held by a corpse.
 */
static kern_return_t
task_crashinfo_release_ref(uint16_t kcd_u_flags)
{
	union corpse_creation_gate oldgate, newgate;

	assert(kcd_u_flags & CORPSE_CRASHINFO_HAS_REF);

	oldgate.value = atomic_load_explicit(&inflight_corpses, memory_order_relaxed);
	for (;;) {
		newgate = oldgate;
		if (kcd_u_flags & CORPSE_CRASHINFO_USER_FAULT) {
			if (newgate.user_faults-- == 0) {
				panic("corpse in flight count over-release");
			}
		}
		if (newgate.corpses-- == 0) {
			panic("corpse in flight count over-release");
		}
		// this reloads the value in oldgate
		if (atomic_compare_exchange_strong_explicit(&inflight_corpses,
		    &oldgate.value, newgate.value, memory_order_relaxed,
		    memory_order_relaxed)) {
			return KERN_SUCCESS;
		}
	}
}

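/*
 * Routine: task_crashinfo_alloc_init
 *          Allocate and initialize a kcdata descriptor for corpse crash info at
 *          crash_data_p, taking a corpse-gate reference first when the caller
 *          passes CORPSE_CRASHINFO_HAS_REF in kc_u_flags.
 */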
kcdata_descriptor_t
task_crashinfo_alloc_init(mach_vm_address_t crash_data_p, unsigned size,
    uint32_t kc_u_flags, unsigned kc_flags)
{
	kcdata_descriptor_t kcdata;

	if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
		if (KERN_SUCCESS != task_crashinfo_get_ref(kc_u_flags)) {
			return NULL;
		}
	}

	kcdata = kcdata_memory_alloc_init(crash_data_p, TASK_CRASHINFO_BEGIN, size,
	    kc_flags);
	if (kcdata) {
		kcdata->kcd_user_flags = kc_u_flags;
	} else if (kc_u_flags & CORPSE_CRASHINFO_HAS_REF) {
		task_crashinfo_release_ref(kc_u_flags);
	}
	return kcdata;
}


/*
 * Free up the memory associated with task_crashinfo_data
 */
kern_return_t
task_crashinfo_destroy(kcdata_descriptor_t data)
{
	if (!data) {
		return KERN_INVALID_ARGUMENT;
	}
	if (data->kcd_user_flags & CORPSE_CRASHINFO_HAS_REF) {
		task_crashinfo_release_ref(data->kcd_user_flags);
	}
	return kcdata_memory_destroy(data);
}

/*
 * Routine: task_get_corpseinfo
 * params: task - task which has corpse info setup.
 * returns: crash info data attached to task.
 *          NULL if task is null or has no corpse info
 */
kcdata_descriptor_t
task_get_corpseinfo(task_t task)
{
	kcdata_descriptor_t retval = NULL;
	if (task != NULL) {
		retval = task->corpse_info;
	}
	return retval;
}

/*
 * Routine: task_add_to_corpse_task_list
 * params: task - task to be added to corpse task list
 * returns: None.
 */
void
task_add_to_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_enter(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_remove_from_corpse_task_list
 * params: task - task to be removed from corpse task list
 * returns: None.
 */
void
task_remove_from_corpse_task_list(task_t corpse_task)
{
	lck_mtx_lock(&tasks_corpse_lock);
	queue_remove(&corpse_tasks, corpse_task, task_t, corpse_tasks);
	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_purge_all_corpses
 * params: None.
 * returns: None.
 */
void
task_purge_all_corpses(void)
{
	task_t task;

	printf("Purging corpses......\n\n");

	lck_mtx_lock(&tasks_corpse_lock);
	/* Iterate through all the corpse tasks and clear all map entries */
	queue_iterate(&corpse_tasks, task, task_t, corpse_tasks) {
		vm_map_remove(task->map,
		    task->map->min_offset,
		    task->map->max_offset,
		    /*
		     * Final cleanup:
		     * + no unnesting
		     * + remove immutable mappings
		     * + allow gaps in the range
		     */
		    (VM_MAP_REMOVE_NO_UNNESTING |
		    VM_MAP_REMOVE_IMMUTABLE |
		    VM_MAP_REMOVE_GAPS_OK));
	}

	lck_mtx_unlock(&tasks_corpse_lock);
}

/*
 * Routine: task_generate_corpse
 * params: task - task to fork a corpse
 *         corpse_task - task port of the generated corpse
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_generate_corpse(
	task_t task,
	ipc_port_t *corpse_task_port)
{
	task_t new_task;
	kern_return_t kr;
	thread_t thread, th_iter;
	ipc_port_t corpse_port;
	ipc_port_t old_notify;

	if (task == kernel_task || task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task_lock(task);
	if (task_is_a_corpse_fork(task)) {
		task_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	task_unlock(task);

	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread, 0, 0, 0, NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}

	/* wait for all the threads in the task to terminate */
	task_lock(new_task);
	task_wait_till_threads_terminate_locked(new_task);

	/* Reset thread ports of all the threads in task */
	queue_iterate(&new_task->threads, th_iter, thread_t, task_threads)
	{
		/* Do not reset the thread port for inactive threads */
		if (th_iter->corpse_dup == FALSE) {
			ipc_thread_reset(th_iter);
		}
	}
	task_unlock(new_task);

	/* transfer the task ref to port and arm the no-senders notification */
	corpse_port = convert_task_to_port(new_task);
	assert(IP_NULL != corpse_port);

	ip_lock(corpse_port);
	require_ip_active(corpse_port);
	ipc_port_nsrequest(corpse_port, corpse_port->ip_mscount, ipc_port_make_sonce_locked(corpse_port), &old_notify);
	/* port unlocked */

	assert(IP_NULL == old_notify);
	*corpse_task_port = corpse_port;
	return KERN_SUCCESS;
}

/*
 * Routine: task_enqueue_exception_with_corpse
 * params: task - task to generate a corpse and enqueue it
 *         etype - EXC_RESOURCE or EXC_GUARD
 *         code - exception code to be enqueued
 *         codeCnt - code array count - code and subcode
 *
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments passed.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_enqueue_exception_with_corpse(
	task_t task,
	exception_type_t etype,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	kern_return_t kr;

	if (codeCnt < 2) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Generate a corpse for the given task, will return with a ref on corpse task */
	kr = task_generate_corpse_internal(task, &new_task, &thread,
	    etype, code[0], code[1], reason);
	if (kr == KERN_SUCCESS) {
		if (thread == THREAD_NULL) {
			return KERN_FAILURE;
		}
		assert(new_task != TASK_NULL);
		assert(etype == EXC_RESOURCE || etype == EXC_GUARD);
		thread_exception_enqueue(new_task, thread, etype);
	}
	return kr;
}

/*
 * Routine: task_generate_corpse_internal
 * params: task - task to fork a corpse
 *         corpse_task - task of the generated corpse
 *         exc_thread - equivalent thread in corpse enqueuing exception
 *         etype - EXC_RESOURCE or EXC_GUARD or 0
 *         code - mach exception code to be passed in corpse blob
 *         subcode - mach exception subcode to be passed in corpse blob
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_NOT_SUPPORTED on corpse disabled.
 *          KERN_RESOURCE_SHORTAGE on memory alloc failure or reaching max corpse.
 */
kern_return_t
task_generate_corpse_internal(
	task_t task,
	task_t *corpse_task,
	thread_t *exc_thread,
	exception_type_t etype,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode,
	void *reason)
{
	task_t new_task = TASK_NULL;
	thread_t thread = THREAD_NULL;
	thread_t thread_next = THREAD_NULL;
	kern_return_t kr;
	struct proc *p = NULL;
	int is_64bit_addr;
	int is_64bit_data;
	int t_flags;
	uint64_t *udata_buffer = NULL;
	int size = 0;
	int num_udata = 0;
	uint16_t kc_u_flags = CORPSE_CRASHINFO_HAS_REF;

#if CONFIG_MACF
	struct label *label = NULL;
#endif

	if (!corpses_enabled()) {
		return KERN_NOT_SUPPORTED;
	}

	if (etype == EXC_GUARD && EXC_GUARD_DECODE_GUARD_TYPE(code) == GUARD_TYPE_USER) {
		kc_u_flags |= CORPSE_CRASHINFO_USER_FAULT;
	}

	kr = task_crashinfo_get_ref(kc_u_flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Having a task reference does not guarantee a proc reference */
	p = proc_find(task_pid(task));
	if (p == NULL) {
		kr = KERN_INVALID_TASK;
		goto error_task_generate_corpse;
	}

	is_64bit_addr = IS_64BIT_PROCESS(p);
	is_64bit_data = (task == TASK_NULL) ? is_64bit_addr : task_get_64bit_data(task);
	t_flags = TF_CORPSE_FORK |
	    TF_PENDING_CORPSE |
	    TF_CORPSE |
	    (is_64bit_addr ? TF_64B_ADDR : TF_NONE) |
	    (is_64bit_data ? TF_64B_DATA : TF_NONE);

#if CONFIG_MACF
	/* Create the corpse label credentials from the process. */
	label = mac_exc_create_label_for_proc(p);
#endif

	/* Create a task for corpse */
	kr = task_create_internal(task,
	    NULL,
	    TRUE,
	    is_64bit_addr,
	    is_64bit_data,
	    t_flags,
	    TPF_NONE,
	    TWF_NONE,
	    &new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* Create and copy threads from task, returns a ref to thread */
	kr = task_duplicate_map_and_threads(task, p, new_task, &thread,
	    &udata_buffer, &size, &num_udata);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	kr = task_collect_crash_info(new_task,
#if CONFIG_MACF
	    label,
#endif
	    TRUE);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* transfer our references to the corpse info */
	assert(new_task->corpse_info->kcd_user_flags == 0);
	new_task->corpse_info->kcd_user_flags = kc_u_flags;
	kc_u_flags = 0;

	kr = task_start_halt(new_task);
	if (kr != KERN_SUCCESS) {
		goto error_task_generate_corpse;
	}

	/* terminate the ipc space */
	ipc_space_terminate(new_task->itk_space);

	/* Populate the corpse blob, use the proc struct of task instead of corpse task */
	gather_populate_corpse_crashinfo(p, new_task,
	    code, subcode, udata_buffer, num_udata, reason);

	/* Add it to global corpse task list */
	task_add_to_corpse_task_list(new_task);

	*corpse_task = new_task;
	*exc_thread = thread;

error_task_generate_corpse:
#if CONFIG_MACF
	if (label) {
		mac_exc_free_label(label);
	}
#endif

	/* Release the proc reference */
	if (p != NULL) {
		proc_rele(p);
	}

	if (kr != KERN_SUCCESS) {
		if (thread != THREAD_NULL) {
			thread_deallocate(thread);
		}
		if (new_task != TASK_NULL) {
			task_lock(new_task);
			/* Terminate all the other threads in the task. */
			queue_iterate(&new_task->threads, thread_next, thread_t, task_threads)
			{
				thread_terminate_internal(thread_next);
			}
			/* wait for all the threads in the task to terminate */
			task_wait_till_threads_terminate_locked(new_task);
			task_unlock(new_task);

			task_clear_corpse(new_task);
			task_terminate_internal(new_task);
			task_deallocate(new_task);
		}
		if (kc_u_flags) {
			task_crashinfo_release_ref(kc_u_flags);
		}
	}
	/* Free the udata buffer allocated in task_duplicate_map_and_threads */
	if (udata_buffer != NULL) {
		kfree(udata_buffer, size);
	}

	return kr;
}


/*
 * Routine: task_map_corpse_info
 * params: task - Map the corpse info in task's address space
 *         corpse_task - task port of the corpse
 *         kcd_addr_begin - address of the mapped corpse info
 *         kcd_size - size of the mapped corpse info
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 * Note: Temporary function, will be deleted soon.
 */
kern_return_t
task_map_corpse_info(
	task_t task,
	task_t corpse_task,
	vm_address_t *kcd_addr_begin,
	uint32_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_address_t kcd_addr_begin_64;
	mach_vm_size_t size_64;

	kr = task_map_corpse_info_64(task, corpse_task, &kcd_addr_begin_64, &size_64);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	*kcd_addr_begin = (vm_address_t)kcd_addr_begin_64;
	*kcd_size = (uint32_t) size_64;
	return KERN_SUCCESS;
}

/*
 * Routine: task_map_corpse_info_64
 * params: task - Map the corpse info in task's address space
 *         corpse_task - task port of the corpse
 *         kcd_addr_begin - address of the mapped corpse info (takes mach_vm_address_t *)
 *         kcd_size - size of the mapped corpse info (takes mach_vm_size_t *)
 * returns: KERN_SUCCESS on Success.
 *          KERN_FAILURE on Failure.
 *          KERN_INVALID_ARGUMENT on invalid arguments.
 */
kern_return_t
task_map_corpse_info_64(
	task_t task,
	task_t corpse_task,
	mach_vm_address_t *kcd_addr_begin,
	mach_vm_size_t *kcd_size)
{
	kern_return_t kr;
	mach_vm_offset_t crash_data_ptr = 0;
	mach_vm_size_t size = CORPSEINFO_ALLOCATION_SIZE;
	void *corpse_info_kernel = NULL;

	if (task == TASK_NULL || task_is_a_corpse_fork(task)) {
		return KERN_INVALID_ARGUMENT;
	}

	if (corpse_task == TASK_NULL || !task_is_a_corpse(corpse_task) ||
	    kcdata_memory_get_begin_addr(corpse_task->corpse_info) == NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	corpse_info_kernel = kcdata_memory_get_begin_addr(corpse_task->corpse_info);
	kr = mach_vm_allocate_kernel(task->map, &crash_data_ptr, size,
	    VM_FLAGS_ANYWHERE, VM_MEMORY_CORPSEINFO);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	copyout(corpse_info_kernel, crash_data_ptr, size);
	*kcd_addr_begin = crash_data_ptr;
	*kcd_size = size;

	return KERN_SUCCESS;
}
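
/*
 * Example (a hypothetical user-space caller, sketched for illustration only; it assumes the
 * task_generate_corpse() and task_map_corpse_info_64() MIG routines exported via task_server.h
 * are reachable from user space through <mach/task.h>):
 *
 *     mach_port_t corpse = MACH_PORT_NULL;
 *     mach_vm_address_t kcd_addr = 0;
 *     mach_vm_size_t kcd_size = 0;
 *
 *     if (task_generate_corpse(mach_task_self(), &corpse) == KERN_SUCCESS &&
 *         task_map_corpse_info_64(mach_task_self(), corpse, &kcd_addr, &kcd_size) == KERN_SUCCESS) {
 *             // kcd_addr now points at a copy of the corpse's kcdata blob in the caller's
 *             // address space; parse it with the kcdata tooling, then clean up.
 *             mach_vm_deallocate(mach_task_self(), kcd_addr, kcd_size);
 *     }
 *     if (corpse != MACH_PORT_NULL) {
 *             mach_port_deallocate(mach_task_self(), corpse);
 *     }
 */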

uint64_t
task_corpse_get_crashed_thread_id(task_t corpse_task)
{
	return corpse_task->crashed_thread_id;
}