]> git.saurik.com Git - apple/xnu.git/blame_incremental - osfmk/kperf/x86_64/kperf_mp.c
xnu-6153.141.1.tar.gz
[apple/xnu.git] / osfmk / kperf / x86_64 / kperf_mp.c
... / ...
CommitLineData
1/*
2 * Copyright (c) 2011-2016 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29#include <i386/mp.h>
30#include <mach/mach_types.h>
31#include <kern/processor.h>
32#include <kperf/buffer.h>
33#include <kperf/kperf.h>
34#include <kperf/kperf_arch.h>
35#include <kperf/kperf_timer.h>
36#include <stdatomic.h>
37
/*
 * Ask every other running processor to run the kperf timer handler via IPI.
 *
 * Walks all logical CPUs, skipping those that are offline, not running, or
 * idle (no active thread), and skipping CPUs that still have an unacknowledged
 * IPI pending from a previous broadcast of this trigger.  The surviving set is
 * signaled in a single mp_cpus_call().
 *
 * @param trigger  the kperf timer whose pending_cpus bitmap tracks
 *                 outstanding IPIs and whose handler the IPI runs.
 *
 * @return true unless the current CPU itself was found running with an
 *         active thread — i.e. whether only the calling CPU needs to handle
 *         the "system only" sample locally (see callers for exact semantics;
 *         NOTE(review): inferred from the flag's updates here, confirm
 *         against kperf_timer.c).
 */
bool
kperf_mp_broadcast_other_running(struct kperf_timer *trigger)
{
	int current_cpu = cpu_number();
	int ncpus = machine_info.logical_cpu_max;
	bool system_only_self = true;
	/* accumulate the set of CPUs to IPI; sent once after the scan */
	cpumask_t cpu_mask = 0;

	for (int i = 0; i < ncpus; i++) {
		/*
		 * Per-CPU bit in the 64-bit pending_cpus bitmap.
		 * Assumes ncpus <= 64 — shifting by >= 64 would be UB, so this
		 * relies on logical_cpu_max never exceeding the bitmap width.
		 */
		uint64_t i_bit = UINT64_C(1) << i;
		processor_t processor = cpu_to_processor(i);

		/* do not IPI processors that are not scheduling threads */
		if (processor == PROCESSOR_NULL ||
		    processor->state != PROCESSOR_RUNNING ||
		    processor->active_thread == THREAD_NULL) {
#if DEVELOPMENT || DEBUG
			BUF_VERB(PERF_TM_SKIPPED, i,
			    processor != PROCESSOR_NULL ? processor->state : 0,
			    processor != PROCESSOR_NULL ? processor->active_thread : 0);
#endif /* DEVELOPMENT || DEBUG */
			continue;
		}

		/* don't run the handler on the current processor */
		if (i == current_cpu) {
			/* the caller's own CPU is running work — it samples itself */
			system_only_self = false;
			continue;
		}

		/* nor processors that have not responded to the last IPI */
		/*
		 * fetch_or both marks this CPU pending and reports whether it
		 * was already pending, in one atomic step; relaxed ordering is
		 * sufficient since only this bit's value matters, not ordering
		 * against other memory.
		 */
		uint64_t already_pending = atomic_fetch_or_explicit(
			&trigger->pending_cpus, i_bit,
			memory_order_relaxed);
		if (already_pending & i_bit) {
#if DEVELOPMENT || DEBUG
			BUF_VERB(PERF_TM_PENDING, i_bit, already_pending);
			/* count dropped IPIs for diagnostics (debug builds only) */
			atomic_fetch_add_explicit(&kperf_pending_ipis, 1,
			    memory_order_relaxed);
#endif /* DEVELOPMENT || DEBUG */
			continue;
		}

		cpu_mask |= cpu_to_cpumask(i);
	}

	/* fire the IPIs without waiting for the targets to respond (NOSYNC) */
	if (cpu_mask != 0) {
		mp_cpus_call(cpu_mask, NOSYNC, kperf_ipi_handler, trigger);
	}

	return system_only_self;
}