[apple/xnu.git] / osfmk / kern / sched_average.c (xnu-3248.40.184)
/*
 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Compute various averages.
 */

#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/assert.h>
#include <kern/processor.h>
#include <kern/thread.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

#include <sys/kdebug.h>

uint32_t	avenrun[3] = {0, 0, 0};
uint32_t	mach_factor[3] = {0, 0, 0};

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
/*
 * Values are scaled by LOAD_SCALE, defined in processor_info.h
 */
#define base(n)		((n) << SCHED_TICK_SHIFT)
#define frac(n)		(((base(n) - 1) * LOAD_SCALE) / base(n))

static uint32_t		fract[3] = {
	frac(5),		/* 5 second average */
	frac(30),		/* 30 second average */
	frac(60),		/* 1 minute average */
};

#undef base
#undef frac
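/*
 * Illustrative arithmetic, assuming SCHED_TICK_SHIFT == 3 (eight scheduler
 * ticks per second) and LOAD_SCALE == 1000:
 *
 *	base(5)  = 5 << 3  = 40      frac(5)  = (39  * 1000) / 40  = 975
 *	base(30) = 30 << 3 = 240     frac(30) = (239 * 1000) / 240 = 995
 *	base(60) = 60 << 3 = 480     frac(60) = (479 * 1000) / 480 = 997
 *
 * fract[i] is therefore the per-tick retention weight used by the
 * exponential moving averages in compute_averages() below.
 */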

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

static unsigned int	sched_nrun;

typedef void	(*sched_avg_comp_t)(
					void		*param);

static struct sched_average {
	sched_avg_comp_t	comp;
	void			*param;
	int			period;	/* in seconds */
	uint64_t		deadline;
} sched_average[] = {
	{ compute_averunnable, &sched_nrun, 5, 0 },
	{ compute_stack_target, NULL, 5, 1 },
	{ compute_memory_pressure, NULL, 1, 0 },
	{ compute_zone_gc_throttle, NULL, 60, 0 },
	{ compute_pageout_gc_throttle, NULL, 1, 0 },
	{ compute_pmap_gc_throttle, NULL, 60, 0 },
#if CONFIG_TELEMETRY
	{ compute_telemetry, NULL, 1, 0 },
#endif
	{ NULL, NULL, 0, 0 }
};

typedef struct sched_average	*sched_average_t;

/* The "stdelta" parameter represents the number of scheduler maintenance
 * "ticks" that have elapsed since the last invocation, subject to
 * integer division imprecision.
 */
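/*
 * Illustrative example: if three maintenance-tick intervals elapsed since
 * the last invocation (e.g. after the processor sat idle), the caller is
 * expected to pass stdelta == 3, and each per-tick decaying average below
 * is advanced three times to catch up.
 */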
void
compute_averages(uint64_t stdelta)
{
	int		ncpus, nthreads, nshared, nbackground, nshared_non_bg;
	uint32_t	factor_now, average_now, load_now = 0, background_load_now = 0, combined_fgbg_load_now = 0;
	sched_average_t	avg;
	uint64_t	abstime, index;

	/*
	 *	Retrieve counts, ignoring
	 *	the current thread.
	 */
	ncpus = processor_avail_count;
	nthreads = sched_run_count - 1;
	nshared = sched_share_count;
	nbackground = sched_background_count;

	/*
	 *	Load average and mach factor calculations for
	 *	those which ask about these things.
	 */
	average_now = nthreads * LOAD_SCALE;

	if (nthreads > ncpus)
		factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1);
	else
		factor_now = (ncpus - nthreads) * LOAD_SCALE;

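	/*
	 * Worked example (illustrative): with ncpus == 4 and nthreads == 2,
	 * average_now = 2 * LOAD_SCALE and factor_now = (4 - 2) * LOAD_SCALE,
	 * reflecting idle capacity.  With ncpus == 4 and nthreads == 7, the
	 * system is oversubscribed and factor_now = (4 * LOAD_SCALE) / 8,
	 * i.e. the mach factor drops below one CPU's worth of LOAD_SCALE.
	 */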
	/* For those statistics that formerly relied on being recomputed
	 * on timer ticks, advance by the approximate number of corresponding
	 * elapsed intervals, thus compensating for potential idle intervals.
	 */
	for (index = 0; index < stdelta; index++) {
		sched_mach_factor = ((sched_mach_factor << 2) + factor_now) / 5;
		sched_load_average = ((sched_load_average << 2) + average_now) / 5;
	}
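	/*
	 * Note (illustrative): ((x << 2) + new) / 5 is (4 * x + new) / 5, an
	 * exponentially weighted moving average that retains 4/5 of the old
	 * value per tick.  E.g. from sched_load_average == 1000 and
	 * average_now == 2000, one tick yields (4000 + 2000) / 5 == 1200.
	 */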
	/*
	 * Compute the timeshare priority conversion factor based on loading.
	 * Because our counters may be incremented and accessed
	 * concurrently with respect to each other, we may have
	 * windows where the invariant nthreads >= nshared >= nbackground
	 * is broken, so truncate values in these cases.
	 */

	if (nshared > nthreads)
		nshared = nthreads;

	if (nbackground > nshared)
		nbackground = nshared;

	nshared_non_bg = nshared - nbackground;

	if (nshared_non_bg > ncpus) {
		if (ncpus > 1)
			load_now = nshared_non_bg / ncpus;
		else
			load_now = nshared_non_bg;

		if (load_now > NRQS - 1)
			load_now = NRQS - 1;
	}

	if (nbackground > ncpus) {
		if (ncpus > 1)
			background_load_now = nbackground / ncpus;
		else
			background_load_now = nbackground;

		if (background_load_now > NRQS - 1)
			background_load_now = NRQS - 1;
	}

	if (nshared > ncpus) {
		if (ncpus > 1)
			combined_fgbg_load_now = nshared / ncpus;
		else
			combined_fgbg_load_now = nshared;

		if (combined_fgbg_load_now > NRQS - 1)
			combined_fgbg_load_now = NRQS - 1;
	}

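	/*
	 * Worked example (illustrative): with ncpus == 8 and 24 runnable
	 * non-background timeshare threads, load_now == 24 / 8 == 3; a load
	 * that never exceeds the CPU count leaves load_now at 0.  The clamp
	 * to NRQS - 1 keeps each value a valid index into sched_load_shifts[]
	 * below.
	 */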
	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
		MACHDBG_CODE(DBG_MACH_SCHED, MACH_SCHED_LOAD) | DBG_FUNC_NONE,
		(nthreads - nshared), (nshared - nbackground), nbackground, 0, 0);

	/*
	 *	Sample total running threads.
	 */
	sched_nrun = nthreads;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

	/*
	 * The conversion factor consists of
	 * two components: a fixed value based
	 * on the absolute time unit, and a
	 * dynamic portion based on loading.
	 *
	 * Zero loading results in an out-of-range
	 * shift count.  Accumulated usage is ignored
	 * during conversion and new usage deltas
	 * are discarded.
	 */
	sched_pri_shift = sched_fixed_shift - sched_load_shifts[load_now];
	sched_background_pri_shift = sched_fixed_shift - sched_load_shifts[background_load_now];
	sched_combined_fgbg_pri_shift = sched_fixed_shift - sched_load_shifts[combined_fgbg_load_now];

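	/*
	 * Sketch of how these shifts are consumed (an assumption about code
	 * outside this file, stated here for context): the timeshare priority
	 * computation depresses a thread's priority by roughly
	 * (sched_usage >> pri_shift), so a higher load_now selects a larger
	 * sched_load_shifts[] entry, a smaller resulting shift, and thus a
	 * larger priority penalty for the same accumulated usage.
	 */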
	/*
	 *	Compute old-style Mach load averages.
	 */

	for (index = 0; index < stdelta; index++) {
		register int		i;

		for (i = 0; i < 3; i++) {
			mach_factor[i] = ((mach_factor[i] * fract[i]) +
						(factor_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE;

			avenrun[i] = ((avenrun[i] * fract[i]) +
						(average_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE;
		}
	}
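	/*
	 * Illustrative one-tick update, assuming LOAD_SCALE == 1000 and
	 * fract[0] == 975 (see the frac() sketch above): from avenrun[0] == 0
	 * with average_now == 2000, one tick gives
	 * (0 * 975 + 2000 * 25) / 1000 == 50, so the 5-second average ramps
	 * toward the instantaneous load over many ticks rather than jumping.
	 */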
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

	/*
	 *	Compute averages in other components.
	 */
	abstime = mach_absolute_time();
	for (avg = sched_average; avg->comp != NULL; ++avg) {
		if (abstime >= avg->deadline) {
			uint64_t period_abs = (avg->period * sched_one_second_interval);
			uint64_t ninvokes = 1;

			ninvokes += (abstime - avg->deadline) / period_abs;
			ninvokes = MIN(ninvokes, SCHED_TICK_MAX_DELTA);
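			/*
			 * Worked example (illustrative): if abstime is two and a half
			 * periods past avg->deadline, the integer division yields 2 and
			 * ninvokes == 3, so the callback runs three times to catch up;
			 * the MIN() against SCHED_TICK_MAX_DELTA bounds that catch-up.
			 */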

			for (index = 0; index < ninvokes; index++) {
				(*avg->comp)(avg->param);
			}
			avg->deadline = abstime + period_abs;
		}
	}
}