/* osfmk/kern/sched_average.c (xnu-3247.1.106) */
/*
 * Copyright (c) 2000-2007 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Compute various averages.
 */

#include <mach/mach_types.h>

#include <kern/sched.h>
#include <kern/assert.h>
#include <kern/processor.h>
#include <kern/thread.h>
#if CONFIG_TELEMETRY
#include <kern/telemetry.h>
#endif

uint32_t	avenrun[3] = {0, 0, 0};
uint32_t	mach_factor[3] = {0, 0, 0};

#if defined(CONFIG_SCHED_TIMESHARE_CORE)
/*
 * Values are scaled by LOAD_SCALE, defined in processor_info.h
 */
#define base(n)		((n) << SCHED_TICK_SHIFT)
#define frac(n)		(((base(n) - 1) * LOAD_SCALE) / base(n))

static uint32_t		fract[3] = {
	frac(5),		/* 5 second average */
	frac(30),		/* 30 second average */
	frac(60),		/* 1 minute average */
};

#undef base
#undef frac
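
/*
 * Added note: base(n) is the number of scheduler maintenance ticks in n
 * seconds, and frac(n) is (base(n) - 1) / base(n) scaled by LOAD_SCALE, so
 * each fract[] entry is the per-tick decay factor of an exponentially
 * weighted average with a time constant of roughly n seconds.  As a worked
 * example, assuming SCHED_TICK_SHIFT == 3 (8 ticks per second) and
 * LOAD_SCALE == 1000, frac(5) = (39 * 1000) / 40 = 975, i.e. about 97.5%
 * of the previous value is retained on each tick of the 5-second average.
 */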

#endif /* CONFIG_SCHED_TIMESHARE_CORE */

static unsigned int	sched_nrun;

typedef void	(*sched_avg_comp_t)(
					void		*param);

static struct sched_average {
	sched_avg_comp_t	comp;
	void			*param;
	int			period;		/* in seconds */
	uint64_t		deadline;
} sched_average[] = {
	{ compute_averunnable, &sched_nrun, 5, 0 },
	{ compute_stack_target, NULL, 5, 1 },
	{ compute_memory_pressure, NULL, 1, 0 },
	{ compute_zone_gc_throttle, NULL, 60, 0 },
	{ compute_pageout_gc_throttle, NULL, 1, 0 },
	{ compute_pmap_gc_throttle, NULL, 60, 0 },
#if CONFIG_TELEMETRY
	{ compute_telemetry, NULL, 1, 0 },
#endif
	{ NULL, NULL, 0, 0 }
};
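
/*
 * Added note: sched_average[] is a NULL-terminated table of periodic
 * bookkeeping callbacks, each with a period in seconds and an absolute-time
 * deadline.  compute_averages() below walks this table on every scheduler
 * maintenance pass and invokes each entry whose deadline has elapsed.
 */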

typedef struct sched_average	*sched_average_t;

/* The "stdelta" parameter represents the number of scheduler maintenance
 * "ticks" that have elapsed since the last invocation, subject to
 * integer division imprecision.
 */

void
compute_averages(uint64_t stdelta)
{
	int		ncpus, nthreads, nshared, nbackground, nshared_non_bg;
	uint32_t	factor_now, average_now, load_now = 0, background_load_now = 0, combined_fgbg_load_now = 0;
	sched_average_t	avg;
	uint64_t	abstime, index;

	/*
	 *	Retrieve counts, ignoring
	 *	the current thread.
	 */
	ncpus = processor_avail_count;
	nthreads = sched_run_count - 1;
	nshared = sched_share_count;
	nbackground = sched_background_count;

	/*
	 *	Load average and mach factor calculations for
	 *	those which ask about these things.
	 */
	average_now = nthreads * LOAD_SCALE;

	if (nthreads > ncpus)
		factor_now = (ncpus * LOAD_SCALE) / (nthreads + 1);
	else
		factor_now = (ncpus - nthreads) * LOAD_SCALE;

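	/*
	 * Added note: factor_now is the instantaneous "Mach factor", scaled
	 * by LOAD_SCALE.  While there is spare capacity it is simply the
	 * number of idle CPUs (e.g. 4 CPUs with 1 runnable thread besides
	 * the current one gives 3 * LOAD_SCALE); once runnable threads
	 * exceed CPUs it becomes ncpus / (nthreads + 1), tending toward zero
	 * as the machine becomes more heavily loaded.
	 */
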
	/* For those statistics that formerly relied on being recomputed
	 * on timer ticks, advance by the approximate number of corresponding
	 * elapsed intervals, thus compensating for potential idle intervals.
	 */
	for (index = 0; index < stdelta; index++) {
		sched_mach_factor = ((sched_mach_factor << 2) + factor_now) / 5;
		sched_load_average = ((sched_load_average << 2) + average_now) / 5;
	}
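	/*
	 * Added note: each iteration above applies the exponential moving
	 * average
	 *	new = (4 * old + sample) / 5
	 * so the current sample carries a weight of 1/5 and older history
	 * decays by 4/5 per scheduler tick; looping stdelta times replays
	 * the ticks that were skipped while the processor was idle.
	 */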
	/*
	 * Compute the timeshare priority conversion factor based on loading.
	 * Because our counters may be incremented and accessed
	 * concurrently with respect to each other, we may have
	 * windows where the invariant nthreads >= nshared >= nbackground
	 * is broken, so truncate values in these cases.
	 */

	if (nshared > nthreads)
		nshared = nthreads;

	if (nbackground > nshared)
		nbackground = nshared;

	nshared_non_bg = nshared - nbackground;

	if (nshared_non_bg > ncpus) {
		if (ncpus > 1)
			load_now = nshared_non_bg / ncpus;
		else
			load_now = nshared_non_bg;

		if (load_now > NRQS - 1)
			load_now = NRQS - 1;
	}

	if (nbackground > ncpus) {
		if (ncpus > 1)
			background_load_now = nbackground / ncpus;
		else
			background_load_now = nbackground;

		if (background_load_now > NRQS - 1)
			background_load_now = NRQS - 1;
	}

	if (nshared > ncpus) {
		if (ncpus > 1)
			combined_fgbg_load_now = nshared / ncpus;
		else
			combined_fgbg_load_now = nshared;

		if (combined_fgbg_load_now > NRQS - 1)
			combined_fgbg_load_now = NRQS - 1;
	}
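
	/*
	 * Added note: each of the three load figures is the number of
	 * runnable timeshare threads of that class per available CPU.  It
	 * stays at zero unless the machine is over-committed, and is clamped
	 * to NRQS - 1 so it can be used to index sched_load_shifts[] below.
	 */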

	/*
	 *	Sample total running threads.
	 */
	sched_nrun = nthreads;

#if defined(CONFIG_SCHED_TIMESHARE_CORE)

	/*
	 * The conversion factor consists of
	 * two components: a fixed value based
	 * on the absolute time unit, and a
	 * dynamic portion based on loading.
	 *
	 * Zero loading results in an out-of-range
	 * shift count.  Accumulated usage is ignored
	 * during conversion and new usage deltas
	 * are discarded.
	 */
	sched_pri_shift = sched_fixed_shift - sched_load_shifts[load_now];
	sched_background_pri_shift = sched_fixed_shift - sched_load_shifts[background_load_now];
	sched_combined_fgbg_pri_shift = sched_fixed_shift - sched_load_shifts[combined_fgbg_load_now];
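
	/*
	 * Added note (a sketch of how these values are consumed, based on
	 * the timeshare scheduler elsewhere in osfmk/kern): a thread's
	 * priority decrement is derived by right-shifting its accumulated
	 * CPU usage by one of these pri_shift values.  Higher load yields a
	 * larger sched_load_shifts[] entry, hence a smaller shift and a
	 * steeper priority penalty for CPU-bound timeshare threads.
	 */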

	/*
	 *	Compute old-style Mach load averages.
	 */

	for (index = 0; index < stdelta; index++) {
		register int		i;

		for (i = 0; i < 3; i++) {
			mach_factor[i] = ((mach_factor[i] * fract[i]) +
						(factor_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE;

			avenrun[i] = ((avenrun[i] * fract[i]) +
						(average_now * (LOAD_SCALE - fract[i]))) / LOAD_SCALE;
		}
	}
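
	/*
	 * Added note: with f = fract[i] / LOAD_SCALE, the loop above is the
	 * classic exponentially weighted average
	 *	avenrun[i] <- avenrun[i] * f + average_now * (1 - f)
	 * maintained at 5-, 30- and 60-second horizons, and likewise for
	 * mach_factor[].  Both arrays remain scaled by LOAD_SCALE and are
	 * reported to user space through host statistics (e.g. HOST_LOAD_INFO).
	 */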
#endif /* CONFIG_SCHED_TIMESHARE_CORE */

	/*
	 *	Compute averages in other components.
	 */
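	/*
	 * Added note: for each table entry whose deadline has passed, the
	 * callback is invoked once, plus once more for every full period
	 * that was missed (capped at SCHED_TICK_MAX_DELTA), so components
	 * catch up after idle stretches without being flooded; the deadline
	 * is then rearmed one period past the current time.
	 */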
	abstime = mach_absolute_time();
	for (avg = sched_average; avg->comp != NULL; ++avg) {
		if (abstime >= avg->deadline) {
			uint64_t period_abs = (avg->period * sched_one_second_interval);
			uint64_t ninvokes = 1;

			ninvokes += (abstime - avg->deadline) / period_abs;
			ninvokes = MIN(ninvokes, SCHED_TICK_MAX_DELTA);

			for (index = 0; index < ninvokes; index++) {
				(*avg->comp)(avg->param);
			}
			avg->deadline = abstime + period_abs;
		}
	}
}