[apple/xnu.git] osfmk/arm/machine_routines_common.c (xnu-4570.20.62)

/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif

extern uint64_t mach_absolutetime_asleep;

static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused, perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
    __unused struct perfcontrol_thread_data *oncore,
    __unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
    __unused void *unused)
{
}

sched_perfcontrol_offcore_t sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_deadline_passed_t sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
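
/*
 * sched_perfcontrol_register_callbacks() lets a performance controller (CLPC)
 * install its callout table.  Passing NULL restores every callout to the
 * defaults above; otherwise each non-NULL entry replaces the current callout
 * and NULL entries fall back to the defaults.  Newer callouts (csw,
 * state_update, deadline_passed) are only honored when callbacks->version
 * advertises a recent enough SCHED_PERFCONTROL_CALLBACKS_VERSION.
 *
 * A hypothetical registration (illustrative sketch only; the struct tag,
 * handler names and state size below are not taken from this file):
 *
 *     static struct sched_perfcontrol_callbacks my_callbacks = {
 *         .version      = SCHED_PERFCONTROL_CALLBACKS_VERSION_5,
 *         .offcore      = my_offcore_handler,
 *         .oncore       = my_oncore_handler,
 *         .csw          = my_csw_handler,
 *         .state_update = my_state_update_handler,
 *     };
 *     sched_perfcontrol_register_callbacks(&my_callbacks, sizeof(struct perfcontrol_state));
 */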

void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
    assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

    if (size_of_state > sizeof(struct perfcontrol_state)) {
        panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
    }

    if (callbacks) {

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
            if (callbacks->csw != NULL) {
                sched_perfcontrol_csw = callbacks->csw;
            } else {
                sched_perfcontrol_csw = sched_perfcontrol_csw_default;
            }

            if (callbacks->state_update != NULL) {
                sched_perfcontrol_state_update = callbacks->state_update;
            } else {
                sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
            if (callbacks->deadline_passed != NULL) {
                sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
            } else {
                sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
            }
        }

        if (callbacks->offcore != NULL) {
            sched_perfcontrol_offcore = callbacks->offcore;
        } else {
            sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        }

        if (callbacks->context_switch != NULL) {
            sched_perfcontrol_switch = callbacks->context_switch;
        } else {
            sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        }

        if (callbacks->oncore != NULL) {
            sched_perfcontrol_oncore = callbacks->oncore;
        } else {
            sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        }

        if (callbacks->max_runnable_latency != NULL) {
            sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
        } else {
            sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        }

        if (callbacks->work_interval_notify != NULL) {
            sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
        } else {
            sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        }
    } else {
        /* reset to defaults */
        sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        sched_perfcontrol_csw = sched_perfcontrol_csw_default;
        sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
    }
}
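
/*
 * Helpers that fill in the perfcontrol structures handed to the CLPC
 * callouts: per-thread data (perfcontrol class, thread ID, scheduling latency
 * at the same base priority, and the opaque per-thread perfcontrol state),
 * and, when MONOTONIC is built in, the fixed instruction/cycle counts for the
 * current CPU.
 */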

static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
                                                thread_t thread,
                                                uint64_t same_pri_latency)
{
    bzero(data, sizeof(struct perfcontrol_thread_data));
    data->perfctl_class = thread_get_perfcontrol_class(thread);
    data->energy_estimate_nj = 0;
    data->thread_id = thread->thread_id;
    data->scheduling_latency_at_same_basepri = same_pri_latency;
    data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
    mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
    cpu_counters->instructions = 0;
    cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}

int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];
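
/*
 * Optional instrumentation for the callouts themselves: when
 * perfcontrol_callout_stats_enabled is set, the fixed cycle/instruction
 * counters are sampled around each callout and the deltas are accumulated per
 * callout type with relaxed atomics, so that perfcontrol_callout_stat_avg()
 * can report the average cost of each kind of callout.
 */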

#if MONOTONIC
static inline
bool perfcontrol_callout_counters_begin(uint64_t *counters)
{
    if (!perfcontrol_callout_stats_enabled)
        return false;
    mt_fixed_counts(counters);
    return true;
}

static inline
void perfcontrol_callout_counters_end(uint64_t *start_counters,
                                      perfcontrol_callout_type_t type)
{
    uint64_t end_counters[MT_CORE_NFIXED];
    mt_fixed_counts(end_counters);
    atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
            end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], memory_order_relaxed);
#ifdef MT_CORE_INSTRS
    atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
            end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], memory_order_relaxed);
#endif /* defined(MT_CORE_INSTRS) */
    atomic_fetch_add_explicit(&perfcontrol_callout_count[type], 1, memory_order_relaxed);
}
#endif /* MONOTONIC */

uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
                                      perfcontrol_callout_stat_t stat)
{
    if (!perfcontrol_callout_stats_enabled)
        return 0;
    return (perfcontrol_callout_stats[type][stat] / perfcontrol_callout_count[type]);
}
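
/*
 * Invoked on context switch.  The legacy 'switch' callout only receives the
 * opaque per-thread states; the newer 'csw' callout additionally gets the
 * populated off-core/on-core thread data and CPU counters, and may report
 * per-thread energy estimates that are folded back into the machine-specific
 * thread state on arm64.
 */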

void
machine_switch_perfcontrol_context(perfcontrol_event event,
                                   uint64_t timestamp,
                                   uint32_t flags,
                                   uint64_t new_thread_same_pri_latency,
                                   thread_t old,
                                   thread_t new)
{
    if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
        perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
        perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
        sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
    }

    if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
        uint32_t cpu_id = (uint32_t)cpu_number();
        struct perfcontrol_cpu_counters cpu_counters;
        struct perfcontrol_thread_data offcore, oncore;
        machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
        machine_switch_populate_perfcontrol_thread_data(&oncore, new,
                new_thread_same_pri_latency);
        machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
        uint64_t counters[MT_CORE_NFIXED];
        bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
        sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
                &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
        if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
#endif /* MONOTONIC */

#if __arm64__
        old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
        new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif
    }
}
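
/*
 * Invoked when the scheduler updates per-thread state outside of the full
 * context-switch path; forwards the populated per-thread data to the
 * 'state_update' callout and credits any reported energy estimate back to the
 * thread on arm64.
 */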

void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
                                        uint64_t timestamp,
                                        uint32_t flags,
                                        thread_t thread)
{
    if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default)
        return;
    uint32_t cpu_id = (uint32_t)cpu_number();
    struct perfcontrol_thread_data data;
    machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
            &data, NULL);
#if MONOTONIC
    if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
#endif /* MONOTONIC */

#if __arm64__
    thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif
}
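
/*
 * Notifies the performance controller that a thread is going on core, passing
 * its QoS class, urgency, 32/64-bit-ness, kernel-thread flag, start time and
 * the observed scheduling latencies.
 */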

void
machine_thread_going_on_core(thread_t new_thread,
                             int urgency,
                             uint64_t sched_latency,
                             uint64_t same_pri_latency,
                             uint64_t timestamp)
{
    if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default)
        return;
    struct going_on_core on_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

    on_core.thread_id = new_thread->thread_id;
    on_core.energy_estimate_nj = 0;
    on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
    on_core.urgency = urgency;
    on_core.is_32_bit = thread_is_64bit(new_thread) ? FALSE : TRUE;
    on_core.is_kernel_thread = new_thread->task == kernel_task;
    on_core.scheduling_latency = sched_latency;
    on_core.start_time = timestamp;
    on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
    if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
#endif /* MONOTONIC */

#if __arm64__
    new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}
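
/*
 * Notifies the performance controller that a thread is coming off core,
 * including whether the thread is terminating; any energy estimate reported
 * by the callout is credited back to the thread on arm64.
 */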

void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, uint64_t last_dispatch)
{
    if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default)
        return;
    struct going_off_core off_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

    off_core.thread_id = old_thread->thread_id;
    off_core.energy_estimate_nj = 0;
    off_core.end_time = last_dispatch;

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
    if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
#endif /* MONOTONIC */

#if __arm64__
    old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}
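
/*
 * Reports the maximum observed runnable (waiting-to-run) scheduling latency
 * per urgency band to the performance controller; THREAD_URGENCY_NONE is
 * always reported as zero.
 */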

void
machine_max_runnable_latency(uint64_t bg_max_latency,
                             uint64_t default_max_latency,
                             uint64_t realtime_max_latency)
{
    if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default)
        return;
    struct perfcontrol_max_runnable_latency latencies = {
        .max_scheduling_latencies = {
            [THREAD_URGENCY_NONE] = 0,
            [THREAD_URGENCY_BACKGROUND] = bg_max_latency,
            [THREAD_URGENCY_NORMAL] = default_max_latency,
            [THREAD_URGENCY_REAL_TIME] = realtime_max_latency
        }
    };

    sched_perfcontrol_max_runnable_latency(&latencies);
}
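
/*
 * Translates a kernel work-interval notification into the perfcontrol
 * work-interval structure (IDs, QoS class, urgency, start/finish/deadline/
 * next_start and flags) and hands it to the 'work_interval_notify' callout.
 */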

void
machine_work_interval_notify(thread_t thread,
                             struct kern_work_interval_args* kwi_args)
{
    if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default)
        return;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
    struct perfcontrol_work_interval work_interval = {
        .thread_id = thread->thread_id,
        .qos_class = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
        .urgency = kwi_args->urgency,
        .flags = kwi_args->notify_flags,
        .work_interval_id = kwi_args->work_interval_id,
        .start = kwi_args->start,
        .finish = kwi_args->finish,
        .deadline = kwi_args->deadline,
        .next_start = kwi_args->next_start,
        .create_flags = kwi_args->create_flags,
    };
    sched_perfcontrol_work_interval_notify(state, &work_interval);
}
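
/*
 * Invoked when a scheduler deadline that the performance controller asked to
 * be notified about has passed; only forwarded if a non-default
 * 'deadline_passed' callout is registered.
 */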

void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
    if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default)
        sched_perfcontrol_deadline_passed(deadline);
}

#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. The alarm will go off if interrupts are held
 * disabled for too long, starting from now.
 */
void
ml_spin_debug_reset(thread_t thread)
{
    thread->machine.intmask_timestamp = mach_absolute_time();
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp on a thread that has been unscheduled
 * to avoid false alarms.
 */
void
ml_spin_debug_clear(thread_t thread)
{
    thread->machine.intmask_timestamp = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms.
 */
void
ml_spin_debug_clear_self()
{
    ml_spin_debug_clear(current_thread());
}

void
ml_check_interrupts_disabled_duration(thread_t thread)
{
    uint64_t start;
    uint64_t now;

    start = thread->machine.intmask_timestamp;
    if (start != 0) {
        now = mach_absolute_time();

        if ((now - start) > interrupt_masked_timeout) {
            mach_timebase_info_data_t timebase;
            clock_timebase_info(&timebase);

#ifndef KASAN
            /*
             * Disable the actual panic for KASAN due to the overhead of KASAN itself,
             * but leave the rest of the mechanism enabled so that KASAN can catch any
             * bugs in the mechanism itself.
             */
            panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
        }
    }

    return;
}
#endif // INTERRUPT_MASKED_DEBUG
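
/*
 * ml_set_interrupts_enabled()
 * Enables or masks IRQ/FIQ delivery on the current CPU and returns whether
 * interrupts were previously enabled.  When enabling with preemption enabled,
 * any pending urgent AST is handled first; with INTERRUPT_MASKED_DEBUG, the
 * masked interval is checked or recorded around the transition.
 *
 * Typical save/restore pattern in callers (illustrative):
 *
 *     boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *     // ... critical section with interrupts masked ...
 *     (void) ml_set_interrupts_enabled(istate);
 */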

boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
    thread_t thread;
    uint64_t state;

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
    state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
    state = __builtin_arm_rsr("DAIF");
#endif
    if (enable) {
#if INTERRUPT_MASKED_DEBUG
        if (interrupt_masked_debug && (state & INTERRUPT_MASK)) {
            // Interrupts are currently masked, we will enable them (after finishing this check)
            thread = current_thread();
            ml_check_interrupts_disabled_duration(thread);
            thread->machine.intmask_timestamp = 0;
        }
#endif // INTERRUPT_MASKED_DEBUG
        if (get_preemption_level() == 0) {
            thread = current_thread();
            while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
                uintptr_t up = arm_user_protect_begin(thread);
#endif
                ast_taken_kernel();
#if __ARM_USER_PROTECT__
                arm_user_protect_end(thread, up, FALSE);
#endif
            }
        }
#if __arm__
        __asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
        __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
    } else {
#if __arm__
        __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
        __builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
        if (interrupt_masked_debug && ((state & INTERRUPT_MASK) == 0)) {
            // Interrupts were enabled, we just masked them
            current_thread()->machine.intmask_timestamp = mach_absolute_time();
        }
#endif
    }
    return ((state & INTERRUPT_MASK) == 0);
}

static boolean_t ml_quiescing;

void ml_set_is_quiescing(boolean_t quiescing)
{
    assert(FALSE == ml_get_interrupts_enabled());
    ml_quiescing = quiescing;
}

boolean_t ml_is_quiescing(void)
{
    assert(FALSE == ml_get_interrupts_enabled());
    return (ml_quiescing);
}
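
/*
 * ml_get_booter_memory_size()
 * If the booter reports memSizeActual, that value is returned as-is;
 * otherwise the function returns the padding needed to round memSize up to
 * the next 512 MB boundary.
 */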

uint64_t ml_get_booter_memory_size(void)
{
    enum { kRoundSize = 512*1024*1024ULL };
    uint64_t size;
    size = BootArgs->memSizeActual;
    if (!size)
    {
        size  = BootArgs->memSize;
        size  = (size + kRoundSize - 1) & ~(kRoundSize - 1);
        size -= BootArgs->memSize;
    }
    return (size);
}
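
/*
 * Accessors for the time bases: the absolute-time offset is the rtclock base,
 * while the continuous-time offset additionally includes the accumulated time
 * spent asleep, so continuous time keeps advancing across sleep.
 */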

uint64_t
ml_get_abstime_offset(void)
{
    return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
    return (rtclock_base_abstime + mach_absolutetime_asleep);
}

uint64_t
ml_get_time_since_reset(void)
{
    /* The timebase resets across S2R, so just return the raw value. */
    return ml_get_hwclock();
}

uint64_t
ml_get_conttime_wake_time(void)
{
    /* The wake time is simply our continuous time offset. */
    return ml_get_conttime_offset();
}