/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
#include <pexpert/pexpert.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>
#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif

extern uint64_t mach_absolutetime_asleep;
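
/*
 * Default (no-op) implementations of the scheduler performance-control
 * callouts below.  They remain installed until an external performance
 * controller registers its own handlers via
 * sched_perfcontrol_register_callbacks().
 */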
static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
    __unused struct perfcontrol_thread_data *oncore,
    __unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
    __unused void *unused)
{
}
sched_perfcontrol_offcore_t                     sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t              sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t                      sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t           sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t         sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t   sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t        sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t        sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t           sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t             sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t                         sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t                sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
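
/*
 * Install (or, when callbacks is NULL, reset) the performance-control
 * callout table.  Handlers are only accepted up to the version the caller
 * advertises in callbacks->version; newer-versioned slots keep their
 * defaults.
 */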
void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
    assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

    if (size_of_state > sizeof(struct perfcontrol_state)) {
        panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
    }

    if (callbacks) {

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
            if (callbacks->work_interval_ctl != NULL) {
                sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
            } else {
                sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
            if (callbacks->csw != NULL) {
                sched_perfcontrol_csw = callbacks->csw;
            } else {
                sched_perfcontrol_csw = sched_perfcontrol_csw_default;
            }

            if (callbacks->state_update != NULL) {
                sched_perfcontrol_state_update = callbacks->state_update;
            } else {
                sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
            if (callbacks->deadline_passed != NULL) {
                sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
            } else {
                sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
            }
        }

        if (callbacks->offcore != NULL) {
            sched_perfcontrol_offcore = callbacks->offcore;
        } else {
            sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        }

        if (callbacks->context_switch != NULL) {
            sched_perfcontrol_switch = callbacks->context_switch;
        } else {
            sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        }

        if (callbacks->oncore != NULL) {
            sched_perfcontrol_oncore = callbacks->oncore;
        } else {
            sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        }

        if (callbacks->max_runnable_latency != NULL) {
            sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
        } else {
            sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        }

        if (callbacks->work_interval_notify != NULL) {
            sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
        } else {
            sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        }
    } else {
        /* reset to defaults */
        sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
        sched_perfcontrol_csw = sched_perfcontrol_csw_default;
        sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
    }
}
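
/*
 * Helpers that package the per-thread and per-CPU information handed to the
 * registered callouts.
 */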
static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
                                                thread_t thread,
                                                uint64_t same_pri_latency)
{
    bzero(data, sizeof(struct perfcontrol_thread_data));
    data->perfctl_class = thread_get_perfcontrol_class(thread);
    data->energy_estimate_nj = 0;
    data->thread_id = thread->thread_id;
    data->scheduling_latency_at_same_basepri = same_pri_latency;
    data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}
static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
    mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
    cpu_counters->instructions = 0;
    cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}
int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];
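
/*
 * Optional instrumentation: when perfcontrol_callout_stats_enabled is set,
 * the fixed cycle/instruction counters are sampled around each callout so
 * that perfcontrol_callout_stat_avg() can report the average cost per
 * callout type.
 */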
#if MONOTONIC
static inline
bool perfcontrol_callout_counters_begin(uint64_t *counters)
{
    if (!perfcontrol_callout_stats_enabled)
        return false;
    mt_fixed_counts(counters);
    return true;
}

static inline
void perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
    uint64_t end_counters[MT_CORE_NFIXED];
    mt_fixed_counts(end_counters);
    atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
        end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], memory_order_relaxed);
#ifdef MT_CORE_INSTRS
    atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
        end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], memory_order_relaxed);
#endif /* defined(MT_CORE_INSTRS) */
    atomic_fetch_add_explicit(&perfcontrol_callout_count[type], 1, memory_order_relaxed);
}
#endif /* MONOTONIC */
uint64_t perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
    if (!perfcontrol_callout_stats_enabled)
        return 0;
    return (perfcontrol_callout_stats[type][stat] / perfcontrol_callout_count[type]);
}
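
/*
 * Context-switch callout: invoked by the scheduler on a switch to hand the
 * registered callbacks the outgoing/incoming thread data and a snapshot of
 * the CPU counters.  Energy estimates written back by the callee are
 * accumulated on the corresponding threads.
 */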
void
machine_switch_perfcontrol_context(perfcontrol_event event,
                                   uint64_t timestamp,
                                   uint32_t flags,
                                   uint64_t new_thread_same_pri_latency,
                                   thread_t old,
                                   thread_t new)
{
    if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
        perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
        perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
        sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
    }

    if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
        uint32_t cpu_id = (uint32_t)cpu_number();
        struct perfcontrol_cpu_counters cpu_counters;
        struct perfcontrol_thread_data offcore, oncore;
        machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
        machine_switch_populate_perfcontrol_thread_data(&oncore, new,
            new_thread_same_pri_latency);
        machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
        uint64_t counters[MT_CORE_NFIXED];
        bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
        sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
            &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
        if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
#endif /* MONOTONIC */

        old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
        new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
    }
}
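
/*
 * State-update callout: reports a scheduling state change for a single
 * thread (no off-core/on-core pair) to the registered state_update handler.
 */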
void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
                                        uint64_t timestamp,
                                        uint32_t flags,
                                        thread_t thread)
{
    if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default)
        return;

    uint32_t cpu_id = (uint32_t)cpu_number();
    struct perfcontrol_thread_data data;
    machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
        &data, NULL);
#if MONOTONIC
    if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
#endif /* MONOTONIC */

    thread->machine.energy_estimate_nj += data.energy_estimate_nj;
}
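
/*
 * Going-on-core callout: describes the thread that is about to start running
 * (QoS, urgency, observed scheduling latencies) to the oncore handler.
 */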
void
machine_thread_going_on_core(thread_t   new_thread,
                             int        urgency,
                             uint64_t   sched_latency,
                             uint64_t   same_pri_latency,
                             uint64_t   timestamp)
{
    if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default)
        return;

    struct going_on_core on_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

    on_core.thread_id = new_thread->thread_id;
    on_core.energy_estimate_nj = 0;
    on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
    on_core.urgency = urgency;
    on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
    on_core.is_kernel_thread = new_thread->task == kernel_task;
    on_core.scheduling_latency = sched_latency;
    on_core.start_time = timestamp;
    on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
    if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
#endif /* MONOTONIC */

    new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
}
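
/*
 * Going-off-core callout: reports the thread being descheduled, including
 * whether it is terminating and the timestamp of its last dispatch.
 */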
void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating, uint64_t last_dispatch)
{
    if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default)
        return;

    struct going_off_core off_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

    off_core.thread_id = old_thread->thread_id;
    off_core.energy_estimate_nj = 0;
    off_core.end_time = last_dispatch;

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
    if (ctrs_enabled) perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
#endif /* MONOTONIC */

    old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
}
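
/*
 * Forwards the per-urgency maximum runnable (queued-but-not-running)
 * latencies observed by the scheduler to the max_runnable_latency handler.
 */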
void
machine_max_runnable_latency(uint64_t bg_max_latency,
                             uint64_t default_max_latency,
                             uint64_t realtime_max_latency)
{
    if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default)
        return;

    struct perfcontrol_max_runnable_latency latencies = {
        .max_scheduling_latencies = {
            [THREAD_URGENCY_NONE] = 0,
            [THREAD_URGENCY_BACKGROUND] = bg_max_latency,
            [THREAD_URGENCY_NORMAL] = default_max_latency,
            [THREAD_URGENCY_REAL_TIME] = realtime_max_latency
        }
    };

    sched_perfcontrol_max_runnable_latency(&latencies);
}
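
/*
 * Translates a kernel work-interval update into a perfcontrol_work_interval
 * and passes it to the work_interval_notify handler.
 */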
void
machine_work_interval_notify(thread_t thread,
                             struct kern_work_interval_args *kwi_args)
{
    if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default)
        return;

    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
    struct perfcontrol_work_interval work_interval = {
        .thread_id        = thread->thread_id,
        .qos_class        = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
        .urgency          = kwi_args->urgency,
        .flags            = kwi_args->notify_flags,
        .work_interval_id = kwi_args->work_interval_id,
        .start            = kwi_args->start,
        .finish           = kwi_args->finish,
        .deadline         = kwi_args->deadline,
        .next_start       = kwi_args->next_start,
        .create_flags     = kwi_args->create_flags,
    };
    sched_perfcontrol_work_interval_notify(state, &work_interval);
}
void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
    if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default)
        sched_perfcontrol_deadline_passed(deadline);
}
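
/*
 * INTERRUPT_MASKED_DEBUG support: each thread records a timestamp when it
 * masks interrupts; ml_check_interrupts_disabled_duration() panics (except
 * under KASAN) if they stay masked longer than interrupt_masked_timeout.
 */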
#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. Alarm will go off if interrupts are held
 * disabled for too long, starting from now.
 */
void
ml_spin_debug_reset(thread_t thread)
{
    thread->machine.intmask_timestamp = mach_absolute_time();
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp on a thread that has been unscheduled
 * to avoid false alarms
 */
void
ml_spin_debug_clear(thread_t thread)
{
    thread->machine.intmask_timestamp = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms
 */
void
ml_spin_debug_clear_self()
{
    ml_spin_debug_clear(current_thread());
}

void
ml_check_interrupts_disabled_duration(thread_t thread)
{
    uint64_t start, now;

    start = thread->machine.intmask_timestamp;

    now = mach_absolute_time();

    if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
        mach_timebase_info_data_t timebase;
        clock_timebase_info(&timebase);

#ifndef KASAN
        /*
         * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the
         * mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
         */
        panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
    }

    return;
}
#endif // INTERRUPT_MASKED_DEBUG
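
/*
 * Enable or mask IRQ/FIQ delivery on the current CPU and return whether
 * interrupts were previously enabled.  When re-enabling with preemption
 * enabled, any pending urgent AST is handled first; under
 * INTERRUPT_MASKED_DEBUG the masked-duration timestamp is checked/updated.
 */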
boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
    thread_t thread;
    uint64_t state;

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
    state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
    state = __builtin_arm_rsr("DAIF");
#endif
    if (enable && (state & INTERRUPT_MASK)) {
#if INTERRUPT_MASKED_DEBUG
        if (interrupt_masked_debug) {
            // Interrupts are currently masked, we will enable them (after finishing this check)
            thread = current_thread();
            ml_check_interrupts_disabled_duration(thread);
            thread->machine.intmask_timestamp = 0;
        }
#endif  // INTERRUPT_MASKED_DEBUG
        if (get_preemption_level() == 0) {
            thread = current_thread();
            while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
                uintptr_t up = arm_user_protect_begin(thread);
#endif
                ast_taken_kernel();
#if __ARM_USER_PROTECT__
                arm_user_protect_end(thread, up, FALSE);
#endif
            }
        }
#if __arm__
        __asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
        __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
    } else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
        __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
        __builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
        if (interrupt_masked_debug) {
            // Interrupts were enabled, we just masked them
            current_thread()->machine.intmask_timestamp = mach_absolute_time();
        }
#endif
    }
    return ((state & INTERRUPT_MASK) == 0);
}
/*
 * Routine:     ml_at_interrupt_context
 * Function:    Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
    /* Do not use a stack-based check here, as the top-level exception handler
     * is free to use some other stack besides the per-CPU interrupt stack.
     * Interrupts should always be disabled if we're at interrupt context.
     * Check that first, as we may be in a preemptible non-interrupt context, in
     * which case we could be migrated to a different CPU between obtaining
     * the per-cpu data pointer and loading cpu_int_state. We then might end
     * up checking the interrupt state of a different CPU, resulting in a false
     * positive. But if interrupts are disabled, we also know we cannot be
     * preempted. */
    return (!ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL));
}
vm_offset_t
ml_stack_remaining(void)
{
    uintptr_t local = (uintptr_t) &local;
    vm_offset_t intstack_top_ptr;

    /* Since this is a stack-based check, we don't need to worry about
     * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
     * then the sp should never be within any CPU's interrupt stack unless
     * something has gone horribly wrong. */
    intstack_top_ptr = getCpuDatap()->intstack_top;
    if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
        return (local - (getCpuDatap()->intstack_top - INTSTACK_SIZE));
    } else {
        return (local - current_thread()->kernel_stack);
    }
}
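
/*
 * System quiescing state; only read and written with interrupts disabled,
 * hence the assertions below.
 */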
static boolean_t ml_quiescing;

void ml_set_is_quiescing(boolean_t quiescing)
{
    assert(FALSE == ml_get_interrupts_enabled());
    ml_quiescing = quiescing;
}

boolean_t ml_is_quiescing(void)
{
    assert(FALSE == ml_get_interrupts_enabled());
    return (ml_quiescing);
}
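
/*
 * Memory retained by the booter: reported directly via memSizeActual when
 * available, otherwise estimated as the difference between memSize rounded
 * up to the boot rounding granule and memSize itself.
 */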
uint64_t ml_get_booter_memory_size(void)
{
    uint64_t size;
    uint64_t roundsize = 512*1024*1024ULL;

    size = BootArgs->memSizeActual;
    if (!size) {
        size = BootArgs->memSize;
        if (size < (2 * roundsize)) roundsize >>= 1;
        size = (size + roundsize - 1) & ~(roundsize - 1);
        size -= BootArgs->memSize;
    }
    return (size);
}
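
/*
 * Timebase offsets: rtclock_base_abstime is the absolute-time base, and
 * adding mach_absolutetime_asleep yields the continuous-time offset, which
 * also serves as the wake time.
 */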
uint64_t
ml_get_abstime_offset(void)
{
    return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
    return (rtclock_base_abstime + mach_absolutetime_asleep);
}

uint64_t
ml_get_time_since_reset(void)
{
    /* The timebase resets across S2R, so just return the raw value. */
    return ml_get_hwclock();
}

uint64_t
ml_get_conttime_wake_time(void)
{
    /* The wake time is simply our continuous time offset. */
    return (ml_get_conttime_offset());
}