/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
#include <pexpert/pexpert.h>

#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif

extern uint64_t mach_absolutetime_asleep;

static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
    __unused struct perfcontrol_thread_data *oncore,
    __unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
    __unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
    __unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
    __unused void *unused)
{
}

sched_perfcontrol_offcore_t                     sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t              sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t                      sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t           sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t         sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t   sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t        sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t        sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t           sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t             sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t                         sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t                sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;

void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
    assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

    if (size_of_state > sizeof(struct perfcontrol_state)) {
        panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
    }

    if (callbacks) {
        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
            if (callbacks->work_interval_ctl != NULL) {
                sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
            } else {
                sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
            if (callbacks->csw != NULL) {
                sched_perfcontrol_csw = callbacks->csw;
            } else {
                sched_perfcontrol_csw = sched_perfcontrol_csw_default;
            }

            if (callbacks->state_update != NULL) {
                sched_perfcontrol_state_update = callbacks->state_update;
            } else {
                sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
            }
        }

        if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
            if (callbacks->deadline_passed != NULL) {
                sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
            } else {
                sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
            }
        }

        if (callbacks->offcore != NULL) {
            sched_perfcontrol_offcore = callbacks->offcore;
        } else {
            sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        }

        if (callbacks->context_switch != NULL) {
            sched_perfcontrol_switch = callbacks->context_switch;
        } else {
            sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        }

        if (callbacks->oncore != NULL) {
            sched_perfcontrol_oncore = callbacks->oncore;
        } else {
            sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        }

        if (callbacks->max_runnable_latency != NULL) {
            sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
        } else {
            sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        }

        if (callbacks->work_interval_notify != NULL) {
            sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
        } else {
            sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        }
    } else {
        /* reset to defaults */
        sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
        sched_perfcontrol_switch = sched_perfcontrol_switch_default;
        sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
        sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
        sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
        sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
        sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
        sched_perfcontrol_csw = sched_perfcontrol_csw_default;
        sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
    }
}
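
/*
 * Illustrative sketch (not part of the original file): a performance-controller
 * client would typically fill in a callbacks structure and hand it to
 * sched_perfcontrol_register_callbacks(). The field names below are the ones
 * handled above; the client functions (my_offcore, my_oncore, ...) and the
 * struct tag sched_perfcontrol_callbacks are assumed for the example.
 *
 *	static struct sched_perfcontrol_callbacks my_callbacks = {
 *		.version              = SCHED_PERFCONTROL_CALLBACKS_VERSION_7,
 *		.offcore              = my_offcore,
 *		.oncore               = my_oncore,
 *		.context_switch       = my_context_switch,
 *		.csw                  = my_csw,
 *		.state_update         = my_state_update,
 *		.work_interval_notify = my_work_interval_notify,
 *	};
 *
 *	sched_perfcontrol_register_callbacks(&my_callbacks,
 *	    sizeof(struct perfcontrol_state));
 *
 * Passing NULL restores every callback to its default stub.
 */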

static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
{
    bzero(data, sizeof(struct perfcontrol_thread_data));
    data->perfctl_class = thread_get_perfcontrol_class(thread);
    data->energy_estimate_nj = 0;
    data->thread_id = thread->thread_id;
    data->scheduling_latency_at_same_basepri = same_pri_latency;
    data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
    mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
    cpu_counters->instructions = 0;
    cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}

int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];

#if MONOTONIC
static inline bool
perfcontrol_callout_counters_begin(uint64_t *counters)
{
    if (!perfcontrol_callout_stats_enabled) {
        return false;
    }
    mt_fixed_counts(counters);
    return true;
}

static inline void
perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
    uint64_t end_counters[MT_CORE_NFIXED];
    mt_fixed_counts(end_counters);
    atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
        end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], memory_order_relaxed);
#ifdef MT_CORE_INSTRS
    atomic_fetch_add_explicit(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
        end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], memory_order_relaxed);
#endif /* defined(MT_CORE_INSTRS) */
    atomic_fetch_add_explicit(&perfcontrol_callout_count[type], 1, memory_order_relaxed);
}
#endif /* MONOTONIC */

uint64_t
perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
    if (!perfcontrol_callout_stats_enabled) {
        return 0;
    }
    return perfcontrol_callout_stats[type][stat] / perfcontrol_callout_count[type];
}
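
/*
 * Editorial note: the stats above are kept as running sums, so the average
 * cost of a callout type is simply sum / count. For example, the mean cycles
 * spent in context-switch callouts would be read as
 *
 *	uint64_t avg_cycles =
 *	    perfcontrol_callout_stat_avg(PERFCONTROL_CALLOUT_CONTEXT,
 *	        PERFCONTROL_STAT_CYCLES);
 *
 * which is meaningful only while perfcontrol_callout_stats_enabled is set and
 * at least one callout of that type has been recorded.
 */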

void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
{
    if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
        perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
        perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
        sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
    }

    if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
        uint32_t cpu_id = (uint32_t)cpu_number();
        struct perfcontrol_cpu_counters cpu_counters;
        struct perfcontrol_thread_data offcore, oncore;
        machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
        machine_switch_populate_perfcontrol_thread_data(&oncore, new,
            new_thread_same_pri_latency);
        machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
        uint64_t counters[MT_CORE_NFIXED];
        bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
        sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
            &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
        if (ctrs_enabled) {
            perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
        }
#endif /* MONOTONIC */

#if __arm64__
        old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
        new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif
    }
}

void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
{
    if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
        return;
    }
    uint32_t cpu_id = (uint32_t)cpu_number();
    struct perfcontrol_thread_data data;
    machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
        &data, NULL);
#if MONOTONIC
    if (ctrs_enabled) {
        perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
    }
#endif /* MONOTONIC */

#if __arm64__
    thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif
}

void
machine_thread_going_on_core(thread_t new_thread,
    thread_urgency_t urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
{
    if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
        return;
    }
    struct going_on_core on_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

    on_core.thread_id = new_thread->thread_id;
    on_core.energy_estimate_nj = 0;
    on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
    on_core.urgency = urgency;
    on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
    on_core.is_kernel_thread = new_thread->task == kernel_task;
    on_core.scheduling_latency = sched_latency;
    on_core.start_time = timestamp;
    on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
    if (ctrs_enabled) {
        perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
    }
#endif /* MONOTONIC */

#if __arm64__
    new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}

void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
    uint64_t last_dispatch, __unused boolean_t thread_runnable)
{
    if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
        return;
    }
    struct going_off_core off_core;
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

    off_core.thread_id = old_thread->thread_id;
    off_core.energy_estimate_nj = 0;
    off_core.end_time = last_dispatch;

#if MONOTONIC
    uint64_t counters[MT_CORE_NFIXED];
    bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
    sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
    if (ctrs_enabled) {
        perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
    }
#endif /* MONOTONIC */

#if __arm64__
    old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}
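
/*
 * Editorial note: the scheduler callouts above (context switch, state update,
 * on-core, off-core) all follow the same pattern: when MONOTONIC is built in
 * and perfcontrol_callout_stats_enabled is set, the client callback is
 * bracketed by perfcontrol_callout_counters_begin()/..._end(), so the fixed
 * cycle/instruction counters attribute the cost of the callback itself to the
 * matching PERFCONTROL_CALLOUT_* bucket reported by
 * perfcontrol_callout_stat_avg().
 */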

void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
{
    if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
        return;
    }
    struct perfcontrol_max_runnable_latency latencies = {
        .max_scheduling_latencies = {
            [THREAD_URGENCY_NONE] = 0,
            [THREAD_URGENCY_BACKGROUND] = bg_max_latency,
            [THREAD_URGENCY_NORMAL] = default_max_latency,
            [THREAD_URGENCY_REAL_TIME] = realtime_max_latency
        }
    };

    sched_perfcontrol_max_runnable_latency(&latencies);
}

void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args *kwi_args)
{
    if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
        return;
    }
    perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
    struct perfcontrol_work_interval work_interval = {
        .thread_id        = thread->thread_id,
        .qos_class        = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
        .urgency          = kwi_args->urgency,
        .flags            = kwi_args->notify_flags,
        .work_interval_id = kwi_args->work_interval_id,
        .start            = kwi_args->start,
        .finish           = kwi_args->finish,
        .deadline         = kwi_args->deadline,
        .next_start       = kwi_args->next_start,
        .create_flags     = kwi_args->create_flags,
    };
    sched_perfcontrol_work_interval_notify(state, &work_interval);
}

void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
    if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
        sched_perfcontrol_deadline_passed(deadline);
    }
}

#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms. Alarm will go off if interrupts are held
 * disabled for too long, starting from now.
 */
void
ml_spin_debug_reset(thread_t thread)
{
    thread->machine.intmask_timestamp = mach_absolute_time();
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp on a thread that has been unscheduled
 * to avoid false alarms
 */
void
ml_spin_debug_clear(thread_t thread)
{
    thread->machine.intmask_timestamp = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms
 */
void
ml_spin_debug_clear_self(void)
{
    ml_spin_debug_clear(current_thread());
}

void
ml_check_interrupts_disabled_duration(thread_t thread)
{
    uint64_t start, now;

    start = thread->machine.intmask_timestamp;
    if (start != 0) {
        now = mach_absolute_time();

        if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
            mach_timebase_info_data_t timebase;
            clock_timebase_info(&timebase);

#ifndef KASAN
            /*
             * Disable the actual panic for KASAN due to the overhead of KASAN itself; leave the rest of the
             * mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
             */
            panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
        }
    }
}
#endif // INTERRUPT_MASKED_DEBUG
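
/*
 * Editorial sketch of the check above: intmask_timestamp and
 * interrupt_masked_timeout are in mach_absolute_time() units, so the elapsed
 * time is converted to nanoseconds only for the panic string, using the
 * usual timebase scaling:
 *
 *	elapsed_abs = now - start;
 *	elapsed_ns  = (elapsed_abs * timebase.numer) / timebase.denom;
 *
 * On a hypothetical timebase of 125/3 (numer/denom), 24,000 absolute ticks
 * would report as 1,000,000 ns.
 */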

boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
    thread_t thread;
    uint64_t state;

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
    state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
    state = __builtin_arm_rsr("DAIF");
#endif
    if (enable && (state & INTERRUPT_MASK)) {
#if INTERRUPT_MASKED_DEBUG
        if (interrupt_masked_debug) {
            // Interrupts are currently masked, we will enable them (after finishing this check)
            thread = current_thread();
            ml_check_interrupts_disabled_duration(thread);
            thread->machine.intmask_timestamp = 0;
        }
#endif // INTERRUPT_MASKED_DEBUG
        if (get_preemption_level() == 0) {
            thread = current_thread();
            while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
                uintptr_t up = arm_user_protect_begin(thread);
#endif
                ast_taken_kernel();
#if __ARM_USER_PROTECT__
                arm_user_protect_end(thread, up, FALSE);
#endif
            }
        }
#if __arm__
        __asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
        __builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
    } else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
        __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
        __builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
        if (interrupt_masked_debug) {
            // Interrupts were enabled, we just masked them
            current_thread()->machine.intmask_timestamp = mach_absolute_time();
        }
#endif
    }
    return (state & INTERRUPT_MASK) == 0;
}
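
/*
 * Illustrative usage (not from this file): callers typically treat the return
 * value as the previous interrupt-enable state and restore it when done, e.g.
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);
 *	// ... critical section with interrupts masked ...
 *	(void) ml_set_interrupts_enabled(istate);
 *
 * which works because the function returns TRUE only when interrupts were
 * enabled at the time of the call.
 */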

boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
    return ml_set_interrupts_enabled(enable);
}

/*
 * Routine:     ml_at_interrupt_context
 * Function:    Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
    /* Do not use a stack-based check here, as the top-level exception handler
     * is free to use some other stack besides the per-CPU interrupt stack.
     * Interrupts should always be disabled if we're at interrupt context.
     * Check that first, as we may be in a preemptible non-interrupt context, in
     * which case we could be migrated to a different CPU between obtaining
     * the per-cpu data pointer and loading cpu_int_state. We then might end
     * up checking the interrupt state of a different CPU, resulting in a false
     * positive. But if interrupts are disabled, we also know we cannot be
     * preempted. */
    return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
}

vm_offset_t
ml_stack_remaining(void)
{
    uintptr_t local = (uintptr_t)&local;
    vm_offset_t intstack_top_ptr;

    /* Since this is a stack-based check, we don't need to worry about
     * preemption as we do in ml_at_interrupt_context(). If we are preemptible,
     * then the sp should never be within any CPU's interrupt stack unless
     * something has gone horribly wrong. */
    intstack_top_ptr = getCpuDatap()->intstack_top;
    if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
        return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
    } else {
        return local - current_thread()->kernel_stack;
    }
}

static boolean_t ml_quiescing;

void
ml_set_is_quiescing(boolean_t quiescing)
{
    assert(FALSE == ml_get_interrupts_enabled());
    ml_quiescing = quiescing;
}

boolean_t
ml_is_quiescing(void)
{
    assert(FALSE == ml_get_interrupts_enabled());
    return ml_quiescing;
}

uint64_t
ml_get_booter_memory_size(void)
{
    uint64_t size;
    uint64_t roundsize = 512 * 1024 * 1024ULL;

    size = BootArgs->memSizeActual;
    if (!size) {
        size = BootArgs->memSize;
        if (size < (2 * roundsize)) {
            roundsize >>= 1;
        }
        size = (size + roundsize - 1) & ~(roundsize - 1);
        size -= BootArgs->memSize;
    }
    return size;
}
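
/*
 * Editorial note on the rounding above: when memSizeActual is not provided,
 * the booter-reserved size is estimated by rounding memSize up to a 512 MB
 * boundary (256 MB for small configurations) and returning the difference.
 * For example, with a hypothetical memSize of 0x5F000000 (1520 MB), the
 * rounded value is 0x60000000 (1536 MB), so the function would return
 * 0x01000000 (16 MB).
 */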

uint64_t
ml_get_abstime_offset(void)
{
    return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
    return rtclock_base_abstime + mach_absolutetime_asleep;
}

uint64_t
ml_get_time_since_reset(void)
{
    /* The timebase resets across S2R, so just return the raw value. */
    return ml_get_hwclock();
}

uint64_t
ml_get_conttime_wake_time(void)
{
    /* The wake time is simply our continuous time offset. */
    return ml_get_conttime_offset();
}

/*
 * ml_snoop_thread_is_on_core(thread_t thread)
 * Check if the given thread is currently on core. This function does not take
 * locks, disable preemption, or otherwise guarantee synchronization. The
 * result should be considered advisory.
 */
bool
ml_snoop_thread_is_on_core(thread_t thread)
{
    unsigned int cur_cpu_num = 0;

    for (cur_cpu_num = 0; cur_cpu_num < MAX_CPUS; cur_cpu_num++) {
        if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
            if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {
                return true;
            }
        }
    }

    return false;
}
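
/*
 * Illustrative usage (hypothetical caller): because the scan above is
 * unsynchronized, the result is only a hint, e.g.
 *
 *	if (ml_snoop_thread_is_on_core(thread)) {
 *		// thread was observed running on some CPU at the moment of
 *		// the check; it may have gone off-core immediately after
 *	}
 *
 * Callers that need a stable answer must hold the appropriate scheduling
 * locks instead.
 */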