/*
 * Copyright (c) 2007-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <arm/machine_cpu.h>
#include <arm/cpu_internal.h>
#include <arm/cpuid.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm/machdep_call.h>
#include <arm/machine_routines.h>
#include <arm/rtclock.h>
#include <kern/machine.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/policy_internal.h>
#include <machine/config.h>
#include <machine/atomic.h>
#include <pexpert/pexpert.h>
#if MONOTONIC
#include <kern/monotonic.h>
#include <machine/monotonic.h>
#endif /* MONOTONIC */

#include <mach/machine.h>

#if INTERRUPT_MASKED_DEBUG
extern boolean_t interrupt_masked_debug;
extern uint64_t interrupt_masked_timeout;
#endif

extern uint64_t mach_absolutetime_asleep;
static void
sched_perfcontrol_oncore_default(perfcontrol_state_t new_thread_state __unused, going_on_core_t on __unused)
{
}

static void
sched_perfcontrol_switch_default(perfcontrol_state_t old_thread_state __unused, perfcontrol_state_t new_thread_state __unused)
{
}

static void
sched_perfcontrol_offcore_default(perfcontrol_state_t old_thread_state __unused, going_off_core_t off __unused, boolean_t thread_terminating __unused)
{
}

static void
sched_perfcontrol_thread_group_default(thread_group_data_t data __unused)
{
}

static void
sched_perfcontrol_max_runnable_latency_default(perfcontrol_max_runnable_latency_t latencies __unused)
{
}

static void
sched_perfcontrol_work_interval_notify_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_t work_interval __unused)
{
}

static void
sched_perfcontrol_work_interval_ctl_default(perfcontrol_state_t thread_state __unused,
    perfcontrol_work_interval_instance_t instance __unused)
{
}

static void
sched_perfcontrol_deadline_passed_default(__unused uint64_t deadline)
{
}

static void
sched_perfcontrol_csw_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *offcore,
	__unused struct perfcontrol_thread_data *oncore,
	__unused struct perfcontrol_cpu_counters *cpu_counters, __unused void *unused)
{
}

static void
sched_perfcontrol_state_update_default(
	__unused perfcontrol_event event, __unused uint32_t cpu_id, __unused uint64_t timestamp,
	__unused uint32_t flags, __unused struct perfcontrol_thread_data *thr_data,
	__unused void *unused)
{
}
sched_perfcontrol_offcore_t                     sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
sched_perfcontrol_context_switch_t              sched_perfcontrol_switch = sched_perfcontrol_switch_default;
sched_perfcontrol_oncore_t                      sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
sched_perfcontrol_thread_group_init_t           sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_deinit_t         sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
sched_perfcontrol_thread_group_flags_update_t   sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
sched_perfcontrol_max_runnable_latency_t        sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
sched_perfcontrol_work_interval_notify_t        sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
sched_perfcontrol_work_interval_ctl_t           sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
sched_perfcontrol_deadline_passed_t             sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
sched_perfcontrol_csw_t                         sched_perfcontrol_csw = sched_perfcontrol_csw_default;
sched_perfcontrol_state_update_t                sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
void
sched_perfcontrol_register_callbacks(sched_perfcontrol_callbacks_t callbacks, unsigned long size_of_state)
{
	assert(callbacks == NULL || callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_2);

	if (size_of_state > sizeof(struct perfcontrol_state)) {
		panic("%s: Invalid required state size %lu", __FUNCTION__, size_of_state);
	}

	if (callbacks) {
		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_7) {
			if (callbacks->work_interval_ctl != NULL) {
				sched_perfcontrol_work_interval_ctl = callbacks->work_interval_ctl;
			} else {
				sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_5) {
			if (callbacks->csw != NULL) {
				sched_perfcontrol_csw = callbacks->csw;
			} else {
				sched_perfcontrol_csw = sched_perfcontrol_csw_default;
			}

			if (callbacks->state_update != NULL) {
				sched_perfcontrol_state_update = callbacks->state_update;
			} else {
				sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
			}
		}

		if (callbacks->version >= SCHED_PERFCONTROL_CALLBACKS_VERSION_4) {
			if (callbacks->deadline_passed != NULL) {
				sched_perfcontrol_deadline_passed = callbacks->deadline_passed;
			} else {
				sched_perfcontrol_deadline_passed = sched_perfcontrol_deadline_passed_default;
			}
		}

		if (callbacks->offcore != NULL) {
			sched_perfcontrol_offcore = callbacks->offcore;
		} else {
			sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		}

		if (callbacks->context_switch != NULL) {
			sched_perfcontrol_switch = callbacks->context_switch;
		} else {
			sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		}

		if (callbacks->oncore != NULL) {
			sched_perfcontrol_oncore = callbacks->oncore;
		} else {
			sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		}

		if (callbacks->max_runnable_latency != NULL) {
			sched_perfcontrol_max_runnable_latency = callbacks->max_runnable_latency;
		} else {
			sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		}

		if (callbacks->work_interval_notify != NULL) {
			sched_perfcontrol_work_interval_notify = callbacks->work_interval_notify;
		} else {
			sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		}
	} else {
		/* reset to defaults */
		sched_perfcontrol_offcore = sched_perfcontrol_offcore_default;
		sched_perfcontrol_switch = sched_perfcontrol_switch_default;
		sched_perfcontrol_oncore = sched_perfcontrol_oncore_default;
		sched_perfcontrol_thread_group_init = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_deinit = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_thread_group_flags_update = sched_perfcontrol_thread_group_default;
		sched_perfcontrol_max_runnable_latency = sched_perfcontrol_max_runnable_latency_default;
		sched_perfcontrol_work_interval_notify = sched_perfcontrol_work_interval_notify_default;
		sched_perfcontrol_work_interval_ctl = sched_perfcontrol_work_interval_ctl_default;
		sched_perfcontrol_csw = sched_perfcontrol_csw_default;
		sched_perfcontrol_state_update = sched_perfcontrol_state_update_default;
	}
}
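
/*
 * Illustrative usage sketch (not part of this file): a performance controller
 * would typically fill in a callbacks structure and hand it to the scheduler
 * at init time.  The callback names (my_csw, my_state_update) and the state
 * struct are hypothetical; only sched_perfcontrol_register_callbacks(), the
 * version constants, and the fall-back-to-default behavior come from the code
 * above.
 *
 *	static struct sched_perfcontrol_callbacks my_callbacks = {
 *		.version      = SCHED_PERFCONTROL_CALLBACKS_VERSION_7,
 *		.csw          = my_csw,
 *		.state_update = my_state_update,
 *		// entries left NULL fall back to the *_default stubs above
 *	};
 *
 *	sched_perfcontrol_register_callbacks(&my_callbacks, sizeof(struct my_state));
 *	...
 *	sched_perfcontrol_register_callbacks(NULL, 0);	// restore every default
 */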

static void
machine_switch_populate_perfcontrol_thread_data(struct perfcontrol_thread_data *data,
    thread_t thread,
    uint64_t same_pri_latency)
{
	bzero(data, sizeof(struct perfcontrol_thread_data));
	data->perfctl_class = thread_get_perfcontrol_class(thread);
	data->energy_estimate_nj = 0;
	data->thread_id = thread->thread_id;
	data->scheduling_latency_at_same_basepri = same_pri_latency;
	data->perfctl_state = FIND_PERFCONTROL_STATE(thread);
}

static void
machine_switch_populate_perfcontrol_cpu_counters(struct perfcontrol_cpu_counters *cpu_counters)
{
#if MONOTONIC
	mt_perfcontrol(&cpu_counters->instructions, &cpu_counters->cycles);
#else /* MONOTONIC */
	cpu_counters->instructions = 0;
	cpu_counters->cycles = 0;
#endif /* !MONOTONIC */
}

int perfcontrol_callout_stats_enabled = 0;
static _Atomic uint64_t perfcontrol_callout_stats[PERFCONTROL_CALLOUT_MAX][PERFCONTROL_STAT_MAX];
static _Atomic uint64_t perfcontrol_callout_count[PERFCONTROL_CALLOUT_MAX];

#if MONOTONIC
static inline bool
perfcontrol_callout_counters_begin(uint64_t *counters)
{
	if (!perfcontrol_callout_stats_enabled) {
		return false;
	}
	mt_fixed_counts(counters);
	return true;
}

static inline void
perfcontrol_callout_counters_end(uint64_t *start_counters,
    perfcontrol_callout_type_t type)
{
	uint64_t end_counters[MT_CORE_NFIXED];
	mt_fixed_counts(end_counters);
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_CYCLES],
	    end_counters[MT_CORE_CYCLES] - start_counters[MT_CORE_CYCLES], relaxed);
#ifdef MT_CORE_INSTRS
	os_atomic_add(&perfcontrol_callout_stats[type][PERFCONTROL_STAT_INSTRS],
	    end_counters[MT_CORE_INSTRS] - start_counters[MT_CORE_INSTRS], relaxed);
#endif /* defined(MT_CORE_INSTRS) */
	os_atomic_inc(&perfcontrol_callout_count[type], relaxed);
}
#endif /* MONOTONIC */

uint64_t
perfcontrol_callout_stat_avg(perfcontrol_callout_type_t type,
    perfcontrol_callout_stat_t stat)
{
	if (!perfcontrol_callout_stats_enabled) {
		return 0;
	}
	return os_atomic_load_wide(&perfcontrol_callout_stats[type][stat], relaxed) /
	       os_atomic_load_wide(&perfcontrol_callout_count[type], relaxed);
}

void
machine_switch_perfcontrol_context(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    uint64_t new_thread_same_pri_latency,
    thread_t old,
    thread_t new)
{
	if (sched_perfcontrol_switch != sched_perfcontrol_switch_default) {
		perfcontrol_state_t old_perfcontrol_state = FIND_PERFCONTROL_STATE(old);
		perfcontrol_state_t new_perfcontrol_state = FIND_PERFCONTROL_STATE(new);
		sched_perfcontrol_switch(old_perfcontrol_state, new_perfcontrol_state);
	}

	if (sched_perfcontrol_csw != sched_perfcontrol_csw_default) {
		uint32_t cpu_id = (uint32_t)cpu_number();
		struct perfcontrol_cpu_counters cpu_counters;
		struct perfcontrol_thread_data offcore, oncore;
		machine_switch_populate_perfcontrol_thread_data(&offcore, old, 0);
		machine_switch_populate_perfcontrol_thread_data(&oncore, new,
		    new_thread_same_pri_latency);
		machine_switch_populate_perfcontrol_cpu_counters(&cpu_counters);

#if MONOTONIC
		uint64_t counters[MT_CORE_NFIXED];
		bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
		sched_perfcontrol_csw(event, cpu_id, timestamp, flags,
		    &offcore, &oncore, &cpu_counters, NULL);
#if MONOTONIC
		if (ctrs_enabled) {
			perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_CONTEXT);
		}
#endif /* MONOTONIC */

#if __arm64__
		old->machine.energy_estimate_nj += offcore.energy_estimate_nj;
		new->machine.energy_estimate_nj += oncore.energy_estimate_nj;
#endif
	}
}

void
machine_switch_perfcontrol_state_update(perfcontrol_event event,
    uint64_t timestamp,
    uint32_t flags,
    thread_t thread)
{
	if (sched_perfcontrol_state_update == sched_perfcontrol_state_update_default) {
		return;
	}
	uint32_t cpu_id = (uint32_t)cpu_number();
	struct perfcontrol_thread_data data;
	machine_switch_populate_perfcontrol_thread_data(&data, thread, 0);

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_state_update(event, cpu_id, timestamp, flags,
	    &data, NULL);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_STATE_UPDATE);
	}
#endif /* MONOTONIC */

#if __arm64__
	thread->machine.energy_estimate_nj += data.energy_estimate_nj;
#endif
}

void
machine_thread_going_on_core(thread_t new_thread,
    thread_urgency_t urgency,
    uint64_t sched_latency,
    uint64_t same_pri_latency,
    uint64_t timestamp)
{
	if (sched_perfcontrol_oncore == sched_perfcontrol_oncore_default) {
		return;
	}
	struct going_on_core on_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(new_thread);

	on_core.thread_id = new_thread->thread_id;
	on_core.energy_estimate_nj = 0;
	on_core.qos_class = proc_get_effective_thread_policy(new_thread, TASK_POLICY_QOS);
	on_core.urgency = urgency;
	on_core.is_32_bit = thread_is_64bit_data(new_thread) ? FALSE : TRUE;
	on_core.is_kernel_thread = new_thread->task == kernel_task;
	on_core.scheduling_latency = sched_latency;
	on_core.start_time = timestamp;
	on_core.scheduling_latency_at_same_basepri = same_pri_latency;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_oncore(state, &on_core);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_ON_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	new_thread->machine.energy_estimate_nj += on_core.energy_estimate_nj;
#endif
}

void
machine_thread_going_off_core(thread_t old_thread, boolean_t thread_terminating,
    uint64_t last_dispatch, __unused boolean_t thread_runnable)
{
	if (sched_perfcontrol_offcore == sched_perfcontrol_offcore_default) {
		return;
	}
	struct going_off_core off_core;
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(old_thread);

	off_core.thread_id = old_thread->thread_id;
	off_core.energy_estimate_nj = 0;
	off_core.end_time = last_dispatch;

#if MONOTONIC
	uint64_t counters[MT_CORE_NFIXED];
	bool ctrs_enabled = perfcontrol_callout_counters_begin(counters);
#endif /* MONOTONIC */
	sched_perfcontrol_offcore(state, &off_core, thread_terminating);
#if MONOTONIC
	if (ctrs_enabled) {
		perfcontrol_callout_counters_end(counters, PERFCONTROL_CALLOUT_OFF_CORE);
	}
#endif /* MONOTONIC */

#if __arm64__
	old_thread->machine.energy_estimate_nj += off_core.energy_estimate_nj;
#endif
}

void
machine_max_runnable_latency(uint64_t bg_max_latency,
    uint64_t default_max_latency,
    uint64_t realtime_max_latency)
{
	if (sched_perfcontrol_max_runnable_latency == sched_perfcontrol_max_runnable_latency_default) {
		return;
	}
	struct perfcontrol_max_runnable_latency latencies = {
		.max_scheduling_latencies = {
			[THREAD_URGENCY_NONE] = 0,
			[THREAD_URGENCY_BACKGROUND] = bg_max_latency,
			[THREAD_URGENCY_NORMAL] = default_max_latency,
			[THREAD_URGENCY_REAL_TIME] = realtime_max_latency
		}
	};

	sched_perfcontrol_max_runnable_latency(&latencies);
}

void
machine_work_interval_notify(thread_t thread,
    struct kern_work_interval_args *kwi_args)
{
	if (sched_perfcontrol_work_interval_notify == sched_perfcontrol_work_interval_notify_default) {
		return;
	}
	perfcontrol_state_t state = FIND_PERFCONTROL_STATE(thread);
	struct perfcontrol_work_interval work_interval = {
		.thread_id        = thread->thread_id,
		.qos_class        = proc_get_effective_thread_policy(thread, TASK_POLICY_QOS),
		.urgency          = kwi_args->urgency,
		.flags            = kwi_args->notify_flags,
		.work_interval_id = kwi_args->work_interval_id,
		.start            = kwi_args->start,
		.finish           = kwi_args->finish,
		.deadline         = kwi_args->deadline,
		.next_start       = kwi_args->next_start,
		.create_flags     = kwi_args->create_flags,
	};
	sched_perfcontrol_work_interval_notify(state, &work_interval);
}

void
machine_perfcontrol_deadline_passed(uint64_t deadline)
{
	if (sched_perfcontrol_deadline_passed != sched_perfcontrol_deadline_passed_default) {
		sched_perfcontrol_deadline_passed(deadline);
	}
}

#if INTERRUPT_MASKED_DEBUG
/*
 * ml_spin_debug_reset()
 * Reset the timestamp on a thread that has been unscheduled
 * to avoid false alarms.  The alarm will go off if interrupts are held
 * disabled for too long, starting from now.
 *
 * Call ml_get_timebase() directly to prevent extra overhead on newer
 * platforms that's enabled in DEVELOPMENT kernel configurations.
 */
void
ml_spin_debug_reset(thread_t thread)
{
	thread->machine.intmask_timestamp = ml_get_timebase();
}

/*
 * ml_spin_debug_clear()
 * Clear the timestamp on a thread that has been unscheduled
 * to avoid false alarms
 */
void
ml_spin_debug_clear(thread_t thread)
{
	thread->machine.intmask_timestamp = 0;
}

/*
 * ml_spin_debug_clear_self()
 * Clear the timestamp on the current thread to prevent
 * false alarms
 */
void
ml_spin_debug_clear_self()
{
	ml_spin_debug_clear(current_thread());
}

void
ml_check_interrupts_disabled_duration(thread_t thread)
{
	uint64_t start, now;

	start = thread->machine.intmask_timestamp;

	now = ml_get_timebase();

	if ((now - start) > interrupt_masked_timeout * debug_cpu_performance_degradation_factor) {
		mach_timebase_info_data_t timebase;
		clock_timebase_info(&timebase);

#ifndef KASAN
		/*
		 * Disable the actual panic for KASAN due to the overhead of KASAN itself, leave the rest of the
		 * mechanism enabled so that KASAN can catch any bugs in the mechanism itself.
		 */
		panic("Interrupts held disabled for %llu nanoseconds", (((now - start) * timebase.numer) / timebase.denom));
#endif
	}

	return;
}
#endif // INTERRUPT_MASKED_DEBUG
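
/*
 * Note on the conversion used in the panic above: (now - start) is measured in
 * mach timebase ticks, and multiplying by timebase.numer / timebase.denom
 * converts ticks to nanoseconds.  For example, on a 24 MHz timebase the ratio
 * is 125/3, so a 2,400,000-tick window reports as 100,000,000 ns (100 ms).
 * The 24 MHz figure is only an example, not something this file guarantees.
 */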

boolean_t
ml_set_interrupts_enabled(boolean_t enable)
{
	thread_t        thread;
	uint64_t        state;

#if __arm__
#define INTERRUPT_MASK PSR_IRQF
	state = __builtin_arm_rsr("cpsr");
#else
#define INTERRUPT_MASK DAIF_IRQF
	state = __builtin_arm_rsr("DAIF");
#endif
	if (enable && (state & INTERRUPT_MASK)) {
		assert(getCpuDatap()->cpu_int_state == NULL); // Make sure we're not enabling interrupts from primary interrupt context
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts are currently masked, we will enable them (after finishing this check)
			thread = current_thread();
			ml_check_interrupts_disabled_duration(thread);
			thread->machine.intmask_timestamp = 0;
		}
#endif // INTERRUPT_MASKED_DEBUG
		if (get_preemption_level() == 0) {
			thread = current_thread();
			while (thread->machine.CpuDatap->cpu_pending_ast & AST_URGENT) {
#if __ARM_USER_PROTECT__
				uintptr_t up = arm_user_protect_begin(thread);
#endif
				ast_taken_kernel();
#if __ARM_USER_PROTECT__
				arm_user_protect_end(thread, up, FALSE);
#endif
			}
		}
#if __arm__
		__asm__ volatile ("cpsie if" ::: "memory"); // Enable IRQ FIQ
#else
		__builtin_arm_wsr("DAIFClr", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
	} else if (!enable && ((state & INTERRUPT_MASK) == 0)) {
#if __arm__
		__asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
#else
		__builtin_arm_wsr("DAIFSet", (DAIFSC_IRQF | DAIFSC_FIQF));
#endif
#if INTERRUPT_MASKED_DEBUG
		if (interrupt_masked_debug) {
			// Interrupts were enabled, we just masked them
			current_thread()->machine.intmask_timestamp = ml_get_timebase();
		}
#endif
	}
	return (state & INTERRUPT_MASK) == 0;
}
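
/*
 * Typical caller pattern (sketch, not taken from this file): the return value
 * is the previous enable state, which is what makes nested critical sections
 * safe to write as save/restore pairs.
 *
 *	boolean_t istate = ml_set_interrupts_enabled(FALSE);	// mask IRQ/FIQ
 *	// ... short critical section ...
 *	ml_set_interrupts_enabled(istate);			// restore prior state
 */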

boolean_t
ml_early_set_interrupts_enabled(boolean_t enable)
{
	return ml_set_interrupts_enabled(enable);
}

/*
 * Routine:   ml_at_interrupt_context
 * Function:  Check if running at interrupt context
 */
boolean_t
ml_at_interrupt_context(void)
{
	/* Do not use a stack-based check here, as the top-level exception handler
	 * is free to use some other stack besides the per-CPU interrupt stack.
	 * Interrupts should always be disabled if we're at interrupt context.
	 * Check that first, as we may be in a preemptible non-interrupt context, in
	 * which case we could be migrated to a different CPU between obtaining
	 * the per-cpu data pointer and loading cpu_int_state.  We then might end
	 * up checking the interrupt state of a different CPU, resulting in a false
	 * positive.  But if interrupts are disabled, we also know we cannot be
	 * preempted. */
	return !ml_get_interrupts_enabled() && (getCpuDatap()->cpu_int_state != NULL);
}

vm_offset_t
ml_stack_remaining(void)
{
	uintptr_t local = (uintptr_t) &local;
	vm_offset_t intstack_top_ptr;

	/* Since this is a stack-based check, we don't need to worry about
	 * preemption as we do in ml_at_interrupt_context().  If we are preemptible,
	 * then the sp should never be within any CPU's interrupt stack unless
	 * something has gone horribly wrong. */
	intstack_top_ptr = getCpuDatap()->intstack_top;
	if ((local < intstack_top_ptr) && (local > intstack_top_ptr - INTSTACK_SIZE)) {
		return local - (getCpuDatap()->intstack_top - INTSTACK_SIZE);
	} else {
		return local - current_thread()->kernel_stack;
	}
}

static boolean_t ml_quiescing;

void
ml_set_is_quiescing(boolean_t quiescing)
{
	assert(FALSE == ml_get_interrupts_enabled());
	ml_quiescing = quiescing;
}

boolean_t
ml_is_quiescing(void)
{
	assert(FALSE == ml_get_interrupts_enabled());
	return ml_quiescing;
}

uint64_t
ml_get_booter_memory_size(void)
{
	uint64_t size;
	uint64_t roundsize = 512 * 1024 * 1024ULL;
	size = BootArgs->memSizeActual;
	if (!size) {
		size = BootArgs->memSize;
		if (size < (2 * roundsize)) {
			roundsize >>= 2;
		}
		size = (size + roundsize - 1) & ~(roundsize - 1);
		size -= BootArgs->memSize;
	}
	return size;
}
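
/*
 * Worked example of the rounding above: with roundsize a power of two,
 * (size + roundsize - 1) & ~(roundsize - 1) rounds size up to the next
 * multiple of roundsize.  For roundsize = 512 MB and memSize = 3000 MB,
 * the rounded value is 3072 MB; the difference, 3072 - 3000 = 72 MB, is
 * what this path returns.  The specific sizes are illustrative only.
 */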

uint64_t
ml_get_abstime_offset(void)
{
	return rtclock_base_abstime;
}

uint64_t
ml_get_conttime_offset(void)
{
	return rtclock_base_abstime + mach_absolutetime_asleep;
}

uint64_t
ml_get_time_since_reset(void)
{
	/* The timebase resets across S2R, so just return the raw value. */
	return ml_get_hwclock();
}

void
ml_set_reset_time(__unused uint64_t wake_time)
{
}

uint64_t
ml_get_conttime_wake_time(void)
{
	/* The wake time is simply our continuous time offset. */
	return ml_get_conttime_offset();
}

/*
 * ml_snoop_thread_is_on_core(thread_t thread)
 * Check if the given thread is currently on core.  This function does not take
 * locks, disable preemption, or otherwise guarantee synchronization.  The
 * result should be considered advisory.
 */
bool
ml_snoop_thread_is_on_core(thread_t thread)
{
	unsigned int cur_cpu_num = 0;

	for (cur_cpu_num = 0; cur_cpu_num < MAX_CPUS; cur_cpu_num++) {
		if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr) {
			if (CpuDataEntries[cur_cpu_num].cpu_data_vaddr->cpu_active_thread == thread) {