/*
 * Copyright (c) 2007-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 *	cpu specific routines
 */
#include <kern/kalloc.h>
#include <kern/machine.h>
#include <kern/cpu_number.h>
#include <kern/thread.h>
#include <kern/timer_queue.h>
#include <arm/cpu_data.h>
#include <arm/cpuid.h>
#include <arm/caches_internal.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_internal.h>
#include <arm/misc_protos.h>
#include <arm/machine_cpu.h>
#include <arm/rtclock.h>
#include <arm/proc_reg.h>
#include <mach/processor_info.h>
#include <vm/pmap.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <pexpert/arm/board_config.h>
#include <pexpert/arm/protos.h>
#include <sys/kdebug.h>

#include <machine/atomic.h>

#if KPC
#include <kern/kpc.h>
#endif
extern unsigned int resume_idle_cpu;
extern unsigned int start_cpu;

unsigned int start_cpu_paddr;

extern boolean_t	idle_enable;
extern unsigned int	real_ncpus;
extern uint64_t		wake_abstime;

extern void *wfi_inst;
unsigned wfi_fast = 1;
unsigned patch_to_nop = 0xe1a00000;
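/*
 * 0xe1a00000 is the ARM encoding of "mov r0, r0", i.e. a NOP; when the
 * "wfi" boot-arg disables wait-for-interrupt idling, this word is copied
 * over wfi_inst in cpu_machine_idle_init() to patch the WFI out.
 */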
void *LowExceptionVectorsAddr;

#define IOS_STATE		(((vm_offset_t)LowExceptionVectorsAddr + 0x80))
#define IOS_STATE_SIZE	(0x08UL)
static const uint8_t suspend_signature[] = {'X', 'S', 'O', 'M', 'P', 'S', 'U', 'S'};
static const uint8_t running_signature[] = {'X', 'S', 'O', 'M', 'N', 'N', 'U', 'R'};
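/*
 * Read back to front, the tokens spell "SUSPMOSX" and "RUNNMOSX". They are
 * stamped into the IOS_STATE window of the low exception vector page on
 * sleep entry and on wake, so anything inspecting that page across a
 * suspend cycle can distinguish a suspended system from a running one.
 */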
/*
 *	Routine:	cpu_bootstrap
 *	Function:
 */
void
cpu_bootstrap(void)
{
}

/*
 *	Routine:	cpu_sleep
 *	Function:
 */
void
cpu_sleep(void)
{
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	cpu_data_ptr->cpu_reset_handler = (vm_offset_t) start_cpu_paddr;
	cpu_data_ptr->cpu_flags |= SleepState;
	cpu_data_ptr->cpu_user_debug = NULL;

	CleanPoC_Dcache();

	PE_cpu_machine_quiesce(cpu_data_ptr->cpu_id);
}
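/*
 * Number of cores currently sitting in platform idle, maintained by
 * machine_track_platform_idle() at the bottom of this file.
 */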
_Atomic uint32_t cpu_idle_count = 0;
/*
 *	Routine:	cpu_idle
 *	Function:
 */
void __attribute__((noreturn))
cpu_idle(void)
{
	cpu_data_t	*cpu_data_ptr = getCpuDatap();
	uint64_t	new_idle_timeout_ticks = 0x0ULL, lastPop;

	if ((!idle_enable) || (cpu_data_ptr->cpu_signal & SIGPdisabled))
		Idle_load_context();
	if (!SetIdlePop())
		Idle_load_context();
	lastPop = cpu_data_ptr->rtcPop;

	pmap_switch_user_ttb(kernel_pmap);
	cpu_data_ptr->cpu_active_thread = current_thread();
	if (cpu_data_ptr->cpu_user_debug)
		arm_debug_set(NULL);
	cpu_data_ptr->cpu_user_debug = NULL;

	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, TRUE, &new_idle_timeout_ticks);
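	/*
	 * The idle-timer handshake: the notify callback above returns the
	 * desired timeout through new_idle_timeout_ticks, where 0 means
	 * "no idle timer" and any other value is an interval to be turned
	 * into an absolute deadline below.
	 */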
	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
		if (cpu_data_ptr->rtcPop != lastPop)
			SetIdlePop();
	}

#if KPC
	kpc_idle();
#endif
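	/*
	 * Everything is re-armed; drop into wait-for-interrupt. wfi_fast
	 * (cleared by the wfi=2 boot-arg) selects the fast WFI variant.
	 */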
	platform_cache_idle_enter();
	cpu_idle_wfi((boolean_t) wfi_fast);
	platform_cache_idle_exit();

	ClearIdlePop(TRUE);

	cpu_idle_exit();
}
/*
 *	Routine:	cpu_idle_exit
 *	Function:
 */
void
cpu_idle_exit(void)
{
	uint64_t	new_idle_timeout_ticks = 0x0ULL;
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

#if KPC
	kpc_idle_exit();
#endif

	pmap_set_pmap(cpu_data_ptr->cpu_active_thread->map->pmap, current_thread());
	if (cpu_data_ptr->cpu_idle_notify)
		((processor_idle_t) cpu_data_ptr->cpu_idle_notify) (cpu_data_ptr->cpu_id, FALSE, &new_idle_timeout_ticks);
	if (cpu_data_ptr->idle_timer_notify != 0) {
		if (new_idle_timeout_ticks == 0x0ULL) {
			/* turn off the idle timer */
			cpu_data_ptr->idle_timer_deadline = 0x0ULL;
		} else {
			/* set the new idle timeout */
			clock_absolutetime_interval_to_deadline(new_idle_timeout_ticks, &cpu_data_ptr->idle_timer_deadline);
		}
		timer_resync_deadlines();
	}

	Idle_load_context();
}
void
cpu_init(void)
{
	cpu_data_t	*cdp = getCpuDatap();
	arm_cpu_info_t	*cpu_info_p;

	if (cdp->cpu_type != CPU_TYPE_ARM) {

		cdp->cpu_type = CPU_TYPE_ARM;

		timer_call_queue_init(&cdp->rtclock_timer.queue);
		cdp->rtclock_timer.deadline = EndOfAllTime;

		if (cdp == &BootCpuData) {
			do_cpuid();
			do_cacheid();
			do_mvfpid();
		} else {
			/*
			 * We initialize non-boot CPUs here; the boot CPU is
			 * dealt with as part of pmap_bootstrap.
			 */
			pmap_cpu_data_init();
		}
		/* ARM_SMP: Assuming identical cpu */
		do_debugid();

		cpu_info_p = cpuid_info();

		/* switch based on CPU's reported architecture */
		switch (cpu_info_p->arm_info.arm_arch) {
		case CPU_ARCH_ARMv4T:
		case CPU_ARCH_ARMv5T:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V4T;
			break;
		case CPU_ARCH_ARMv5TE:
		case CPU_ARCH_ARMv5TEJ:
			if (cpu_info_p->arm_info.arm_implementor == CPU_VID_INTEL)
				cdp->cpu_subtype = CPU_SUBTYPE_ARM_XSCALE;
			else
				cdp->cpu_subtype = CPU_SUBTYPE_ARM_V5TEJ;
			break;
		case CPU_ARCH_ARMv6:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V6;
			break;
		case CPU_ARCH_ARMv7:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7;
			break;
		case CPU_ARCH_ARMv7f:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7F;
			break;
		case CPU_ARCH_ARMv7s:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7S;
			break;
		case CPU_ARCH_ARMv7k:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_V7K;
			break;
		default:
			cdp->cpu_subtype = CPU_SUBTYPE_ARM_ALL;
			break;
		}

		cdp->cpu_threadtype = CPU_THREADTYPE_NONE;
	}
	cdp->cpu_stat.irq_ex_cnt_wake = 0;
	cdp->cpu_stat.ipi_cnt_wake = 0;
	cdp->cpu_stat.timer_cnt_wake = 0;
	cdp->cpu_running = TRUE;
	cdp->cpu_sleep_token_last = cdp->cpu_sleep_token;
	cdp->cpu_sleep_token = 0x0UL;
}
cpu_data_t *
cpu_data_alloc(boolean_t is_boot_cpu)
{
	cpu_data_t	*cpu_data_ptr = NULL;

	if (is_boot_cpu)
		cpu_data_ptr = &BootCpuData;
	else {
		void	*irq_stack = NULL;
		void	*fiq_stack = NULL;

		if ((kmem_alloc(kernel_map, (vm_offset_t *)&cpu_data_ptr, sizeof(cpu_data_t), VM_KERN_MEMORY_CPU)) != KERN_SUCCESS)
			goto cpu_data_alloc_error;

		bzero((void *)cpu_data_ptr, sizeof(cpu_data_t));

		if ((irq_stack = kalloc(INTSTACK_SIZE)) == 0)
			goto cpu_data_alloc_error;
#if __BIGGEST_ALIGNMENT__
		/* force 16-byte alignment */
		if ((uint32_t)irq_stack & 0x0F)
			irq_stack = (void *)((uint32_t)irq_stack + (0x10 - ((uint32_t)irq_stack & 0x0F)));
#endif
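		/*
		 * Equivalent to rounding up to the next 16-byte boundary,
		 * i.e. (addr + 0x0F) & ~0x0F; only the stack base moves, the
		 * allocation itself is untouched.
		 */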
		cpu_data_ptr->intstack_top = (vm_offset_t)irq_stack + INTSTACK_SIZE;
		cpu_data_ptr->istackptr = cpu_data_ptr->intstack_top;

		if ((fiq_stack = kalloc(PAGE_SIZE)) == 0)
			goto cpu_data_alloc_error;
#if __BIGGEST_ALIGNMENT__
		/* force 16-byte alignment */
		if ((uint32_t)fiq_stack & 0x0F)
			fiq_stack = (void *)((uint32_t)fiq_stack + (0x10 - ((uint32_t)fiq_stack & 0x0F)));
#endif
		cpu_data_ptr->fiqstack_top = (vm_offset_t)fiq_stack + PAGE_SIZE;
		cpu_data_ptr->fiqstackptr = cpu_data_ptr->fiqstack_top;
	}
	cpu_data_ptr->cpu_processor = cpu_processor_alloc(is_boot_cpu);
	if (cpu_data_ptr->cpu_processor == (struct processor *)NULL)
		goto cpu_data_alloc_error;

	return cpu_data_ptr;

cpu_data_alloc_error:
	panic("cpu_data_alloc() failed\n");
	return (cpu_data_t *)NULL;
}
void
cpu_data_free(cpu_data_t *cpu_data_ptr)
{
	if (cpu_data_ptr == &BootCpuData)
		return;

	cpu_processor_free( cpu_data_ptr->cpu_processor);
	kfree( (void *)(cpu_data_ptr->intstack_top - INTSTACK_SIZE), INTSTACK_SIZE);
	kfree( (void *)(cpu_data_ptr->fiqstack_top - PAGE_SIZE), PAGE_SIZE);
	kmem_free(kernel_map, (vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t));
}
void
cpu_data_init(cpu_data_t *cpu_data_ptr)
{
	uint32_t i;

	cpu_data_ptr->cpu_flags = 0;
	cpu_data_ptr->cpu_exc_vectors = (vm_offset_t)&ExceptionVectorsTable;
	cpu_data_ptr->interrupts_enabled = 0;
	cpu_data_ptr->cpu_int_state = 0;
	cpu_data_ptr->cpu_pending_ast = AST_NONE;
	cpu_data_ptr->cpu_cache_dispatch = (void *) 0;
	cpu_data_ptr->rtcPop = EndOfAllTime;
	cpu_data_ptr->rtclock_datap = &RTClockData;
	cpu_data_ptr->cpu_user_debug = NULL;
	cpu_data_ptr->cpu_base_timebase_low = 0;
	cpu_data_ptr->cpu_base_timebase_high = 0;
	cpu_data_ptr->cpu_idle_notify = (void *) 0;
	cpu_data_ptr->cpu_idle_latency = 0x0ULL;
	cpu_data_ptr->cpu_idle_pop = 0x0ULL;
	cpu_data_ptr->cpu_reset_type = 0x0UL;
	cpu_data_ptr->cpu_reset_handler = 0x0UL;
	cpu_data_ptr->cpu_reset_assist = 0x0UL;
	cpu_data_ptr->cpu_regmap_paddr = 0x0ULL;
	cpu_data_ptr->cpu_phys_id = 0x0UL;
	cpu_data_ptr->cpu_l2_access_penalty = 0;
	cpu_data_ptr->cpu_cluster_type = CLUSTER_TYPE_SMP;
	cpu_data_ptr->cpu_cluster_id = 0;
	cpu_data_ptr->cpu_l2_id = 0;
	cpu_data_ptr->cpu_l2_size = 0;
	cpu_data_ptr->cpu_l3_id = 0;
	cpu_data_ptr->cpu_l3_size = 0;

	cpu_data_ptr->cpu_signal = SIGPdisabled;

#if DEBUG || DEVELOPMENT
	cpu_data_ptr->failed_xcall = NULL;
	cpu_data_ptr->failed_signal = 0;
	cpu_data_ptr->failed_signal_count = 0;
#endif

	cpu_data_ptr->cpu_get_fiq_handler = NULL;
	cpu_data_ptr->cpu_tbd_hardware_addr = NULL;
	cpu_data_ptr->cpu_tbd_hardware_val = NULL;
	cpu_data_ptr->cpu_get_decrementer_func = NULL;
	cpu_data_ptr->cpu_set_decrementer_func = NULL;
	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
	cpu_data_ptr->cpu_sleep_token_last = 0x00000000UL;
	cpu_data_ptr->cpu_xcall_p0 = NULL;
	cpu_data_ptr->cpu_xcall_p1 = NULL;

#if __ARM_SMP__ && defined(ARMA7)
	cpu_data_ptr->cpu_CLWFlush_req = 0x0ULL;
	cpu_data_ptr->cpu_CLWFlush_last = 0x0ULL;
	cpu_data_ptr->cpu_CLWClean_req = 0x0ULL;
	cpu_data_ptr->cpu_CLWClean_last = 0x0ULL;
	cpu_data_ptr->cpu_CLW_active = 0x1UL;
#endif

	pmap_cpu_data_t *pmap_cpu_data_ptr = &cpu_data_ptr->cpu_pmap_cpu_data;

	pmap_cpu_data_ptr->cpu_user_pmap = (struct pmap *) NULL;
	pmap_cpu_data_ptr->cpu_user_pmap_stamp = 0;
	pmap_cpu_data_ptr->cpu_number = PMAP_INVALID_CPU_NUM;
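	/*
	 * Start with no cached ASID high bits; the pmap layer repopulates
	 * this array as address space IDs are handed out.
	 */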
	for (i = 0; i < (sizeof(pmap_cpu_data_ptr->cpu_asid_high_bits) / sizeof(*pmap_cpu_data_ptr->cpu_asid_high_bits)); i++) {
		pmap_cpu_data_ptr->cpu_asid_high_bits[i] = 0;
	}

	cpu_data_ptr->halt_status = CPU_NOT_HALTED;
}
void
cpu_data_register(cpu_data_t *cpu_data_ptr)
{
	int	cpu;

	cpu = OSIncrementAtomic((SInt32*)&real_ncpus);
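	/*
	 * OSIncrementAtomic returns the pre-increment value of real_ncpus,
	 * which becomes this CPU's number.
	 */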
	if (real_ncpus > MAX_CPUS) {
		return;
	}

	cpu_data_ptr->cpu_number = cpu;
	CpuDataEntries[cpu].cpu_data_vaddr = cpu_data_ptr;
	CpuDataEntries[cpu].cpu_data_paddr = (void *)ml_vtophys( (vm_offset_t)cpu_data_ptr);
}
422 kprintf("cpu_start() cpu: %d\n", cpu
);
423 if (cpu
== cpu_number()) {
428 cpu_data_t
*cpu_data_ptr
;
429 thread_t first_thread
;
431 cpu_data_ptr
= CpuDataEntries
[cpu
].cpu_data_vaddr
;
432 cpu_data_ptr
->cpu_reset_handler
= (vm_offset_t
) start_cpu_paddr
;
434 cpu_data_ptr
->cpu_pmap_cpu_data
.cpu_user_pmap
= NULL
;
436 if (cpu_data_ptr
->cpu_processor
->next_thread
!= THREAD_NULL
)
437 first_thread
= cpu_data_ptr
->cpu_processor
->next_thread
;
439 first_thread
= cpu_data_ptr
->cpu_processor
->idle_thread
;
440 cpu_data_ptr
->cpu_active_thread
= first_thread
;
441 first_thread
->machine
.CpuDatap
= cpu_data_ptr
;
443 flush_dcache((vm_offset_t
)&CpuDataEntries
[cpu
], sizeof(cpu_data_entry_t
), FALSE
);
444 flush_dcache((vm_offset_t
)cpu_data_ptr
, sizeof(cpu_data_t
), FALSE
);
445 (void) PE_cpu_start(cpu_data_ptr
->cpu_id
, (vm_offset_t
)NULL
, (vm_offset_t
)NULL
);
void
cpu_timebase_init(boolean_t from_boot __unused)
{
	cpu_data_t *cdp = getCpuDatap();

	if (cdp->cpu_get_fiq_handler == NULL) {
		cdp->cpu_get_fiq_handler = rtclock_timebase_func.tbd_fiq_handler;
		cdp->cpu_get_decrementer_func = rtclock_timebase_func.tbd_get_decrementer;
		cdp->cpu_set_decrementer_func = rtclock_timebase_func.tbd_set_decrementer;
		cdp->cpu_tbd_hardware_addr = (void *)rtclock_timebase_addr;
		cdp->cpu_tbd_hardware_val = (void *)rtclock_timebase_val;
	}
	cdp->cpu_decrementer = 0x7FFFFFFFUL;
	cdp->cpu_timebase_low = 0x0UL;
	cdp->cpu_timebase_high = 0x0UL;
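	/*
	 * The #if below sidesteps a v7k ABI hazard: under that ABI uint64_t
	 * is 8-byte aligned, so the single 64-bit store through a cast
	 * pointer (the #else arm) would assume an alignment that the two
	 * adjacent 32-bit fields do not guarantee; writing the halves
	 * separately is always safe.
	 */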
#if __arm__ && (__BIGGEST_ALIGNMENT__ > 4)
	/* For the newer ARMv7k ABI where 64-bit types are 64-bit aligned, but pointers
	 * are 32-bit. */
	cdp->cpu_base_timebase_low = rtclock_base_abstime_low;
	cdp->cpu_base_timebase_high = rtclock_base_abstime_high;
#else
	*((uint64_t *) & cdp->cpu_base_timebase_low) = rtclock_base_abstime;
#endif
}
__attribute__((noreturn))
void
ml_arm_sleep(void)
{
	cpu_data_t	*cpu_data_ptr = getCpuDatap();

	if (cpu_data_ptr == &BootCpuData) {
		cpu_data_t	*target_cdp;
		unsigned int	cpu;

		for (cpu = 0; cpu < MAX_CPUS; cpu++) {
			target_cdp = (cpu_data_t *)CpuDataEntries[cpu].cpu_data_vaddr;
			if (target_cdp == (cpu_data_t *)NULL)
				break;

			if (target_cdp == cpu_data_ptr)
				continue;

			while (target_cdp->cpu_sleep_token != ARM_CPU_ON_SLEEP_PATH);
		}

		/* Now that the other cores have entered the sleep path, set
		 * the abstime fixup we'll use when we resume.*/
		rtclock_base_abstime = ml_get_timebase();
		wake_abstime = rtclock_base_abstime;
	} else {
		platform_cache_disable();
		CleanPoU_Dcache();
	}

	cpu_data_ptr->cpu_sleep_token = ARM_CPU_ON_SLEEP_PATH;
#if __ARM_SMP__ && defined(ARMA7)
	cpu_data_ptr->cpu_CLWFlush_req = 0;
	cpu_data_ptr->cpu_CLWClean_req = 0;
	__builtin_arm_dmb(DMB_ISH);
	cpu_data_ptr->cpu_CLW_active = 0;
#endif
	if (cpu_data_ptr == &BootCpuData) {
		platform_cache_disable();
		platform_cache_shutdown();
		bcopy((const void *)suspend_signature, (void *)(IOS_STATE), IOS_STATE_SIZE);
	} else
		CleanPoC_DcacheRegion((vm_offset_t) cpu_data_ptr, sizeof(cpu_data_t));

	__builtin_arm_dsb(DSB_SY);
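	/*
	 * After the DSB the core is expected to be powered down by the
	 * platform; the loop below only runs if that fails, parking the
	 * core in WFE (where enabled) instead of letting it run on.
	 */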
	while (TRUE) {
#if __ARM_ENABLE_WFE_
		__builtin_arm_wfe();
#endif
	}
}
void
cpu_machine_idle_init(boolean_t from_boot)
{
	static const unsigned int	*BootArgs_paddr = (unsigned int *)NULL;
	static const unsigned int	*CpuDataEntries_paddr = (unsigned int *)NULL;
	static unsigned int		resume_idle_cpu_paddr = (unsigned int )NULL;
	cpu_data_t			*cpu_data_ptr = getCpuDatap();

	if (from_boot) {
		unsigned int	jtag = 0;
		unsigned int	wfi;
545 if (PE_parse_boot_argn("jtag", &jtag
, sizeof (jtag
))) {
553 if (!PE_parse_boot_argn("wfi", &wfi
, sizeof (wfi
)))
557 bcopy_phys((addr64_t
)ml_static_vtop((vm_offset_t
)&patch_to_nop
),
558 (addr64_t
)ml_static_vtop((vm_offset_t
)&wfi_inst
), sizeof(unsigned));
		LowExceptionVectorsAddr = (void *)ml_io_map(ml_vtophys((vm_offset_t)gPhysBase), PAGE_SIZE);

		/* Copy Exception Vectors low, but don't touch the sleep token */
		bcopy((void *)&ExceptionLowVectorsBase, (void *)LowExceptionVectorsAddr, 0x90);
		bcopy(((void *)(((vm_offset_t)&ExceptionLowVectorsBase) + 0xA0)), ((void *)(((vm_offset_t)LowExceptionVectorsAddr) + 0xA0)), ARM_PGBYTES - 0xA0);
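		/*
		 * The two-part copy deliberately leaves bytes 0x90..0xA0 of
		 * the low page untouched: that window holds the sleep token
		 * preserved across suspend, so only the vectors around it
		 * are refreshed.
		 */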
		start_cpu_paddr = ml_static_vtop((vm_offset_t)&start_cpu);

		BootArgs_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)BootArgs);
		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&BootArgs_paddr),
			   (addr64_t)((unsigned int)(gPhysBase) +
				      ((unsigned int)&(ResetHandlerData.boot_args) - (unsigned int)&ExceptionLowVectorsBase)),
			   4);

		CpuDataEntries_paddr = (unsigned int *)ml_static_vtop((vm_offset_t)CpuDataEntries);
		bcopy_phys((addr64_t)ml_static_vtop((vm_offset_t)&CpuDataEntries_paddr),
			   (addr64_t)((unsigned int)(gPhysBase) +
				      ((unsigned int)&(ResetHandlerData.cpu_data_entries) - (unsigned int)&ExceptionLowVectorsBase)),
			   4);
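		/*
		 * Both bcopy_phys calls plant physical pointers into
		 * ResetHandlerData inside the low page, so the reset handler
		 * can locate the boot args and per-CPU data before the MMU
		 * is back on; the cache clean below pushes them to memory.
		 */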
		CleanPoC_DcacheRegion((vm_offset_t) phystokv((char *) (gPhysBase)), PAGE_SIZE);
		resume_idle_cpu_paddr = (unsigned int)ml_static_vtop((vm_offset_t)&resume_idle_cpu);
	}

	if (cpu_data_ptr == &BootCpuData) {
		bcopy(((const void *)running_signature), (void *)(IOS_STATE), IOS_STATE_SIZE);
	}

	cpu_data_ptr->cpu_reset_handler = resume_idle_cpu_paddr;
	clean_dcache((vm_offset_t)cpu_data_ptr, sizeof(cpu_data_t), FALSE);
}
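/*
 * cpu_idle_count is purely a statistic, so relaxed ordering suffices for
 * these updates; they impose no ordering on idle entry or exit itself.
 */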
void
machine_track_platform_idle(boolean_t entry)
{
	if (entry)
		(void)__c11_atomic_fetch_add(&cpu_idle_count, 1, __ATOMIC_RELAXED);
	else
		(void)__c11_atomic_fetch_sub(&cpu_idle_count, 1, __ATOMIC_RELAXED);
}