/*
- * Copyright (c) 2003-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2003-2009 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
*/
#include <machine/cpu_capabilities.h>
+#include <machine/commpage.h>
+#include <machine/asm.h>
+#include <assym.s>
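+/*
+ * assym.s is generated at build time; it is assumed here to supply the
+ * ASM_COMM_PAGE_* address and offset constants used below, while
+ * machine/asm.h provides the FRAME/EMARF prologue/epilogue macros.
+ */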
- .text
- .align 2, 0x90
- .globl __commpage_set_timestamp
-/* extern void _commpage_set_timestamp(uint64_t abstime, uint64_t secs); */
-__commpage_set_timestamp:
- push %ebp
- mov %esp,%ebp
-
- mov _commPagePtr32,%ecx
- sub $ _COMM_PAGE32_BASE_ADDRESS,%ecx
- mov _commPagePtr64,%edx /* point to 64-bit commpage too */
- mov %edx,%eax
- sub $ _COMM_PAGE32_START_ADDRESS,%edx /* because kernel is built 32-bit */
- test %eax,%eax
- cmovz %ecx,%edx /* if no 64-bit commpage, point to 32 with both */
-
- movl $0,_COMM_PAGE_TIMEENABLE(%ecx)
- movl $0,_COMM_PAGE_TIMEENABLE(%edx)
-
- mov 8(%ebp),%eax
- or 12(%ebp),%eax
+/*
+ * extern void commpage_sched_gen_inc(void);
+ */
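+/*
+ * Atomically increment the scheduler generation count in whichever
+ * commpages (32-bit and/or 64-bit) have been populated.  User-mode spin
+ * loops can re-read _COMM_PAGE_SCHED_GEN to notice that the scheduler
+ * has run since they last checked, and back off rather than keep
+ * spinning.
+ *
+ * commPagePtr32/64 hold the kernel mappings of the commpages; the fixed
+ * 32-bit user base/start address is subtracted so that the (32-bit)
+ * ASM_COMM_PAGE_* user addresses become valid offsets into either
+ * mapping.
+ */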
+ .text
+
+ .globl _commpage_sched_gen_inc
+_commpage_sched_gen_inc:
+#if defined (__x86_64__)
+ FRAME
+
+ /* Increment 32-bit commpage field if present */
+ movq _commPagePtr32(%rip),%rdx
+ testq %rdx,%rdx
je 1f
+ subq $(ASM_COMM_PAGE32_BASE_ADDRESS),%rdx
+ lock
+ incl ASM_COMM_PAGE_SCHED_GEN(%rdx)
- mov 8(%ebp),%eax
- mov %eax,_COMM_PAGE_TIMEBASE(%ecx)
- mov %eax,_COMM_PAGE_TIMEBASE(%edx)
- mov 12(%ebp),%eax
- mov %eax,_COMM_PAGE_TIMEBASE+4(%ecx)
- mov %eax,_COMM_PAGE_TIMEBASE+4(%edx)
-
- mov 16(%ebp),%eax
- mov %eax,_COMM_PAGE_TIMESTAMP(%ecx)
- mov %eax,_COMM_PAGE_TIMESTAMP(%edx)
- mov 20(%ebp),%eax
- mov %eax,_COMM_PAGE_TIMESTAMP+4(%ecx)
- mov %eax,_COMM_PAGE_TIMESTAMP+4(%edx)
-
- movl $1,_COMM_PAGE_TIMEENABLE(%ecx)
- movl $1,_COMM_PAGE_TIMEENABLE(%edx)
+ /* Increment 64-bit commpage field if present */
+ movq _commPagePtr64(%rip),%rdx
+ testq %rdx,%rdx
+ je 1f
+ subq $(ASM_COMM_PAGE32_START_ADDRESS),%rdx
+ lock
+ incl ASM_COMM_PAGE_SCHED_GEN(%rdx)
1:
- pop %ebp
+ EMARF
ret
-
- .text
- .align 2, 0x90
- .globl _commpage_set_nanotime
-/* extern void commpage_set_nanotime(uint64_t tsc_base, uint64_t ns_base, uint32_t scale, uint32_t shift); */
-_commpage_set_nanotime:
- push %ebp
- mov %esp,%ebp
-
- mov _commPagePtr32,%ecx
- testl %ecx,%ecx
+#elif defined (__i386__)
+ FRAME
+
+ /* Increment 32-bit commpage field if present */
+ mov _commPagePtr32,%edx
+ testl %edx,%edx
je 1f
+ sub $(ASM_COMM_PAGE32_BASE_ADDRESS),%edx
+ lock
+ incl ASM_COMM_PAGE_SCHED_GEN(%edx)
- sub $(_COMM_PAGE_BASE_ADDRESS),%ecx
- mov _commPagePtr64,%edx /* point to 64-bit commpage too */
- mov %edx,%eax
- sub $ _COMM_PAGE32_START_ADDRESS,%edx /* because kernel is built 32-bit */
- test %eax,%eax
- cmovz %ecx,%edx /* if no 64-bit commpage, point to 32 with both */
-
- mov 8(%ebp),%eax
- mov %eax,_COMM_PAGE_NT_TSC_BASE(%ecx)
- mov %eax,_COMM_PAGE_NT_TSC_BASE(%edx)
- mov 12(%ebp),%eax
- mov %eax,_COMM_PAGE_NT_TSC_BASE+4(%ecx)
- mov %eax,_COMM_PAGE_NT_TSC_BASE+4(%edx)
-
- mov 24(%ebp),%eax
- mov %eax,_COMM_PAGE_NT_SCALE(%ecx)
- mov %eax,_COMM_PAGE_NT_SCALE(%edx)
-
- mov 28(%ebp),%eax
- mov %eax,_COMM_PAGE_NT_SHIFT(%ecx)
- mov %eax,_COMM_PAGE_NT_SHIFT(%edx)
-
- mov 16(%ebp),%eax
- mov %eax,_COMM_PAGE_NT_NS_BASE(%ecx)
- mov %eax,_COMM_PAGE_NT_NS_BASE(%edx)
- mov 20(%ebp),%eax
- mov %eax,_COMM_PAGE_NT_NS_BASE+4(%ecx)
- mov %eax,_COMM_PAGE_NT_NS_BASE+4(%edx)
+ /* Increment 64-bit commpage field if present */
+ mov _commPagePtr64,%edx
+ testl %edx,%edx
+ je 1f
+ sub $(ASM_COMM_PAGE32_START_ADDRESS),%edx
+ lock
+ incl ASM_COMM_PAGE_SCHED_GEN(%edx)
1:
- pop %ebp
+ EMARF
ret
-
-#define CPN(routine) _commpage_ ## routine
+#else
+#error unsupported architecture
+#endif
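+
+/*
+ * The routine above, sketched in C for reference (illustrative only:
+ * types are loosened and OSAddAtomic stands in for the "lock incl"):
+ *
+ *	if (commPagePtr32 != NULL)
+ *		OSAddAtomic(1, (volatile SInt32 *)((uintptr_t)commPagePtr32
+ *		    + _COMM_PAGE_SCHED_GEN - _COMM_PAGE32_BASE_ADDRESS));
+ *	if (commPagePtr64 != NULL)
+ *		OSAddAtomic(1, (volatile SInt32 *)((uintptr_t)commPagePtr64
+ *		    + _COMM_PAGE_SCHED_GEN - _COMM_PAGE32_START_ADDRESS));
+ */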
/* pointers to the 32-bit commpage routine descriptors */
/* WARNING: these must be sorted by commpage address! */
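+/* Each COMMPAGE_DESCRIPTOR_REFERENCE is taken to emit a pointer-sized */
+/* (.long on i386, .quad on x86_64) reference to a commpage_descriptor; */
+/* a zero entry terminates each list (see the walk sketched at the end */
+/* of this file). */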
.const_data
- .align 2
+ .align 3
.globl _commpage_32_routines
_commpage_32_routines:
- .long CPN(compare_and_swap32_mp)
- .long CPN(compare_and_swap32_up)
- .long CPN(compare_and_swap64_mp)
- .long CPN(compare_and_swap64_up)
- .long CPN(atomic_add32_mp)
- .long CPN(atomic_add32_up)
- .long CPN(mach_absolute_time)
- .long CPN(spin_lock_try_mp)
- .long CPN(spin_lock_try_up)
- .long CPN(spin_lock_mp)
- .long CPN(spin_lock_up)
- .long CPN(spin_unlock)
- .long CPN(pthread_getspecific)
- .long CPN(gettimeofday)
- .long CPN(sys_flush_dcache)
- .long CPN(sys_icache_invalidate)
- .long CPN(pthread_self)
-// .long CPN(relinquish)
- .long CPN(bit_test_and_set_mp)
- .long CPN(bit_test_and_set_up)
- .long CPN(bit_test_and_clear_mp)
- .long CPN(bit_test_and_clear_up)
- .long CPN(bzero_scalar)
- .long CPN(bzero_sse3)
- .long CPN(bcopy_scalar)
- .long CPN(bcopy_sse3)
- .long CPN(bcopy_sse4)
- .long CPN(old_nanotime)
- .long CPN(memset_pattern_sse3)
- .long CPN(longcopy_sse4)
- .long CPN(nanotime)
+ COMMPAGE_DESCRIPTOR_REFERENCE(preempt)
+ COMMPAGE_DESCRIPTOR_REFERENCE(backoff)
+ COMMPAGE_DESCRIPTOR_REFERENCE(pfz_enqueue)
+ COMMPAGE_DESCRIPTOR_REFERENCE(pfz_dequeue)
+ COMMPAGE_DESCRIPTOR_REFERENCE(pfz_mutex_lock)
+#if defined (__i386__)
.long 0
+#elif defined (__x86_64__)
+ .quad 0
+#else
+#error unsupported architecture
+#endif
/* pointers to the 64-bit commpage routine descriptors */
/* WARNING: these must be sorted by commpage address! */
.const_data
- .align 2
+ .align 3
.globl _commpage_64_routines
_commpage_64_routines:
- .long CPN(compare_and_swap32_mp_64)
- .long CPN(compare_and_swap32_up_64)
- .long CPN(compare_and_swap64_mp_64)
- .long CPN(compare_and_swap64_up_64)
- .long CPN(atomic_add32_mp_64)
- .long CPN(atomic_add32_up_64)
- .long CPN(atomic_add64_mp_64)
- .long CPN(atomic_add64_up_64)
- .long CPN(mach_absolute_time)
- .long CPN(spin_lock_try_mp_64)
- .long CPN(spin_lock_try_up_64)
- .long CPN(spin_lock_mp_64)
- .long CPN(spin_lock_up_64)
- .long CPN(spin_unlock_64)
- .long CPN(pthread_getspecific_64)
- .long CPN(gettimeofday_64)
- .long CPN(sys_flush_dcache_64)
- .long CPN(sys_icache_invalidate) /* same routine as 32-bit version, just a "ret" */
- .long CPN(pthread_self_64)
- .long CPN(bit_test_and_set_mp_64)
- .long CPN(bit_test_and_set_up_64)
- .long CPN(bit_test_and_clear_mp_64)
- .long CPN(bit_test_and_clear_up_64)
- .long CPN(bzero_sse3_64)
- .long CPN(bcopy_sse4_64)
- .long CPN(old_nanotime_64)
- .long CPN(memset_pattern_sse3_64)
- .long CPN(longcopy_sse4_64)
- .long CPN(nanotime_64)
+ COMMPAGE_DESCRIPTOR_REFERENCE(preempt_64)
+ COMMPAGE_DESCRIPTOR_REFERENCE(backoff_64)
+ COMMPAGE_DESCRIPTOR_REFERENCE(pfz_enqueue_64)
+ COMMPAGE_DESCRIPTOR_REFERENCE(pfz_dequeue_64)
+ COMMPAGE_DESCRIPTOR_REFERENCE(pfz_mutex_lock_64)
+#if defined (__i386__)
.long 0
+#elif defined (__x86_64__)
+ .quad 0
+#else
+#error unsupported architecture
+#endif
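+
+/*
+ * Consumption sketch (illustrative only; commpage_stuff_routine is the
+ * kernel helper that lays one routine's code into the page): the
+ * populate code can walk a table to its zero terminator, e.g.
+ *
+ *	commpage_descriptor **rd;
+ *	for (rd = commpage_32_routines; *rd != NULL; rd++)
+ *		commpage_stuff_routine(*rd);
+ */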