diff --git a/osfmk/arm64/platform_tests.c b/osfmk/arm64/platform_tests.c
index d5391b3279a757fb2ab62d123c45c87ec1151a93..61b62741930dd155a7050eae1336a91b86fccf47 100644
--- a/osfmk/arm64/platform_tests.c
+++ b/osfmk/arm64/platform_tests.c
@@ -425,6 +425,7 @@ static kern_return_t
 lt_test_trylocks()
 {
        boolean_t success; 
+       extern unsigned int real_ncpus;
        
        /* 
         * First mtx try lock succeeds, second fails.
@@ -512,9 +513,15 @@ lt_test_trylocks()
        lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
        success = hw_lock_to(&lt_hw_lock, 100);
        T_ASSERT_NOTNULL(success, "First spin lock with timeout should succeed");
+       if (real_ncpus == 1) {
+               mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
+       }
        OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
        lt_wait_for_lock_test_threads();
        T_ASSERT_NULL(lt_thread_lock_success, "Second spin lock with timeout should fail and timeout");
+       if (real_ncpus == 1) {
+               mp_disable_preemption(); /* don't double-enable when we unlock */
+       }
        hw_lock_unlock(&lt_hw_lock);
 
        lt_reset();
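
Why the real_ncpus == 1 dance is needed: the holder of a hw_lock spins with preemption disabled, so on a single-CPU machine the helper thread blocked in hw_lock_to() can never be scheduled while the main thread holds the lock, and the test would hang instead of timing out. Briefly re-enabling preemption lets the helper run and time out; preemption is then disabled again before the unlock so that hw_lock_unlock() does not enable it a second time. Below is a minimal sketch of the pattern in isolation; demo_lock and demo_single_cpu_hold are hypothetical names, and the preemption behaviour of the hw_lock_* primitives is an assumption based on the comments in the hunks above.

	/*
	 * Sketch only: assumes hw_lock_lock() disables preemption and
	 * hw_lock_unlock() re-enables it. demo_lock / demo_single_cpu_hold
	 * are hypothetical, not part of the xnu test fixtures.
	 */
	static hw_lock_data_t demo_lock;

	static void
	demo_single_cpu_hold(void)
	{
		hw_lock_lock(&demo_lock);       /* takes lock, preemption now off */
		if (real_ncpus == 1) {
			mp_enable_preemption(); /* let the spinning helper run and time out */
		}
		/* ... wait for the helper thread to finish ... */
		if (real_ncpus == 1) {
			mp_disable_preemption(); /* restore state so unlock doesn't double-enable */
		}
		hw_lock_unlock(&demo_lock);     /* drops lock, preemption back on */
	}
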
@@ -524,9 +531,15 @@ lt_test_trylocks()
        OSMemoryBarrier();
        lt_start_trylock_thread(lt_trylock_hw_lock_with_to);
        hw_lock_lock(&lt_hw_lock);
+       if (real_ncpus == 1) {
+               mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
+       }
        OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
        lt_wait_for_lock_test_threads();
        T_ASSERT_NULL(lt_thread_lock_success, "after taking a spin lock, lock attempt with timeout should fail");
+       if (real_ncpus == 1) {
+               mp_disable_preemption(); /* don't double-enable when we unlock */
+       }
        hw_lock_unlock(&lt_hw_lock);
 
        success = lck_spin_try_lock(&lt_lck_spin_t);
@@ -541,9 +554,15 @@ lt_test_trylocks()
        lt_target_done_threads = 1;
        lt_start_trylock_thread(lt_trylock_spin_try_lock);
        lck_spin_lock(&lt_lck_spin_t);
+       if (real_ncpus == 1) {
+               mp_enable_preemption(); /* if we re-enable preemption, the other thread can timeout and exit */
+       }
        OSIncrementAtomic((volatile SInt32*)&lt_thread_lock_grabbed);
        lt_wait_for_lock_test_threads();
        T_ASSERT_NULL(lt_thread_lock_success, "spin trylock attempt of previously held lock should fail");
+       if (real_ncpus == 1) {
+               mp_disable_preemption(); /* don't double-enable when we unlock */
+       }
        lck_spin_unlock(&lt_lck_spin_t);
 
        return KERN_SUCCESS;
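
For context, the helper that each of these hunks synchronizes with plausibly has the shape sketched below: it parks on the lt_thread_lock_grabbed flag that the main thread raises via OSIncrementAtomic(), attempts the timed lock, and publishes the result in lt_thread_lock_success for the T_ASSERT_NULL() checks. This is a reconstruction from the call sites above, not the actual body of lt_trylock_hw_lock_with_to(); the lt_done_threads counter that lt_wait_for_lock_test_threads() presumably polls against lt_target_done_threads is likewise an assumption.

	/*
	 * Assumed shape of the helper thread, reconstructed from the call
	 * sites above; the real lt_trylock_hw_lock_with_to() may differ.
	 */
	static void
	lt_trylock_hw_lock_with_to(void)
	{
		/*
		 * Wait for the main thread to signal that it holds the lock.
		 * On one CPU this loop only makes progress while the holder
		 * has preemption enabled, which is what the hunks above fix.
		 */
		while (!lt_thread_lock_grabbed) {
			;
		}
		/* Timed attempt on the held lock; expected to time out. */
		lt_thread_lock_success = hw_lock_to(&lt_hw_lock, 100);
		if (lt_thread_lock_success) {
			hw_lock_unlock(&lt_hw_lock);
		}
		/* Assumed completion counter polled by lt_wait_for_lock_test_threads(). */
		OSIncrementAtomic((volatile SInt32*) &lt_done_threads);
	}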