/* spinlock.c — lightweight user-mode locks: recursive spinlock,
 * reader/writer spinlock, and a simple fast lock. */
  1. #include "precompile.h"
  2. #include "spinlock.h"
// See "Understanding the Linux Kernel", ch. 5 §2 (spin locks):
// http://book.opensourceproject.org.cn/kernel/kernel3rd/opensource/0596005652/understandlk-chp-5-sect-2.html
  4. static __inline void backoff(unsigned int nloop)
  5. {
  6. //and d, s d <- d & s
  7. //if ZF d, s
  8. #ifdef _WIN32
  9. __asm {
  10. and ecx, nloop;
  11. cmovz ecx, nloop;
  12. rep nop;
  13. }
  14. #else
  15. //TODO: need to confirm its correctness.
  16. __asm__ __volatile__ ("and %edi, %ecx;"
  17. "cmovz %edi, %ecx;"
  18. "rep;"
  19. "nop");
  20. #endif
  21. }
  22. TOOLKIT_API void spinlock_enter(volatile spinlock_t *sl, int spin_count)
  23. {
  24. DWORD tid = GetCurrentThreadId();
  25. if (sl->_value == tid) {
  26. sl->_reentrance++;
  27. } else {
  28. int i = 0;
  29. if (spin_count <= 0)
  30. spin_count = DEFAULT_SPIN_COUNT;
  31. while (InterlockedCompareExchange((LONG*)&sl->_value, (LONG)tid, 0) != 0) {
  32. for (; i < spin_count; ++i)
  33. if (sl->_value == 0)
  34. break;
  35. if (i >= spin_count)
  36. Sleep(1); /* yield cpu */
  37. }
  38. sl->_reentrance++;
  39. }
  40. }
/* Release one level of the recursive spinlock.  The lock word is cleared
 * only when the outermost spinlock_enter has been balanced. */
TOOLKIT_API void spinlock_leave(volatile spinlock_t *sl)
{
    /* Compiler barrier: keep stores to the protected data from sinking
       below the release. */
    _ReadWriteBarrier();
    if (--sl->_reentrance == 0)
        InterlockedExchange((LONG*)&sl->_value, 0);
}
/* Acquire the read (shared) side of the reader/writer spinlock.
 * Retries a CAS that installs readerCount+1; the expected snapshot forces
 * writerCount == 0 and writerActive == 0, so readers are admitted only while
 * no writer holds or is waiting for the lock (writer preference). */
TOOLKIT_API void rw_spinlock_read_lock(rw_spinlock_t *rwlock)
{
    union rw_spinlock_barrier bnew, b;
    for (;;) {
        /* Snapshot the whole barrier word for the compare-exchange below. */
        bnew.nAtomic = b.nAtomic = rwlock->barrier.nAtomic ;
        bnew.readerCount++ ;
        /* Force the writer fields to zero in both the expected and the new
           value: the CAS fails whenever any writer is present or pending. */
        bnew.writerActive = b.writerActive = 0 ;
        bnew.writerCount = b.writerCount = 0 ;
        if (InterlockedCompareExchange(&rwlock->barrier.nAtomic, bnew.nAtomic, b.nAtomic) == b.nAtomic)
            break;
        backoff(DEFAULT_SPIN_COUNT) ;
    }
}
/* Release the read side: atomically decrement readerCount.
 * Asserts no writer became active while readers were still inside. */
TOOLKIT_API void rw_spinlock_read_unlock(rw_spinlock_t *rwlock)
{
    union rw_spinlock_barrier bnew, b ;
    /* Compiler barrier: keep reads of the protected data above the release. */
    _ReadWriteBarrier();
    for (;;) {
        bnew.nAtomic = b.nAtomic = rwlock->barrier.nAtomic ;
        bnew.readerCount-- ;
        assert( b.writerActive == 0 ) ;
        if (InterlockedCompareExchange(&rwlock->barrier.nAtomic, bnew.nAtomic, b.nAtomic) == b.nAtomic)
            break;
        backoff(DEFAULT_SPIN_COUNT) ;
    }
}
/* Acquire the write (exclusive) side in two phases:
 *   1) announce intent by incrementing writerCount — this blocks new readers,
 *      since rw_spinlock_read_lock requires writerCount == 0;
 *   2) wait until all readers have drained and no other writer is active,
 *      then set writerActive = 1. */
TOOLKIT_API void rw_spinlock_write_lock(rw_spinlock_t *rwlock)
{
    union rw_spinlock_barrier bnew, b ;
    /* Phase 1: register as a pending writer. */
    for (;;) {
        bnew.nAtomic = b.nAtomic = rwlock->barrier.nAtomic ;
        bnew.writerCount++ ;
        if (InterlockedCompareExchange(&rwlock->barrier.nAtomic, bnew.nAtomic, b.nAtomic) == b.nAtomic)
            break;
        backoff(DEFAULT_SPIN_COUNT) ;
    }
    /* Phase 2: the expected snapshot forces readerCount == 0 and
       writerActive == 0, so the CAS keeps failing until the last reader
       leaves and any active writer has released. */
    for (;;) {
        bnew.nAtomic = b.nAtomic = rwlock->barrier.nAtomic ;
        bnew.readerCount =
        b.readerCount = 0 ;
        b.writerActive = 0 ;
        bnew.writerActive = 1 ;
        if (InterlockedCompareExchange(&rwlock->barrier.nAtomic, bnew.nAtomic, b.nAtomic) == b.nAtomic)
            break;
        backoff(DEFAULT_SPIN_COUNT) ;
    }
}
/* Release the write side: clear writerActive and drop this writer's pending
 * count.  Asserts the lock is in the expected exclusive state. */
TOOLKIT_API void rw_spinlock_write_unlock(rw_spinlock_t *rwlock)
{
    union rw_spinlock_barrier bnew, b ;
    /* Compiler barrier: keep stores to the protected data above the release. */
    _ReadWriteBarrier();
    for (;;) {
        bnew.nAtomic = b.nAtomic = rwlock->barrier.nAtomic ;
        assert( b.writerActive == 1 ) ;
        assert( b.readerCount == 0 ) ;
        bnew.writerActive = 0 ;
        --bnew.writerCount ;
        if (InterlockedCompareExchange(&rwlock->barrier.nAtomic, bnew.nAtomic, b.nAtomic) == b.nAtomic)
            break;
        backoff(DEFAULT_SPIN_COUNT) ;
    }
}
  109. #define BACKOFF_LIMIT 1000
  110. TOOLKIT_API void fastlock_enter( lock_t l )
  111. {
  112. int i = 0;
  113. int spin_count = 0;
  114. int backoffs = 0;
  115. while (InterlockedCompareExchange(l, 1, 0) == 1) {
  116. for (spin_count = i + 10000; i < spin_count; ++i) {
  117. if (*l == 0)
  118. break;
  119. #ifdef _WIN32
  120. #if _MSC_VER < 1400
  121. __asm { rep nop }
  122. #else
  123. /*TODO: the debug point locate here and cannot be kill by cmd 'taskkill' when close SpHost in debug env*/
  124. YieldProcessor();
  125. #endif
  126. #else
  127. __asm__ __volatile__("rep; nop"); /* a.k.a. PAUSE */
  128. #endif //_WIN32
  129. }
  130. backoffs++;
  131. if (backoffs % BACKOFF_LIMIT == 0) {
  132. Sleep(500);
  133. }
  134. SwitchToThread();
  135. }
  136. }
  137. int fastlock_tryenter(lock_t l)
  138. {
  139. int i = 0;
  140. int spin_count = 0;
  141. int backoffs = 0;
  142. while (InterlockedCompareExchange(l, 1, 0) == 1) {
  143. for (spin_count = i + 10000; i < spin_count; ++i) {
  144. if (*l == 0)
  145. break;
  146. #ifdef _WIN32
  147. #if _MSC_VER < 1400
  148. __asm { rep nop }
  149. #else
  150. /*TODO: the debug point locate here and cannot be kill by cmd 'taskkill' when close SpHost in debug env*/
  151. YieldProcessor();
  152. #endif
  153. #else
  154. __asm__ __volatile__("rep; nop"); /* a.k.a. PAUSE */
  155. #endif //_WIN32
  156. }
  157. backoffs++;
  158. if (backoffs % BACKOFF_LIMIT == 0) {
  159. Sleep(500);
  160. if (*l == 0)
  161. break;
  162. else
  163. return 0;
  164. }
  165. SwitchToThread();
  166. }
  167. return 1;
  168. }