/* spinlock.h */
  1. #ifndef __SPINLOCK_H__
  2. #define __SPINLOCK_H__
  3. #pragma once
  4. #include "config.h"
  5. #include "dbgutil.h"
  6. #if _WIN32
  7. #include <intrin.h>
  8. #endif
  9. #include <assert.h>
  10. #include <winpr/interlocked.h>
  11. #ifdef __cplusplus
  12. extern "C" {
  13. #endif
  14. typedef volatile LONG lock_t[1];
  15. #define FASTLOCK_INIT() {0}
  16. /* "REP NOP" is PAUSE, coded for tools that don't know it by that name. */
  17. #if (defined(__GNUC__) || defined(__clang__)) && (defined(__i386__) || defined(__x86_64__))
  18. #define PAUSE_INSTRUCTION() __asm__ __volatile__("pause\n") /* Some assemblers can't do REP NOP, so go with PAUSE. */
  19. #elif defined(_MSC_VER) && (defined(_M_IX86) || defined(_M_X64))
  20. #define PAUSE_INSTRUCTION() _mm_pause() /* this is actually "rep nop" and not a SIMD instruction. No inline asm in MSVC x86-64! */
  21. #elif defined(__WATCOMC__) && defined(__386__)
  22. /* watcom assembler rejects PAUSE if CPU < i686, and it refuses REP NOP as an invalid combination. Hardcode the bytes. */
  23. extern _inline void PAUSE_INSTRUCTION(void);
  24. #pragma aux PAUSE_INSTRUCTION = "db 0f3h,90h"
  25. #else
  26. #define PAUSE_INSTRUCTION()
  27. #endif
  28. // non-reentrance
  29. static __inline void fastlock_init(lock_t l)
  30. {
  31. l[0] = 0;
  32. }
/* Acquire the lock, spinning until it is free (implemented in the .c file). */
TOOLKIT_API void fastlock_enter(lock_t l);
/* Release the lock.
 * _ReadWriteBarrier() is a compiler-only fence keeping the critical section's
 * stores from sinking below the release; InterlockedExchange then publishes
 * the unlocked (0) state atomically. */
static __inline void fastlock_leave(lock_t l)
{
_ReadWriteBarrier();
InterlockedExchange(l, 0);
}
/* Debug teardown check: the lock must be unlocked (0) when destroyed. */
static __inline void fastlock_term(lock_t l)
{
TOOLKIT_ASSERT(l[0] == 0);
}
/* Spin lock, note: reentrant (unlike fastlock above)!!!
 * Default spin count: 0x3ff = binary 11 1111 1111 = 1023 iterations,
 * used when spinlock_enter is called with spin_count == -1.
 */
#define DEFAULT_SPIN_COUNT 0x3ff
// Because zero is an invalid thread id, _value == 0 means "unlocked";
// when a thread acquires the lock, _value is set to that thread's id.
typedef struct spinlock_t {
volatile DWORD _value;      /* owning thread id, 0 when free */
volatile DWORD _reentrance; /* presumably the owner's recursion depth — maintained by spinlock_enter/leave in the .c, not visible here */
}spinlock_t;
  53. static __inline void spinlock_init(spinlock_t *plock)
  54. {
  55. plock->_value = 0;
  56. plock->_reentrance = 0;
  57. }
/**
 * enter lock — acquire the spin lock (reentrant; implemented in the .c file)
 * @param sl lock to acquire
 * @param spin_count -1 use default (DEFAULT_SPIN_COUNT)
 */
TOOLKIT_API void spinlock_enter(volatile spinlock_t *sl, int spin_count);
/**
 * leave lock — release one acquisition of the spin lock
 */
TOOLKIT_API void spinlock_leave(volatile spinlock_t *sl);
/* Packed reader/writer state; nAtomic aliases all three bit-fields so the
 * whole word can be read/updated as one unsigned int. Assuming the usual
 * LSB-first allocation (bit-field order is implementation-defined):
 *   readerCount  : 16 bits — active readers (see rw_spinlock_is_reading)
 *   writerCount  : 15 bits — presumably writers waiting/pending; verify in .c
 *   writerActive :  1 bit  — set while a writer holds the lock
 */
union rw_spinlock_barrier {
struct {
unsigned int readerCount : 32 / 2;
unsigned int writerCount : 32 / 2 - 1;
unsigned int writerActive : 1 ;
};
unsigned int nAtomic ;
};
/* volatile-qualified: every access through rw_spinlock_t* re-reads memory */
typedef volatile struct rw_spinlock_s {
union rw_spinlock_barrier barrier;
}rw_spinlock_t;
  78. static __inline void rw_spinlock_init(rw_spinlock_t *rwlock)
  79. {
  80. rwlock->barrier.nAtomic = 0;
  81. }
// Algorithm per: John M. Mellor-Crummey, Michael L. Scott "Scalable Reader-Writer Synchronization for Shared-Memory Multiprocessors", 1991
/* Acquire shared (read) access (implemented in the .c file). */
TOOLKIT_API void rw_spinlock_read_lock(rw_spinlock_t *rwlock);
/* Release shared (read) access. */
TOOLKIT_API void rw_spinlock_read_unlock(rw_spinlock_t *rwlock);
/* Acquire exclusive (write) access. */
TOOLKIT_API void rw_spinlock_write_lock(rw_spinlock_t *rwlock);
/* Release exclusive (write) access. */
TOOLKIT_API void rw_spinlock_write_unlock(rw_spinlock_t *rwlock);
  87. static __inline int rw_spinlock_is_reading(rw_spinlock_t *rwlock)
  88. {
  89. return rwlock->barrier.readerCount != 0;
  90. }
  91. static __inline int rw_spinlock_is_writing(rw_spinlock_t *rwlock)
  92. {
  93. return rwlock->barrier.writerActive == 1 ;
  94. }
  95. #ifdef __cplusplus
  96. } // extern "C" {
  97. #endif
  98. #endif //__SPINLOCK_H__