 8 |  8 |  // option. This file may not be copied, modified, or distributed
 9 |  9 |  // except according to those terms.
10 | 10 |
11 |    | -use prelude::v1::*;
12 | 11 |
13 |    | -use sync::atomic;
14 |    | -use alloc::{mod, heap};
15 |    | -
16 |    | -use libc::DWORD;
   | 12 | +use kinds::Sync;
   | 13 | +use cell::UnsafeCell;
17 | 14 |  use sys::sync as ffi;
18 | 15 |
19 |    | -const SPIN_COUNT: DWORD = 4000;
20 |    | -
21 |    | -pub struct Mutex { inner: atomic::AtomicUint }
   | 16 | +pub struct Mutex { inner: UnsafeCell<ffi::SRWLOCK> }
22 | 17 |
23 |    | -pub const MUTEX_INIT: Mutex = Mutex { inner: atomic::ATOMIC_UINT_INIT };
   | 18 | +pub const MUTEX_INIT: Mutex = Mutex {
   | 19 | +    inner: UnsafeCell { value: ffi::SRWLOCK_INIT }
   | 20 | +};
24 | 21 |
25 | 22 |  unsafe impl Sync for Mutex {}
26 | 23 |
27 | 24 |  #[inline]
28 |    | -pub unsafe fn raw(m: &Mutex) -> ffi::LPCRITICAL_SECTION {
29 |    | -    m.get()
   | 25 | +pub unsafe fn raw(m: &Mutex) -> ffi::PSRWLOCK {
   | 26 | +    m.inner.get()
30 | 27 |  }
31 | 28 |
   | 29 | +// So you might be asking why we're using SRWLock instead of CriticalSection?
   | 30 | +//
   | 31 | +// 1. SRWLock is several times faster than CriticalSection according to benchmarks performed on both
   | 32 | +//    Windows 8 and Windows 7.
   | 33 | +//
   | 34 | +// 2. CriticalSection allows recursive locking while SRWLock deadlocks. The Unix implementation
   | 35 | +//    deadlocks, so consistency is preferred. See #19962 for more details.
   | 36 | +//
   | 37 | +// 3. While CriticalSection is fair and SRWLock is not, the current Rust policy is that there are
   | 38 | +//    no guarantees of fairness.
   | 39 | +
32 | 40 |  impl Mutex {
33 | 41 |      #[inline]
34 |    | -    pub unsafe fn new() -> Mutex {
35 |    | -        Mutex { inner: atomic::AtomicUint::new(init_lock() as uint) }
36 |    | -    }
   | 42 | +    pub unsafe fn new() -> Mutex { MUTEX_INIT }
37 | 43 |      #[inline]
38 | 44 |      pub unsafe fn lock(&self) {
39 |    | -        ffi::EnterCriticalSection(self.get())
   | 45 | +        ffi::AcquireSRWLockExclusive(self.inner.get())
40 | 46 |      }
41 | 47 |      #[inline]
42 | 48 |      pub unsafe fn try_lock(&self) -> bool {
43 |    | -        ffi::TryEnterCriticalSection(self.get()) != 0
   | 49 | +        ffi::TryAcquireSRWLockExclusive(self.inner.get()) != 0
44 | 50 |      }
45 | 51 |      #[inline]
46 | 52 |      pub unsafe fn unlock(&self) {
47 |    | -        ffi::LeaveCriticalSection(self.get())
   | 53 | +        ffi::ReleaseSRWLockExclusive(self.inner.get())
48 | 54 |      }
49 | 55 |      pub unsafe fn destroy(&self) {
50 |    | -        let lock = self.inner.swap(0, atomic::SeqCst);
51 |    | -        if lock != 0 { free_lock(lock as ffi::LPCRITICAL_SECTION) }
   | 56 | +        // ...
52 | 57 |      }
53 |    | -
54 |    | -    unsafe fn get(&self) -> ffi::LPCRITICAL_SECTION {
55 |    | -        match self.inner.load(atomic::SeqCst) {
56 |    | -            0 => {}
57 |    | -            n => return n as ffi::LPCRITICAL_SECTION
58 |    | -        }
59 |    | -        let lock = init_lock();
60 |    | -        match self.inner.compare_and_swap(0, lock as uint, atomic::SeqCst) {
61 |    | -            0 => return lock as ffi::LPCRITICAL_SECTION,
62 |    | -            _ => {}
63 |    | -        }
64 |    | -        free_lock(lock);
65 |    | -        return self.inner.load(atomic::SeqCst) as ffi::LPCRITICAL_SECTION;
66 |    | -    }
67 |    | -}
68 |    | -
69 |    | -unsafe fn init_lock() -> ffi::LPCRITICAL_SECTION {
70 |    | -    let block = heap::allocate(ffi::CRITICAL_SECTION_SIZE, 8)
71 |    | -                    as ffi::LPCRITICAL_SECTION;
72 |    | -    if block.is_null() { alloc::oom() }
73 |    | -    ffi::InitializeCriticalSectionAndSpinCount(block, SPIN_COUNT);
74 |    | -    return block;
75 |    | -}
76 |    | -
77 |    | -unsafe fn free_lock(h: ffi::LPCRITICAL_SECTION) {
78 |    | -    ffi::DeleteCriticalSection(h);
79 |    | -    heap::deallocate(h as *mut _, ffi::CRITICAL_SECTION_SIZE, 8);
80 | 58 |  }
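The new code leans entirely on `use sys::sync as ffi;` for the SRWLock types and functions (SRWLOCK, SRWLOCK_INIT, PSRWLOCK, AcquireSRWLockExclusive, TryAcquireSRWLockExclusive, ReleaseSRWLockExclusive), none of which appear in this diff. The sketch below shows, in current Rust syntax, roughly what such bindings look like for the documented Win32 API; it is not the actual sys::sync module, and everything apart from the Win32 names and signatures is illustrative.

// Sketch only: minimal Win32 SRWLock bindings of the kind the diff assumes
// behind `use sys::sync as ffi;`. The Win32 names and signatures are real;
// the module layout and field name are illustrative.
use std::ffi::c_void;
use std::ptr;

// An SRWLOCK is a single pointer-sized word, and SRWLOCK_INIT is its all-zero
// value, which is why MUTEX_INIT above can be a compile-time constant.
#[repr(C)]
pub struct SRWLOCK {
    pub ptr: *mut c_void,
}

pub const SRWLOCK_INIT: SRWLOCK = SRWLOCK { ptr: ptr::null_mut() };
pub type PSRWLOCK = *mut SRWLOCK;

extern "system" {
    // Blocks until the lock is held in exclusive (writer) mode.
    pub fn AcquireSRWLockExclusive(srwlock: PSRWLOCK);
    // Non-blocking attempt; returns nonzero on success, hence the `!= 0`
    // in try_lock above.
    pub fn TryAcquireSRWLockExclusive(srwlock: PSRWLOCK) -> u8;
    // Releases a lock previously acquired in exclusive mode.
    pub fn ReleaseSRWLockExclusive(srwlock: PSRWLOCK);
}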
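Point 2 of the new comment is the behavioral change worth calling out: re-locking from the owning thread used to succeed, since EnterCriticalSection is re-entrant, and now blocks forever, matching the default pthread mutex behavior on Unix. A minimal sketch of what that means for a caller, using the diff's Mutex as if it were in scope (the function name is illustrative):

// Illustration only: recursive acquisition with the SRWLock-backed Mutex.
unsafe fn relock_same_thread(m: &Mutex) {
    m.lock();   // first exclusive acquisition succeeds
    m.lock();   // old CriticalSection version: succeeds (re-entrant);
                // new SRWLock version: the thread blocks on itself -- deadlock
    m.unlock(); // never reached under the new implementation
    m.unlock();
}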
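The other practical win is that MUTEX_INIT is now a true constant (an all-zero SRWLOCK), so the deleted get()/init_lock()/free_lock() machinery, which lazily heap-allocated a CRITICAL_SECTION behind an atomic compare-and-swap, needs no replacement: there is nothing to allocate up front and, since Win32 SRW locks require no explicit destruction, nothing to tear down. A usage sketch under those assumptions (the static names and the function are illustrative, not part of the diff):

// Sketch only: a statically initialized lock with no runtime setup.
static LOCK: Mutex = MUTEX_INIT;
static mut COUNTER: u64 = 0;

unsafe fn next_id() -> u64 {
    LOCK.lock();    // acquire exclusive ownership
    COUNTER += 1;   // critical section
    let id = COUNTER;
    LOCK.unlock();  // release; SRW locks need no cleanup when the static dies
    id
}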