Commit 3507254

use more thread-safe structures in sync
1 parent 53ccfb2

1 file changed
compiler/rustc_data_structures/src/sync.rs

Lines changed: 39 additions & 166 deletions
@@ -100,83 +100,6 @@ cfg_if! {
         unsafe impl<T> Send for T {}
         unsafe impl<T> Sync for T {}

-        use std::ops::Add;
-
-        /// This is a single threaded variant of `AtomicU64`, `AtomicUsize`, etc.
-        /// It has explicit ordering arguments and is only intended for use with
-        /// the native atomic types.
-        /// You should use this type through the `AtomicU64`, `AtomicUsize`, etc, type aliases
-        /// as it's not intended to be used separately.
-        #[derive(Debug, Default)]
-        pub struct Atomic<T: Copy>(Cell<T>);
-
-        impl<T: Copy> Atomic<T> {
-            #[inline]
-            pub fn new(v: T) -> Self {
-                Atomic(Cell::new(v))
-            }
-
-            #[inline]
-            pub fn into_inner(self) -> T {
-                self.0.into_inner()
-            }
-
-            #[inline]
-            pub fn load(&self, _: Ordering) -> T {
-                self.0.get()
-            }
-
-            #[inline]
-            pub fn store(&self, val: T, _: Ordering) {
-                self.0.set(val)
-            }
-
-            #[inline]
-            pub fn swap(&self, val: T, _: Ordering) -> T {
-                self.0.replace(val)
-            }
-        }
-
-        impl Atomic<bool> {
-            pub fn fetch_or(&self, val: bool, _: Ordering) -> bool {
-                let result = self.0.get() | val;
-                self.0.set(val);
-                result
-            }
-        }
-
-        impl<T: Copy + PartialEq> Atomic<T> {
-            #[inline]
-            pub fn compare_exchange(&self,
-                                    current: T,
-                                    new: T,
-                                    _: Ordering,
-                                    _: Ordering)
-                                    -> Result<T, T> {
-                let read = self.0.get();
-                if read == current {
-                    self.0.set(new);
-                    Ok(read)
-                } else {
-                    Err(read)
-                }
-            }
-        }
-
-        impl<T: Add<Output=T> + Copy> Atomic<T> {
-            #[inline]
-            pub fn fetch_add(&self, val: T, _: Ordering) -> T {
-                let old = self.0.get();
-                self.0.set(old + val);
-                old
-            }
-        }
-
-        pub type AtomicUsize = Atomic<usize>;
-        pub type AtomicBool = Atomic<bool>;
-        pub type AtomicU32 = Atomic<u32>;
-        pub type AtomicU64 = Atomic<u64>;
-
         pub fn join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB)
             where A: FnOnce() -> RA,
                   B: FnOnce() -> RB
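The hunk above drops the single-threaded `Atomic<T>` shim, a `Cell` wrapper whose methods accepted but ignored their `Ordering` arguments. The `AtomicUsize`, `AtomicBool`, `AtomicU32` and `AtomicU64` aliases now point at the real `std::sync::atomic` types re-exported later in this commit. A minimal, hedged sketch of what callers of the aliases get after the change (the counter is illustrative, not compiler code):

    use std::sync::atomic::{AtomicUsize, Ordering};

    fn main() {
        // The real atomic honors its ordering argument instead of ignoring it.
        let counter = AtomicUsize::new(0);
        let old = counter.fetch_add(1, Ordering::Relaxed);
        assert_eq!(old, 0);
        assert_eq!(counter.load(Ordering::Relaxed), 1);
    }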
@@ -245,90 +168,10 @@ cfg_if! {
         }

         pub type MetadataRef = OwnedSlice;
-
-        pub use std::cell::OnceCell;
-
-        pub type MTLockRef<'a, T> = &'a mut MTLock<T>;
-
-        #[derive(Debug, Default)]
-        pub struct MTLock<T>(T);
-
-        impl<T> MTLock<T> {
-            #[inline(always)]
-            pub fn new(inner: T) -> Self {
-                MTLock(inner)
-            }
-
-            #[inline(always)]
-            pub fn into_inner(self) -> T {
-                self.0
-            }
-
-            #[inline(always)]
-            pub fn get_mut(&mut self) -> &mut T {
-                &mut self.0
-            }
-
-            #[inline(always)]
-            pub fn lock(&self) -> &T {
-                &self.0
-            }
-
-            #[inline(always)]
-            pub fn lock_mut(&mut self) -> &mut T {
-                &mut self.0
-            }
-        }
-
-        // FIXME: Probably a bad idea (in the threaded case)
-        impl<T: Clone> Clone for MTLock<T> {
-            #[inline]
-            fn clone(&self) -> Self {
-                MTLock(self.0.clone())
-            }
-        }
     } else {
         pub use std::marker::Send as Send;
         pub use std::marker::Sync as Sync;

-        pub use std::sync::OnceLock as OnceCell;
-
-        pub use std::sync::atomic::{AtomicBool, AtomicUsize, AtomicU32, AtomicU64};
-
-        pub type MTLockRef<'a, T> = &'a MTLock<T>;
-
-        #[derive(Debug, Default)]
-        pub struct MTLock<T>(Lock<T>);
-
-        impl<T> MTLock<T> {
-            #[inline(always)]
-            pub fn new(inner: T) -> Self {
-                MTLock(Lock::new(inner))
-            }
-
-            #[inline(always)]
-            pub fn into_inner(self) -> T {
-                self.0.into_inner()
-            }
-
-            #[inline(always)]
-            pub fn get_mut(&mut self) -> &mut T {
-                self.0.get_mut()
-            }
-
-            #[inline(always)]
-            pub fn lock(&self) -> LockGuard<'_, T> {
-                self.0.lock()
-            }
-
-            #[inline(always)]
-            pub fn lock_mut(&self) -> LockGuard<'_, T> {
-                self.lock()
-            }
-        }
-
-        use std::thread;
-
         #[inline]
         pub fn join<A, B, RA: DynSend, RB: DynSend>(oper_a: A, oper_b: B) -> (RA, RB)
         where
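This hunk removes the branch-specific `OnceCell` re-exports (std's `cell::OnceCell` in the single-threaded case, `sync::OnceLock` in the parallel case) along with both divergent `MTLock` definitions; unified replacements reappear in the next hunk. A small standalone sketch, assuming the `OnceCell` alias now always means `std::sync::OnceLock`:

    use std::sync::OnceLock as OnceCell;

    static CACHE: OnceCell<String> = OnceCell::new();

    fn main() {
        // Initialization is race-safe: the first writer wins, later reads
        // observe the same value, and a second set() is rejected.
        let value = CACHE.get_or_init(|| "computed once".to_string());
        assert_eq!(value, "computed once");
        assert!(CACHE.set("again".to_string()).is_err());
    }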
@@ -496,13 +339,51 @@ cfg_if! {

         pub use std::sync::Arc as Lrc;

+        use std::thread;
+
         use parking_lot::RwLock as InnerRwLock;

         pub use parking_lot::MappedRwLockReadGuard as MappedReadGuard;
         pub use parking_lot::MappedRwLockWriteGuard as MappedWriteGuard;
         pub use parking_lot::RwLockReadGuard as ReadGuard;
         pub use parking_lot::RwLockWriteGuard as WriteGuard;

+        pub use std::sync::OnceLock as OnceCell;
+
+        pub use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, AtomicUsize};
+
+        pub type MTRef<'a, T> = &'a T;
+
+        #[derive(Debug, Default)]
+        pub struct MTLock<T>(Lock<T>);
+
+        impl<T> MTLock<T> {
+            #[inline(always)]
+            pub fn new(inner: T) -> Self {
+                MTLock(Lock::new(inner))
+            }
+
+            #[inline(always)]
+            pub fn into_inner(self) -> T {
+                self.0.into_inner()
+            }
+
+            #[inline(always)]
+            pub fn get_mut(&mut self) -> &mut T {
+                self.0.get_mut()
+            }
+
+            #[inline(always)]
+            pub fn lock(&self) -> LockGuard<'_, T> {
+                self.0.lock()
+            }
+
+            #[inline(always)]
+            pub fn lock_mut(&self) -> LockGuard<'_, T> {
+                self.lock()
+            }
+        }
+
         /// This makes locks panic if they are already held.
         /// It is only useful when you are running in a single thread
         const ERROR_CHECKING: bool = false;
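The surviving `MTLock` is always backed by the crate's `Lock`, so `lock()` hands out a real guard in every configuration instead of a plain `&T`. A rough standalone approximation of that behavior, using `std::sync::Mutex` as a stand-in for the crate-internal `Lock` type (the stand-in is an assumption made so the sketch compiles on its own):

    use std::sync::{Mutex, MutexGuard};

    // Approximation of MTLock<T>(Lock<T>) with a std Mutex as the backing lock.
    #[derive(Debug, Default)]
    pub struct MTLock<T>(Mutex<T>);

    impl<T> MTLock<T> {
        pub fn new(inner: T) -> Self {
            MTLock(Mutex::new(inner))
        }

        pub fn lock(&self) -> MutexGuard<'_, T> {
            self.0.lock().unwrap()
        }
    }

    fn main() {
        let shared = MTLock::new(vec![1, 2, 3]);
        shared.lock().push(4); // guard is dropped at the end of the statement
        assert_eq!(shared.lock().len(), 4);
    }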
@@ -843,30 +724,22 @@ unsafe impl<T: Send> std::marker::Sync for WorkerLocal<T> {}
 /// It will panic if it is used on multiple threads.
 #[derive(Debug)]
 pub struct OneThread<T> {
-    #[cfg(parallel_compiler)]
     thread: thread::ThreadId,
     inner: T,
 }

-#[cfg(parallel_compiler)]
 unsafe impl<T> std::marker::Sync for OneThread<T> {}
-#[cfg(parallel_compiler)]
 unsafe impl<T> std::marker::Send for OneThread<T> {}

 impl<T> OneThread<T> {
     #[inline(always)]
     fn check(&self) {
-        #[cfg(parallel_compiler)]
         assert_eq!(thread::current().id(), self.thread);
     }

     #[inline(always)]
     pub fn new(inner: T) -> Self {
-        OneThread {
-            #[cfg(parallel_compiler)]
-            thread: thread::current().id(),
-            inner,
-        }
+        OneThread { thread: thread::current().id(), inner }
     }

     #[inline(always)]
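With the `#[cfg(parallel_compiler)]` guards gone, `OneThread` records the creating thread's id unconditionally and `check()` asserts on every access, not just in parallel builds. A standalone re-sketch of the pattern (the `get` accessor is added here for illustration and is not part of the diff):

    use std::thread;

    // Re-sketch: remember the creating thread and assert every access is on it.
    #[derive(Debug)]
    pub struct OneThread<T> {
        thread: thread::ThreadId,
        inner: T,
    }

    impl<T> OneThread<T> {
        pub fn new(inner: T) -> Self {
            OneThread { thread: thread::current().id(), inner }
        }

        pub fn get(&self) -> &T {
            // Panics if called from any thread other than the creating one.
            assert_eq!(thread::current().id(), self.thread);
            &self.inner
        }
    }

    fn main() {
        let value = OneThread::new(42);
        assert_eq!(*value.get(), 42); // same thread, so the assertion holds
    }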
