From ab391584394800509c27802feca5ea069003622c Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 17 Feb 2014 01:37:26 -0800 Subject: [PATCH 1/4] std: Make atomics immutable. #11583 In Rust, the strongest guarantee that `&mut` provides is that the memory pointed to is *not aliased*, whereas `&`'s guarantees are much weaker: that the value can be aliased, and may be mutated under proper precautions (interior mutability). Our atomics though use `&mut` for mutation even while creating multiple aliases, so this changes them to use 'interior mutability', mutating through immutable references. --- src/librustc/middle/typeck/check/mod.rs | 6 +- src/libstd/intrinsics.rs | 88 ++++ src/libstd/sync/atomics.rs | 201 ++++---- src/libstd/sync/atomics_stage0.rs | 625 ++++++++++++++++++++++++ src/libstd/sync/mod.rs | 4 + src/test/auxiliary/cci_intrinsic.rs | 24 +- src/test/run-pass/intrinsic-atomics.rs | 28 +- 7 files changed, 855 insertions(+), 121 deletions(-) create mode 100644 src/libstd/sync/atomics_stage0.rs diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs index 9b8ce481de958..45ecb17a08131 100644 --- a/src/librustc/middle/typeck/check/mod.rs +++ b/src/librustc/middle/typeck/check/mod.rs @@ -3956,7 +3956,7 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: &ast::ForeignItem) { //We only care about the operation here match split[1] { - "cxchg" => (1, ~[ty::mk_mut_rptr(tcx, + "cxchg" => (1, ~[ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), param(ccx, 0)), param(ccx, 0), @@ -3970,7 +3970,7 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: &ast::ForeignItem) { param(ccx, 0)), "store" => (1, ~[ - ty::mk_mut_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), + ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), param(ccx, 0)), param(ccx, 0) ], @@ -3978,7 +3978,7 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: &ast::ForeignItem) { "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | 
"umax" | "umin" => { - (1, ~[ty::mk_mut_rptr(tcx, + (1, ~[ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), param(ccx, 0)), param(ccx, 0) ], param(ccx, 0)) diff --git a/src/libstd/intrinsics.rs b/src/libstd/intrinsics.rs index 7c2db7688fd0c..cf95d1c34444f 100644 --- a/src/libstd/intrinsics.rs +++ b/src/libstd/intrinsics.rs @@ -164,6 +164,7 @@ pub trait TyVisitor { fn visit_self(&mut self) -> bool; } +#[cfg(stage0)] extern "rust-intrinsic" { pub fn atomic_cxchg(dst: &mut T, old: T, src: T) -> T; pub fn atomic_cxchg_acq(dst: &mut T, old: T, src: T) -> T; @@ -244,6 +245,93 @@ extern "rust-intrinsic" { pub fn atomic_umax_rel(dst: &mut T, src: T) -> T; pub fn atomic_umax_acqrel(dst: &mut T, src: T) -> T; pub fn atomic_umax_relaxed(dst: &mut T, src: T) -> T; +} + +#[cfg(not(stage0))] +extern "rust-intrinsic" { + + pub fn atomic_cxchg(dst: &T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: &T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: &T, old: T, src: T) -> T; + pub fn atomic_cxchg_acqrel(dst: &T, old: T, src: T) -> T; + pub fn atomic_cxchg_relaxed(dst: &T, old: T, src: T) -> T; + + pub fn atomic_load(src: &T) -> T; + pub fn atomic_load_acq(src: &T) -> T; + pub fn atomic_load_relaxed(src: &T) -> T; + + pub fn atomic_store(dst: &T, val: T); + pub fn atomic_store_rel(dst: &T, val: T); + pub fn atomic_store_relaxed(dst: &T, val: T); + + pub fn atomic_xchg(dst: &T, src: T) -> T; + pub fn atomic_xchg_acq(dst: &T, src: T) -> T; + pub fn atomic_xchg_rel(dst: &T, src: T) -> T; + pub fn atomic_xchg_acqrel(dst: &T, src: T) -> T; + pub fn atomic_xchg_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_xadd(dst: &T, src: T) -> T; + pub fn atomic_xadd_acq(dst: &T, src: T) -> T; + pub fn atomic_xadd_rel(dst: &T, src: T) -> T; + pub fn atomic_xadd_acqrel(dst: &T, src: T) -> T; + pub fn atomic_xadd_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_xsub(dst: &T, src: T) -> T; + pub fn atomic_xsub_acq(dst: &T, src: T) -> T; + pub fn atomic_xsub_rel(dst: &T, src: T) 
-> T; + pub fn atomic_xsub_acqrel(dst: &T, src: T) -> T; + pub fn atomic_xsub_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_and(dst: &T, src: T) -> T; + pub fn atomic_and_acq(dst: &T, src: T) -> T; + pub fn atomic_and_rel(dst: &T, src: T) -> T; + pub fn atomic_and_acqrel(dst: &T, src: T) -> T; + pub fn atomic_and_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_nand(dst: &T, src: T) -> T; + pub fn atomic_nand_acq(dst: &T, src: T) -> T; + pub fn atomic_nand_rel(dst: &T, src: T) -> T; + pub fn atomic_nand_acqrel(dst: &T, src: T) -> T; + pub fn atomic_nand_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_or(dst: &T, src: T) -> T; + pub fn atomic_or_acq(dst: &T, src: T) -> T; + pub fn atomic_or_rel(dst: &T, src: T) -> T; + pub fn atomic_or_acqrel(dst: &T, src: T) -> T; + pub fn atomic_or_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_xor(dst: &T, src: T) -> T; + pub fn atomic_xor_acq(dst: &T, src: T) -> T; + pub fn atomic_xor_rel(dst: &T, src: T) -> T; + pub fn atomic_xor_acqrel(dst: &T, src: T) -> T; + pub fn atomic_xor_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_max(dst: &T, src: T) -> T; + pub fn atomic_max_acq(dst: &T, src: T) -> T; + pub fn atomic_max_rel(dst: &T, src: T) -> T; + pub fn atomic_max_acqrel(dst: &T, src: T) -> T; + pub fn atomic_max_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_min(dst: &T, src: T) -> T; + pub fn atomic_min_acq(dst: &T, src: T) -> T; + pub fn atomic_min_rel(dst: &T, src: T) -> T; + pub fn atomic_min_acqrel(dst: &T, src: T) -> T; + pub fn atomic_min_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_umin(dst: &T, src: T) -> T; + pub fn atomic_umin_acq(dst: &T, src: T) -> T; + pub fn atomic_umin_rel(dst: &T, src: T) -> T; + pub fn atomic_umin_acqrel(dst: &T, src: T) -> T; + pub fn atomic_umin_relaxed(dst: &T, src: T) -> T; + + pub fn atomic_umax(dst: &T, src: T) -> T; + pub fn atomic_umax_acq(dst: &T, src: T) -> T; + pub fn atomic_umax_rel(dst: &T, src: T) -> T; + pub fn atomic_umax_acqrel(dst: &T, src: T) -> T; + pub fn 
atomic_umax_relaxed(dst: &T, src: T) -> T; +} + +extern "rust-intrinsic" { pub fn atomic_fence(); pub fn atomic_fence_acq(); diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs index b4d465c0397de..ade54780484a6 100644 --- a/src/libstd/sync/atomics.rs +++ b/src/libstd/sync/atomics.rs @@ -32,7 +32,8 @@ use ops::Drop; */ pub struct AtomicFlag { priv v: int, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -40,7 +41,8 @@ pub struct AtomicFlag { */ pub struct AtomicBool { priv v: uint, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -48,7 +50,8 @@ pub struct AtomicBool { */ pub struct AtomicInt { priv v: int, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -56,7 +59,8 @@ pub struct AtomicInt { */ pub struct AtomicUint { priv v: uint, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -65,7 +69,8 @@ pub struct AtomicUint { */ pub struct AtomicU64 { priv v: u64, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -73,7 +78,8 @@ pub struct AtomicU64 { */ pub struct AtomicPtr { priv p: uint, - priv nopod: marker::NoPod + priv nopod: marker::NoPod, + priv nofreeze: marker::NoFreeze } /** @@ -82,6 +88,7 @@ pub struct AtomicPtr { #[unsafe_no_drop_flag] pub struct AtomicOption { priv p: uint, + priv nofreeze: marker::NoFreeze } pub enum Ordering { @@ -92,24 +99,34 @@ pub enum Ordering { SeqCst } -pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { v: 0, nopod: marker::NoPod }; -pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: 0, nopod: marker::NoPod }; -pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: 0, nopod: marker::NoPod }; -pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: 0, nopod: marker::NoPod }; -pub static INIT_ATOMIC_U64 : AtomicU64 = AtomicU64 { v: 0, nopod: marker::NoPod 
}; +pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { + v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze +}; +pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { + v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze +}; +pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { + v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze +}; +pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { + v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze +}; +pub static INIT_ATOMIC_U64 : AtomicU64 = AtomicU64 { + v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze +}; impl AtomicFlag { pub fn new() -> AtomicFlag { - AtomicFlag { v: 0, nopod: marker::NoPod} + AtomicFlag { v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } /** * Clears the atomic flag */ #[inline] - pub fn clear(&mut self, order: Ordering) { - unsafe {atomic_store(&mut self.v, 0, order)} + pub fn clear(&self, order: Ordering) { + unsafe {atomic_store(&self.v, 0, order)} } /** @@ -117,14 +134,14 @@ impl AtomicFlag { * flag. 
*/ #[inline] - pub fn test_and_set(&mut self, order: Ordering) -> bool { - unsafe { atomic_compare_and_swap(&mut self.v, 0, 1, order) > 0 } + pub fn test_and_set(&self, order: Ordering) -> bool { + unsafe { atomic_compare_and_swap(&self.v, 0, 1, order) > 0 } } } impl AtomicBool { pub fn new(v: bool) -> AtomicBool { - AtomicBool { v: if v { 1 } else { 0 }, nopod: marker::NoPod } + AtomicBool { v: if v { 1 } else { 0 }, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } #[inline] @@ -133,63 +150,63 @@ impl AtomicBool { } #[inline] - pub fn store(&mut self, val: bool, order: Ordering) { + pub fn store(&self, val: bool, order: Ordering) { let val = if val { 1 } else { 0 }; - unsafe { atomic_store(&mut self.v, val, order); } + unsafe { atomic_store(&self.v, val, order); } } #[inline] - pub fn swap(&mut self, val: bool, order: Ordering) -> bool { + pub fn swap(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_swap(&mut self.v, val, order) > 0 } + unsafe { atomic_swap(&self.v, val, order) > 0 } } #[inline] - pub fn compare_and_swap(&mut self, old: bool, new: bool, order: Ordering) -> bool { + pub fn compare_and_swap(&self, old: bool, new: bool, order: Ordering) -> bool { let old = if old { 1 } else { 0 }; let new = if new { 1 } else { 0 }; - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) > 0 } + unsafe { atomic_compare_and_swap(&self.v, old, new, order) > 0 } } /// Returns the old value #[inline] - pub fn fetch_and(&mut self, val: bool, order: Ordering) -> bool { + pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_and(&mut self.v, val, order) > 0 } + unsafe { atomic_and(&self.v, val, order) > 0 } } /// Returns the old value #[inline] - pub fn fetch_nand(&mut self, val: bool, order: Ordering) -> bool { + pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_nand(&mut self.v, val, 
order) > 0 } + unsafe { atomic_nand(&self.v, val, order) > 0 } } /// Returns the old value #[inline] - pub fn fetch_or(&mut self, val: bool, order: Ordering) -> bool { + pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_or(&mut self.v, val, order) > 0 } + unsafe { atomic_or(&self.v, val, order) > 0 } } /// Returns the old value #[inline] - pub fn fetch_xor(&mut self, val: bool, order: Ordering) -> bool { + pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_xor(&mut self.v, val, order) > 0 } + unsafe { atomic_xor(&self.v, val, order) > 0 } } } impl AtomicInt { pub fn new(v: int) -> AtomicInt { - AtomicInt { v:v, nopod: marker::NoPod} + AtomicInt { v:v, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } #[inline] @@ -198,36 +215,36 @@ impl AtomicInt { } #[inline] - pub fn store(&mut self, val: int, order: Ordering) { - unsafe { atomic_store(&mut self.v, val, order); } + pub fn store(&self, val: int, order: Ordering) { + unsafe { atomic_store(&self.v, val, order); } } #[inline] - pub fn swap(&mut self, val: int, order: Ordering) -> int { - unsafe { atomic_swap(&mut self.v, val, order) } + pub fn swap(&self, val: int, order: Ordering) -> int { + unsafe { atomic_swap(&self.v, val, order) } } #[inline] - pub fn compare_and_swap(&mut self, old: int, new: int, order: Ordering) -> int { - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int { + unsafe { atomic_compare_and_swap(&self.v, old, new, order) } } /// Returns the old value (like __sync_fetch_and_add). #[inline] - pub fn fetch_add(&mut self, val: int, order: Ordering) -> int { - unsafe { atomic_add(&mut self.v, val, order) } + pub fn fetch_add(&self, val: int, order: Ordering) -> int { + unsafe { atomic_add(&self.v, val, order) } } /// Returns the old value (like __sync_fetch_and_sub). 
#[inline] - pub fn fetch_sub(&mut self, val: int, order: Ordering) -> int { - unsafe { atomic_sub(&mut self.v, val, order) } + pub fn fetch_sub(&self, val: int, order: Ordering) -> int { + unsafe { atomic_sub(&self.v, val, order) } } } impl AtomicU64 { pub fn new(v: u64) -> AtomicU64 { - AtomicU64 { v:v, nopod: marker::NoPod } + AtomicU64 { v:v, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } #[inline] @@ -236,34 +253,34 @@ impl AtomicU64 { } #[inline] - pub fn store(&mut self, val: u64, order: Ordering) { - unsafe { atomic_store(&mut self.v, val, order); } + pub fn store(&self, val: u64, order: Ordering) { + unsafe { atomic_store(&self.v, val, order); } } #[inline] - pub fn swap(&mut self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_swap(&mut self.v, val, order) } + pub fn swap(&self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_swap(&self.v, val, order) } } #[inline] - pub fn compare_and_swap(&mut self, old: u64, new: u64, order: Ordering) -> u64 { - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + pub fn compare_and_swap(&self, old: u64, new: u64, order: Ordering) -> u64 { + unsafe { atomic_compare_and_swap(&self.v, old, new, order) } } #[inline] - pub fn fetch_add(&mut self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_add(&mut self.v, val, order) } + pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_add(&self.v, val, order) } } #[inline] - pub fn fetch_sub(&mut self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_sub(&mut self.v, val, order) } + pub fn fetch_sub(&self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_sub(&self.v, val, order) } } } impl AtomicUint { pub fn new(v: uint) -> AtomicUint { - AtomicUint { v:v, nopod: marker::NoPod } + AtomicUint { v:v, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } #[inline] @@ -272,36 +289,36 @@ impl AtomicUint { } #[inline] - pub fn store(&mut self, val: uint, order: Ordering) { - unsafe { atomic_store(&mut self.v, val, 
order); } + pub fn store(&self, val: uint, order: Ordering) { + unsafe { atomic_store(&self.v, val, order); } } #[inline] - pub fn swap(&mut self, val: uint, order: Ordering) -> uint { - unsafe { atomic_swap(&mut self.v, val, order) } + pub fn swap(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_swap(&self.v, val, order) } } #[inline] - pub fn compare_and_swap(&mut self, old: uint, new: uint, order: Ordering) -> uint { - unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint { + unsafe { atomic_compare_and_swap(&self.v, old, new, order) } } /// Returns the old value (like __sync_fetch_and_add). #[inline] - pub fn fetch_add(&mut self, val: uint, order: Ordering) -> uint { - unsafe { atomic_add(&mut self.v, val, order) } + pub fn fetch_add(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_add(&self.v, val, order) } } /// Returns the old value (like __sync_fetch_and_sub).. #[inline] - pub fn fetch_sub(&mut self, val: uint, order: Ordering) -> uint { - unsafe { atomic_sub(&mut self.v, val, order) } + pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint { + unsafe { atomic_sub(&self.v, val, order) } } } impl AtomicPtr { pub fn new(p: *mut T) -> AtomicPtr { - AtomicPtr { p: p as uint, nopod: marker::NoPod } + AtomicPtr { p: p as uint, nopod: marker::NoPod, nofreeze: marker::NoFreeze } } #[inline] @@ -312,19 +329,19 @@ impl AtomicPtr { } #[inline] - pub fn store(&mut self, ptr: *mut T, order: Ordering) { - unsafe { atomic_store(&mut self.p, ptr as uint, order); } + pub fn store(&self, ptr: *mut T, order: Ordering) { + unsafe { atomic_store(&self.p, ptr as uint, order); } } #[inline] - pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { - unsafe { atomic_swap(&mut self.p, ptr as uint, order) as *mut T } + pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { + unsafe { atomic_swap(&self.p, ptr as uint, order) as *mut T } } 
#[inline] - pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { + pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { unsafe { - atomic_compare_and_swap(&mut self.p, old as uint, + atomic_compare_and_swap(&self.p, old as uint, new as uint, order) as *mut T } } @@ -332,17 +349,17 @@ impl AtomicPtr { impl AtomicOption { pub fn new(p: ~T) -> AtomicOption { - unsafe { AtomicOption { p: cast::transmute(p) } } + unsafe { AtomicOption { p: cast::transmute(p), nofreeze: marker::NoFreeze } } } - pub fn empty() -> AtomicOption { AtomicOption { p: 0 } } + pub fn empty() -> AtomicOption { AtomicOption { p: 0, nofreeze: marker::NoFreeze } } #[inline] - pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> { + pub fn swap(&self, val: ~T, order: Ordering) -> Option<~T> { unsafe { let val = cast::transmute(val); - let p = atomic_swap(&mut self.p, val, order); + let p = atomic_swap(&self.p, val, order); if p as uint == 0 { None } else { @@ -352,7 +369,7 @@ impl AtomicOption { } #[inline] - pub fn take(&mut self, order: Ordering) -> Option<~T> { + pub fn take(&self, order: Ordering) -> Option<~T> { unsafe { self.swap(cast::transmute(0), order) } } @@ -360,11 +377,11 @@ impl AtomicOption { /// if so. If the option was already 'Some', returns 'Some' of the rejected /// value. #[inline] - pub fn fill(&mut self, val: ~T, order: Ordering) -> Option<~T> { + pub fn fill(&self, val: ~T, order: Ordering) -> Option<~T> { unsafe { let val = cast::transmute(val); let expected = cast::transmute(0); - let oldval = atomic_compare_and_swap(&mut self.p, expected, val, order); + let oldval = atomic_compare_and_swap(&self.p, expected, val, order); if oldval == expected { None } else { @@ -376,7 +393,7 @@ impl AtomicOption { /// Be careful: The caller must have some external method of ensuring the /// result does not get invalidated by another task after this returns. 
#[inline] - pub fn is_empty(&mut self, order: Ordering) -> bool { + pub fn is_empty(&self, order: Ordering) -> bool { unsafe { atomic_load(&self.p, order) as uint == 0 } } } @@ -389,7 +406,7 @@ impl Drop for AtomicOption { } #[inline] -pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { +pub unsafe fn atomic_store(dst: &T, val: T, order:Ordering) { match order { Release => intrinsics::atomic_store_rel(dst, val), Relaxed => intrinsics::atomic_store_relaxed(dst, val), @@ -407,7 +424,7 @@ pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { } #[inline] -pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_swap(dst: &T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xchg_acq(dst, val), Release => intrinsics::atomic_xchg_rel(dst, val), @@ -419,7 +436,7 @@ pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { /// Returns the old value (like __sync_fetch_and_add). #[inline] -pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_add(dst: &T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xadd_acq(dst, val), Release => intrinsics::atomic_xadd_rel(dst, val), @@ -431,7 +448,7 @@ pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { /// Returns the old value (like __sync_fetch_and_sub). 
#[inline] -pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_sub(dst: &T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xsub_acq(dst, val), Release => intrinsics::atomic_xsub_rel(dst, val), @@ -442,7 +459,7 @@ pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { } #[inline] -pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Ordering) -> T { +pub unsafe fn atomic_compare_and_swap(dst:&T, old:T, new:T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), Release => intrinsics::atomic_cxchg_rel(dst, old, new), @@ -453,7 +470,7 @@ pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Orderi } #[inline] -pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_and(dst: &T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_and_acq(dst, val), Release => intrinsics::atomic_and_rel(dst, val), @@ -464,7 +481,7 @@ pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { } #[inline] -pub unsafe fn atomic_nand(dst: &mut T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_nand(dst: &T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_nand_acq(dst, val), Release => intrinsics::atomic_nand_rel(dst, val), @@ -476,7 +493,7 @@ pub unsafe fn atomic_nand(dst: &mut T, val: T, order: Ordering) -> T { #[inline] -pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_or(dst: &T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_or_acq(dst, val), Release => intrinsics::atomic_or_rel(dst, val), @@ -488,7 +505,7 @@ pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { #[inline] -pub unsafe fn atomic_xor(dst: &mut T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_xor(dst: &T, val: T, order: Ordering) -> T { match order { Acquire => 
intrinsics::atomic_xor_acq(dst, val), Release => intrinsics::atomic_xor_rel(dst, val), @@ -610,16 +627,16 @@ mod test { fn different_sizes() { unsafe { let mut slot = 0u16; - assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0); + assert_eq!(super::atomic_swap(&slot, 1, SeqCst), 0); let mut slot = 0u8; - assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0); + assert_eq!(super::atomic_compare_and_swap(&slot, 1, 2, SeqCst), 0); let mut slot = 0u32; - assert_eq!(super::atomic_load(&mut slot, SeqCst), 0); + assert_eq!(super::atomic_load(&slot, SeqCst), 0); let mut slot = 0u64; - super::atomic_store(&mut slot, 2, SeqCst); + super::atomic_store(&slot, 2, SeqCst); } } } diff --git a/src/libstd/sync/atomics_stage0.rs b/src/libstd/sync/atomics_stage0.rs new file mode 100644 index 0000000000000..b4d465c0397de --- /dev/null +++ b/src/libstd/sync/atomics_stage0.rs @@ -0,0 +1,625 @@ +// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +/*! + * Atomic types + * + * Basic atomic types supporting atomic operations. Each method takes an + * `Ordering` which represents the strength of the memory barrier for that + * operation. These orderings are the same as C++11 atomic orderings + * [http://gcc.gnu.org/wiki/Atomic/GCCMM/AtomicSync] + * + * All atomic types are a single word in size. + */ + +#[allow(missing_doc)]; + +use intrinsics; +use cast; +use std::kinds::marker; +use option::{Option,Some,None}; +use ops::Drop; + +/** + * A simple atomic flag, that can be set and cleared. The most basic atomic type. + */ +pub struct AtomicFlag { + priv v: int, + priv nopod: marker::NoPod +} + +/** + * An atomic boolean type. 
+ */ +pub struct AtomicBool { + priv v: uint, + priv nopod: marker::NoPod +} + +/** + * A signed atomic integer type, supporting basic atomic arithmetic operations + */ +pub struct AtomicInt { + priv v: int, + priv nopod: marker::NoPod +} + +/** + * An unsigned atomic integer type, supporting basic atomic arithmetic operations + */ +pub struct AtomicUint { + priv v: uint, + priv nopod: marker::NoPod +} + +/** + * An unsigned atomic integer type that is forced to be 64-bits. This does not + * support all operations. + */ +pub struct AtomicU64 { + priv v: u64, + priv nopod: marker::NoPod +} + +/** + * An unsafe atomic pointer. Only supports basic atomic operations + */ +pub struct AtomicPtr { + priv p: uint, + priv nopod: marker::NoPod +} + +/** + * An owned atomic pointer. Ensures that only a single reference to the data is held at any time. + */ +#[unsafe_no_drop_flag] +pub struct AtomicOption { + priv p: uint, +} + +pub enum Ordering { + Relaxed, + Release, + Acquire, + AcqRel, + SeqCst +} + +pub static INIT_ATOMIC_FLAG : AtomicFlag = AtomicFlag { v: 0, nopod: marker::NoPod }; +pub static INIT_ATOMIC_BOOL : AtomicBool = AtomicBool { v: 0, nopod: marker::NoPod }; +pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { v: 0, nopod: marker::NoPod }; +pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: 0, nopod: marker::NoPod }; +pub static INIT_ATOMIC_U64 : AtomicU64 = AtomicU64 { v: 0, nopod: marker::NoPod }; + +impl AtomicFlag { + + pub fn new() -> AtomicFlag { + AtomicFlag { v: 0, nopod: marker::NoPod} + } + + /** + * Clears the atomic flag + */ + #[inline] + pub fn clear(&mut self, order: Ordering) { + unsafe {atomic_store(&mut self.v, 0, order)} + } + + /** + * Sets the flag if it was previously unset, returns the previous value of the + * flag. 
+ */ + #[inline] + pub fn test_and_set(&mut self, order: Ordering) -> bool { + unsafe { atomic_compare_and_swap(&mut self.v, 0, 1, order) > 0 } + } +} + +impl AtomicBool { + pub fn new(v: bool) -> AtomicBool { + AtomicBool { v: if v { 1 } else { 0 }, nopod: marker::NoPod } + } + + #[inline] + pub fn load(&self, order: Ordering) -> bool { + unsafe { atomic_load(&self.v, order) > 0 } + } + + #[inline] + pub fn store(&mut self, val: bool, order: Ordering) { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_swap(&mut self.v, val, order) > 0 } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: bool, new: bool, order: Ordering) -> bool { + let old = if old { 1 } else { 0 }; + let new = if new { 1 } else { 0 }; + + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_and(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_and(&mut self.v, val, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_nand(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_nand(&mut self.v, val, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_or(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_or(&mut self.v, val, order) > 0 } + } + + /// Returns the old value + #[inline] + pub fn fetch_xor(&mut self, val: bool, order: Ordering) -> bool { + let val = if val { 1 } else { 0 }; + + unsafe { atomic_xor(&mut self.v, val, order) > 0 } + } +} + +impl AtomicInt { + pub fn new(v: int) -> AtomicInt { + AtomicInt { v:v, nopod: marker::NoPod} + } + + #[inline] + pub fn load(&self, order: Ordering) -> int { + unsafe { atomic_load(&self.v, 
order) } + } + + #[inline] + pub fn store(&mut self, val: int, order: Ordering) { + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: int, order: Ordering) -> int { + unsafe { atomic_swap(&mut self.v, val, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: int, new: int, order: Ordering) -> int { + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + } + + /// Returns the old value (like __sync_fetch_and_add). + #[inline] + pub fn fetch_add(&mut self, val: int, order: Ordering) -> int { + unsafe { atomic_add(&mut self.v, val, order) } + } + + /// Returns the old value (like __sync_fetch_and_sub). + #[inline] + pub fn fetch_sub(&mut self, val: int, order: Ordering) -> int { + unsafe { atomic_sub(&mut self.v, val, order) } + } +} + +impl AtomicU64 { + pub fn new(v: u64) -> AtomicU64 { + AtomicU64 { v:v, nopod: marker::NoPod } + } + + #[inline] + pub fn load(&self, order: Ordering) -> u64 { + unsafe { atomic_load(&self.v, order) } + } + + #[inline] + pub fn store(&mut self, val: u64, order: Ordering) { + unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_swap(&mut self.v, val, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: u64, new: u64, order: Ordering) -> u64 { + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + } + + #[inline] + pub fn fetch_add(&mut self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_add(&mut self.v, val, order) } + } + + #[inline] + pub fn fetch_sub(&mut self, val: u64, order: Ordering) -> u64 { + unsafe { atomic_sub(&mut self.v, val, order) } + } +} + +impl AtomicUint { + pub fn new(v: uint) -> AtomicUint { + AtomicUint { v:v, nopod: marker::NoPod } + } + + #[inline] + pub fn load(&self, order: Ordering) -> uint { + unsafe { atomic_load(&self.v, order) } + } + + #[inline] + pub fn store(&mut self, val: uint, order: Ordering) { 
+ unsafe { atomic_store(&mut self.v, val, order); } + } + + #[inline] + pub fn swap(&mut self, val: uint, order: Ordering) -> uint { + unsafe { atomic_swap(&mut self.v, val, order) } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: uint, new: uint, order: Ordering) -> uint { + unsafe { atomic_compare_and_swap(&mut self.v, old, new, order) } + } + + /// Returns the old value (like __sync_fetch_and_add). + #[inline] + pub fn fetch_add(&mut self, val: uint, order: Ordering) -> uint { + unsafe { atomic_add(&mut self.v, val, order) } + } + + /// Returns the old value (like __sync_fetch_and_sub).. + #[inline] + pub fn fetch_sub(&mut self, val: uint, order: Ordering) -> uint { + unsafe { atomic_sub(&mut self.v, val, order) } + } +} + +impl AtomicPtr { + pub fn new(p: *mut T) -> AtomicPtr { + AtomicPtr { p: p as uint, nopod: marker::NoPod } + } + + #[inline] + pub fn load(&self, order: Ordering) -> *mut T { + unsafe { + atomic_load(&self.p, order) as *mut T + } + } + + #[inline] + pub fn store(&mut self, ptr: *mut T, order: Ordering) { + unsafe { atomic_store(&mut self.p, ptr as uint, order); } + } + + #[inline] + pub fn swap(&mut self, ptr: *mut T, order: Ordering) -> *mut T { + unsafe { atomic_swap(&mut self.p, ptr as uint, order) as *mut T } + } + + #[inline] + pub fn compare_and_swap(&mut self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { + unsafe { + atomic_compare_and_swap(&mut self.p, old as uint, + new as uint, order) as *mut T + } + } +} + +impl AtomicOption { + pub fn new(p: ~T) -> AtomicOption { + unsafe { AtomicOption { p: cast::transmute(p) } } + } + + pub fn empty() -> AtomicOption { AtomicOption { p: 0 } } + + #[inline] + pub fn swap(&mut self, val: ~T, order: Ordering) -> Option<~T> { + unsafe { + let val = cast::transmute(val); + + let p = atomic_swap(&mut self.p, val, order); + if p as uint == 0 { + None + } else { + Some(cast::transmute(p)) + } + } + } + + #[inline] + pub fn take(&mut self, order: Ordering) -> Option<~T> { + unsafe { 
self.swap(cast::transmute(0), order) } + } + + /// A compare-and-swap. Succeeds if the option is 'None' and returns 'None' + /// if so. If the option was already 'Some', returns 'Some' of the rejected + /// value. + #[inline] + pub fn fill(&mut self, val: ~T, order: Ordering) -> Option<~T> { + unsafe { + let val = cast::transmute(val); + let expected = cast::transmute(0); + let oldval = atomic_compare_and_swap(&mut self.p, expected, val, order); + if oldval == expected { + None + } else { + Some(cast::transmute(val)) + } + } + } + + /// Be careful: The caller must have some external method of ensuring the + /// result does not get invalidated by another task after this returns. + #[inline] + pub fn is_empty(&mut self, order: Ordering) -> bool { + unsafe { atomic_load(&self.p, order) as uint == 0 } + } +} + +#[unsafe_destructor] +impl Drop for AtomicOption { + fn drop(&mut self) { + let _ = self.take(SeqCst); + } +} + +#[inline] +pub unsafe fn atomic_store(dst: &mut T, val: T, order:Ordering) { + match order { + Release => intrinsics::atomic_store_rel(dst, val), + Relaxed => intrinsics::atomic_store_relaxed(dst, val), + _ => intrinsics::atomic_store(dst, val) + } +} + +#[inline] +pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { + match order { + Acquire => intrinsics::atomic_load_acq(dst), + Relaxed => intrinsics::atomic_load_relaxed(dst), + _ => intrinsics::atomic_load(dst) + } +} + +#[inline] +pub unsafe fn atomic_swap(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xchg_acq(dst, val), + Release => intrinsics::atomic_xchg_rel(dst, val), + AcqRel => intrinsics::atomic_xchg_acqrel(dst, val), + Relaxed => intrinsics::atomic_xchg_relaxed(dst, val), + _ => intrinsics::atomic_xchg(dst, val) + } +} + +/// Returns the old value (like __sync_fetch_and_add). 
+#[inline] +pub unsafe fn atomic_add(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xadd_acq(dst, val), + Release => intrinsics::atomic_xadd_rel(dst, val), + AcqRel => intrinsics::atomic_xadd_acqrel(dst, val), + Relaxed => intrinsics::atomic_xadd_relaxed(dst, val), + _ => intrinsics::atomic_xadd(dst, val) + } +} + +/// Returns the old value (like __sync_fetch_and_sub). +#[inline] +pub unsafe fn atomic_sub(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xsub_acq(dst, val), + Release => intrinsics::atomic_xsub_rel(dst, val), + AcqRel => intrinsics::atomic_xsub_acqrel(dst, val), + Relaxed => intrinsics::atomic_xsub_relaxed(dst, val), + _ => intrinsics::atomic_xsub(dst, val) + } +} + +#[inline] +pub unsafe fn atomic_compare_and_swap(dst:&mut T, old:T, new:T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), + Release => intrinsics::atomic_cxchg_rel(dst, old, new), + AcqRel => intrinsics::atomic_cxchg_acqrel(dst, old, new), + Relaxed => intrinsics::atomic_cxchg_relaxed(dst, old, new), + _ => intrinsics::atomic_cxchg(dst, old, new), + } +} + +#[inline] +pub unsafe fn atomic_and(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_and_acq(dst, val), + Release => intrinsics::atomic_and_rel(dst, val), + AcqRel => intrinsics::atomic_and_acqrel(dst, val), + Relaxed => intrinsics::atomic_and_relaxed(dst, val), + _ => intrinsics::atomic_and(dst, val) + } +} + +#[inline] +pub unsafe fn atomic_nand(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_nand_acq(dst, val), + Release => intrinsics::atomic_nand_rel(dst, val), + AcqRel => intrinsics::atomic_nand_acqrel(dst, val), + Relaxed => intrinsics::atomic_nand_relaxed(dst, val), + _ => intrinsics::atomic_nand(dst, val) + } +} + + +#[inline] +pub unsafe fn atomic_or(dst: &mut T, val: T, order: Ordering) -> T { + 
match order { + Acquire => intrinsics::atomic_or_acq(dst, val), + Release => intrinsics::atomic_or_rel(dst, val), + AcqRel => intrinsics::atomic_or_acqrel(dst, val), + Relaxed => intrinsics::atomic_or_relaxed(dst, val), + _ => intrinsics::atomic_or(dst, val) + } +} + + +#[inline] +pub unsafe fn atomic_xor(dst: &mut T, val: T, order: Ordering) -> T { + match order { + Acquire => intrinsics::atomic_xor_acq(dst, val), + Release => intrinsics::atomic_xor_rel(dst, val), + AcqRel => intrinsics::atomic_xor_acqrel(dst, val), + Relaxed => intrinsics::atomic_xor_relaxed(dst, val), + _ => intrinsics::atomic_xor(dst, val) + } +} + + +/** + * An atomic fence. + * + * A fence 'A' which has `Release` ordering semantics, synchronizes with a + * fence 'B' with (at least) `Acquire` semantics, if and only if there exist + * atomic operations X and Y, both operating on some atomic object 'M' such + * that A is sequenced before X, Y is sequenced before B and Y observes + * the change to M. This provides a happens-before dependence between A and B. + * + * Atomic operations with `Release` or `Acquire` semantics can also synchronize + * with a fence. + * + * A fence which has `SeqCst` ordering, in addition to having both `Acquire` and + * `Release` semantics, participates in the global program order of the other + * `SeqCst` operations and/or fences. + * + * Accepts `Acquire`, `Release`, `AcqRel` and `SeqCst` orderings. 
+ */ +#[inline] +pub fn fence(order: Ordering) { + unsafe { + match order { + Acquire => intrinsics::atomic_fence_acq(), + Release => intrinsics::atomic_fence_rel(), + AcqRel => intrinsics::atomic_fence_rel(), + _ => intrinsics::atomic_fence(), + } + } +} + +#[cfg(test)] +mod test { + use option::*; + use super::*; + + #[test] + fn flag() { + let mut flg = AtomicFlag::new(); + assert!(!flg.test_and_set(SeqCst)); + assert!(flg.test_and_set(SeqCst)); + + flg.clear(SeqCst); + assert!(!flg.test_and_set(SeqCst)); + } + + #[test] + fn option_empty() { + let mut option: AtomicOption<()> = AtomicOption::empty(); + assert!(option.is_empty(SeqCst)); + } + + #[test] + fn option_swap() { + let mut p = AtomicOption::new(~1); + let a = ~2; + + let b = p.swap(a, SeqCst); + + assert_eq!(b, Some(~1)); + assert_eq!(p.take(SeqCst), Some(~2)); + } + + #[test] + fn option_take() { + let mut p = AtomicOption::new(~1); + + assert_eq!(p.take(SeqCst), Some(~1)); + assert_eq!(p.take(SeqCst), None); + + let p2 = ~2; + p.swap(p2, SeqCst); + + assert_eq!(p.take(SeqCst), Some(~2)); + } + + #[test] + fn option_fill() { + let mut p = AtomicOption::new(~1); + assert!(p.fill(~2, SeqCst).is_some()); // should fail; shouldn't leak! 
+ assert_eq!(p.take(SeqCst), Some(~1)); + + assert!(p.fill(~2, SeqCst).is_none()); // shouldn't fail + assert_eq!(p.take(SeqCst), Some(~2)); + } + + #[test] + fn bool_and() { + let mut a = AtomicBool::new(true); + assert_eq!(a.fetch_and(false, SeqCst),true); + assert_eq!(a.load(SeqCst),false); + } + + static mut S_FLAG : AtomicFlag = INIT_ATOMIC_FLAG; + static mut S_BOOL : AtomicBool = INIT_ATOMIC_BOOL; + static mut S_INT : AtomicInt = INIT_ATOMIC_INT; + static mut S_UINT : AtomicUint = INIT_ATOMIC_UINT; + + #[test] + fn static_init() { + unsafe { + assert!(!S_FLAG.test_and_set(SeqCst)); + assert!(!S_BOOL.load(SeqCst)); + assert!(S_INT.load(SeqCst) == 0); + assert!(S_UINT.load(SeqCst) == 0); + } + } + + #[test] + fn different_sizes() { + unsafe { + let mut slot = 0u16; + assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0); + + let mut slot = 0u8; + assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0); + + let mut slot = 0u32; + assert_eq!(super::atomic_load(&mut slot, SeqCst), 0); + + let mut slot = 0u64; + super::atomic_store(&mut slot, 2, SeqCst); + } + } +} diff --git a/src/libstd/sync/mod.rs b/src/libstd/sync/mod.rs index 3213c538152c6..994d12b34e5aa 100644 --- a/src/libstd/sync/mod.rs +++ b/src/libstd/sync/mod.rs @@ -16,6 +16,10 @@ //! other types of concurrent primitives. 
pub mod arc; +#[cfg(stage0)] +#[path = "atomics_stage0.rs"] +pub mod atomics; +#[cfg(not(stage0))] pub mod atomics; pub mod deque; pub mod mpmc_bounded_queue; diff --git a/src/test/auxiliary/cci_intrinsic.rs b/src/test/auxiliary/cci_intrinsic.rs index 07d6df89d220c..b7278f64289e0 100644 --- a/src/test/auxiliary/cci_intrinsic.rs +++ b/src/test/auxiliary/cci_intrinsic.rs @@ -10,21 +10,21 @@ pub mod rusti { extern "rust-intrinsic" { - pub fn atomic_cxchg(dst: &mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_acq(dst: &mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_rel(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg(dst: &T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: &T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: &T, old: T, src: T) -> T; - pub fn atomic_xchg(dst: &mut T, src: T) -> T; - pub fn atomic_xchg_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xchg_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xchg(dst: &T, src: T) -> T; + pub fn atomic_xchg_acq(dst: &T, src: T) -> T; + pub fn atomic_xchg_rel(dst: &T, src: T) -> T; - pub fn atomic_xadd(dst: &mut T, src: T) -> T; - pub fn atomic_xadd_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xadd_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xadd(dst: &T, src: T) -> T; + pub fn atomic_xadd_acq(dst: &T, src: T) -> T; + pub fn atomic_xadd_rel(dst: &T, src: T) -> T; - pub fn atomic_xsub(dst: &mut T, src: T) -> T; - pub fn atomic_xsub_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xsub_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xsub(dst: &T, src: T) -> T; + pub fn atomic_xsub_acq(dst: &T, src: T) -> T; + pub fn atomic_xsub_rel(dst: &T, src: T) -> T; } } diff --git a/src/test/run-pass/intrinsic-atomics.rs b/src/test/run-pass/intrinsic-atomics.rs index d6e394a345e22..cb1f8ed60187f 100644 --- a/src/test/run-pass/intrinsic-atomics.rs +++ b/src/test/run-pass/intrinsic-atomics.rs @@ -10,27 +10,27 @@ mod rusti { extern "rust-intrinsic" { - pub fn atomic_cxchg(dst: &mut T, old: T, src: T) -> T; 
- pub fn atomic_cxchg_acq(dst: &mut T, old: T, src: T) -> T; - pub fn atomic_cxchg_rel(dst: &mut T, old: T, src: T) -> T; + pub fn atomic_cxchg(dst: &T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: &T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: &T, old: T, src: T) -> T; pub fn atomic_load(src: &T) -> T; pub fn atomic_load_acq(src: &T) -> T; - pub fn atomic_store(dst: &mut T, val: T); - pub fn atomic_store_rel(dst: &mut T, val: T); + pub fn atomic_store(dst: &T, val: T); + pub fn atomic_store_rel(dst: &T, val: T); - pub fn atomic_xchg(dst: &mut T, src: T) -> T; - pub fn atomic_xchg_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xchg_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xchg(dst: &T, src: T) -> T; + pub fn atomic_xchg_acq(dst: &T, src: T) -> T; + pub fn atomic_xchg_rel(dst: &T, src: T) -> T; - pub fn atomic_xadd(dst: &mut T, src: T) -> T; - pub fn atomic_xadd_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xadd_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xadd(dst: &T, src: T) -> T; + pub fn atomic_xadd_acq(dst: &T, src: T) -> T; + pub fn atomic_xadd_rel(dst: &T, src: T) -> T; - pub fn atomic_xsub(dst: &mut T, src: T) -> T; - pub fn atomic_xsub_acq(dst: &mut T, src: T) -> T; - pub fn atomic_xsub_rel(dst: &mut T, src: T) -> T; + pub fn atomic_xsub(dst: &T, src: T) -> T; + pub fn atomic_xsub_acq(dst: &T, src: T) -> T; + pub fn atomic_xsub_rel(dst: &T, src: T) -> T; } } From df07e7a45b32aeccbc3eea4be6e3cf39d7aaf9b9 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 24 Feb 2014 18:20:52 -0800 Subject: [PATCH 2/4] std: Make the generic atomics take unsafe pointers These mutate values behind references that are Freeze, which is not allowed. 
--- src/librustc/middle/typeck/check/mod.rs | 14 +-- src/libstd/intrinsics.rs | 161 ++++++++++++------------ src/libstd/sync/atomics.rs | 86 ++++++------- src/test/auxiliary/cci_intrinsic.rs | 18 +-- src/test/run-pass/intrinsic-atomics.rs | 64 +++++----- 5 files changed, 164 insertions(+), 179 deletions(-) diff --git a/src/librustc/middle/typeck/check/mod.rs b/src/librustc/middle/typeck/check/mod.rs index 45ecb17a08131..d2176d767f570 100644 --- a/src/librustc/middle/typeck/check/mod.rs +++ b/src/librustc/middle/typeck/check/mod.rs @@ -3956,31 +3956,25 @@ pub fn check_intrinsic_type(ccx: @CrateCtxt, it: &ast::ForeignItem) { //We only care about the operation here match split[1] { - "cxchg" => (1, ~[ty::mk_imm_rptr(tcx, - ty::ReLateBound(it.id, ty::BrAnon(0)), - param(ccx, 0)), + "cxchg" => (1, ~[ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0), param(ccx, 0), ], param(ccx, 0)), "load" => (1, ~[ - ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), - param(ccx, 0)) + ty::mk_imm_ptr(tcx, param(ccx, 0)) ], param(ccx, 0)), "store" => (1, ~[ - ty::mk_imm_rptr(tcx, ty::ReLateBound(it.id, ty::BrAnon(0)), - param(ccx, 0)), + ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0) ], ty::mk_nil()), "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax" | "umin" => { - (1, ~[ty::mk_imm_rptr(tcx, - ty::ReLateBound(it.id, ty::BrAnon(0)), - param(ccx, 0)), param(ccx, 0) ], + (1, ~[ty::mk_mut_ptr(tcx, param(ccx, 0)), param(ccx, 0) ], param(ccx, 0)) } "fence" => { diff --git a/src/libstd/intrinsics.rs b/src/libstd/intrinsics.rs index cf95d1c34444f..76c7d66bd823e 100644 --- a/src/libstd/intrinsics.rs +++ b/src/libstd/intrinsics.rs @@ -250,85 +250,88 @@ extern "rust-intrinsic" { #[cfg(not(stage0))] extern "rust-intrinsic" { - pub fn atomic_cxchg(dst: &T, old: T, src: T) -> T; - pub fn atomic_cxchg_acq(dst: &T, old: T, src: T) -> T; - pub fn atomic_cxchg_rel(dst: &T, old: T, src: T) -> T; - pub fn atomic_cxchg_acqrel(dst: &T, old: T, src: T) -> T; - pub 
fn atomic_cxchg_relaxed(dst: &T, old: T, src: T) -> T; - - pub fn atomic_load(src: &T) -> T; - pub fn atomic_load_acq(src: &T) -> T; - pub fn atomic_load_relaxed(src: &T) -> T; - - pub fn atomic_store(dst: &T, val: T); - pub fn atomic_store_rel(dst: &T, val: T); - pub fn atomic_store_relaxed(dst: &T, val: T); - - pub fn atomic_xchg(dst: &T, src: T) -> T; - pub fn atomic_xchg_acq(dst: &T, src: T) -> T; - pub fn atomic_xchg_rel(dst: &T, src: T) -> T; - pub fn atomic_xchg_acqrel(dst: &T, src: T) -> T; - pub fn atomic_xchg_relaxed(dst: &T, src: T) -> T; - - pub fn atomic_xadd(dst: &T, src: T) -> T; - pub fn atomic_xadd_acq(dst: &T, src: T) -> T; - pub fn atomic_xadd_rel(dst: &T, src: T) -> T; - pub fn atomic_xadd_acqrel(dst: &T, src: T) -> T; - pub fn atomic_xadd_relaxed(dst: &T, src: T) -> T; - - pub fn atomic_xsub(dst: &T, src: T) -> T; - pub fn atomic_xsub_acq(dst: &T, src: T) -> T; - pub fn atomic_xsub_rel(dst: &T, src: T) -> T; - pub fn atomic_xsub_acqrel(dst: &T, src: T) -> T; - pub fn atomic_xsub_relaxed(dst: &T, src: T) -> T; - - pub fn atomic_and(dst: &T, src: T) -> T; - pub fn atomic_and_acq(dst: &T, src: T) -> T; - pub fn atomic_and_rel(dst: &T, src: T) -> T; - pub fn atomic_and_acqrel(dst: &T, src: T) -> T; - pub fn atomic_and_relaxed(dst: &T, src: T) -> T; - - pub fn atomic_nand(dst: &T, src: T) -> T; - pub fn atomic_nand_acq(dst: &T, src: T) -> T; - pub fn atomic_nand_rel(dst: &T, src: T) -> T; - pub fn atomic_nand_acqrel(dst: &T, src: T) -> T; - pub fn atomic_nand_relaxed(dst: &T, src: T) -> T; - - pub fn atomic_or(dst: &T, src: T) -> T; - pub fn atomic_or_acq(dst: &T, src: T) -> T; - pub fn atomic_or_rel(dst: &T, src: T) -> T; - pub fn atomic_or_acqrel(dst: &T, src: T) -> T; - pub fn atomic_or_relaxed(dst: &T, src: T) -> T; - - pub fn atomic_xor(dst: &T, src: T) -> T; - pub fn atomic_xor_acq(dst: &T, src: T) -> T; - pub fn atomic_xor_rel(dst: &T, src: T) -> T; - pub fn atomic_xor_acqrel(dst: &T, src: T) -> T; - pub fn atomic_xor_relaxed(dst: &T, src: T) 
-> T; - - pub fn atomic_max(dst: &T, src: T) -> T; - pub fn atomic_max_acq(dst: &T, src: T) -> T; - pub fn atomic_max_rel(dst: &T, src: T) -> T; - pub fn atomic_max_acqrel(dst: &T, src: T) -> T; - pub fn atomic_max_relaxed(dst: &T, src: T) -> T; - - pub fn atomic_min(dst: &T, src: T) -> T; - pub fn atomic_min_acq(dst: &T, src: T) -> T; - pub fn atomic_min_rel(dst: &T, src: T) -> T; - pub fn atomic_min_acqrel(dst: &T, src: T) -> T; - pub fn atomic_min_relaxed(dst: &T, src: T) -> T; - - pub fn atomic_umin(dst: &T, src: T) -> T; - pub fn atomic_umin_acq(dst: &T, src: T) -> T; - pub fn atomic_umin_rel(dst: &T, src: T) -> T; - pub fn atomic_umin_acqrel(dst: &T, src: T) -> T; - pub fn atomic_umin_relaxed(dst: &T, src: T) -> T; - - pub fn atomic_umax(dst: &T, src: T) -> T; - pub fn atomic_umax_acq(dst: &T, src: T) -> T; - pub fn atomic_umax_rel(dst: &T, src: T) -> T; - pub fn atomic_umax_acqrel(dst: &T, src: T) -> T; - pub fn atomic_umax_relaxed(dst: &T, src: T) -> T; + // NB: These intrinsics take unsafe pointers because they mutate aliased + // memory, which is not valid for either `&` or `&mut`. 
+ + pub fn atomic_cxchg(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acqrel(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_relaxed(dst: *mut T, old: T, src: T) -> T; + + pub fn atomic_load(src: *T) -> T; + pub fn atomic_load_acq(src: *T) -> T; + pub fn atomic_load_relaxed(src: *T) -> T; + + pub fn atomic_store(dst: *mut T, val: T); + pub fn atomic_store_rel(dst: *mut T, val: T); + pub fn atomic_store_relaxed(dst: *mut T, val: T); + + pub fn atomic_xchg(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_rel(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_xadd(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_rel(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_xsub(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_rel(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_and(dst: *mut T, src: T) -> T; + pub fn atomic_and_acq(dst: *mut T, src: T) -> T; + pub fn atomic_and_rel(dst: *mut T, src: T) -> T; + pub fn atomic_and_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_and_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_nand(dst: *mut T, src: T) -> T; + pub fn atomic_nand_acq(dst: *mut T, src: T) -> T; + pub fn atomic_nand_rel(dst: *mut T, src: T) -> T; + pub fn atomic_nand_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_nand_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_or(dst: *mut T, src: T) -> T; + pub fn atomic_or_acq(dst: *mut T, src: T) -> T; + 
pub fn atomic_or_rel(dst: *mut T, src: T) -> T; + pub fn atomic_or_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_or_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_xor(dst: *mut T, src: T) -> T; + pub fn atomic_xor_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xor_rel(dst: *mut T, src: T) -> T; + pub fn atomic_xor_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_xor_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_max(dst: *mut T, src: T) -> T; + pub fn atomic_max_acq(dst: *mut T, src: T) -> T; + pub fn atomic_max_rel(dst: *mut T, src: T) -> T; + pub fn atomic_max_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_max_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_min(dst: *mut T, src: T) -> T; + pub fn atomic_min_acq(dst: *mut T, src: T) -> T; + pub fn atomic_min_rel(dst: *mut T, src: T) -> T; + pub fn atomic_min_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_min_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_umin(dst: *mut T, src: T) -> T; + pub fn atomic_umin_acq(dst: *mut T, src: T) -> T; + pub fn atomic_umin_rel(dst: *mut T, src: T) -> T; + pub fn atomic_umin_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_umin_relaxed(dst: *mut T, src: T) -> T; + + pub fn atomic_umax(dst: *mut T, src: T) -> T; + pub fn atomic_umax_acq(dst: *mut T, src: T) -> T; + pub fn atomic_umax_rel(dst: *mut T, src: T) -> T; + pub fn atomic_umax_acqrel(dst: *mut T, src: T) -> T; + pub fn atomic_umax_relaxed(dst: *mut T, src: T) -> T; } extern "rust-intrinsic" { diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs index ade54780484a6..287a92a44a216 100644 --- a/src/libstd/sync/atomics.rs +++ b/src/libstd/sync/atomics.rs @@ -23,6 +23,7 @@ use intrinsics; use cast; +use cast::transmute_mut_unsafe; use std::kinds::marker; use option::{Option,Some,None}; use ops::Drop; @@ -126,7 +127,7 @@ impl AtomicFlag { */ #[inline] pub fn clear(&self, order: Ordering) { - unsafe {atomic_store(&self.v, 0, order)} + unsafe 
{atomic_store(transmute_mut_unsafe(&self.v), 0, order)} } /** @@ -135,7 +136,7 @@ impl AtomicFlag { */ #[inline] pub fn test_and_set(&self, order: Ordering) -> bool { - unsafe { atomic_compare_and_swap(&self.v, 0, 1, order) > 0 } + unsafe { atomic_compare_and_swap(transmute_mut_unsafe(&self.v), 0, 1, order) > 0 } } } @@ -153,14 +154,14 @@ impl AtomicBool { pub fn store(&self, val: bool, order: Ordering) { let val = if val { 1 } else { 0 }; - unsafe { atomic_store(&self.v, val, order); } + unsafe { atomic_store(transmute_mut_unsafe(&self.v), val, order); } } #[inline] pub fn swap(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_swap(&self.v, val, order) > 0 } + unsafe { atomic_swap(transmute_mut_unsafe(&self.v), val, order) > 0 } } #[inline] @@ -168,7 +169,7 @@ impl AtomicBool { let old = if old { 1 } else { 0 }; let new = if new { 1 } else { 0 }; - unsafe { atomic_compare_and_swap(&self.v, old, new, order) > 0 } + unsafe { atomic_compare_and_swap(transmute_mut_unsafe(&self.v), old, new, order) > 0 } } /// Returns the old value @@ -176,7 +177,7 @@ impl AtomicBool { pub fn fetch_and(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_and(&self.v, val, order) > 0 } + unsafe { atomic_and(transmute_mut_unsafe(&self.v), val, order) > 0 } } /// Returns the old value @@ -184,7 +185,7 @@ impl AtomicBool { pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_nand(&self.v, val, order) > 0 } + unsafe { atomic_nand(transmute_mut_unsafe(&self.v), val, order) > 0 } } /// Returns the old value @@ -192,7 +193,7 @@ impl AtomicBool { pub fn fetch_or(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_or(&self.v, val, order) > 0 } + unsafe { atomic_or(transmute_mut_unsafe(&self.v), val, order) > 0 } } /// Returns the old value @@ -200,7 +201,7 @@ impl AtomicBool { pub fn 
fetch_xor(&self, val: bool, order: Ordering) -> bool { let val = if val { 1 } else { 0 }; - unsafe { atomic_xor(&self.v, val, order) > 0 } + unsafe { atomic_xor(transmute_mut_unsafe(&self.v), val, order) > 0 } } } @@ -216,29 +217,29 @@ impl AtomicInt { #[inline] pub fn store(&self, val: int, order: Ordering) { - unsafe { atomic_store(&self.v, val, order); } + unsafe { atomic_store(transmute_mut_unsafe(&self.v), val, order); } } #[inline] pub fn swap(&self, val: int, order: Ordering) -> int { - unsafe { atomic_swap(&self.v, val, order) } + unsafe { atomic_swap(transmute_mut_unsafe(&self.v), val, order) } } #[inline] pub fn compare_and_swap(&self, old: int, new: int, order: Ordering) -> int { - unsafe { atomic_compare_and_swap(&self.v, old, new, order) } + unsafe { atomic_compare_and_swap(transmute_mut_unsafe(&self.v), old, new, order) } } /// Returns the old value (like __sync_fetch_and_add). #[inline] pub fn fetch_add(&self, val: int, order: Ordering) -> int { - unsafe { atomic_add(&self.v, val, order) } + unsafe { atomic_add(transmute_mut_unsafe(&self.v), val, order) } } /// Returns the old value (like __sync_fetch_and_sub). 
#[inline] pub fn fetch_sub(&self, val: int, order: Ordering) -> int { - unsafe { atomic_sub(&self.v, val, order) } + unsafe { atomic_sub(transmute_mut_unsafe(&self.v), val, order) } } } @@ -254,27 +255,27 @@ impl AtomicU64 { #[inline] pub fn store(&self, val: u64, order: Ordering) { - unsafe { atomic_store(&self.v, val, order); } + unsafe { atomic_store(transmute_mut_unsafe(&self.v), val, order); } } #[inline] pub fn swap(&self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_swap(&self.v, val, order) } + unsafe { atomic_swap(transmute_mut_unsafe(&self.v), val, order) } } #[inline] pub fn compare_and_swap(&self, old: u64, new: u64, order: Ordering) -> u64 { - unsafe { atomic_compare_and_swap(&self.v, old, new, order) } + unsafe { atomic_compare_and_swap(transmute_mut_unsafe(&self.v), old, new, order) } } #[inline] pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_add(&self.v, val, order) } + unsafe { atomic_add(transmute_mut_unsafe(&self.v), val, order) } } #[inline] pub fn fetch_sub(&self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_sub(&self.v, val, order) } + unsafe { atomic_sub(transmute_mut_unsafe(&self.v), val, order) } } } @@ -290,29 +291,29 @@ impl AtomicUint { #[inline] pub fn store(&self, val: uint, order: Ordering) { - unsafe { atomic_store(&self.v, val, order); } + unsafe { atomic_store(transmute_mut_unsafe(&self.v), val, order); } } #[inline] pub fn swap(&self, val: uint, order: Ordering) -> uint { - unsafe { atomic_swap(&self.v, val, order) } + unsafe { atomic_swap(transmute_mut_unsafe(&self.v), val, order) } } #[inline] pub fn compare_and_swap(&self, old: uint, new: uint, order: Ordering) -> uint { - unsafe { atomic_compare_and_swap(&self.v, old, new, order) } + unsafe { atomic_compare_and_swap(transmute_mut_unsafe(&self.v), old, new, order) } } /// Returns the old value (like __sync_fetch_and_add). 
#[inline] pub fn fetch_add(&self, val: uint, order: Ordering) -> uint { - unsafe { atomic_add(&self.v, val, order) } + unsafe { atomic_add(transmute_mut_unsafe(&self.v), val, order) } } /// Returns the old value (like __sync_fetch_and_sub).. #[inline] pub fn fetch_sub(&self, val: uint, order: Ordering) -> uint { - unsafe { atomic_sub(&self.v, val, order) } + unsafe { atomic_sub(transmute_mut_unsafe(&self.v), val, order) } } } @@ -330,18 +331,18 @@ impl AtomicPtr { #[inline] pub fn store(&self, ptr: *mut T, order: Ordering) { - unsafe { atomic_store(&self.p, ptr as uint, order); } + unsafe { atomic_store(transmute_mut_unsafe(&self.p), ptr as uint, order); } } #[inline] pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T { - unsafe { atomic_swap(&self.p, ptr as uint, order) as *mut T } + unsafe { atomic_swap(transmute_mut_unsafe(&self.p), ptr as uint, order) as *mut T } } #[inline] pub fn compare_and_swap(&self, old: *mut T, new: *mut T, order: Ordering) -> *mut T { unsafe { - atomic_compare_and_swap(&self.p, old as uint, + atomic_compare_and_swap(transmute_mut_unsafe(&self.p), old as uint, new as uint, order) as *mut T } } @@ -359,7 +360,7 @@ impl AtomicOption { unsafe { let val = cast::transmute(val); - let p = atomic_swap(&self.p, val, order); + let p = atomic_swap(transmute_mut_unsafe(&self.p), val, order); if p as uint == 0 { None } else { @@ -381,7 +382,8 @@ impl AtomicOption { unsafe { let val = cast::transmute(val); let expected = cast::transmute(0); - let oldval = atomic_compare_and_swap(&self.p, expected, val, order); + let oldval = atomic_compare_and_swap( + transmute_mut_unsafe(&self.p), expected, val, order); if oldval == expected { None } else { @@ -406,7 +408,7 @@ impl Drop for AtomicOption { } #[inline] -pub unsafe fn atomic_store(dst: &T, val: T, order:Ordering) { +pub unsafe fn atomic_store(dst: *mut T, val: T, order:Ordering) { match order { Release => intrinsics::atomic_store_rel(dst, val), Relaxed => intrinsics::atomic_store_relaxed(dst, 
val), @@ -415,7 +417,7 @@ pub unsafe fn atomic_store(dst: &T, val: T, order:Ordering) { } #[inline] -pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { +pub unsafe fn atomic_load(dst: *T, order:Ordering) -> T { match order { Acquire => intrinsics::atomic_load_acq(dst), Relaxed => intrinsics::atomic_load_relaxed(dst), @@ -424,7 +426,7 @@ pub unsafe fn atomic_load(dst: &T, order:Ordering) -> T { } #[inline] -pub unsafe fn atomic_swap(dst: &T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xchg_acq(dst, val), Release => intrinsics::atomic_xchg_rel(dst, val), @@ -436,7 +438,7 @@ pub unsafe fn atomic_swap(dst: &T, val: T, order: Ordering) -> T { /// Returns the old value (like __sync_fetch_and_add). #[inline] -pub unsafe fn atomic_add(dst: &T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xadd_acq(dst, val), Release => intrinsics::atomic_xadd_rel(dst, val), @@ -448,7 +450,7 @@ pub unsafe fn atomic_add(dst: &T, val: T, order: Ordering) -> T { /// Returns the old value (like __sync_fetch_and_sub). 
#[inline] -pub unsafe fn atomic_sub(dst: &T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xsub_acq(dst, val), Release => intrinsics::atomic_xsub_rel(dst, val), @@ -459,7 +461,7 @@ pub unsafe fn atomic_sub(dst: &T, val: T, order: Ordering) -> T { } #[inline] -pub unsafe fn atomic_compare_and_swap(dst:&T, old:T, new:T, order: Ordering) -> T { +pub unsafe fn atomic_compare_and_swap(dst: *mut T, old:T, new:T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), Release => intrinsics::atomic_cxchg_rel(dst, old, new), @@ -470,7 +472,7 @@ pub unsafe fn atomic_compare_and_swap(dst:&T, old:T, new:T, order: Ordering) } #[inline] -pub unsafe fn atomic_and(dst: &T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_and_acq(dst, val), Release => intrinsics::atomic_and_rel(dst, val), @@ -481,7 +483,7 @@ pub unsafe fn atomic_and(dst: &T, val: T, order: Ordering) -> T { } #[inline] -pub unsafe fn atomic_nand(dst: &T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_nand_acq(dst, val), Release => intrinsics::atomic_nand_rel(dst, val), @@ -493,7 +495,7 @@ pub unsafe fn atomic_nand(dst: &T, val: T, order: Ordering) -> T { #[inline] -pub unsafe fn atomic_or(dst: &T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_or_acq(dst, val), Release => intrinsics::atomic_or_rel(dst, val), @@ -505,7 +507,7 @@ pub unsafe fn atomic_or(dst: &T, val: T, order: Ordering) -> T { #[inline] -pub unsafe fn atomic_xor(dst: &T, val: T, order: Ordering) -> T { +pub unsafe fn atomic_xor(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => 
intrinsics::atomic_xor_acq(dst, val), Release => intrinsics::atomic_xor_rel(dst, val), @@ -627,16 +629,16 @@ mod test { fn different_sizes() { unsafe { let mut slot = 0u16; - assert_eq!(super::atomic_swap(&slot, 1, SeqCst), 0); + assert_eq!(super::atomic_swap(&mut slot, 1, SeqCst), 0); let mut slot = 0u8; - assert_eq!(super::atomic_compare_and_swap(&slot, 1, 2, SeqCst), 0); + assert_eq!(super::atomic_compare_and_swap(&mut slot, 1, 2, SeqCst), 0); let mut slot = 0u32; assert_eq!(super::atomic_load(&slot, SeqCst), 0); let mut slot = 0u64; - super::atomic_store(&slot, 2, SeqCst); + super::atomic_store(&mut slot, 2, SeqCst); } } } diff --git a/src/test/auxiliary/cci_intrinsic.rs b/src/test/auxiliary/cci_intrinsic.rs index b7278f64289e0..35d987480c078 100644 --- a/src/test/auxiliary/cci_intrinsic.rs +++ b/src/test/auxiliary/cci_intrinsic.rs @@ -10,26 +10,12 @@ pub mod rusti { extern "rust-intrinsic" { - pub fn atomic_cxchg(dst: &T, old: T, src: T) -> T; - pub fn atomic_cxchg_acq(dst: &T, old: T, src: T) -> T; - pub fn atomic_cxchg_rel(dst: &T, old: T, src: T) -> T; - - pub fn atomic_xchg(dst: &T, src: T) -> T; - pub fn atomic_xchg_acq(dst: &T, src: T) -> T; - pub fn atomic_xchg_rel(dst: &T, src: T) -> T; - - pub fn atomic_xadd(dst: &T, src: T) -> T; - pub fn atomic_xadd_acq(dst: &T, src: T) -> T; - pub fn atomic_xadd_rel(dst: &T, src: T) -> T; - - pub fn atomic_xsub(dst: &T, src: T) -> T; - pub fn atomic_xsub_acq(dst: &T, src: T) -> T; - pub fn atomic_xsub_rel(dst: &T, src: T) -> T; + pub fn atomic_xchg(dst: *mut T, src: T) -> T; } } #[inline(always)] -pub fn atomic_xchg(dst: &mut int, src: int) -> int { +pub fn atomic_xchg(dst: *mut int, src: int) -> int { unsafe { rusti::atomic_xchg(dst, src) } diff --git a/src/test/run-pass/intrinsic-atomics.rs b/src/test/run-pass/intrinsic-atomics.rs index cb1f8ed60187f..b663cbfa50974 100644 --- a/src/test/run-pass/intrinsic-atomics.rs +++ b/src/test/run-pass/intrinsic-atomics.rs @@ -10,27 +10,27 @@ mod rusti { extern 
"rust-intrinsic" { - pub fn atomic_cxchg(dst: &T, old: T, src: T) -> T; - pub fn atomic_cxchg_acq(dst: &T, old: T, src: T) -> T; - pub fn atomic_cxchg_rel(dst: &T, old: T, src: T) -> T; + pub fn atomic_cxchg(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_acq(dst: *mut T, old: T, src: T) -> T; + pub fn atomic_cxchg_rel(dst: *mut T, old: T, src: T) -> T; - pub fn atomic_load(src: &T) -> T; - pub fn atomic_load_acq(src: &T) -> T; + pub fn atomic_load(src: *T) -> T; + pub fn atomic_load_acq(src: *T) -> T; - pub fn atomic_store(dst: &T, val: T); - pub fn atomic_store_rel(dst: &T, val: T); + pub fn atomic_store(dst: *mut T, val: T); + pub fn atomic_store_rel(dst: *mut T, val: T); - pub fn atomic_xchg(dst: &T, src: T) -> T; - pub fn atomic_xchg_acq(dst: &T, src: T) -> T; - pub fn atomic_xchg_rel(dst: &T, src: T) -> T; + pub fn atomic_xchg(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xchg_rel(dst: *mut T, src: T) -> T; - pub fn atomic_xadd(dst: &T, src: T) -> T; - pub fn atomic_xadd_acq(dst: &T, src: T) -> T; - pub fn atomic_xadd_rel(dst: &T, src: T) -> T; + pub fn atomic_xadd(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xadd_rel(dst: *mut T, src: T) -> T; - pub fn atomic_xsub(dst: &T, src: T) -> T; - pub fn atomic_xsub_acq(dst: &T, src: T) -> T; - pub fn atomic_xsub_rel(dst: &T, src: T) -> T; + pub fn atomic_xsub(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_acq(dst: *mut T, src: T) -> T; + pub fn atomic_xsub_rel(dst: *mut T, src: T) -> T; } } @@ -38,41 +38,41 @@ pub fn main() { unsafe { let mut x = ~1; - assert_eq!(rusti::atomic_load(x), 1); + assert_eq!(rusti::atomic_load(&*x), 1); *x = 5; - assert_eq!(rusti::atomic_load_acq(x), 5); + assert_eq!(rusti::atomic_load_acq(&*x), 5); - rusti::atomic_store(x,3); + rusti::atomic_store(&mut *x,3); assert_eq!(*x, 3); - rusti::atomic_store_rel(x,1); + rusti::atomic_store_rel(&mut *x,1); assert_eq!(*x, 1); - 
assert_eq!(rusti::atomic_cxchg(x, 1, 2), 1); + assert_eq!(rusti::atomic_cxchg(&mut *x, 1, 2), 1); assert_eq!(*x, 2); - assert_eq!(rusti::atomic_cxchg_acq(x, 1, 3), 2); + assert_eq!(rusti::atomic_cxchg_acq(&mut *x, 1, 3), 2); assert_eq!(*x, 2); - assert_eq!(rusti::atomic_cxchg_rel(x, 2, 1), 2); + assert_eq!(rusti::atomic_cxchg_rel(&mut *x, 2, 1), 2); assert_eq!(*x, 1); - assert_eq!(rusti::atomic_xchg(x, 0), 1); + assert_eq!(rusti::atomic_xchg(&mut *x, 0), 1); assert_eq!(*x, 0); - assert_eq!(rusti::atomic_xchg_acq(x, 1), 0); + assert_eq!(rusti::atomic_xchg_acq(&mut *x, 1), 0); assert_eq!(*x, 1); - assert_eq!(rusti::atomic_xchg_rel(x, 0), 1); + assert_eq!(rusti::atomic_xchg_rel(&mut *x, 0), 1); assert_eq!(*x, 0); - assert_eq!(rusti::atomic_xadd(x, 1), 0); - assert_eq!(rusti::atomic_xadd_acq(x, 1), 1); - assert_eq!(rusti::atomic_xadd_rel(x, 1), 2); + assert_eq!(rusti::atomic_xadd(&mut *x, 1), 0); + assert_eq!(rusti::atomic_xadd_acq(&mut *x, 1), 1); + assert_eq!(rusti::atomic_xadd_rel(&mut *x, 1), 2); assert_eq!(*x, 3); - assert_eq!(rusti::atomic_xsub(x, 1), 3); - assert_eq!(rusti::atomic_xsub_acq(x, 1), 2); - assert_eq!(rusti::atomic_xsub_rel(x, 1), 1); + assert_eq!(rusti::atomic_xsub(&mut *x, 1), 3); + assert_eq!(rusti::atomic_xsub_acq(&mut *x, 1), 2); + assert_eq!(rusti::atomic_xsub_rel(&mut *x, 1), 1); assert_eq!(*x, 0); } } From bcfff9de32441886239af89275760a5286ec8d25 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 24 Feb 2014 18:22:09 -0800 Subject: [PATCH 3/4] std: Make the generic atomics in `sync::atomics` private I'm not comfortable exposing public functions that purport to do atomic operations on arbitrary T. 
--- src/libstd/sync/atomics.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs index 287a92a44a216..909e4ed8c82a0 100644 --- a/src/libstd/sync/atomics.rs +++ b/src/libstd/sync/atomics.rs @@ -408,7 +408,7 @@ impl Drop for AtomicOption { } #[inline] -pub unsafe fn atomic_store(dst: *mut T, val: T, order:Ordering) { +unsafe fn atomic_store(dst: *mut T, val: T, order:Ordering) { match order { Release => intrinsics::atomic_store_rel(dst, val), Relaxed => intrinsics::atomic_store_relaxed(dst, val), @@ -417,7 +417,7 @@ pub unsafe fn atomic_store(dst: *mut T, val: T, order:Ordering) { } #[inline] -pub unsafe fn atomic_load(dst: *T, order:Ordering) -> T { +unsafe fn atomic_load(dst: *T, order:Ordering) -> T { match order { Acquire => intrinsics::atomic_load_acq(dst), Relaxed => intrinsics::atomic_load_relaxed(dst), @@ -426,7 +426,7 @@ pub unsafe fn atomic_load(dst: *T, order:Ordering) -> T { } #[inline] -pub unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xchg_acq(dst, val), Release => intrinsics::atomic_xchg_rel(dst, val), @@ -438,7 +438,7 @@ pub unsafe fn atomic_swap(dst: *mut T, val: T, order: Ordering) -> T { /// Returns the old value (like __sync_fetch_and_add). #[inline] -pub unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xadd_acq(dst, val), Release => intrinsics::atomic_xadd_rel(dst, val), @@ -450,7 +450,7 @@ pub unsafe fn atomic_add(dst: *mut T, val: T, order: Ordering) -> T { /// Returns the old value (like __sync_fetch_and_sub). 
#[inline] -pub unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_xsub_acq(dst, val), Release => intrinsics::atomic_xsub_rel(dst, val), @@ -461,7 +461,7 @@ pub unsafe fn atomic_sub(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -pub unsafe fn atomic_compare_and_swap(dst: *mut T, old:T, new:T, order: Ordering) -> T { +unsafe fn atomic_compare_and_swap(dst: *mut T, old:T, new:T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_cxchg_acq(dst, old, new), Release => intrinsics::atomic_cxchg_rel(dst, old, new), @@ -472,7 +472,7 @@ pub unsafe fn atomic_compare_and_swap(dst: *mut T, old:T, new:T, order: Order } #[inline] -pub unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_and_acq(dst, val), Release => intrinsics::atomic_and_rel(dst, val), @@ -483,7 +483,7 @@ pub unsafe fn atomic_and(dst: *mut T, val: T, order: Ordering) -> T { } #[inline] -pub unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_nand_acq(dst, val), Release => intrinsics::atomic_nand_rel(dst, val), @@ -495,7 +495,7 @@ pub unsafe fn atomic_nand(dst: *mut T, val: T, order: Ordering) -> T { #[inline] -pub unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => intrinsics::atomic_or_acq(dst, val), Release => intrinsics::atomic_or_rel(dst, val), @@ -507,7 +507,7 @@ pub unsafe fn atomic_or(dst: *mut T, val: T, order: Ordering) -> T { #[inline] -pub unsafe fn atomic_xor(dst: *mut T, val: T, order: Ordering) -> T { +unsafe fn atomic_xor(dst: *mut T, val: T, order: Ordering) -> T { match order { Acquire => 
intrinsics::atomic_xor_acq(dst, val), Release => intrinsics::atomic_xor_rel(dst, val), From b2b469afc3495764b30de2ec0ba87c470beb5f75 Mon Sep 17 00:00:00 2001 From: Brian Anderson Date: Mon, 24 Feb 2014 18:23:01 -0800 Subject: [PATCH 4/4] std: Remove AtomicU64 Support for this is less universal than for word-size things; it has no users; I'd rather play it safe. --- src/libstd/sync/atomics.rs | 49 -------------------------------------- 1 file changed, 49 deletions(-) diff --git a/src/libstd/sync/atomics.rs b/src/libstd/sync/atomics.rs index 909e4ed8c82a0..e38dc6c62011e 100644 --- a/src/libstd/sync/atomics.rs +++ b/src/libstd/sync/atomics.rs @@ -64,16 +64,6 @@ pub struct AtomicUint { priv nofreeze: marker::NoFreeze } -/** - * An unsigned atomic integer type that is forced to be 64-bits. This does not - * support all operations. - */ -pub struct AtomicU64 { - priv v: u64, - priv nopod: marker::NoPod, - priv nofreeze: marker::NoFreeze -} - /** * An unsafe atomic pointer. Only supports basic atomic operations */ @@ -112,9 +102,6 @@ pub static INIT_ATOMIC_INT : AtomicInt = AtomicInt { pub static INIT_ATOMIC_UINT : AtomicUint = AtomicUint { v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze }; -pub static INIT_ATOMIC_U64 : AtomicU64 = AtomicU64 { - v: 0, nopod: marker::NoPod, nofreeze: marker::NoFreeze -}; impl AtomicFlag { @@ -243,42 +230,6 @@ impl AtomicInt { } } -impl AtomicU64 { - pub fn new(v: u64) -> AtomicU64 { - AtomicU64 { v:v, nopod: marker::NoPod, nofreeze: marker::NoFreeze } - } - - #[inline] - pub fn load(&self, order: Ordering) -> u64 { - unsafe { atomic_load(&self.v, order) } - } - - #[inline] - pub fn store(&self, val: u64, order: Ordering) { - unsafe { atomic_store(transmute_mut_unsafe(&self.v), val, order); } - } - - #[inline] - pub fn swap(&self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_swap(transmute_mut_unsafe(&self.v), val, order) } - } - - #[inline] - pub fn compare_and_swap(&self, old: u64, new: u64, order: Ordering) -> u64 { - 
unsafe { atomic_compare_and_swap(transmute_mut_unsafe(&self.v), old, new, order) } - } - - #[inline] - pub fn fetch_add(&self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_add(transmute_mut_unsafe(&self.v), val, order) } - } - - #[inline] - pub fn fetch_sub(&self, val: u64, order: Ordering) -> u64 { - unsafe { atomic_sub(transmute_mut_unsafe(&self.v), val, order) } - } -} - impl AtomicUint { pub fn new(v: uint) -> AtomicUint { AtomicUint { v:v, nopod: marker::NoPod, nofreeze: marker::NoFreeze }