Commit f6766cf

atomic_load intrinsic: use const generic parameter for ordering
1 parent 04a67d5 commit f6766cf

16 files changed, +202 -76 lines changed


compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs

Lines changed: 2 additions & 1 deletion
@@ -870,11 +870,12 @@ fn codegen_regular_intrinsic_call<'tcx>(
             // FIXME use a compiler fence once Cranelift supports it
             fx.bcx.ins().fence();
         }
-        _ if intrinsic.as_str().starts_with("atomic_load") => {
+        sym::atomic_load => {
             intrinsic_args!(fx, args => (ptr); intrinsic);
             let ptr = ptr.load_scalar(fx);

             let ty = generic_args.type_at(0);
+            let _ord = generic_args.const_at(1).to_value(); // FIXME: forward this to cranelift once they support that
             match ty.kind() {
                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                     // FIXME implement 128bit atomics

compiler/rustc_codegen_ssa/src/mir/intrinsic.rs

Lines changed: 61 additions & 36 deletions
@@ -99,6 +99,19 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let llret_ty = bx.backend_type(bx.layout_of(ret_ty));
         let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);

+        let ret_llval = |bx: &mut Bx, llval| {
+            if !fn_abi.ret.is_ignore() {
+                if let PassMode::Cast { .. } = &fn_abi.ret.mode {
+                    bx.store_to_place(llval, result.val);
+                } else {
+                    OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
+                        .val
+                        .store(bx, result);
+                }
+            }
+            Ok(())
+        };
+
         let llval = match name {
             sym::abort => {
                 bx.abort();
@@ -331,6 +344,53 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 use crate::common::AtomicOrdering::*;
                 use crate::common::{AtomicRmwBinOp, SynchronizationScope};

+                let invalid_monomorphization = |ty| {
+                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
+                        span,
+                        name,
+                        ty,
+                    });
+                };
+
+                let parse_const_generic_ordering = |ord: ty::Value<'tcx>| {
+                    let discr = ord.valtree.unwrap_branch()[0].unwrap_leaf();
+                    let ord = discr.to_atomic_ordering();
+                    // We have to translate from the intrinsic ordering to the backend ordering.
+                    use rustc_middle::ty::AtomicOrdering;
+                    match ord {
+                        AtomicOrdering::Relaxed => Relaxed,
+                        AtomicOrdering::Release => Release,
+                        AtomicOrdering::Acquire => Acquire,
+                        AtomicOrdering::AcqRel => AcquireRelease,
+                        AtomicOrdering::SeqCst => SequentiallyConsistent,
+                    }
+                };
+
+                // Some intrinsics have the ordering already converted to a const generic parameter, we handle those first.
+                match name {
+                    sym::atomic_load => {
+                        let ty = fn_args.type_at(0);
+                        let ordering = fn_args.const_at(1).to_value();
+                        if !(int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr()) {
+                            invalid_monomorphization(ty);
+                            return Ok(());
+                        }
+                        let layout = bx.layout_of(ty);
+                        let source = args[0].immediate();
+                        let llval = bx.atomic_load(
+                            bx.backend_type(layout),
+                            source,
+                            parse_const_generic_ordering(ordering),
+                            layout.size,
+                        );
+
+                        return ret_llval(bx, llval);
+                    }
+
+                    // The rest falls back to below.
+                    _ => {}
+                }
+
                 let Some((instruction, ordering)) = atomic.split_once('_') else {
                     bx.sess().dcx().emit_fatal(errors::MissingMemoryOrdering);
                 };
@@ -344,14 +404,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => bx.sess().dcx().emit_fatal(errors::UnknownAtomicOrdering),
                 };

-                let invalid_monomorphization = |ty| {
-                    bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicIntegerType {
-                        span,
-                        name,
-                        ty,
-                    });
-                };
-
                 match instruction {
                     "cxchg" | "cxchgweak" => {
                         let Some((success, failure)) = ordering.split_once('_') else {
@@ -384,24 +436,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         return Ok(());
                     }

-                    "load" => {
-                        let ty = fn_args.type_at(0);
-                        if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
-                            let layout = bx.layout_of(ty);
-                            let size = layout.size;
-                            let source = args[0].immediate();
-                            bx.atomic_load(
-                                bx.backend_type(layout),
-                                source,
-                                parse_ordering(bx, ordering),
-                                size,
-                            )
-                        } else {
-                            invalid_monomorphization(ty);
-                            return Ok(());
-                        }
-                    }
-
                     "store" => {
                         let ty = fn_args.type_at(0);
                         if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_raw_ptr() {
@@ -532,16 +566,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
         };

-        if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast { .. } = &fn_abi.ret.mode {
-                bx.store_to_place(llval, result.val);
-            } else {
-                OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
-                    .val
-                    .store(bx, result);
-            }
-        }
-        Ok(())
+        ret_llval(bx, llval)
     }
 }
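
To make the translation step in `parse_const_generic_ordering` above easier to follow, here is a minimal standalone sketch of the same mapping between the compiler-facing ordering enum and a backend's own ordering type. Both enums below are illustrative stand-ins, not the real `rustc_middle::ty::AtomicOrdering` or `crate::common::AtomicOrdering` types.

```rust
// Sketch only: mirrors the shape of the ordering translation, under the assumption
// that the compiler-side and backend-side enums use different variant names.
#![allow(dead_code)]

#[derive(Debug)]
enum IntrinsicOrdering {
    Relaxed,
    Release,
    Acquire,
    AcqRel,
    SeqCst,
}

#[derive(Debug)]
enum BackendOrdering {
    Relaxed,
    Release,
    Acquire,
    AcquireRelease,
    SequentiallyConsistent,
}

// Map the ordering decoded from the const generic onto the backend's vocabulary.
fn to_backend(ord: IntrinsicOrdering) -> BackendOrdering {
    match ord {
        IntrinsicOrdering::Relaxed => BackendOrdering::Relaxed,
        IntrinsicOrdering::Release => BackendOrdering::Release,
        IntrinsicOrdering::Acquire => BackendOrdering::Acquire,
        IntrinsicOrdering::AcqRel => BackendOrdering::AcquireRelease,
        IntrinsicOrdering::SeqCst => BackendOrdering::SequentiallyConsistent,
    }
}

fn main() {
    println!("{:?}", to_backend(IntrinsicOrdering::AcqRel));
}
```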

compiler/rustc_hir_analysis/src/check/intrinsic.rs

Lines changed: 7 additions & 6 deletions
@@ -204,24 +204,25 @@ pub(crate) fn check_intrinsic_type(

         // Each atomic op has variants with different suffixes (`_seq_cst`, `_acquire`, etc.). Use
         // string ops to strip the suffixes, because the variants all get the same treatment here.
-        let (n_tps, inputs, output) = match split[1] {
+        let (n_tps, n_cts, inputs, output) = match split[1] {
             "cxchg" | "cxchgweak" => (
                 1,
+                0,
                 vec![Ty::new_mut_ptr(tcx, param(0)), param(0), param(0)],
                 Ty::new_tup(tcx, &[param(0), tcx.types.bool]),
             ),
-            "load" => (1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
-            "store" => (1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),
+            "load" => (1, 1, vec![Ty::new_imm_ptr(tcx, param(0))], param(0)),
+            "store" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], tcx.types.unit),

             "xchg" | "xadd" | "xsub" | "and" | "nand" | "or" | "xor" | "max" | "min" | "umax"
-            | "umin" => (1, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
-            "fence" | "singlethreadfence" => (0, Vec::new(), tcx.types.unit),
+            | "umin" => (1, 0, vec![Ty::new_mut_ptr(tcx, param(0)), param(0)], param(0)),
+            "fence" | "singlethreadfence" => (0, 0, Vec::new(), tcx.types.unit),
             op => {
                 tcx.dcx().emit_err(UnrecognizedAtomicOperation { span, op });
                 return;
             }
         };
-        (n_tps, 0, 0, inputs, output, hir::Safety::Unsafe)
+        (n_tps, 0, n_cts, inputs, output, hir::Safety::Unsafe)
     } else if intrinsic_name == sym::contract_check_ensures {
         // contract_check_ensures::<Ret, C>(Ret, C) -> Ret
         // where C: for<'a> Fn(&'a Ret) -> bool,

compiler/rustc_middle/src/ty/consts/int.rs

Lines changed: 33 additions & 1 deletion
@@ -26,6 +26,19 @@ impl ConstInt {
     }
 }

+/// An enum to represent the compiler-side view of `intrinsics::AtomicOrdering`.
+/// This lives here because there's a method in this file that needs it and it is entirely unclear
+/// where else to put this...
+#[derive(Debug)]
+pub enum AtomicOrdering {
+    // These values must match `intrinsics::AtomicOrdering`!
+    Relaxed = 0,
+    Release = 1,
+    Acquire = 2,
+    AcqRel = 3,
+    SeqCst = 4,
+}
+
 impl std::fmt::Debug for ConstInt {
     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         let Self { int, signed, is_ptr_sized_integral } = *self;
@@ -318,6 +331,25 @@ impl ScalarInt {
         self.to_uint(tcx.data_layout.pointer_size).try_into().unwrap()
     }

+    #[inline]
+    pub fn to_atomic_ordering(self) -> AtomicOrdering {
+        use AtomicOrdering::*;
+        let val = self.to_u32();
+        if val == Relaxed as u32 {
+            Relaxed
+        } else if val == Release as u32 {
+            Release
+        } else if val == Acquire as u32 {
+            Acquire
+        } else if val == AcqRel as u32 {
+            AcqRel
+        } else if val == SeqCst as u32 {
+            SeqCst
+        } else {
+            panic!("not a valid atomic ordering")
+        }
+    }
+
     /// Converts the `ScalarInt` to `bool`.
     /// Panics if the `size` of the `ScalarInt` is not equal to 1 byte.
     /// Errors if it is not a valid `bool`.
@@ -488,7 +520,7 @@ from_scalar_int_for_x_signed!(i8, i16, i32, i64, i128);
 impl From<std::cmp::Ordering> for ScalarInt {
     #[inline]
     fn from(c: std::cmp::Ordering) -> Self {
-        // Here we rely on `Ordering` having the same values in host and target!
+        // Here we rely on `cmp::Ordering` having the same values in host and target!
         ScalarInt::from(c as i8)
     }
 }
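
The `to_atomic_ordering` conversion above only works because the library-side and compiler-side enums pin their variants to the same explicit discriminants. A small standalone sketch of that round-trip follows; the enum here is a local stand-in, not either of the real definitions.

```rust
// Stand-in enum with the same explicit discriminants as the two real definitions.
#[derive(Debug, PartialEq)]
enum AtomicOrdering {
    Relaxed = 0,
    Release = 1,
    Acquire = 2,
    AcqRel = 3,
    SeqCst = 4,
}

// Same shape as `ScalarInt::to_atomic_ordering`: compare the raw value against
// each discriminant and reject anything out of range.
fn from_u32(val: u32) -> AtomicOrdering {
    use AtomicOrdering::*;
    if val == Relaxed as u32 {
        Relaxed
    } else if val == Release as u32 {
        Release
    } else if val == Acquire as u32 {
        Acquire
    } else if val == AcqRel as u32 {
        AcqRel
    } else if val == SeqCst as u32 {
        SeqCst
    } else {
        panic!("not a valid atomic ordering")
    }
}

fn main() {
    assert_eq!(from_u32(AtomicOrdering::Acquire as u32), AtomicOrdering::Acquire);
}
```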

compiler/rustc_middle/src/ty/mod.rs

Lines changed: 2 additions & 2 deletions
@@ -74,8 +74,8 @@ pub use self::closure::{
     place_to_string_for_capture,
 };
 pub use self::consts::{
-    AnonConstKind, Const, ConstInt, ConstKind, Expr, ExprKind, ScalarInt, UnevaluatedConst,
-    ValTree, ValTreeKind, Value,
+    AnonConstKind, AtomicOrdering, Const, ConstInt, ConstKind, Expr, ExprKind, ScalarInt,
+    UnevaluatedConst, ValTree, ValTreeKind, Value,
 };
 pub use self::context::{
     CtxtInterners, CurrentGcx, DeducedParamAttrs, Feed, FreeRegionInfo, GlobalCtxt, Lift, TyCtxt,

compiler/rustc_span/src/symbol.rs

Lines changed: 1 addition & 0 deletions
@@ -513,6 +513,7 @@ symbols! {
         async_iterator_poll_next,
         async_trait_bounds,
         atomic,
+        atomic_load,
         atomic_mod,
         atomics,
         att_syntax,

library/core/src/intrinsics/mod.rs

Lines changed: 28 additions & 2 deletions
@@ -30,7 +30,7 @@
 //!
 //! The atomic intrinsics provide common atomic operations on machine
 //! words, with multiple possible memory orderings. See the
-//! [atomic types][crate::sync::atomic] docs for details.
+//! [atomic types][atomic] docs for details.
 //!
 //! # Unwinding
 //!
@@ -50,7 +50,7 @@
 )]
 #![allow(missing_docs)]

-use crate::marker::{DiscriminantKind, Tuple};
+use crate::marker::{ConstParamTy, DiscriminantKind, Tuple};
 use crate::ptr;

 pub mod fallback;
@@ -62,6 +62,20 @@ pub mod simd;
 #[cfg(all(target_has_atomic = "8", target_has_atomic = "32", target_has_atomic = "ptr"))]
 use crate::sync::atomic::{self, AtomicBool, AtomicI32, AtomicIsize, AtomicU32, Ordering};

+/// A type for atomic ordering parameters for intrinsics. This is a separate type from
+/// `atomic::Ordering` so that we can make it `ConstParamTy` and fix the values used here without a
+/// risk of leaking that to stable code.
+#[derive(Debug, ConstParamTy, PartialEq, Eq)]
+pub enum AtomicOrdering {
+    // These values must match the compiler's `AtomicOrdering` defined in
+    // `rustc_middle/src/ty/consts/int.rs`!
+    Relaxed = 0,
+    Release = 1,
+    Acquire = 2,
+    AcqRel = 3,
+    SeqCst = 4,
+}
+
 // N.B., these intrinsics take raw pointers because they mutate aliased
 // memory, which is not valid for either `&` or `&mut`.

@@ -391,6 +405,15 @@ pub unsafe fn atomic_cxchgweak_seqcst_acquire<T: Copy>(dst: *mut T, old: T, src:
 #[rustc_nounwind]
 pub unsafe fn atomic_cxchgweak_seqcst_seqcst<T: Copy>(dst: *mut T, old: T, src: T) -> (T, bool);

+/// Loads the current value of the pointer.
+/// `T` must be an integer or pointer type.
+///
+/// The stabilized version of this intrinsic is available on the
+/// [`atomic`] types via the `load` method. For example, [`AtomicBool::load`].
+#[rustc_intrinsic]
+#[rustc_nounwind]
+#[cfg(not(bootstrap))]
+pub unsafe fn atomic_load<T: Copy, const ORD: AtomicOrdering>(src: *const T) -> T;
 /// Loads the current value of the pointer.
 /// `T` must be an integer or pointer type.
 ///
@@ -399,6 +422,7 @@ pub unsafe fn atomic_cxchgweak_seqcst_seqcst<T: Copy>(dst: *mut T, old: T, src:
 /// [`Ordering::SeqCst`] as the `order`. For example, [`AtomicBool::load`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
+#[cfg(bootstrap)]
 pub unsafe fn atomic_load_seqcst<T: Copy>(src: *const T) -> T;
 /// Loads the current value of the pointer.
 /// `T` must be an integer or pointer type.
@@ -408,6 +432,7 @@ pub unsafe fn atomic_load_seqcst<T: Copy>(src: *const T) -> T;
408432
/// [`Ordering::Acquire`] as the `order`. For example, [`AtomicBool::load`].
409433
#[rustc_intrinsic]
410434
#[rustc_nounwind]
435+
#[cfg(bootstrap)]
411436
pub unsafe fn atomic_load_acquire<T: Copy>(src: *const T) -> T;
412437
/// Loads the current value of the pointer.
413438
/// `T` must be an integer or pointer type.
@@ -417,6 +442,7 @@ pub unsafe fn atomic_load_acquire<T: Copy>(src: *const T) -> T;
 /// [`Ordering::Relaxed`] as the `order`. For example, [`AtomicBool::load`].
 #[rustc_intrinsic]
 #[rustc_nounwind]
+#[cfg(bootstrap)]
 pub unsafe fn atomic_load_relaxed<T: Copy>(src: *const T) -> T;

 /// Stores the value at the specified memory location.
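
The key enabler above is `ConstParamTy`, which lets the new `AtomicOrdering` enum appear as a const generic parameter. Below is a standalone, nightly-only sketch of that mechanism; `DemoOrdering` and `is_acquire` are illustrative names, not part of the real intrinsic API.

```rust
// Nightly-only sketch: using an enum as a const generic parameter via ConstParamTy,
// the same mechanism the atomic_load intrinsic declaration relies on.
#![feature(adt_const_params)]

use std::marker::ConstParamTy;

#[derive(ConstParamTy, PartialEq, Eq, Debug)]
enum DemoOrdering {
    Relaxed,
    Acquire,
    SeqCst,
}

// The ordering is part of the function's monomorphized identity, not a runtime argument.
fn is_acquire<const ORD: DemoOrdering>() -> bool {
    ORD == DemoOrdering::Acquire
}

fn main() {
    assert!(is_acquire::<{ DemoOrdering::Acquire }>());
    assert!(!is_acquire::<{ DemoOrdering::Relaxed }>());
}
```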

library/core/src/sync/atomic.rs

Lines changed: 18 additions & 0 deletions
@@ -3822,6 +3822,7 @@ unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {

 #[inline]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[cfg(bootstrap)]
 unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
     // SAFETY: the caller must uphold the safety contract for `atomic_load`.
     unsafe {
@@ -3835,6 +3836,23 @@ unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
     }
 }

+#[inline]
+#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
+#[cfg(not(bootstrap))]
+unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
+    use intrinsics::AtomicOrdering;
+    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
+    unsafe {
+        match order {
+            Relaxed => intrinsics::atomic_load::<T, { AtomicOrdering::Relaxed }>(dst),
+            Acquire => intrinsics::atomic_load::<T, { AtomicOrdering::Acquire }>(dst),
+            SeqCst => intrinsics::atomic_load::<T, { AtomicOrdering::SeqCst }>(dst),
+            Release => panic!("there is no such thing as a release load"),
+            AcqRel => panic!("there is no such thing as an acquire-release load"),
+        }
+    }
+}
+
 #[inline]
 #[cfg(target_has_atomic)]
 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces