diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs index 99f35b7920864..8330e4b35c84f 100644 --- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs +++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs @@ -5,8 +5,8 @@ use rustc_data_structures::graph::dominators::Dominators; use rustc_index::bit_set::DenseBitSet; use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor}; -use rustc_middle::mir::{self, DefLocation, Location, TerminatorKind, traversal}; -use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf}; +use rustc_middle::mir::{self, DefLocation, Location, PlaceElem, TerminatorKind, traversal}; +use rustc_middle::ty::layout::LayoutOf; use rustc_middle::{bug, span_bug}; use tracing::debug; @@ -96,65 +96,92 @@ impl<'a, 'b, 'tcx, Bx: BuilderMethods<'b, 'tcx>> LocalAnalyzer<'a, 'b, 'tcx, Bx> fn process_place( &mut self, place_ref: &mir::PlaceRef<'tcx>, - context: PlaceContext, + mut context: PlaceContext, location: Location, ) { - let cx = self.fx.cx; - - if let Some((place_base, elem)) = place_ref.last_projection() { - let mut base_context = if context.is_mutating_use() { - PlaceContext::MutatingUse(MutatingUseContext::Projection) - } else { - PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection) - }; - - // Allow uses of projections that are ZSTs or from scalar fields. - let is_consume = matches!( - context, - PlaceContext::NonMutatingUse( - NonMutatingUseContext::Copy | NonMutatingUseContext::Move, - ) + let maybe_local = if place_ref.is_indirect_first_projection() { + // After we deref a pointer, the local *of that pointer* is no + // longer interesting for the rest of the projection chain. + self.visit_local( + place_ref.local, + PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy), + location, ); - if is_consume { - let base_ty = place_base.ty(self.fx.mir, cx.tcx()); - let base_ty = self.fx.monomorphize(base_ty); - - // ZSTs don't require any actual memory access. - let elem_ty = base_ty.projection_ty(cx.tcx(), self.fx.monomorphize(elem)).ty; - let span = self.fx.mir.local_decls[place_ref.local].source_info.span; - if cx.spanned_layout_of(elem_ty, span).is_zst() { - return; - } - - if let mir::ProjectionElem::Field(..) = elem { - let layout = cx.spanned_layout_of(base_ty.ty, span); - if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) { - // Recurse with the same context, instead of `Projection`, - // potentially stopping at non-operand projections, - // which would trigger `not_ssa` on locals. - base_context = context; - } - } - } - - if let mir::ProjectionElem::Deref = elem { - // Deref projections typically only read the pointer. - base_context = PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy); - } + None + } else { + Some(place_ref.local) + }; - self.process_place(&place_base, base_context, location); - // HACK(eddyb) this emulates the old `visit_projection_elem`, this - // entire `visit_place`-like `process_place` method should be rewritten, - // now that we have moved to the "slice of projections" representation. - if let mir::ProjectionElem::Index(local) = elem { + let mut projection: &[PlaceElem<'tcx>] = place_ref.projection; + loop { + // Index projections are the only ones with another local, so handle + // that special case before the normal projection match. + if let [PlaceElem::Index(index_local), ..] 
= *projection { self.visit_local( - local, + index_local, PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy), location, ); } - } else { - self.visit_local(place_ref.local, context, location); + + projection = match projection { + // No more projections means we're done looping. + [] => break, + // The only deref allowed in a Runtime-phase place is at the + // beginning, which we checked before the loop. + [PlaceElem::Deref, rest @ ..] => { + assert_eq!(maybe_local, None); + rest + } + // Making SSA locals useful for non-primitives heavily depends on + // not forcing stack allocation for basic newtypes and simple + // enums like `Option` or `Result`. + [PlaceElem::Downcast { .. }, PlaceElem::Field { .. }, rest @ ..] + | [PlaceElem::Field { .. }, rest @ ..] => { + if let PlaceContext::NonMutatingUse( + NonMutatingUseContext::Copy + | NonMutatingUseContext::Move + | NonMutatingUseContext::Inspect, + ) = context + { + // Reading fields (or pseudo-fields) in operands can stay SSA + } else { + // But storing into a projection needs memory, especially for function returns + context = PlaceContext::MutatingUse(MutatingUseContext::Projection); + } + rest + } + [PlaceElem::Downcast { .. }, ..] => { + span_bug!(self.fx.mir.span, "Non-field downcast in {place_ref:?}"); + } + // FIXME: These are type-changing, but not layout-affecting, so + // they probably needn't force memory, but for now they do since + // `maybe_codegen_consume_direct` doesn't handle them. + [ + PlaceElem::OpaqueCast { .. } + | PlaceElem::UnwrapUnsafeBinder { .. } + | PlaceElem::Subtype { .. }, + rest @ .., + ] => { + context = PlaceContext::MutatingUse(MutatingUseContext::Projection); + rest + } + // The various types of indexing use address arithmetic, so we + // need to force the local to Memory like a borrow would. + [ + PlaceElem::Index { .. } + | PlaceElem::ConstantIndex { .. } + | PlaceElem::Subslice { .. }, + rest @ .., + ] => { + context = PlaceContext::MutatingUse(MutatingUseContext::Projection); + rest + } + }; + } + + if let Some(local) = maybe_local { + self.visit_local(local, context, location); } } } diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 0758e5d045673..ad25c5c0f5a92 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -135,6 +135,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } +#[derive(Debug)] enum LocalRef<'tcx, V> { Place(PlaceRef<'tcx, V>), /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place). diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 7e355b6406aed..4594c7266ce87 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -142,7 +142,7 @@ pub struct OperandRef<'tcx, V> { pub layout: TyAndLayout<'tcx>, } -impl<V: CodegenObject> fmt::Debug for OperandRef<'_, V> { +impl<V: fmt::Debug> fmt::Debug for OperandRef<'_, V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } @@ -372,16 +372,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { ) }) } else { - let (in_scalar, imm) = match (self.val, self.layout.backend_repr) { + let (imm, in_scalar, in_bty) = match (self.val, self.layout.backend_repr) { // Extract a scalar component from a pair.
(OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => { - if offset.bytes() == 0 { + // This needs to look at `offset`, rather than `i`, because + // for a type like `Option<u32>`, the first thing in the pair + // is the tag, so `(_2 as Some).0` needs to read the *second* + // thing in the pair despite it being "field zero". + if offset == Size::ZERO { assert_eq!(field.size, a.size(bx.cx())); - (Some(a), a_llval) + let bty = bx.scalar_pair_element_backend_type(self.layout, 0, false); + (a_llval, a, bty) } else { assert_eq!(offset, a.size(bx.cx()).align_to(b.align(bx.cx()).abi)); assert_eq!(field.size, b.size(bx.cx())); - (Some(b), b_llval) + let bty = bx.scalar_pair_element_backend_type(self.layout, 1, false); + (b_llval, b, bty) } } @@ -392,23 +398,13 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { OperandValue::Immediate(match field.backend_repr { BackendRepr::SimdVector { .. } => imm, BackendRepr::Scalar(out_scalar) => { - let Some(in_scalar) = in_scalar else { - span_bug!( - fx.mir.span, - "OperandRef::extract_field({:?}): missing input scalar for output scalar", - self - ) - }; - if in_scalar != out_scalar { - // If the backend and backend_immediate types might differ, - // flip back to the backend type then to the new immediate. - // This avoids nop truncations, but still handles things like - // Bools in union fields needs to be truncated. - let backend = bx.from_immediate(imm); - bx.to_immediate_scalar(backend, out_scalar) - } else { - imm - } + // For a type like `Result<usize, Box<()>>` the layout is `Pair(i64, ptr)`. + // But if we're reading the `Ok` payload, we need to turn that `ptr` + // back into an integer. To avoid repeating logic we do that by + // calling the transmute code, which is legal thanks to the size + // assert we did when pulling it out of the pair. + let out_bty = bx.backend_type(field); + fx.transmute_immediate(bx, imm, in_scalar, in_bty, out_scalar, out_bty) } BackendRepr::ScalarPair(_, _) | BackendRepr::Memory { .. } => bug!(), }) @@ -712,7 +708,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { LocalRef::Operand(mut o) => { // Moves out of scalar and scalar pair fields are trivial. for elem in place_ref.projection.iter() { - match elem { + match *elem { mir::ProjectionElem::Field(f, _) => { assert!( !o.layout.ty.is_any_ptr(), @@ -721,6 +717,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ); o = o.extract_field(self, bx, f.index()); } + mir::ProjectionElem::Downcast(_sym, variant_idx) => { + let layout = o.layout.for_variant(bx.cx(), variant_idx); + // The transmute here handles cases like `Result<usize, Box<()>>` + // where the immediate values need to change for + // the specific types in the cast-to variant. + let Some(val) = self.codegen_transmute_operand(bx, o, layout) else { + bug!("Couldn't transmute in downcast to {variant_idx:?} of {o:?}"); + }; + o = OperandRef { val, layout }; + } mir::ProjectionElem::Index(_) | mir::ProjectionElem::ConstantIndex { .. } => { // ZSTs don't require any actual memory access.
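For context, a minimal sketch (not part of the patch) of the user-level pattern the operand.rs changes above are about: consuming a two-scalar enum by value and pulling a payload out of it. The type mirrors the existing tests/codegen/common_prim_int_ptr.rs case; with this series the `Ok` payload is read straight out of the SSA scalar pair, with `transmute_immediate` turning the shared `ptr` slot back into a `usize`, instead of the whole enum being spilled to a stack slot.

// Illustrative example only, not taken from the patch.
pub fn read_ok(x: Result<usize, Box<()>>) -> usize {
    match x {
        // `Ok(v)` is a Downcast-then-Field projection on an SSA operand here.
        Ok(v) => v,
        Err(_) => 0,
    }
}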
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 72cfd2bffb5d0..a9f853c86898d 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -281,13 +281,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { && in_a.size(self.cx) == out_a.size(self.cx) && in_b.size(self.cx) == out_b.size(self.cx) { - let in_a_ibty = bx.scalar_pair_element_backend_type(operand.layout, 0, false); - let in_b_ibty = bx.scalar_pair_element_backend_type(operand.layout, 1, false); - let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false); - let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false); + let in_a_bty = bx.scalar_pair_element_backend_type(operand.layout, 0, false); + let in_b_bty = bx.scalar_pair_element_backend_type(operand.layout, 1, false); + let out_a_bty = bx.scalar_pair_element_backend_type(cast, 0, false); + let out_b_bty = bx.scalar_pair_element_backend_type(cast, 1, false); Some(OperandValue::Pair( - self.transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty), - self.transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty), + self.transmute_immediate(bx, imm_a, in_a, in_a_bty, out_a, out_a_bty), + self.transmute_immediate(bx, imm_b, in_b, in_b_bty, out_b, out_b_bty), )) } else { None @@ -353,7 +353,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { /// /// `to_backend_ty` must be the *non*-immediate backend type (so it will be /// `i8`, not `i1`, for `bool`-like types.) - fn transmute_immediate( + pub(crate) fn transmute_immediate( &self, bx: &mut Bx, mut imm: Bx::Value, @@ -371,8 +371,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { return imm; } - use abi::Primitive::*; imm = bx.from_immediate(imm); + debug_assert_eq!(bx.cx().val_ty(imm), from_backend_ty); // If we have a scalar, we must already know its range. Either // @@ -385,8 +385,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty); + use abi::Primitive::*; imm = match (from_scalar.primitive(), to_scalar.primitive()) { - (Int(..) | Float(_), Int(..) | Float(_)) => bx.bitcast(imm, to_backend_ty), + (Int(..), Int(..)) | (Float(_), Float(_)) => imm, + (Int(..), Float(_)) | (Float(_), Int(..)) => bx.bitcast(imm, to_backend_ty), (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty), (Int(..), Pointer(..)) => bx.ptradd(bx.const_null(bx.type_ptr()), imm), (Pointer(..), Int(..)) => { @@ -411,6 +413,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // constraint that the `transmute` introduced is to `assume` it. self.assume_scalar_range(bx, imm, to_scalar, to_backend_ty); + debug_assert_eq!(bx.cx().val_ty(imm), to_backend_ty); imm = bx.to_immediate_scalar(imm, to_scalar); imm } @@ -1157,8 +1160,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::AggregateKind::Coroutine(..) | mir::AggregateKind::CoroutineClosure(..) 
=> false, }; allowed_kind && { - let ty = rvalue.ty(self.mir, self.cx.tcx()); - let ty = self.monomorphize(ty); + let ty = rvalue.ty(self.mir, self.cx.tcx()); + let ty = self.monomorphize(ty); let layout = self.cx.spanned_layout_of(ty, span); !self.cx.is_backend_ref(layout) } diff --git a/tests/codegen/common_prim_int_ptr.rs b/tests/codegen/common_prim_int_ptr.rs index a1d7a125f324c..266ecc368adc6 100644 --- a/tests/codegen/common_prim_int_ptr.rs +++ b/tests/codegen/common_prim_int_ptr.rs @@ -40,9 +40,13 @@ pub unsafe fn extract_int(x: Result<usize, Box<()>>) -> usize { } // CHECK-LABEL: @extract_box -// CHECK-SAME: (i{{[0-9]+}} {{[^%]+}} [[DISCRIMINANT:%[0-9]+]], ptr {{[^%]+}} [[PAYLOAD:%[0-9]+]]) +// CHECK-SAME: (i{{[0-9]+}} {{[^%]+}} [[DISCRIMINANT:%x.0]], ptr {{[^%]+}} [[PAYLOAD:%x.1]]) #[no_mangle] pub unsafe fn extract_box(x: Result<usize, Box<()>>) -> Box<()> { + // CHECK: [[NOT_OK:%.+]] = icmp ne i{{[0-9]+}} [[DISCRIMINANT]], 0 + // CHECK: call void @llvm.assume(i1 [[NOT_OK]]) + // CHECK: [[NOT_NULL:%.+]] = icmp ne ptr [[PAYLOAD]], null + // CHECK: call void @llvm.assume(i1 [[NOT_NULL]]) // CHECK: ret ptr [[PAYLOAD]] match x { Ok(_) => std::intrinsics::unreachable(), diff --git a/tests/codegen/enum/enum-extract.rs b/tests/codegen/enum/enum-extract.rs new file mode 100644 index 0000000000000..285953313866b --- /dev/null +++ b/tests/codegen/enum/enum-extract.rs @@ -0,0 +1,151 @@ +//@ revisions: OPT DBG +//@ compile-flags: -Cno-prepopulate-passes -Cdebuginfo=0 +//@[OPT] compile-flags: -Copt-level=1 +//@[DBG] compile-flags: -Copt-level=0 +//@ min-llvm-version: 19 +//@ only-64bit + +#![crate_type = "lib"] + +// This tests various cases around consuming enums as SSA values in what we emit. +// Importantly, it checks things like correct `i1` handling for `bool` +// and for mixed integer/pointer payloads.
+ +use std::cmp::Ordering; +use std::num::NonZero; +use std::ptr::NonNull; + +#[no_mangle] +fn use_option_u32(x: Option<u32>) -> u32 { + // CHECK-LABEL: @use_option_u32 + // OPT-SAME: (i32 noundef range(i32 0, 2) %x.0, i32 %x.1) + + // CHECK-NOT: alloca + // CHECK: %[[DISCR:.+]] = zext i32 %x.0 to i64 + // CHECK: %[[IS_SOME:.+]] = trunc nuw i64 %[[DISCR]] to i1 + // OPT: %[[LIKELY:.+]] = call i1 @llvm.expect.i1(i1 %[[IS_SOME]], i1 true) + // OPT: br i1 %[[LIKELY]], label %[[BLOCK:.+]], + // DBG: br i1 %[[IS_SOME]], label %[[BLOCK:.+]], + + // CHECK: [[BLOCK]]: + // CHECK: ret i32 %x.1 + + if let Some(val) = x { val } else { unreachable!() } +} + +#[no_mangle] +fn use_option_bool(x: Option<bool>) -> bool { + // CHECK-LABEL: @use_option_bool + // OPT-SAME: (i8 noundef range(i8 0, 3) %x) + + // CHECK-NOT: alloca + // CHECK: %[[IS_NONE:.+]] = icmp eq i8 %x, 2 + // CHECK: %[[DISCR:.+]] = select i1 %[[IS_NONE]], i64 0, i64 1 + // CHECK: %[[IS_SOME:.+]] = trunc nuw i64 %[[DISCR]] to i1 + // OPT: %[[LIKELY:.+]] = call i1 @llvm.expect.i1(i1 %[[IS_SOME]], i1 true) + // OPT: br i1 %[[LIKELY]], label %[[BLOCK:.+]], + // DBG: br i1 %[[IS_SOME]], label %[[BLOCK:.+]], + + // CHECK: [[BLOCK]]: + // CHECK: %val = trunc nuw i8 %x to i1 + // CHECK: ret i1 %val + + if let Some(val) = x { val } else { unreachable!() } +} + +#[no_mangle] +fn use_option_ordering(x: Option<Ordering>) -> Ordering { + // CHECK-LABEL: @use_option_ordering + // OPT-SAME: (i8 noundef range(i8 -1, 3) %x) + + // CHECK: %[[IS_NONE:.+]] = icmp eq i8 %x, 2 + // CHECK: %[[DISCR:.+]] = select i1 %[[IS_NONE]], i64 0, i64 1 + // CHECK: %[[IS_SOME:.+]] = trunc nuw i64 %[[DISCR]] to i1 + // OPT: %[[LIKELY:.+]] = call i1 @llvm.expect.i1(i1 %[[IS_SOME]], i1 true) + // OPT: br i1 %[[LIKELY]], label %[[BLOCK:.+]], + // DBG: br i1 %[[IS_SOME]], label %[[BLOCK:.+]], + + // CHECK: [[BLOCK]]: + // OPT: %[[SHIFTED:.+]] = sub i8 %x, -1 + // OPT: %[[IN_WIDTH:.+]] = icmp ule i8 %[[SHIFTED]], 3 + // OPT: call void @llvm.assume(i1 %[[IN_WIDTH]]) + // DBG-NOT: assume + // CHECK: ret i8 %x + + if let Some(val) = x { val } else { unreachable!() } +} + +#[no_mangle] +fn use_result_nzusize(x: Result<NonZero<usize>, NonNull<u8>>) -> NonZero<usize> { + // CHECK-LABEL: @use_result_nzusize + // OPT-SAME: (i64 noundef range(i64 0, 2) %x.0, ptr noundef %x.1) + + // CHECK-NOT: alloca + // CHECK: %[[IS_ERR:.+]] = trunc nuw i64 %x.0 to i1 + // OPT: %[[UNLIKELY:.+]] = call i1 @llvm.expect.i1(i1 %[[IS_ERR]], i1 false) + // OPT: br i1 %[[UNLIKELY]], label %[[PANIC:.+]], label %[[BLOCK:.+]] + // DBG: br i1 %[[IS_ERR]], label %[[PANIC:.+]], label %[[BLOCK:.+]] + + // CHECK: [[BLOCK]]: + // CHECK: %val = ptrtoint ptr %x.1 to i64 + // CHECK: ret i64 %val + + if let Ok(val) = x { val } else { unreachable!() } +} + +#[repr(u64)] +enum BigEnum { + Foo = 100, + Bar = 200, +} + +#[no_mangle] +fn use_result_bigenum(x: Result<BigEnum, u64>) -> BigEnum { + // CHECK-LABEL: @use_result_bigenum + // OPT-SAME: (i64 noundef range(i64 0, 2) %x.0, i64 noundef %x.1) + + // CHECK-NOT: alloca + // CHECK: %[[IS_ERR:.+]] = trunc nuw i64 %x.0 to i1 + // OPT: %[[UNLIKELY:.+]] = call i1 @llvm.expect.i1(i1 %[[IS_ERR]], i1 false) + // OPT: br i1 %[[UNLIKELY]], label %[[PANIC:.+]], label %[[BLOCK:.+]] + // DBG: br i1 %[[IS_ERR]], label %[[PANIC:.+]], label %[[BLOCK:.+]] + + // CHECK: [[BLOCK]]: + // CHECK: ret i64 %x.1 + + if let Ok(val) = x { val } else { unreachable!() } +} + +struct WhateverError; + +#[no_mangle] +fn use_result_nonnull(x: Result<NonNull<u8>, WhateverError>) -> NonNull<u8> { + // CHECK-LABEL: @use_result_nonnull + // OPT-SAME: (ptr noundef %x) + + // CHECK-NOT:
alloca + // CHECK: %[[ADDR:.+]] = ptrtoint ptr %x to i64 + // CHECK: %[[IS_NULL:.+]] = icmp eq i64 %[[ADDR]], 0 + // CHECK: %[[DISCR:.+]] = select i1 %[[IS_NULL]], i64 1, i64 0 + // CHECK: %[[IS_ERR:.+]] = trunc nuw i64 %[[DISCR]] to i1 + // OPT: %[[UNLIKELY:.+]] = call i1 @llvm.expect.i1(i1 %[[IS_ERR]], i1 false) + // OPT: br i1 %[[UNLIKELY]], label %[[PANIC:.+]], label %[[BLOCK:.+]] + // DBG: br i1 %[[IS_ERR]], label %[[PANIC:.+]], label %[[BLOCK:.+]] + + // CHECK: [[BLOCK]]: + // CHECK: ret ptr %x + + if let Ok(val) = x { val } else { unreachable!() } +} + +const SOME_FOUR: Option<u8> = Some(4); + +#[no_mangle] +fn use_option_from_const() -> u8 { + // CHECK-LABEL: @use_option_from_const() + // CHECK-NEXT: start: + // OPT-NEXT: ret i8 4 + // DBG: %[[PAYLOAD:.+]] = load i8, ptr getelementptr inbounds + // DBG: ret i8 %[[PAYLOAD]] + if let Some(val) = SOME_FOUR { val } else { unreachable!() } +} diff --git a/tests/codegen/enum/enum-match.rs b/tests/codegen/enum/enum-match.rs index a24b98050d232..4dd6347043fda 100644 --- a/tests/codegen/enum/enum-match.rs +++ b/tests/codegen/enum/enum-match.rs @@ -11,11 +11,11 @@ pub enum Enum0 { B, } -// CHECK: define noundef{{( range\(i8 [0-9]+, [0-9]+\))?}} i8 @match0{{.*}} +// CHECK: define noundef{{( range\(i8 [0-9]+, [0-9]+\))?}} i8 @match0(i8{{.*}}%e) // CHECK-NEXT: start: -// CHECK-NEXT: %1 = icmp eq i8 %0, 2 -// CHECK-NEXT: %2 = and i8 %0, 1 -// CHECK-NEXT: %{{.+}} = select i1 %1, i8 13, i8 %2 +// CHECK-NEXT: %[[IS_B:.+]] = icmp eq i8 %e, 2 +// CHECK-NEXT: %[[RET:.+]] = select i1 %[[IS_B]], i8 13, i8 %e +// CHECK-NEXT: ret i8 %[[RET]] #[no_mangle] pub fn match0(e: Enum0) -> u8 { use Enum0::*; @@ -32,13 +32,14 @@ pub enum Enum1 { C, } -// CHECK: define noundef{{( range\(i8 [0-9]+, [0-9]+\))?}} i8 @match1{{.*}} +// CHECK: define noundef{{( range\(i8 [0-9]+, [0-9]+\))?}} i8 @match1{{.*}}(i8{{.*}}%e) // CHECK-NEXT: start: -// CHECK-NEXT: %1 = add{{( nsw)?}} i8 %0, -2 -// CHECK-NEXT: %2 = zext i8 %1 to i64 -// CHECK-NEXT: %3 = icmp ult i8 %1, 2 -// CHECK-NEXT: %4 = add nuw nsw i64 %2, 1 -// CHECK-NEXT: %_2 = select i1 %3, i64 %4, i64 0 +// CHECK-NEXT: %0 = add{{( nsw)?}} i8 %e, -2 +// CHECK-NEXT: %1 = zext i8 %0 to i64 +// CHECK-NEXT: %2 = icmp ult i8 %0, 2 +// CHECK-NEXT: %3 = add nuw nsw i64 %1, 1 +// CHECK-NEXT: %[[DISCR:.+]] = select i1 %2, i64 %3, i64 0 +// CHECK-NEXT: switch i64 %[[DISCR]] #[no_mangle] pub fn match1(e: Enum1) -> u8 { use Enum1::*; @@ -92,14 +93,14 @@ pub enum Enum2 { E, } -// CHECK: define noundef{{( range\(i8 [0-9]+, [0-9]+\))?}} i8 @match2{{.*}} +// CHECK: define noundef{{( range\(i8 [0-9]+, [0-9]+\))?}} i8 @match2(i8{{.*}}%e) // CHECK-NEXT: start: -// CHECK-NEXT: %1 = add i8 %0, 2 -// CHECK-NEXT: %2 = zext i8 %1 to i64 -// CHECK-NEXT: %3 = icmp ult i8 %1, 4 -// CHECK-NEXT: %4 = add nuw nsw i64 %2, 1 -// CHECK-NEXT: %_2 = select i1 %3, i64 %4, i64 0 -// CHECK-NEXT: switch i64 %_2, label {{.*}} [ +// CHECK-NEXT: %0 = add i8 %e, 2 +// CHECK-NEXT: %1 = zext i8 %0 to i64 +// CHECK-NEXT: %2 = icmp ult i8 %0, 4 +// CHECK-NEXT: %3 = add nuw nsw i64 %1, 1 +// CHECK-NEXT: %[[DISCR:.+]] = select i1 %2, i64 %3, i64 0 +// CHECK-NEXT: switch i64 %[[DISCR]], label {{.*}} [ #[no_mangle] pub fn match2(e: Enum2) -> u8 { use Enum2::*; diff --git a/tests/codegen/intrinsics/cold_path3.rs b/tests/codegen/intrinsics/cold_path3.rs index bf3347de665db..36cb35a320277 100644 --- a/tests/codegen/intrinsics/cold_path3.rs +++ b/tests/codegen/intrinsics/cold_path3.rs @@ -45,7 +45,7 @@ pub fn test(x: Option<u32>) { } // CHECK-LABEL: @test( - // CHECK: switch i32 %1,
label %bb1 [ + // CHECK: switch i32 %x.1, label %bb1 [ // CHECK: i32 0, label %bb6 // CHECK: i32 1, label %bb5 // CHECK: i32 2, label %bb4 @@ -76,7 +76,7 @@ pub fn test2(x: Option<u32>) { } // CHECK-LABEL: @test2( - // CHECK: switch i32 %1, label %bb1 [ + // CHECK: switch i32 %x.1, label %bb1 [ // CHECK: i32 10, label %bb5 // CHECK: i32 11, label %bb4 // CHECK: i32 13, label %bb3 diff --git a/tests/codegen/try_question_mark_nop.rs b/tests/codegen/try_question_mark_nop.rs index 3a3453b22b44f..bdf950bb7c123 100644 --- a/tests/codegen/try_question_mark_nop.rs +++ b/tests/codegen/try_question_mark_nop.rs @@ -16,10 +16,10 @@ use std::ptr::NonNull; #[no_mangle] pub fn option_nop_match_32(x: Option<u32>) -> Option<u32> { // CHECK: start: - // TWENTY-NEXT: %[[IS_SOME:.+]] = trunc nuw i32 %0 to i1 - // TWENTY-NEXT: %[[PAYLOAD:.+]] = select i1 %[[IS_SOME]], i32 %1, i32 undef - // CHECK-NEXT: [[REG1:%.*]] = insertvalue { i32, i32 } poison, i32 %0, 0 - // NINETEEN-NEXT: [[REG2:%.*]] = insertvalue { i32, i32 } [[REG1]], i32 %1, 1 + // TWENTY-NEXT: %[[IS_SOME:.+]] = trunc nuw i32 %x.0 to i1 + // TWENTY-NEXT: %[[PAYLOAD:.+]] = select i1 %[[IS_SOME]], i32 %x.1, i32 undef + // CHECK-NEXT: [[REG1:%.*]] = insertvalue { i32, i32 } poison, i32 %x.0, 0 + // NINETEEN-NEXT: [[REG2:%.*]] = insertvalue { i32, i32 } [[REG1]], i32 %x.1, 1 // TWENTY-NEXT: [[REG2:%.*]] = insertvalue { i32, i32 } [[REG1]], i32 %[[PAYLOAD]], 1 // CHECK-NEXT: ret { i32, i32 } [[REG2]] match x { @@ -32,8 +32,8 @@ pub fn option_nop_match_32(x: Option<u32>) -> Option<u32> { #[no_mangle] pub fn option_nop_traits_32(x: Option<u32>) -> Option<u32> { // CHECK: start: - // TWENTY-NEXT: %[[IS_SOME:.+]] = trunc nuw i32 %0 to i1 - // TWENTY-NEXT: select i1 %[[IS_SOME]], i32 %1, i32 undef + // TWENTY-NEXT: %[[IS_SOME:.+]] = trunc nuw i32 %x.0 to i1 + // TWENTY-NEXT: select i1 %[[IS_SOME]], i32 %x.1, i32 undef // CHECK-NEXT: insertvalue { i32, i32 } // CHECK-NEXT: insertvalue { i32, i32 } // CHECK-NEXT: ret { i32, i32 } @@ -90,11 +90,11 @@ pub fn control_flow_nop_traits_32(x: ControlFlow<i32, u32>) -> ControlFlow<i32, u32> { #[no_mangle] pub fn option_nop_match_64(x: Option<u64>) -> Option<u64> { // CHECK: start: - // TWENTY-NEXT: %[[TRUNC:[0-9]+]] = trunc nuw i64 %0 to i1 - // TWENTY-NEXT: %[[SEL:\.[0-9]+]] = select i1 %[[TRUNC]], i64 %1, i64 undef - // CHECK-NEXT: [[REG1:%[0-9a-zA-Z_.]+]] = insertvalue { i64, i64 } poison, i64 %0, 0 - // NINETEEN-NEXT: [[REG2:%[0-9a-zA-Z_.]+]] = insertvalue { i64, i64 } [[REG1]], i64 %1, 1 - // TWENTY-NEXT: [[REG2:%[0-9a-zA-Z_.]+]] = insertvalue { i64, i64 } [[REG1]], i64 %[[SEL]], 1 + // TWENTY-NEXT: %[[TRUNC:.+]] = trunc nuw i64 %x.0 to i1 + // TWENTY-NEXT: %[[SEL:.+]] = select i1 %[[TRUNC]], i64 %x.1, i64 undef + // CHECK-NEXT: [[REG1:%.+]] = insertvalue { i64, i64 } poison, i64 %x.0, 0 + // NINETEEN-NEXT: [[REG2:%.+]] = insertvalue { i64, i64 } [[REG1]], i64 %x.1, 1 + // TWENTY-NEXT: [[REG2:%.+]] = insertvalue { i64, i64 } [[REG1]], i64 %[[SEL]], 1 // CHECK-NEXT: ret { i64, i64 } [[REG2]] match x { Some(x) => Some(x), @@ -106,8 +106,8 @@ pub fn option_nop_match_64(x: Option<u64>) -> Option<u64> { #[no_mangle] pub fn option_nop_traits_64(x: Option<u64>) -> Option<u64> { // CHECK: start: - // TWENTY-NEXT: %[[TRUNC:[0-9]+]] = trunc nuw i64 %0 to i1 - // TWENTY-NEXT: %[[SEL:\.[0-9]+]] = select i1 %[[TRUNC]], i64 %1, i64 undef + // TWENTY-NEXT: %[[TRUNC:.+]] = trunc nuw i64 %x.0 to i1 + // TWENTY-NEXT: %[[SEL:.+]] = select i1 %[[TRUNC]], i64 %x.1, i64 undef // CHECK-NEXT: insertvalue { i64, i64 } // CHECK-NEXT: insertvalue { i64, i64 } // CHECK-NEXT: ret { i64, i64 }
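As a quick way to see the effect locally (a sketch, not part of the patch; it mirrors `use_option_u32` from the new enum-extract.rs test above), a snippet like the following can be built with `rustc --crate-type=lib --emit=llvm-ir -Cno-prepopulate-passes -Copt-level=1` and the emitted IR checked for the absence of `alloca`:

#[no_mangle]
pub fn unwrap_or_zero(x: Option<u32>) -> u32 {
    // With the analyze.rs changes the enum argument stays an SSA scalar pair
    // (%x.0 tag, %x.1 payload) rather than being spilled to the stack.
    match x {
        Some(v) => v,
        None => 0,
    }
}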