diff --git a/src/Cargo.lock b/src/Cargo.lock index 3361e81ecfe6d..a704bd4faf640 100644 --- a/src/Cargo.lock +++ b/src/Cargo.lock @@ -2128,6 +2128,32 @@ dependencies = [ "rustc_llvm 0.0.0", ] +[[package]] +name = "rustc_codegen_ssa" +version = "0.0.0" +dependencies = [ + "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)", + "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)", + "jobserver 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)", + "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)", + "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)", + "num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc 0.0.0", + "rustc-demangle 0.1.9 (registry+https://github.com/rust-lang/crates.io-index)", + "rustc_allocator 0.0.0", + "rustc_apfloat 0.0.0", + "rustc_codegen_utils 0.0.0", + "rustc_data_structures 0.0.0", + "rustc_errors 0.0.0", + "rustc_fs_util 0.0.0", + "rustc_incremental 0.0.0", + "rustc_mir 0.0.0", + "rustc_target 0.0.0", + "serialize 0.0.0", + "syntax 0.0.0", + "syntax_pos 0.0.0", +] + [[package]] name = "rustc_codegen_utils" version = "0.0.0" @@ -2184,6 +2210,7 @@ dependencies = [ "rustc-rayon 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)", "rustc_allocator 0.0.0", "rustc_borrowck 0.0.0", + "rustc_codegen_ssa 0.0.0", "rustc_codegen_utils 0.0.0", "rustc_data_structures 0.0.0", "rustc_errors 0.0.0", diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index 7b93d3e795ed8..3ff9d81a2d95f 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -9,18 +9,21 @@ // except according to those terms. use llvm::{self, AttributePlace}; -use base; -use builder::{Builder, MemFlags}; -use common::{ty_fn_sig, C_usize}; +use rustc_codegen_ssa::MemFlags; +use builder::Builder; +use rustc_codegen_ssa::common::ty_fn_sig; use context::CodegenCx; -use mir::place::PlaceRef; -use mir::operand::OperandValue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::OperandValue; use type_::Type; use type_of::{LayoutLlvmExt, PointerKind}; use value::Value; +use rustc_target::abi::call::ArgType; + +use rustc_codegen_ssa::interfaces::*; use rustc_target::abi::{LayoutOf, Size, TyLayout}; -use rustc::ty::{self, Ty}; +use rustc::ty::{self, Ty, Instance}; use rustc::ty::layout; use libc::c_uint; @@ -104,29 +107,29 @@ impl ArgAttributesExt for ArgAttributes { } pub trait LlvmType { - fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type; + fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type; } impl LlvmType for Reg { - fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { match self.kind { - RegKind::Integer => Type::ix(cx, self.size.bits()), + RegKind::Integer => cx.type_ix(self.size.bits()), RegKind::Float => { match self.size.bits() { - 32 => Type::f32(cx), - 64 => Type::f64(cx), + 32 => cx.type_f32(), + 64 => cx.type_f64(), _ => bug!("unsupported float: {:?}", self) } } RegKind::Vector => { - Type::vector(Type::i8(cx), self.size.bytes()) + cx.type_vector(cx.type_i8(), self.size.bytes()) } } } } impl LlvmType for CastTarget { - fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn llvm_type(&self, cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Type { let rest_ll_unit = self.rest.unit.llvm_type(cx); let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 { (0, 0) @@ -143,7 +146,7 @@ 
impl LlvmType for CastTarget { // Simplify to array when all chunks are the same size and type if rem_bytes == 0 { - return Type::array(rest_ll_unit, rest_count); + return cx.type_array(rest_ll_unit, rest_count); } } @@ -158,23 +161,32 @@ impl LlvmType for CastTarget { if rem_bytes != 0 { // Only integers can be really split further. assert_eq!(self.rest.unit.kind, RegKind::Integer); - args.push(Type::ix(cx, rem_bytes * 8)); + args.push(cx.type_ix(rem_bytes * 8)); } - Type::struct_(cx, &args, false) + cx.type_struct(&args, false) } } pub trait ArgTypeExt<'ll, 'tcx> { - fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; - fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>); - fn store_fn_arg(&self, bx: &Builder<'_, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>); + fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type; + fn store( + &self, + bx: &mut Builder<'_, 'll, 'tcx, &'ll Value>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value> + ); + fn store_fn_arg( + &self, + bx: &mut Builder<'_, 'll, 'tcx, &'ll Value>, + idx: &mut usize, dst: PlaceRef<'tcx, &'ll Value> + ); } impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { /// Get the LLVM type for a place of the original Rust type of /// this argument/return, i.e. the result of `type_of::type_of`. - fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { + fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type { self.layout.llvm_type(cx) } @@ -182,11 +194,16 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { /// place for the original Rust type of this argument/return. /// Can be used for both storing formal arguments into Rust variables /// or results of call/invoke instructions into their destinations. - fn store(&self, bx: &Builder<'_, 'll, 'tcx>, val: &'ll Value, dst: PlaceRef<'ll, 'tcx>) { + fn store( + &self, + bx: &mut Builder<'_, 'll, 'tcx, &'ll Value>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value> + ) { if self.is_ignore() { return; } - let cx = bx.cx; + let cx = bx.cx(); if self.is_sized_indirect() { OperandValue::Ref(val, None, self.layout.align).store(bx, dst) } else if self.is_unsized_indirect() { @@ -196,7 +213,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}. let can_store_through_cast_ptr = false; if can_store_through_cast_ptr { - let cast_dst = bx.pointercast(dst.llval, cast.llvm_type(cx).ptr_to()); + let cast_dst = bx.pointercast(dst.llval, cx.type_ptr_to(cast.llvm_type(cx))); bx.store(val, cast_dst, self.layout.align); } else { // The actual return type is a struct, but the ABI @@ -223,12 +240,16 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { bx.store(val, llscratch, scratch_align); // ...and then memcpy it to the intended destination. 
- base::call_memcpy(bx, - bx.pointercast(dst.llval, Type::i8p(cx)), - bx.pointercast(llscratch, Type::i8p(cx)), - C_usize(cx, self.layout.size.bytes()), - self.layout.align.min(scratch_align), - MemFlags::empty()); + let llval_cast = bx.pointercast(dst.llval, cx.type_i8p()); + let llscratch_cast = bx.pointercast(llscratch, cx.type_i8p()); + let size = cx.const_usize(self.layout.size.bytes()); + bx.call_memcpy( + llval_cast, + llscratch_cast, + size, + self.layout.align.min(scratch_align), + MemFlags::empty() + ); bx.lifetime_end(llscratch, scratch_size); } @@ -237,7 +258,12 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { } } - fn store_fn_arg(&self, bx: &Builder<'a, 'll, 'tcx>, idx: &mut usize, dst: PlaceRef<'ll, 'tcx>) { + fn store_fn_arg( + &self, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + idx: &mut usize, + dst: PlaceRef<'tcx, &'ll Value> + ) { let mut next = || { let val = llvm::get_param(bx.llfn(), *idx as c_uint); *idx += 1; @@ -258,38 +284,63 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { } } +impl<'a, 'll: 'a, 'tcx: 'll> ArgTypeMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { + fn store_fn_arg( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + idx: &mut usize, dst: PlaceRef<'tcx, >::Value> + ) { + ty.store_fn_arg(self, idx, dst) + } + fn store_arg_ty( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + val: &'ll Value, + dst: PlaceRef<'tcx, &'ll Value> + ) { + ty.store(self, val, dst) + } + fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.memory_ty(self.cx()) + } +} + pub trait FnTypeExt<'tcx> { - fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self; - fn new(cx: &CodegenCx<'ll, 'tcx>, + fn of_instance(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: &ty::Instance<'tcx>) -> Self; + fn new(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self; - fn new_vtable(cx: &CodegenCx<'ll, 'tcx>, + fn new_vtable(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self; fn new_internal( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>], mk_arg_type: impl Fn(Ty<'tcx>, Option) -> ArgType<'tcx, Ty<'tcx>>, ) -> Self; fn adjust_for_abi(&mut self, - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, abi: Abi); - fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; + fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type; fn llvm_cconv(&self) -> llvm::CallConv; fn apply_attrs_llfn(&self, llfn: &'ll Value); - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value); + fn apply_attrs_callsite( + &self, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + callsite: &'ll Value + ); } impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { - fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self { + fn of_instance(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: &ty::Instance<'tcx>) -> Self { let fn_ty = instance.ty(cx.tcx); let sig = ty_fn_sig(cx, fn_ty); let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); FnType::new(cx, sig, &[]) } - fn new(cx: &CodegenCx<'ll, 'tcx>, + fn new(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self { FnType::new_internal(cx, sig, extra_args, |ty, _| { @@ -297,7 +348,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { }) } - fn new_vtable(cx: &CodegenCx<'ll, 'tcx>, + fn 
new_vtable(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self { FnType::new_internal(cx, sig, extra_args, |ty, arg_idx| { @@ -324,7 +375,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } fn new_internal( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>], mk_arg_type: impl Fn(Ty<'tcx>, Option) -> ArgType<'tcx, Ty<'tcx>>, @@ -505,7 +556,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } fn adjust_for_abi(&mut self, - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, abi: Abi) { if abi == Abi::Unadjusted { return } @@ -575,7 +626,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } - fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type { + fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> &'ll Type { let args_capacity: usize = self.args.iter().map(|arg| if arg.pad.is_some() { 1 } else { 0 } + if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 } @@ -585,14 +636,14 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { ); let llreturn_ty = match self.ret.mode { - PassMode::Ignore => Type::void(cx), + PassMode::Ignore => cx.type_void(), PassMode::Direct(_) | PassMode::Pair(..) => { self.ret.layout.immediate_llvm_type(cx) } PassMode::Cast(cast) => cast.llvm_type(cx), PassMode::Indirect(..) => { - llargument_tys.push(self.ret.memory_ty(cx).ptr_to()); - Type::void(cx) + llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx))); + cx.type_void() } }; @@ -618,15 +669,15 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { continue; } PassMode::Cast(cast) => cast.llvm_type(cx), - PassMode::Indirect(_, None) => arg.memory_ty(cx).ptr_to(), + PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)), }; llargument_tys.push(llarg_ty); } if self.variadic { - Type::variadic_func(&llargument_tys, llreturn_ty) + cx.type_variadic_func(&llargument_tys, llreturn_ty) } else { - Type::func(&llargument_tys, llreturn_ty) + cx.type_func(&llargument_tys, llreturn_ty) } } @@ -681,7 +732,11 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } - fn apply_attrs_callsite(&self, bx: &Builder<'a, 'll, 'tcx>, callsite: &'ll Value) { + fn apply_attrs_callsite( + &self, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + callsite: &'ll Value + ) { let mut i = 0; let mut apply = |attrs: &ArgAttributes| { attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite); @@ -700,7 +755,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { // by the LLVM verifier. if let layout::Int(..) 
= scalar.value { if !scalar.is_bool() { - let range = scalar.valid_range_exclusive(bx.cx); + let range = scalar.valid_range_exclusive(bx.cx()); if range.start != range.end { bx.range_metadata(callsite, range); } @@ -733,3 +788,29 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> { } } } + +impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { + fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> { + FnType::new(&self, sig, extra_args) + } + fn new_vtable( + &self, + sig: ty::FnSig<'tcx>, + extra_args: &[Ty<'tcx>] + ) -> FnType<'tcx, Ty<'tcx>> { + FnType::new_vtable(&self, sig, extra_args) + } + fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> { + FnType::of_instance(&self, instance) + } +} + +impl AbiBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { + fn apply_attrs_callsite( + &mut self, + ty: &FnType<'tcx, Ty<'tcx>>, + callsite: >::Value + ) { + ty.apply_attrs_callsite(self, callsite) + } +} diff --git a/src/librustc_codegen_llvm/asm.rs b/src/librustc_codegen_llvm/asm.rs index f1bb41bcebacf..2d6b9f156e922 100644 --- a/src/librustc_codegen_llvm/asm.rs +++ b/src/librustc_codegen_llvm/asm.rs @@ -9,55 +9,55 @@ // except according to those terms. use llvm; -use common::*; -use type_::Type; +use context::CodegenCx; use type_of::LayoutLlvmExt; use builder::Builder; use value::Value; use rustc::hir; +use rustc_codegen_ssa::interfaces::*; -use mir::place::PlaceRef; -use mir::operand::OperandValue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::OperandValue; use std::ffi::CString; -use syntax::ast::AsmDialect; use libc::{c_uint, c_char}; -// Take an inline assembly expression and splat it out via LLVM -pub fn codegen_inline_asm( - bx: &Builder<'a, 'll, 'tcx>, - ia: &hir::InlineAsm, - outputs: Vec>, - mut inputs: Vec<&'ll Value> -) -> bool { - let mut ext_constraints = vec![]; - let mut output_types = vec![]; - - // Prepare the output operands - let mut indirect_outputs = vec![]; - for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() { - if out.is_rw { - inputs.push(place.load(bx).immediate()); - ext_constraints.push(i.to_string()); + +impl AsmBuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { + fn codegen_inline_asm( + &mut self, + ia: &hir::InlineAsm, + outputs: Vec>, + mut inputs: Vec<&'ll Value> + ) -> bool { + let mut ext_constraints = vec![]; + let mut output_types = vec![]; + + // Prepare the output operands + let mut indirect_outputs = vec![]; + for (i, (out, place)) in ia.outputs.iter().zip(&outputs).enumerate() { + if out.is_rw { + inputs.push(self.load_ref(place).immediate()); + ext_constraints.push(i.to_string()); + } + if out.is_indirect { + indirect_outputs.push(self.load_ref(place).immediate()); + } else { + output_types.push(place.layout.llvm_type(self.cx())); + } } - if out.is_indirect { - indirect_outputs.push(place.load(bx).immediate()); - } else { - output_types.push(place.layout.llvm_type(bx.cx)); + if !indirect_outputs.is_empty() { + indirect_outputs.extend_from_slice(&inputs); + inputs = indirect_outputs; } - } - if !indirect_outputs.is_empty() { - indirect_outputs.extend_from_slice(&inputs); - inputs = indirect_outputs; - } let clobbers = ia.clobbers.iter() .map(|s| format!("~{{{}}}", &s)); // Default per-arch clobbers // Basically what clang does - let arch_clobbers = match &bx.sess().target.target.arch[..] { + let arch_clobbers = match &self.cx().sess().target.target.arch[..] 
{ "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"], "mips" | "mips64" => vec!["~{$1}"], _ => Vec::new() @@ -76,26 +76,21 @@ pub fn codegen_inline_asm( // Depending on how many outputs we have, the return type is different let num_outputs = output_types.len(); let output_type = match num_outputs { - 0 => Type::void(bx.cx), + 0 => self.cx().type_void(), 1 => output_types[0], - _ => Type::struct_(bx.cx, &output_types, false) - }; - - let dialect = match ia.dialect { - AsmDialect::Att => llvm::AsmDialect::Att, - AsmDialect::Intel => llvm::AsmDialect::Intel, + _ => self.cx().type_struct(&output_types, false) }; let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap(); let constraint_cstr = CString::new(all_constraints).unwrap(); - let r = bx.inline_asm_call( + let r = self.inline_asm_call( asm.as_ptr(), constraint_cstr.as_ptr(), &inputs, output_type, ia.volatile, ia.alignstack, - dialect + ia.dialect ); if r.is_none() { return false; @@ -105,30 +100,32 @@ pub fn codegen_inline_asm( // Again, based on how many outputs we have let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect); for (i, (_, &place)) in outputs.enumerate() { - let v = if num_outputs == 1 { r } else { bx.extract_value(r, i as u64) }; - OperandValue::Immediate(v).store(bx, place); + let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) }; + OperandValue::Immediate(v).store(self, place); } - // Store mark in a metadata node so we can map LLVM errors - // back to source locations. See #17552. - unsafe { - let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx, - key.as_ptr() as *const c_char, key.len() as c_uint); + // Store mark in a metadata node so we can map LLVM errors + // back to source locations. See #17552. + unsafe { + let key = "srcloc"; + let kind = llvm::LLVMGetMDKindIDInContext(self.cx().llcx, + key.as_ptr() as *const c_char, key.len() as c_uint); - let val: &'ll Value = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32); + let val: &'ll Value = self.cx().const_i32(ia.ctxt.outer().as_u32() as i32); - llvm::LLVMSetMetadata(r, kind, - llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1)); - } + llvm::LLVMSetMetadata(r, kind, + llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1)); + } return true; + } } -pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - ga: &hir::GlobalAsm) { - let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); - unsafe { - llvm::LLVMRustAppendModuleInlineAsm(cx.llmod, asm.as_ptr()); +impl AsmMethods for CodegenCx<'ll, 'tcx, &'ll Value> { + fn codegen_global_asm(&self, ga: &hir::GlobalAsm) { + let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap(); + unsafe { + llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr()); + } } } diff --git a/src/librustc_codegen_llvm/attributes.rs b/src/librustc_codegen_llvm/attributes.rs index f45b3728bc1b0..c9a1436f97fe8 100644 --- a/src/librustc_codegen_llvm/attributes.rs +++ b/src/librustc_codegen_llvm/attributes.rs @@ -16,11 +16,11 @@ use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::session::Session; use rustc::session::config::Sanitizer; use rustc::ty::TyCtxt; -use rustc::ty::layout::HasTyCtxt; use rustc::ty::query::Providers; use rustc_data_structures::sync::Lrc; use rustc_data_structures::fx::FxHashMap; use rustc_target::spec::PanicStrategy; +use rustc_codegen_ssa::interfaces::*; use attributes; use llvm::{self, Attribute}; @@ -33,7 +33,7 @@ use value::Value; /// Mark LLVM function to use provided inline heuristic. 
#[inline] -pub fn inline(cx: &CodegenCx<'ll, '_>, val: &'ll Value, inline: InlineAttr) { +pub fn inline(cx: &CodegenCx<'ll, '_, &'ll Value>, val: &'ll Value, inline: InlineAttr) { use self::InlineAttr::*; match inline { Hint => Attribute::InlineHint.apply_llfn(Function, val), @@ -76,7 +76,7 @@ pub fn naked(val: &'ll Value, is_naked: bool) { Attribute::Naked.toggle_llfn(Function, val, is_naked); } -pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { +pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_, &'ll Value>, llfn: &'ll Value) { if cx.sess().must_not_eliminate_frame_pointers() { llvm::AddFunctionAttrStringValue( llfn, llvm::AttributePlace::Function, @@ -84,7 +84,7 @@ pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) } } -pub fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { +pub fn set_probestack(cx: &CodegenCx<'ll, '_, &'ll Value>, llfn: &'ll Value) { // Only use stack probes if the target specification indicates that we // should be using stack probes if !cx.sess().target.target.options.stack_probes { @@ -127,7 +127,7 @@ pub fn llvm_target_features(sess: &Session) -> impl Iterator { .filter(|l| !l.is_empty()) } -pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) { +pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_, &'ll Value>, llfn: &'ll Value) { let cpu = llvm_util::target_cpu(cx.tcx.sess); let target_cpu = CString::new(cpu).unwrap(); llvm::AddFunctionAttrStringValue( @@ -149,7 +149,7 @@ pub fn non_lazy_bind(sess: &Session, llfn: &'ll Value) { /// Composite function which sets LLVM attributes for function depending on its AST (#[attribute]) /// attributes. pub fn from_fn_attrs( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, llfn: &'ll Value, id: Option, ) { diff --git a/src/librustc_codegen_llvm/back/archive.rs b/src/librustc_codegen_llvm/back/archive.rs index af9efc6d7c417..c51f683b6d8ae 100644 --- a/src/librustc_codegen_llvm/back/archive.rs +++ b/src/librustc_codegen_llvm/back/archive.rs @@ -18,6 +18,7 @@ use std::ptr; use std::str; use back::bytecode::RLIB_BYTECODE_EXTENSION; +use rustc_codegen_ssa::back::archive::find_library; use libc; use llvm::archive_ro::{ArchiveRO, Child}; use llvm::{self, ArchiveKind}; @@ -52,29 +53,6 @@ enum Addition { }, } -pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session) - -> PathBuf { - // On Windows, static libraries sometimes show up as libfoo.a and other - // times show up as foo.lib - let oslibname = format!("{}{}{}", - sess.target.target.options.staticlib_prefix, - name, - sess.target.target.options.staticlib_suffix); - let unixlibname = format!("lib{}.a", name); - - for path in search_paths { - debug!("looking for {} inside {:?}", name, path); - let test = path.join(&oslibname); - if test.exists() { return test } - if oslibname != unixlibname { - let test = path.join(&unixlibname); - if test.exists() { return test } - } - } - sess.fatal(&format!("could not find native static library `{}`, \ - perhaps an -L flag is missing?", name)); -} - fn is_relevant_child(c: &Child) -> bool { match c.name() { Some(name) => !name.contains("SYMDEF"), diff --git a/src/librustc_codegen_llvm/back/link.rs b/src/librustc_codegen_llvm/back/link.rs index 86c6a5e65b0e9..6b92eb025bf50 100644 --- a/src/librustc_codegen_llvm/back/link.rs +++ b/src/librustc_codegen_llvm/back/link.rs @@ -9,11 +9,12 @@ // except according to those terms. 
use back::wasm; -use cc::windows_registry; use super::archive::{ArchiveBuilder, ArchiveConfig}; use super::bytecode::RLIB_BYTECODE_EXTENSION; -use super::linker::Linker; -use super::command::Command; +use rustc_codegen_ssa::back::linker::Linker; +use rustc_codegen_ssa::back::link::{remove, ignored_for_lto, each_linked_rlib, linker_and_flavor, + get_linker}; +use rustc_codegen_ssa::back::command::Command; use super::rpath::RPathConfig; use super::rpath; use metadata::METADATA_FILENAME; @@ -22,9 +23,9 @@ use rustc::session::config::{RUST_CGU_EXT, Lto}; use rustc::session::filesearch; use rustc::session::search_paths::PathKind; use rustc::session::Session; -use rustc::middle::cstore::{NativeLibrary, LibSource, NativeLibraryKind}; +use rustc::middle::cstore::{NativeLibrary, NativeLibraryKind}; use rustc::middle::dependency_format::Linkage; -use {CodegenResults, CrateInfo}; +use rustc_codegen_ssa::CodegenResults; use rustc::util::common::time; use rustc_fs_util::fix_windows_verbatim_for_gcc; use rustc::hir::def_id::CrateNum; @@ -33,6 +34,7 @@ use rustc_target::spec::{PanicStrategy, RelroLevel, LinkerFlavor}; use rustc_data_structures::fx::FxHashSet; use context::get_reloc_model; use llvm; +use LlvmCodegenBackend; use std::ascii; use std::char; @@ -50,77 +52,11 @@ pub use rustc_codegen_utils::link::{find_crate_name, filename_for_input, default invalid_output_for_target, out_filename, check_file_is_writeable, filename_for_metadata}; -// The third parameter is for env vars, used on windows to set up the -// path for MSVC to find its DLLs, and gcc to find its bundled -// toolchain -pub fn get_linker(sess: &Session, linker: &Path, flavor: LinkerFlavor) -> (PathBuf, Command) { - let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe"); - - // If our linker looks like a batch script on Windows then to execute this - // we'll need to spawn `cmd` explicitly. This is primarily done to handle - // emscripten where the linker is `emcc.bat` and needs to be spawned as - // `cmd /c emcc.bat ...`. - // - // This worked historically but is needed manually since #42436 (regression - // was tagged as #42791) and some more info can be found on #44443 for - // emscripten itself. - let mut cmd = match linker.to_str() { - Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker), - _ => match flavor { - LinkerFlavor::Lld(f) => Command::lld(linker, f), - LinkerFlavor::Msvc - if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() => - { - Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker)) - }, - _ => Command::new(linker), - } - }; - - // The compiler's sysroot often has some bundled tools, so add it to the - // PATH for the child. - let mut new_path = sess.host_filesearch(PathKind::All) - .get_tools_search_paths(); - let mut msvc_changed_path = false; - if sess.target.target.options.is_like_msvc { - if let Some(ref tool) = msvc_tool { - cmd.args(tool.args()); - for &(ref k, ref v) in tool.env() { - if k == "PATH" { - new_path.extend(env::split_paths(v)); - msvc_changed_path = true; - } else { - cmd.env(k, v); - } - } - } - } - - if !msvc_changed_path { - if let Some(path) = env::var_os("PATH") { - new_path.extend(env::split_paths(&path)); - } - } - cmd.env("PATH", env::join_paths(new_path).unwrap()); - - (linker.to_path_buf(), cmd) -} - -pub fn remove(sess: &Session, path: &Path) { - match fs::remove_file(path) { - Ok(..) 
=> {} - Err(e) => { - sess.err(&format!("failed to remove {}: {}", - path.display(), - e)); - } - } -} /// Perform the linkage portion of the compilation phase. This will generate all /// of the requested outputs for this compilation session. pub(crate) fn link_binary(sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, outputs: &OutputFilenames, crate_name: &str) -> Vec { let mut out_filenames = Vec::new(); @@ -219,62 +155,8 @@ fn preserve_objects_for_their_debuginfo(sess: &Session) -> bool { false } -pub(crate) fn each_linked_rlib(sess: &Session, - info: &CrateInfo, - f: &mut dyn FnMut(CrateNum, &Path)) -> Result<(), String> { - let crates = info.used_crates_static.iter(); - let fmts = sess.dependency_formats.borrow(); - let fmts = fmts.get(&config::CrateType::Executable) - .or_else(|| fmts.get(&config::CrateType::Staticlib)) - .or_else(|| fmts.get(&config::CrateType::Cdylib)) - .or_else(|| fmts.get(&config::CrateType::ProcMacro)); - let fmts = match fmts { - Some(f) => f, - None => return Err("could not find formats for rlibs".to_string()) - }; - for &(cnum, ref path) in crates { - match fmts.get(cnum.as_usize() - 1) { - Some(&Linkage::NotLinked) | - Some(&Linkage::IncludedFromDylib) => continue, - Some(_) => {} - None => return Err("could not find formats for rlibs".to_string()) - } - let name = &info.crate_name[&cnum]; - let path = match *path { - LibSource::Some(ref p) => p, - LibSource::MetadataOnly => { - return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", - name)) - } - LibSource::None => { - return Err(format!("could not find rlib for: `{}`", name)) - } - }; - f(cnum, &path); - } - Ok(()) -} - -/// Returns a boolean indicating whether the specified crate should be ignored -/// during LTO. -/// -/// Crates ignored during LTO are not lumped together in the "massive object -/// file" that we create and are linked in their normal rlib states. See -/// comments below for what crates do not participate in LTO. -/// -/// It's unusual for a crate to not participate in LTO. Typically only -/// compiler-specific and unstable crates have a reason to not participate in -/// LTO. -pub(crate) fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool { - // If our target enables builtin function lowering in LLVM then the - // crates providing these functions don't participate in LTO (e.g. - // no_builtins or compiler builtins crates). - !sess.target.target.options.no_builtins && - (info.is_no_builtins.contains(&cnum) || info.compiler_builtins == Some(cnum)) -} - fn link_binary_output(sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, crate_type: config::CrateType, outputs: &OutputFilenames, crate_name: &str) -> Vec { @@ -361,8 +243,11 @@ fn archive_config<'a>(sess: &'a Session, /// building an `.rlib` (stomping over one another), or writing an `.rmeta` into a /// directory being searched for `extern crate` (observing an incomplete file). /// The returned path is the temporary file containing the complete metadata. -fn emit_metadata<'a>(sess: &'a Session, codegen_results: &CodegenResults, tmpdir: &TempDir) - -> PathBuf { +fn emit_metadata<'a>( + sess: &'a Session, + codegen_results: &CodegenResults, + tmpdir: &TempDir +) -> PathBuf { let out_filename = tmpdir.path().join(METADATA_FILENAME); let result = fs::write(&out_filename, &codegen_results.metadata.raw_data); @@ -385,7 +270,7 @@ enum RlibFlavor { // all of the object files from native libraries. 
This is done by unzipping // native libraries and inserting all of the contents into this archive. fn link_rlib<'a>(sess: &'a Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, flavor: RlibFlavor, out_filename: &Path, tmpdir: &TempDir) -> ArchiveBuilder<'a> { @@ -499,7 +384,7 @@ fn link_rlib<'a>(sess: &'a Session, // link in the metadata object file (and also don't prepare the archive with a // metadata file). fn link_staticlib(sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, out_filename: &Path, tempdir: &TempDir) { let mut ab = link_rlib(sess, @@ -584,69 +469,6 @@ fn print_native_static_libs(sess: &Session, all_native_libs: &[NativeLibrary]) { } } -pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { - fn infer_from( - sess: &Session, - linker: Option, - flavor: Option, - ) -> Option<(PathBuf, LinkerFlavor)> { - match (linker, flavor) { - (Some(linker), Some(flavor)) => Some((linker, flavor)), - // only the linker flavor is known; use the default linker for the selected flavor - (None, Some(flavor)) => Some((PathBuf::from(match flavor { - LinkerFlavor::Em => if cfg!(windows) { "emcc.bat" } else { "emcc" }, - LinkerFlavor::Gcc => "cc", - LinkerFlavor::Ld => "ld", - LinkerFlavor::Msvc => "link.exe", - LinkerFlavor::Lld(_) => "lld", - }), flavor)), - (Some(linker), None) => { - let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { - sess.fatal("couldn't extract file stem from specified linker"); - }).to_owned(); - - let flavor = if stem == "emcc" { - LinkerFlavor::Em - } else if stem == "gcc" || stem.ends_with("-gcc") { - LinkerFlavor::Gcc - } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") { - LinkerFlavor::Ld - } else if stem == "link" || stem == "lld-link" { - LinkerFlavor::Msvc - } else if stem == "lld" || stem == "rust-lld" { - LinkerFlavor::Lld(sess.target.target.options.lld_flavor) - } else { - // fall back to the value in the target spec - sess.target.target.linker_flavor - }; - - Some((linker, flavor)) - }, - (None, None) => None, - } - } - - // linker and linker flavor specified via command line have precedence over what the target - // specification specifies - if let Some(ret) = infer_from( - sess, - sess.opts.cg.linker.clone(), - sess.opts.debugging_opts.linker_flavor, - ) { - return ret; - } - - if let Some(ret) = infer_from( - sess, - sess.target.target.options.linker.clone().map(PathBuf::from), - Some(sess.target.target.linker_flavor), - ) { - return ret; - } - - bug!("Not enough information provided to determine how to invoke the linker"); -} - // Create a dynamic library or executable // // This will invoke the system linker/cc to create the resulting file. This @@ -654,7 +476,7 @@ pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { fn link_natively(sess: &Session, crate_type: config::CrateType, out_filename: &Path, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, tmpdir: &Path) { info!("preparing {:?} to {:?}", crate_type, out_filename); let (linker, flavor) = linker_and_flavor(sess); @@ -1028,7 +850,7 @@ fn link_args(cmd: &mut dyn Linker, crate_type: config::CrateType, tmpdir: &Path, out_filename: &Path, - codegen_results: &CodegenResults) { + codegen_results: &CodegenResults) { // Linker plugins should be specified early in the list of arguments cmd.cross_lang_lto(); @@ -1240,7 +1062,7 @@ fn link_args(cmd: &mut dyn Linker, // may have their native library pulled in above. 
fn add_local_native_libraries(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults) { + codegen_results: &CodegenResults) { sess.target_filesearch(PathKind::All).for_each_lib_search_path(|path, k| { match k { PathKind::Framework => { cmd.framework_path(path); } @@ -1275,7 +1097,7 @@ fn add_local_native_libraries(cmd: &mut dyn Linker, // the intermediate rlib version) fn add_upstream_rust_crates(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, crate_type: config::CrateType, tmpdir: &Path) { // All of the heavy lifting has previously been accomplished by the @@ -1399,7 +1221,7 @@ fn add_upstream_rust_crates(cmd: &mut dyn Linker, // linking it. fn link_sanitizer_runtime(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, tmpdir: &Path, cnum: CrateNum) { let src = &codegen_results.crate_info.used_crate_source[&cnum]; @@ -1468,7 +1290,7 @@ fn add_upstream_rust_crates(cmd: &mut dyn Linker, // we're at the end of the dependency chain. fn add_static_crate(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, tmpdir: &Path, crate_type: config::CrateType, cnum: CrateNum) { @@ -1608,7 +1430,7 @@ fn add_upstream_rust_crates(cmd: &mut dyn Linker, // also be resolved in the target crate. fn add_upstream_native_libraries(cmd: &mut dyn Linker, sess: &Session, - codegen_results: &CodegenResults, + codegen_results: &CodegenResults, crate_type: config::CrateType) { // Be sure to use a topological sorting of crates because there may be // interdependencies between native libraries. When passing -nodefaultlibs, diff --git a/src/librustc_codegen_llvm/back/lto.rs b/src/librustc_codegen_llvm/back/lto.rs index 61856236a1491..80b37047642be 100644 --- a/src/librustc_codegen_llvm/back/lto.rs +++ b/src/librustc_codegen_llvm/back/lto.rs @@ -9,14 +9,15 @@ // except according to those terms. use back::bytecode::{DecodedBytecode, RLIB_BYTECODE_EXTENSION}; -use back::symbol_export; -use back::write::{ModuleConfig, with_llvm_pmb, CodegenContext}; -use back::write::{self, DiagnosticHandlers, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::symbol_export; +use rustc_codegen_ssa::back::write::{ModuleConfig, CodegenContext, pre_lto_bitcode_filename}; +use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinShared, ThinModule}; +use rustc_codegen_ssa::interfaces::*; +use back::write::{self, with_llvm_pmb, save_temp_bitcode, get_llvm_opt_level, + new_diagnostic_handlers}; use errors::{FatalError, Handler}; use llvm::archive_ro::ArchiveRO; -use llvm::{True, False}; -use llvm; -use memmap; +use llvm::{self, True, False}; use rustc::dep_graph::WorkProduct; use rustc::dep_graph::cgu_reuse_tracker::CguReuse; use rustc::hir::def_id::LOCAL_CRATE; @@ -25,7 +26,8 @@ use rustc::session::config::{self, Lto}; use rustc::util::common::time_ext; use rustc_data_structures::fx::FxHashMap; use time_graph::Timeline; -use {ModuleCodegen, ModuleLlvm, ModuleKind}; +use {ModuleLlvm, LlvmCodegenBackend}; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use libc; @@ -47,71 +49,16 @@ pub fn crate_type_allows_lto(crate_type: config::CrateType) -> bool { } } -pub(crate) enum LtoModuleCodegen { - Fat { - module: Option, - _serialized_bitcode: Vec, - }, - - Thin(ThinModule), -} - -impl LtoModuleCodegen { - pub fn name(&self) -> &str { - match *self { - LtoModuleCodegen::Fat { .. 
} => "everything", - LtoModuleCodegen::Thin(ref m) => m.name(), - } - } - - /// Optimize this module within the given codegen context. - /// - /// This function is unsafe as it'll return a `ModuleCodegen` still - /// points to LLVM data structures owned by this `LtoModuleCodegen`. - /// It's intended that the module returned is immediately code generated and - /// dropped, and then this LTO module is dropped. - pub(crate) unsafe fn optimize(&mut self, - cgcx: &CodegenContext, - timeline: &mut Timeline) - -> Result - { - match *self { - LtoModuleCodegen::Fat { ref mut module, .. } => { - let module = module.take().unwrap(); - { - let config = cgcx.config(module.kind); - let llmod = module.module_llvm.llmod(); - let tm = &*module.module_llvm.tm; - run_pass_manager(cgcx, tm, llmod, config, false); - timeline.record("fat-done"); - } - Ok(module) - } - LtoModuleCodegen::Thin(ref mut thin) => thin.optimize(cgcx, timeline), - } - } - - /// A "gauge" of how costly it is to optimize this module, used to sort - /// biggest modules first. - pub fn cost(&self) -> u64 { - match *self { - // Only one module with fat LTO, so the cost doesn't matter. - LtoModuleCodegen::Fat { .. } => 0, - LtoModuleCodegen::Thin(ref m) => m.cost(), - } - } -} - /// Performs LTO, which in the case of full LTO means merging all modules into /// a single one and returning it for further optimizing. For ThinLTO, it will /// do the global analysis necessary and return two lists, one of the modules /// the need optimization and another for modules that can simply be copied over /// from the incr. comp. cache. -pub(crate) fn run(cgcx: &CodegenContext, - modules: Vec, - cached_modules: Vec<(SerializedModule, WorkProduct)>, +pub(crate) fn run(cgcx: &CodegenContext, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, timeline: &mut Timeline) - -> Result<(Vec, Vec), FatalError> + -> Result<(Vec>, Vec), FatalError> { let diag_handler = cgcx.create_diag_handler(); let export_threshold = match cgcx.lto { @@ -230,13 +177,13 @@ pub(crate) fn run(cgcx: &CodegenContext, } } -fn fat_lto(cgcx: &CodegenContext, +fn fat_lto(cgcx: &CodegenContext, diag_handler: &Handler, - mut modules: Vec, - mut serialized_modules: Vec<(SerializedModule, CString)>, + mut modules: Vec>, + mut serialized_modules: Vec<(SerializedModule, CString)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) - -> Result, FatalError> + -> Result>, FatalError> { info!("going for a fat lto"); @@ -272,7 +219,7 @@ fn fat_lto(cgcx: &CodegenContext, // The linking steps below may produce errors and diagnostics within LLVM // which we'd like to handle and print, so set up our diagnostic handlers // (which get unregistered when they go out of scope below). - let _handler = DiagnosticHandlers::new(cgcx, diag_handler, llcx); + let _handler = new_diagnostic_handlers(cgcx, diag_handler, llcx); // For all other modules we codegened we'll need to link them into our own // bitcode. 
All modules were codegened in their own LLVM context, however, @@ -303,7 +250,7 @@ fn fat_lto(cgcx: &CodegenContext, serialized_bitcode.push(bc_decoded); } drop(linker); - cgcx.save_temp_bitcode(&module, "lto.input"); + save_temp_bitcode(&cgcx, &module, "lto.input"); // Internalize everything that *isn't* in our whitelist to help strip out // more modules and such @@ -312,14 +259,14 @@ fn fat_lto(cgcx: &CodegenContext, llvm::LLVMRustRunRestrictionPass(llmod, ptr as *const *const libc::c_char, symbol_white_list.len() as libc::size_t); - cgcx.save_temp_bitcode(&module, "lto.after-restriction"); + save_temp_bitcode(&cgcx, &module, "lto.after-restriction"); } if cgcx.no_landing_pads { unsafe { llvm::LLVMRustMarkAllFunctionsNounwind(llmod); } - cgcx.save_temp_bitcode(&module, "lto.after-nounwind"); + save_temp_bitcode(&cgcx, &module, "lto.after-nounwind"); } timeline.record("passes"); } @@ -386,14 +333,14 @@ impl Drop for Linker<'a> { /// calculating the *index* for ThinLTO. This index will then be shared amongst /// all of the `LtoModuleCodegen` units returned below and destroyed once /// they all go out of scope. -fn thin_lto(cgcx: &CodegenContext, +fn thin_lto(cgcx: &CodegenContext, diag_handler: &Handler, - modules: Vec, - serialized_modules: Vec<(SerializedModule, CString)>, - cached_modules: Vec<(SerializedModule, WorkProduct)>, + modules: Vec>, + serialized_modules: Vec<(SerializedModule, CString)>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, symbol_white_list: &[*const libc::c_char], timeline: &mut Timeline) - -> Result<(Vec, Vec), FatalError> + -> Result<(Vec>, Vec), FatalError> { unsafe { info!("going for that thin, thin LTO"); @@ -556,9 +503,8 @@ fn thin_lto(cgcx: &CodegenContext, } } -fn run_pass_manager(cgcx: &CodegenContext, - tm: &llvm::TargetMachine, - llmod: &llvm::Module, +pub(crate) fn run_pass_manager(cgcx: &CodegenContext, + module: &ModuleCodegen, config: &ModuleConfig, thin: bool) { // Now we have one massive module inside of llmod. Time to run the @@ -569,7 +515,7 @@ fn run_pass_manager(cgcx: &CodegenContext, debug!("running the pass manager"); unsafe { let pm = llvm::LLVMCreatePassManager(); - llvm::LLVMRustAddAnalysisPasses(tm, pm, llmod); + llvm::LLVMRustAddAnalysisPasses(module.module_llvm.tm, pm, module.module_llvm.llmod()); if config.verify_llvm_ir { let pass = llvm::LLVMRustFindAndCreatePass("verify\0".as_ptr() as *const _); @@ -588,12 +534,13 @@ fn run_pass_manager(cgcx: &CodegenContext, // Note that in general this shouldn't matter too much as you typically // only turn on ThinLTO when you're compiling with optimizations // otherwise. 
- let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_level = config.opt_level.map(get_llvm_opt_level) + .unwrap_or(llvm::CodeGenOptLevel::None); let opt_level = match opt_level { llvm::CodeGenOptLevel::None => llvm::CodeGenOptLevel::Less, level => level, }; - with_llvm_pmb(llmod, config, opt_level, false, &mut |b| { + with_llvm_pmb(module.module_llvm.llmod(), config, opt_level, false, &mut |b| { if thin { if !llvm::LLVMRustPassManagerBuilderPopulateThinLTOPassManager(b, pm) { panic!("this version of LLVM does not support ThinLTO"); @@ -611,29 +558,13 @@ fn run_pass_manager(cgcx: &CodegenContext, } time_ext(cgcx.time_passes, None, "LTO passes", || - llvm::LLVMRunPassManager(pm, llmod)); + llvm::LLVMRunPassManager(pm, module.module_llvm.llmod())); llvm::LLVMDisposePassManager(pm); } debug!("lto done"); } -pub enum SerializedModule { - Local(ModuleBuffer), - FromRlib(Vec), - FromUncompressedFile(memmap::Mmap), -} - -impl SerializedModule { - fn data(&self) -> &[u8] { - match *self { - SerializedModule::Local(ref m) => m.data(), - SerializedModule::FromRlib(ref m) => m, - SerializedModule::FromUncompressedFile(ref m) => m, - } - } -} - pub struct ModuleBuffer(&'static mut llvm::ModuleBuffer); unsafe impl Send for ModuleBuffer {} @@ -645,8 +576,10 @@ impl ModuleBuffer { llvm::LLVMRustModuleBufferCreate(m) }) } +} - pub fn data(&self) -> &[u8] { +impl ModuleBufferMethods for ModuleBuffer { + fn data(&self) -> &[u8] { unsafe { let ptr = llvm::LLVMRustModuleBufferPtr(self.0); let len = llvm::LLVMRustModuleBufferLen(self.0); @@ -661,19 +594,7 @@ impl Drop for ModuleBuffer { } } -pub struct ThinModule { - shared: Arc, - idx: usize, -} - -struct ThinShared { - data: ThinData, - thin_buffers: Vec, - serialized_modules: Vec, - module_names: Vec, -} - -struct ThinData(&'static mut llvm::ThinLTOData); +pub struct ThinData(&'static mut llvm::ThinLTOData); unsafe impl Send for ThinData {} unsafe impl Sync for ThinData {} @@ -698,8 +619,10 @@ impl ThinBuffer { ThinBuffer(buffer) } } +} - pub fn data(&self) -> &[u8] { +impl ThinBufferMethods for ThinBuffer { + fn data(&self) -> &[u8] { unsafe { let ptr = llvm::LLVMRustThinLTOBufferPtr(self.0) as *const _; let len = llvm::LLVMRustThinLTOBufferLen(self.0); @@ -716,161 +639,142 @@ impl Drop for ThinBuffer { } } -impl ThinModule { - fn name(&self) -> &str { - self.shared.module_names[self.idx].to_str().unwrap() - } - - fn cost(&self) -> u64 { - // Yes, that's correct, we're using the size of the bytecode as an - // indicator for how costly this codegen unit is. - self.data().len() as u64 - } - - fn data(&self) -> &[u8] { - let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); - a.unwrap_or_else(|| { - let len = self.shared.thin_buffers.len(); - self.shared.serialized_modules[self.idx - len].data() - }) - } - - unsafe fn optimize(&mut self, cgcx: &CodegenContext, timeline: &mut Timeline) - -> Result - { - let diag_handler = cgcx.create_diag_handler(); - let tm = (cgcx.tm_factory)().map_err(|e| { - write::llvm_err(&diag_handler, e) - })?; - - // Right now the implementation we've got only works over serialized - // modules, so we create a fresh new LLVM context and parse the module - // into that context. One day, however, we may do this for upstream - // crates but for locally codegened modules we may be able to reuse - // that LLVM Context and Module. 
- let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); - let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( +pub unsafe fn optimize_thin_module( + thin_module: &mut ThinModule, + cgcx: &CodegenContext, + timeline: &mut Timeline +) -> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + let tm = (cgcx.tm_factory)().map_err(|e| { + write::llvm_err(&diag_handler, e) + })?; + + // Right now the implementation we've got only works over serialized + // modules, so we create a fresh new LLVM context and parse the module + // into that context. One day, however, we may do this for upstream + // crates but for locally codegened modules we may be able to reuse + // that LLVM Context and Module. + let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names); + let llmod_raw = llvm::LLVMRustParseBitcodeForThinLTO( + llcx, + thin_module.data().as_ptr(), + thin_module.data().len(), + thin_module.shared.module_names[thin_module.idx].as_ptr(), + ).ok_or_else(|| { + let msg = "failed to parse bitcode for thin LTO module".to_string(); + write::llvm_err(&diag_handler, msg) + })? as *const _; + let module = ModuleCodegen { + module_llvm: ModuleLlvm { + llmod_raw, llcx, - self.data().as_ptr(), - self.data().len(), - self.shared.module_names[self.idx].as_ptr(), - ).ok_or_else(|| { - let msg = "failed to parse bitcode for thin LTO module".to_string(); - write::llvm_err(&diag_handler, msg) - })? as *const _; - let module = ModuleCodegen { - module_llvm: ModuleLlvm { - llmod_raw, - llcx, - tm, - }, - name: self.name().to_string(), - kind: ModuleKind::Regular, - }; - { - let llmod = module.module_llvm.llmod(); - cgcx.save_temp_bitcode(&module, "thin-lto-input"); - - // Before we do much else find the "main" `DICompileUnit` that we'll be - // using below. If we find more than one though then rustc has changed - // in a way we're not ready for, so generate an ICE by returning - // an error. - let mut cu1 = ptr::null_mut(); - let mut cu2 = ptr::null_mut(); - llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); - if !cu2.is_null() { - let msg = "multiple source DICompileUnits found".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } + tm, + }, + name: thin_module.name().to_string(), + kind: ModuleKind::Regular, + }; + { + let llmod = module.module_llvm.llmod(); + save_temp_bitcode(&cgcx, &module, "thin-lto-input"); + + // Before we do much else find the "main" `DICompileUnit` that we'll be + // using below. If we find more than one though then rustc has changed + // in a way we're not ready for, so generate an ICE by returning + // an error. + let mut cu1 = ptr::null_mut(); + let mut cu2 = ptr::null_mut(); + llvm::LLVMRustThinLTOGetDICompileUnit(llmod, &mut cu1, &mut cu2); + if !cu2.is_null() { + let msg = "multiple source DICompileUnits found".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } - // Like with "fat" LTO, get some better optimizations if landing pads - // are disabled by removing all landing pads. - if cgcx.no_landing_pads { - llvm::LLVMRustMarkAllFunctionsNounwind(llmod); - cgcx.save_temp_bitcode(&module, "thin-lto-after-nounwind"); - timeline.record("nounwind"); - } + // Like with "fat" LTO, get some better optimizations if landing pads + // are disabled by removing all landing pads. + if cgcx.no_landing_pads { + llvm::LLVMRustMarkAllFunctionsNounwind(llmod); + save_temp_bitcode(&cgcx, &module, "thin-lto-after-nounwind"); + timeline.record("nounwind"); + } - // Up next comes the per-module local analyses that we do for Thin LTO. 
- // Each of these functions is basically copied from the LLVM - // implementation and then tailored to suit this implementation. Ideally - // each of these would be supported by upstream LLVM but that's perhaps - // a patch for another day! - // - // You can find some more comments about these functions in the LLVM - // bindings we've got (currently `PassWrapper.cpp`) - if !llvm::LLVMRustPrepareThinLTORename(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-rename"); - timeline.record("rename"); - if !llvm::LLVMRustPrepareThinLTOResolveWeak(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-resolve"); - timeline.record("resolve"); - if !llvm::LLVMRustPrepareThinLTOInternalize(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-internalize"); - timeline.record("internalize"); - if !llvm::LLVMRustPrepareThinLTOImport(self.shared.data.0, llmod) { - let msg = "failed to prepare thin LTO module".to_string(); - return Err(write::llvm_err(&diag_handler, msg)) - } - cgcx.save_temp_bitcode(&module, "thin-lto-after-import"); - timeline.record("import"); - - // Ok now this is a bit unfortunate. This is also something you won't - // find upstream in LLVM's ThinLTO passes! This is a hack for now to - // work around bugs in LLVM. - // - // First discovered in #45511 it was found that as part of ThinLTO - // importing passes LLVM will import `DICompileUnit` metadata - // information across modules. This means that we'll be working with one - // LLVM module that has multiple `DICompileUnit` instances in it (a - // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of - // bugs in LLVM's backend which generates invalid DWARF in a situation - // like this: - // - // https://bugs.llvm.org/show_bug.cgi?id=35212 - // https://bugs.llvm.org/show_bug.cgi?id=35562 - // - // While the first bug there is fixed the second ended up causing #46346 - // which was basically a resurgence of #45511 after LLVM's bug 35212 was - // fixed. - // - // This function below is a huge hack around this problem. The function - // below is defined in `PassWrapper.cpp` and will basically "merge" - // all `DICompileUnit` instances in a module. Basically it'll take all - // the objects, rewrite all pointers of `DISubprogram` to point to the - // first `DICompileUnit`, and then delete all the other units. - // - // This is probably mangling to the debug info slightly (but hopefully - // not too much) but for now at least gets LLVM to emit valid DWARF (or - // so it appears). Hopefully we can remove this once upstream bugs are - // fixed in LLVM. - llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); - cgcx.save_temp_bitcode(&module, "thin-lto-after-patch"); - timeline.record("patch"); - - // Alright now that we've done everything related to the ThinLTO - // analysis it's time to run some optimizations! Here we use the same - // `run_pass_manager` as the "fat" LTO above except that we tell it to - // populate a thin-specific pass manager, which presumably LLVM treats a - // little differently. 
- info!("running thin lto passes over {}", module.name); - let config = cgcx.config(module.kind); - run_pass_manager(cgcx, module.module_llvm.tm, llmod, config, true); - cgcx.save_temp_bitcode(&module, "thin-lto-after-pm"); - timeline.record("thin-done"); + // Up next comes the per-module local analyses that we do for Thin LTO. + // Each of these functions is basically copied from the LLVM + // implementation and then tailored to suit this implementation. Ideally + // each of these would be supported by upstream LLVM but that's perhaps + // a patch for another day! + // + // You can find some more comments about these functions in the LLVM + // bindings we've got (currently `PassWrapper.cpp`) + if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) } + save_temp_bitcode(cgcx, &module, "thin-lto-after-rename"); + timeline.record("rename"); + if !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-resolve"); + timeline.record("resolve"); + if !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-internalize"); + timeline.record("internalize"); + if !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod) { + let msg = "failed to prepare thin LTO module".to_string(); + return Err(write::llvm_err(&diag_handler, msg)) + } + save_temp_bitcode(cgcx, &module, "thin-lto-after-import"); + timeline.record("import"); - Ok(module) + // Ok now this is a bit unfortunate. This is also something you won't + // find upstream in LLVM's ThinLTO passes! This is a hack for now to + // work around bugs in LLVM. + // + // First discovered in #45511 it was found that as part of ThinLTO + // importing passes LLVM will import `DICompileUnit` metadata + // information across modules. This means that we'll be working with one + // LLVM module that has multiple `DICompileUnit` instances in it (a + // bunch of `llvm.dbg.cu` members). Unfortunately there's a number of + // bugs in LLVM's backend which generates invalid DWARF in a situation + // like this: + // + // https://bugs.llvm.org/show_bug.cgi?id=35212 + // https://bugs.llvm.org/show_bug.cgi?id=35562 + // + // While the first bug there is fixed the second ended up causing #46346 + // which was basically a resurgence of #45511 after LLVM's bug 35212 was + // fixed. + // + // This function below is a huge hack around this problem. The function + // below is defined in `PassWrapper.cpp` and will basically "merge" + // all `DICompileUnit` instances in a module. Basically it'll take all + // the objects, rewrite all pointers of `DISubprogram` to point to the + // first `DICompileUnit`, and then delete all the other units. + // + // This is probably mangling to the debug info slightly (but hopefully + // not too much) but for now at least gets LLVM to emit valid DWARF (or + // so it appears). Hopefully we can remove this once upstream bugs are + // fixed in LLVM. 
+ llvm::LLVMRustThinLTOPatchDICompileUnit(llmod, cu1); + save_temp_bitcode(cgcx, &module, "thin-lto-after-patch"); + timeline.record("patch"); + + // Alright now that we've done everything related to the ThinLTO + // analysis it's time to run some optimizations! Here we use the same + // `run_pass_manager` as the "fat" LTO above except that we tell it to + // populate a thin-specific pass manager, which presumably LLVM treats a + // little differently. + info!("running thin lto passes over {}", module.name); + let config = cgcx.config(module.kind); + run_pass_manager(cgcx, &module, config, true); + save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); + timeline.record("thin-done"); } + Ok(module) } #[derive(Debug, Default)] diff --git a/src/librustc_codegen_llvm/back/write.rs b/src/librustc_codegen_llvm/back/write.rs index 81619c219757b..d7415a82681ed 100644 --- a/src/librustc_codegen_llvm/back/write.rs +++ b/src/librustc_codegen_llvm/back/write.rs @@ -10,58 +10,36 @@ use attributes; use back::bytecode::{self, RLIB_BYTECODE_EXTENSION}; -use back::lto::{self, ModuleBuffer, ThinBuffer, SerializedModule}; -use back::link::{self, get_linker, remove}; -use back::command::Command; -use back::linker::LinkerInfo; -use back::symbol_export::ExportedSymbols; +use back::lto::{ModuleBuffer, ThinBuffer}; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, run_assembler, + DiagnosticHandlers}; +use rustc_codegen_ssa::interfaces::*; use base; use consts; -use memmap; -use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, - in_incr_comp_dir, in_incr_comp_dir_sess}; -use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; -use rustc::dep_graph::cgu_reuse_tracker::CguReuseTracker; -use rustc::middle::cstore::EncodedMetadata; -use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; +use rustc::session::config::{self, OutputType, Passes, Lto}; use rustc::session::Session; -use rustc::util::nodemap::FxHashMap; -use time_graph::{self, TimeGraph, Timeline}; +use time_graph::Timeline; use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic}; use llvm_util; -use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm, - CachedModuleCodegen}; -use CrateInfo; -use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; -use rustc::ty::TyCtxt; -use rustc::util::common::{time_ext, time_depth, set_time_depth, print_time_passes_entry}; +use ModuleLlvm; +use rustc_codegen_ssa::{ModuleCodegen, CompiledModule}; +use rustc::util::common::time_ext; use rustc_fs_util::{path2cstr, link_or_copy}; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_data_structures::svh::Svh; -use errors::{self, Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; -use errors::emitter::{Emitter}; -use syntax::attr; -use syntax::ext::hygiene::Mark; -use syntax_pos::MultiSpan; -use syntax_pos::symbol::Symbol; +use errors::{self, Handler, FatalError}; use type_::Type; use context::{is_pie_binary, get_reloc_model}; -use common::{C_bytes_in_context, val_ty}; -use jobserver::{Client, Acquired}; +use common; +use LlvmCodegenBackend; use rustc_demangle; -use std::any::Any; use std::ffi::{CString, CStr}; use std::fs; use std::io::{self, Write}; -use std::mem; -use std::path::{Path, PathBuf}; +use std::path::Path; use std::str; use std::sync::Arc; -use std::sync::mpsc::{channel, Sender, Receiver}; use std::slice; -use std::time::Instant; -use std::thread; use libc::{c_uint, c_void, c_char, size_t}; pub const RELOC_MODEL_ARGS : [(&'static str, 
llvm::RelocMode); 7] = [ @@ -88,8 +66,6 @@ pub const TLS_MODEL_ARGS : [(&'static str, llvm::ThreadLocalMode); 4] = [ ("local-exec", llvm::ThreadLocalMode::LocalExec), ]; -const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; - pub fn llvm_err(handler: &errors::Handler, msg: String) -> FatalError { match llvm::last_error() { Some(err) => handler.fatal(&format!("{}: {}", msg, err)), @@ -117,7 +93,7 @@ pub fn write_output_file( } } -fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { +pub(crate) fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { match optimize { config::OptLevel::No => llvm::CodeGenOptLevel::None, config::OptLevel::Less => llvm::CodeGenOptLevel::Less, @@ -127,7 +103,7 @@ fn get_llvm_opt_level(optimize: config::OptLevel) -> llvm::CodeGenOptLevel { } } -fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { +pub(crate) fn get_llvm_opt_size(optimize: config::OptLevel) -> llvm::CodeGenOptSize { match optimize { config::OptLevel::Size => llvm::CodeGenOptSizeDefault, config::OptLevel::SizeMin => llvm::CodeGenOptSizeAggressive, @@ -225,230 +201,45 @@ pub fn target_machine_factory(sess: &Session, find_features: bool) }) } -/// Module-specific configuration for `optimize_and_codegen`. -pub struct ModuleConfig { - /// Names of additional optimization passes to run. - passes: Vec, - /// Some(level) to optimize at a certain level, or None to run - /// absolutely no optimizations (used for the metadata module). - pub opt_level: Option, - - /// Some(level) to optimize binary size, or None to not affect program size. - opt_size: Option, - - pgo_gen: Option, - pgo_use: String, - - // Flags indicating which outputs to produce. - pub emit_pre_thin_lto_bc: bool, - emit_no_opt_bc: bool, - emit_bc: bool, - emit_bc_compressed: bool, - emit_lto_bc: bool, - emit_ir: bool, - emit_asm: bool, - emit_obj: bool, - // Miscellaneous flags. These are mostly copied from command-line - // options. - pub verify_llvm_ir: bool, - no_prepopulate_passes: bool, - no_builtins: bool, - time_passes: bool, - vectorize_loop: bool, - vectorize_slp: bool, - merge_functions: bool, - inline_threshold: Option, - // Instead of creating an object file by doing LLVM codegen, just - // make the object file bitcode. Provides easy compatibility with - // emscripten's ecc compiler, when used as the linker. 
- obj_is_bitcode: bool, - no_integrated_as: bool, - embed_bitcode: bool, - embed_bitcode_marker: bool, -} - -impl ModuleConfig { - fn new(passes: Vec) -> ModuleConfig { - ModuleConfig { - passes, - opt_level: None, - opt_size: None, - - pgo_gen: None, - pgo_use: String::new(), - - emit_no_opt_bc: false, - emit_pre_thin_lto_bc: false, - emit_bc: false, - emit_bc_compressed: false, - emit_lto_bc: false, - emit_ir: false, - emit_asm: false, - emit_obj: false, - obj_is_bitcode: false, - embed_bitcode: false, - embed_bitcode_marker: false, - no_integrated_as: false, - - verify_llvm_ir: false, - no_prepopulate_passes: false, - no_builtins: false, - time_passes: false, - vectorize_loop: false, - vectorize_slp: false, - merge_functions: false, - inline_threshold: None - } - } - - fn set_flags(&mut self, sess: &Session, no_builtins: bool) { - self.verify_llvm_ir = sess.verify_llvm_ir(); - self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; - self.no_builtins = no_builtins || sess.target.target.options.no_builtins; - self.time_passes = sess.time_passes(); - self.inline_threshold = sess.opts.cg.inline_threshold; - self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || - sess.opts.debugging_opts.cross_lang_lto.enabled(); - let embed_bitcode = sess.target.target.options.embed_bitcode || - sess.opts.debugging_opts.embed_bitcode; - if embed_bitcode { - match sess.opts.optimize { - config::OptLevel::No | - config::OptLevel::Less => { - self.embed_bitcode_marker = embed_bitcode; - } - _ => self.embed_bitcode = embed_bitcode, - } - } - - // Copy what clang does by turning on loop vectorization at O2 and - // slp vectorization at O3. Otherwise configure other optimization aspects - // of this pass manager builder. - // Turn off vectorization for emscripten, as it's not very well supported. - self.vectorize_loop = !sess.opts.cg.no_vectorize_loops && - (sess.opts.optimize == config::OptLevel::Default || - sess.opts.optimize == config::OptLevel::Aggressive) && - !sess.target.target.options.is_like_emscripten; - - self.vectorize_slp = !sess.opts.cg.no_vectorize_slp && - sess.opts.optimize == config::OptLevel::Aggressive && - !sess.target.target.options.is_like_emscripten; - - self.merge_functions = sess.opts.optimize == config::OptLevel::Default || - sess.opts.optimize == config::OptLevel::Aggressive; - } -} - -/// Assembler name and command used by codegen when no_integrated_as is enabled -struct AssemblerCommand { - name: PathBuf, - cmd: Command, -} - -/// Additional resources used by optimize_and_codegen (not module specific) -#[derive(Clone)] -pub struct CodegenContext { - // Resources needed when running LTO - pub time_passes: bool, - pub lto: Lto, - pub no_landing_pads: bool, - pub save_temps: bool, - pub fewer_names: bool, - pub exported_symbols: Option>, - pub opts: Arc, - pub crate_types: Vec, - pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, - output_filenames: Arc, - regular_module_config: Arc, - metadata_module_config: Arc, - allocator_module_config: Arc, - pub tm_factory: Arc Result<&'static mut llvm::TargetMachine, String> + Send + Sync>, - pub msvc_imps_needed: bool, - pub target_pointer_width: String, - debuginfo: config::DebugInfo, - - // Number of cgus excluding the allocator/metadata modules - pub total_cgus: usize, - // Handler to use for diagnostics produced during codegen. - pub diag_emitter: SharedEmitter, - // LLVM passes added by plugins. - pub plugin_passes: Vec, - // LLVM optimizations for which we want to print remarks. 
- pub remark: Passes, - // Worker thread number - pub worker: usize, - // The incremental compilation session directory, or None if we are not - // compiling incrementally - pub incr_comp_session_dir: Option, - // Used to update CGU re-use information during the thinlto phase. - pub cgu_reuse_tracker: CguReuseTracker, - // Channel back to the main control thread to send messages to - coordinator_send: Sender>, - // A reference to the TimeGraph so we can register timings. None means that - // measuring is disabled. - time_graph: Option, - // The assembler command if no_integrated_as option is enabled, None otherwise - assembler_cmd: Option>, -} - -impl CodegenContext { - pub fn create_diag_handler(&self) -> Handler { - Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) - } - - pub(crate) fn config(&self, kind: ModuleKind) -> &ModuleConfig { - match kind { - ModuleKind::Regular => &self.regular_module_config, - ModuleKind::Metadata => &self.metadata_module_config, - ModuleKind::Allocator => &self.allocator_module_config, - } +pub(crate) fn save_temp_bitcode( + cgcx: &CodegenContext, + module: &ModuleCodegen, + name: &str +) { + if !cgcx.save_temps { + return } - - pub(crate) fn save_temp_bitcode(&self, module: &ModuleCodegen, name: &str) { - if !self.save_temps { - return - } - unsafe { - let ext = format!("{}.bc", name); - let cgu = Some(&module.name[..]); - let path = self.output_filenames.temp_path_ext(&ext, cgu); - let cstr = path2cstr(&path); - let llmod = module.module_llvm.llmod(); - llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); - } + unsafe { + let ext = format!("{}.bc", name); + let cgu = Some(&module.name[..]); + let path = cgcx.output_filenames.temp_path_ext(&ext, cgu); + let cstr = path2cstr(&path); + let llmod = module.module_llvm.llmod(); + llvm::LLVMWriteBitcodeToFile(llmod, cstr.as_ptr()); } } -pub struct DiagnosticHandlers<'a> { - data: *mut (&'a CodegenContext, &'a Handler), - llcx: &'a llvm::Context, -} - -impl<'a> DiagnosticHandlers<'a> { - pub fn new(cgcx: &'a CodegenContext, - handler: &'a Handler, - llcx: &'a llvm::Context) -> Self { - let data = Box::into_raw(Box::new((cgcx, handler))); - unsafe { - llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data as *mut _); - llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data as *mut _); - } - DiagnosticHandlers { data, llcx } +pub fn new_diagnostic_handlers<'a>(cgcx: &'a CodegenContext, + handler: &'a Handler, + llcx: &'a llvm::Context) -> DiagnosticHandlers<'a, LlvmCodegenBackend> { + let data = Box::into_raw(Box::new((cgcx, handler))); + unsafe { + llvm::LLVMRustSetInlineAsmDiagnosticHandler(llcx, inline_asm_handler, data as *mut _); + llvm::LLVMContextSetDiagnosticHandler(llcx, diagnostic_handler, data as *mut _); } + DiagnosticHandlers { data, llcx: llcx } } -impl<'a> Drop for DiagnosticHandlers<'a> { - fn drop(&mut self) { - use std::ptr::null_mut; - unsafe { - llvm::LLVMRustSetInlineAsmDiagnosticHandler(self.llcx, inline_asm_handler, null_mut()); - llvm::LLVMContextSetDiagnosticHandler(self.llcx, diagnostic_handler, null_mut()); - drop(Box::from_raw(self.data)); - } +pub fn drop_diagnostic_handlers(diag: &mut DiagnosticHandlers<'a, LlvmCodegenBackend>) { + use std::ptr::null_mut; + unsafe { + llvm::LLVMRustSetInlineAsmDiagnosticHandler(diag.llcx, inline_asm_handler, null_mut()); + llvm::LLVMContextSetDiagnosticHandler(diag.llcx, diagnostic_handler, null_mut()); + drop(Box::from_raw(diag.data)); } } -unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a 
CodegenContext, +unsafe extern "C" fn report_inline_asm<'a, 'b>(cgcx: &'a CodegenContext, msg: &'b str, cookie: c_uint) { cgcx.diag_emitter.inline_asm_error(cookie as u32, msg.to_string()); @@ -460,7 +251,7 @@ unsafe extern "C" fn inline_asm_handler(diag: &SMDiagnostic, if user.is_null() { return } - let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); + let (cgcx, _) = *(user as *const (&CodegenContext, &Handler)); let msg = llvm::build_string(|s| llvm::LLVMRustWriteSMDiagnosticToString(diag, s)) .expect("non-UTF8 SMDiagnostic"); @@ -472,7 +263,7 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void if user.is_null() { return } - let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); + let (cgcx, diag_handler) = *(user as *const (&CodegenContext, &Handler)); match llvm::diagnostic::Diagnostic::unpack(info) { llvm::diagnostic::InlineAsm(inline) => { @@ -509,9 +300,9 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void } // Unsafe due to LLVM calls. -unsafe fn optimize(cgcx: &CodegenContext, +pub(crate) unsafe fn optimize(cgcx: &CodegenContext, diag_handler: &Handler, - module: &ModuleCodegen, + module: &ModuleCodegen, config: &ModuleConfig, timeline: &mut Timeline) -> Result<(), FatalError> @@ -519,7 +310,7 @@ unsafe fn optimize(cgcx: &CodegenContext, let llmod = module.module_llvm.llmod(); let llcx = &*module.module_llvm.llcx; let tm = &*module.module_llvm.tm; - let _handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx); + let _handlers = new_diagnostic_handlers(cgcx, diag_handler, llcx); let module_name = module.name.clone(); let module_name = Some(&module_name[..]); @@ -570,7 +361,8 @@ unsafe fn optimize(cgcx: &CodegenContext, if !config.no_prepopulate_passes { llvm::LLVMRustAddAnalysisPasses(tm, fpm, llmod); llvm::LLVMRustAddAnalysisPasses(tm, mpm, llmod); - let opt_level = config.opt_level.unwrap_or(llvm::CodeGenOptLevel::None); + let opt_level = config.opt_level.map(get_llvm_opt_level) + .unwrap_or(llvm::CodeGenOptLevel::None); let prepare_for_thin_lto = cgcx.lto == Lto::Thin || cgcx.lto == Lto::ThinLocal || (cgcx.lto != Lto::Fat && cgcx.opts.debugging_opts.cross_lang_lto.enabled()); have_name_anon_globals_pass = have_name_anon_globals_pass || prepare_for_thin_lto; @@ -643,37 +435,10 @@ unsafe fn optimize(cgcx: &CodegenContext, Ok(()) } -fn generate_lto_work(cgcx: &CodegenContext, - modules: Vec, - import_only_modules: Vec<(SerializedModule, WorkProduct)>) - -> Vec<(WorkItem, u64)> -{ - let mut timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(CODEGEN_WORKER_TIMELINE, - CODEGEN_WORK_PACKAGE_KIND, - "generate lto") - }).unwrap_or(Timeline::noop()); - let (lto_modules, copy_jobs) = lto::run(cgcx, modules, import_only_modules, &mut timeline) - .unwrap_or_else(|e| e.raise()); - - let lto_modules = lto_modules.into_iter().map(|module| { - let cost = module.cost(); - (WorkItem::LTO(module), cost) - }); - - let copy_jobs = copy_jobs.into_iter().map(|wp| { - (WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { - name: wp.cgu_name.clone(), - source: wp, - }), 0) - }); - - lto_modules.chain(copy_jobs).collect() -} -unsafe fn codegen(cgcx: &CodegenContext, +pub(crate) unsafe fn codegen(cgcx: &CodegenContext, diag_handler: &Handler, - module: ModuleCodegen, + module: ModuleCodegen, config: &ModuleConfig, timeline: &mut Timeline) -> Result @@ -685,7 +450,7 @@ unsafe fn codegen(cgcx: &CodegenContext, let tm = &*module.module_llvm.tm; let module_name = module.name.clone(); let module_name = 
Some(&module_name[..]); - let handlers = DiagnosticHandlers::new(cgcx, diag_handler, llcx); + let handlers = new_diagnostic_handlers(cgcx, diag_handler, llcx); if cgcx.msvc_imps_needed { create_msvc_imps(cgcx, llcx, llmod); @@ -885,14 +650,14 @@ unsafe fn codegen(cgcx: &CodegenContext, /// /// Basically all of this is us attempting to follow in the footsteps of clang /// on iOS. See #35968 for lots more info. -unsafe fn embed_bitcode(cgcx: &CodegenContext, +unsafe fn embed_bitcode(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module, bitcode: Option<&[u8]>) { - let llconst = C_bytes_in_context(llcx, bitcode.unwrap_or(&[])); + let llconst = common::bytes_in_context(llcx, bitcode.unwrap_or(&[])); let llglobal = llvm::LLVMAddGlobal( llmod, - val_ty(llconst), + common::val_ty(llconst), "rustc.embedded.module\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -909,10 +674,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); llvm::LLVMSetGlobalConstant(llglobal, llvm::True); - let llconst = C_bytes_in_context(llcx, &[]); + let llconst = common::bytes_in_context(llcx, &[]); let llglobal = llvm::LLVMAddGlobal( llmod, - val_ty(llconst), + common::val_ty(llconst), "rustc.embedded.cmdline\0".as_ptr() as *const _, ); llvm::LLVMSetInitializer(llglobal, llconst); @@ -925,1259 +690,7 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext, llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage); } -pub(crate) struct CompiledModules { - pub modules: Vec, - pub metadata_module: CompiledModule, - pub allocator_module: Option, -} - -fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { - sess.crate_types.borrow().contains(&config::CrateType::Rlib) && - sess.opts.output_types.contains_key(&OutputType::Exe) -} - -fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { - if sess.opts.incremental.is_none() { - return false - } - - match sess.lto() { - Lto::Fat | - Lto::No => false, - Lto::Thin | - Lto::ThinLocal => true, - } -} - -pub fn start_async_codegen(tcx: TyCtxt, - time_graph: Option, - metadata: EncodedMetadata, - coordinator_receive: Receiver>, - total_cgus: usize) - -> OngoingCodegen { - let sess = tcx.sess; - let crate_name = tcx.crate_name(LOCAL_CRATE); - let crate_hash = tcx.crate_hash(LOCAL_CRATE); - let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); - let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, - "windows_subsystem"); - let windows_subsystem = subsystem.map(|subsystem| { - if subsystem != "windows" && subsystem != "console" { - tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ - `windows` and `console` are allowed", - subsystem)); - } - subsystem.to_string() - }); - - let linker_info = LinkerInfo::new(tcx); - let crate_info = CrateInfo::new(tcx); - - // Figure out what we actually need to build. 
- let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); - let mut metadata_config = ModuleConfig::new(vec![]); - let mut allocator_config = ModuleConfig::new(vec![]); - - if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { - match *sanitizer { - Sanitizer::Address => { - modules_config.passes.push("asan".to_owned()); - modules_config.passes.push("asan-module".to_owned()); - } - Sanitizer::Memory => { - modules_config.passes.push("msan".to_owned()) - } - Sanitizer::Thread => { - modules_config.passes.push("tsan".to_owned()) - } - _ => {} - } - } - - if sess.opts.debugging_opts.profile { - modules_config.passes.push("insert-gcov-profiling".to_owned()) - } - - modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone(); - modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone(); - - modules_config.opt_level = Some(get_llvm_opt_level(sess.opts.optimize)); - modules_config.opt_size = Some(get_llvm_opt_size(sess.opts.optimize)); - - // Save all versions of the bytecode if we're saving our temporaries. - if sess.opts.cg.save_temps { - modules_config.emit_no_opt_bc = true; - modules_config.emit_pre_thin_lto_bc = true; - modules_config.emit_bc = true; - modules_config.emit_lto_bc = true; - metadata_config.emit_bc = true; - allocator_config.emit_bc = true; - } - - // Emit compressed bitcode files for the crate if we're emitting an rlib. - // Whenever an rlib is created, the bitcode is inserted into the archive in - // order to allow LTO against it. - if need_crate_bitcode_for_rlib(sess) { - modules_config.emit_bc_compressed = true; - allocator_config.emit_bc_compressed = true; - } - - modules_config.emit_pre_thin_lto_bc = - need_pre_thin_lto_bitcode_for_incr_comp(sess); - - modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || - tcx.sess.target.target.options.no_integrated_as; - - for output_type in sess.opts.output_types.keys() { - match *output_type { - OutputType::Bitcode => { modules_config.emit_bc = true; } - OutputType::LlvmAssembly => { modules_config.emit_ir = true; } - OutputType::Assembly => { - modules_config.emit_asm = true; - // If we're not using the LLVM assembler, this function - // could be invoked specially with output_type_assembly, so - // in this case we still want the metadata object file. - if !sess.opts.output_types.contains_key(&OutputType::Assembly) { - metadata_config.emit_obj = true; - allocator_config.emit_obj = true; - } - } - OutputType::Object => { modules_config.emit_obj = true; } - OutputType::Metadata => { metadata_config.emit_obj = true; } - OutputType::Exe => { - modules_config.emit_obj = true; - metadata_config.emit_obj = true; - allocator_config.emit_obj = true; - }, - OutputType::Mir => {} - OutputType::DepInfo => {} - } - } - - modules_config.set_flags(sess, no_builtins); - metadata_config.set_flags(sess, no_builtins); - allocator_config.set_flags(sess, no_builtins); - - // Exclude metadata and allocator modules from time_passes output, since - // they throw off the "LLVM passes" measurement. 
- metadata_config.time_passes = false; - allocator_config.time_passes = false; - - let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); - let (codegen_worker_send, codegen_worker_receive) = channel(); - - let coordinator_thread = start_executing_work(tcx, - &crate_info, - shared_emitter, - codegen_worker_send, - coordinator_receive, - total_cgus, - sess.jobserver.clone(), - time_graph.clone(), - Arc::new(modules_config), - Arc::new(metadata_config), - Arc::new(allocator_config)); - - OngoingCodegen { - crate_name, - crate_hash, - metadata, - windows_subsystem, - linker_info, - crate_info, - - time_graph, - coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), - codegen_worker_receive, - shared_emitter_main, - future: coordinator_thread, - output_filenames: tcx.output_filenames(LOCAL_CRATE), - } -} - -fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( - sess: &Session, - compiled_modules: &CompiledModules, -) -> FxHashMap { - let mut work_products = FxHashMap::default(); - - if sess.opts.incremental.is_none() { - return work_products; - } - - for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) { - let mut files = vec![]; - - if let Some(ref path) = module.object { - files.push((WorkProductFileKind::Object, path.clone())); - } - if let Some(ref path) = module.bytecode { - files.push((WorkProductFileKind::Bytecode, path.clone())); - } - if let Some(ref path) = module.bytecode_compressed { - files.push((WorkProductFileKind::BytecodeCompressed, path.clone())); - } - - if let Some((id, product)) = - copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) { - work_products.insert(id, product); - } - } - - work_products -} - -fn produce_final_output_artifacts(sess: &Session, - compiled_modules: &CompiledModules, - crate_output: &OutputFilenames) { - let mut user_wants_bitcode = false; - let mut user_wants_objects = false; - - // Produce final compile outputs. - let copy_gracefully = |from: &Path, to: &Path| { - if let Err(e) = fs::copy(from, to) { - sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); - } - }; - - let copy_if_one_unit = |output_type: OutputType, - keep_numbered: bool| { - if compiled_modules.modules.len() == 1 { - // 1) Only one codegen unit. In this case it's no difficulty - // to copy `foo.0.x` to `foo.x`. - let module_name = Some(&compiled_modules.modules[0].name[..]); - let path = crate_output.temp_path(output_type, module_name); - copy_gracefully(&path, - &crate_output.path(output_type)); - if !sess.opts.cg.save_temps && !keep_numbered { - // The user just wants `foo.x`, not `foo.#module-name#.x`. - remove(sess, &path); - } - } else { - let ext = crate_output.temp_path(output_type, None) - .extension() - .unwrap() - .to_str() - .unwrap() - .to_owned(); - - if crate_output.outputs.contains_key(&output_type) { - // 2) Multiple codegen units, with `--emit foo=some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring emit path because multiple .{} files \ - were produced", ext)); - } else if crate_output.single_output_file.is_some() { - // 3) Multiple codegen units, with `-o some_name`. We have - // no good solution for this case, so warn the user. - sess.warn(&format!("ignoring -o because multiple .{} files \ - were produced", ext)); - } else { - // 4) Multiple codegen units, but no explicit name. We - // just leave the `foo.0.x` files in place. - // (We don't have to do any work in this case.) 
- } - } - }; - - // Flag to indicate whether the user explicitly requested bitcode. - // Otherwise, we produced it only as a temporary output, and will need - // to get rid of it. - for output_type in crate_output.outputs.keys() { - match *output_type { - OutputType::Bitcode => { - user_wants_bitcode = true; - // Copy to .bc, but always keep the .0.bc. There is a later - // check to figure out if we should delete .0.bc files, or keep - // them for making an rlib. - copy_if_one_unit(OutputType::Bitcode, true); - } - OutputType::LlvmAssembly => { - copy_if_one_unit(OutputType::LlvmAssembly, false); - } - OutputType::Assembly => { - copy_if_one_unit(OutputType::Assembly, false); - } - OutputType::Object => { - user_wants_objects = true; - copy_if_one_unit(OutputType::Object, true); - } - OutputType::Mir | - OutputType::Metadata | - OutputType::Exe | - OutputType::DepInfo => {} - } - } - - // Clean up unwanted temporary files. - - // We create the following files by default: - // - #crate#.#module-name#.bc - // - #crate#.#module-name#.o - // - #crate#.crate.metadata.bc - // - #crate#.crate.metadata.o - // - #crate#.o (linked from crate.##.o) - // - #crate#.bc (copied from crate.##.bc) - // We may create additional files if requested by the user (through - // `-C save-temps` or `--emit=` flags). - - if !sess.opts.cg.save_temps { - // Remove the temporary .#module-name#.o objects. If the user didn't - // explicitly request bitcode (with --emit=bc), and the bitcode is not - // needed for building an rlib, then we must remove .#module-name#.bc as - // well. - - // Specific rules for keeping .#module-name#.bc: - // - If the user requested bitcode (`user_wants_bitcode`), and - // codegen_units > 1, then keep it. - // - If the user requested bitcode but codegen_units == 1, then we - // can toss .#module-name#.bc because we copied it to .bc earlier. - // - If we're not building an rlib and the user didn't request - // bitcode, then delete .#module-name#.bc. - // If you change how this works, also update back::link::link_rlib, - // where .#module-name#.bc files are (maybe) deleted after making an - // rlib. - let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); - - let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1; - - let keep_numbered_objects = needs_crate_object || - (user_wants_objects && sess.codegen_units() > 1); - - for module in compiled_modules.modules.iter() { - if let Some(ref path) = module.object { - if !keep_numbered_objects { - remove(sess, path); - } - } - - if let Some(ref path) = module.bytecode { - if !keep_numbered_bitcode { - remove(sess, path); - } - } - } - - if !user_wants_bitcode { - if let Some(ref path) = compiled_modules.metadata_module.bytecode { - remove(sess, &path); - } - - if let Some(ref allocator_module) = compiled_modules.allocator_module { - if let Some(ref path) = allocator_module.bytecode { - remove(sess, path); - } - } - } - } - - // We leave the following files around by default: - // - #crate#.o - // - #crate#.crate.metadata.o - // - #crate#.bc - // These are used in linking steps and will be cleaned up afterward. -} - -pub(crate) fn dump_incremental_data(_codegen_results: &CodegenResults) { - // FIXME(mw): This does not work at the moment because the situation has - // become more complicated due to incremental LTO. Now a CGU - // can have more than two caching states. 
- // println!("[incremental] Re-using {} out of {} modules", - // codegen_results.modules.iter().filter(|m| m.pre_existing).count(), - // codegen_results.modules.len()); -} - -enum WorkItem { - /// Optimize a newly codegened, totally unoptimized module. - Optimize(ModuleCodegen), - /// Copy the post-LTO artifacts from the incremental cache to the output - /// directory. - CopyPostLtoArtifacts(CachedModuleCodegen), - /// Perform (Thin)LTO on the given module. - LTO(lto::LtoModuleCodegen), -} - -impl WorkItem { - fn module_kind(&self) -> ModuleKind { - match *self { - WorkItem::Optimize(ref m) => m.kind, - WorkItem::CopyPostLtoArtifacts(_) | - WorkItem::LTO(_) => ModuleKind::Regular, - } - } - - fn name(&self) -> String { - match *self { - WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), - WorkItem::CopyPostLtoArtifacts(ref m) => format!("copy post LTO artifacts: {}", m.name), - WorkItem::LTO(ref m) => format!("lto: {}", m.name()), - } - } -} - -enum WorkItemResult { - Compiled(CompiledModule), - NeedsLTO(ModuleCodegen), -} - -fn execute_work_item(cgcx: &CodegenContext, - work_item: WorkItem, - timeline: &mut Timeline) - -> Result -{ - let module_config = cgcx.config(work_item.module_kind()); - - match work_item { - WorkItem::Optimize(module) => { - execute_optimize_work_item(cgcx, module, module_config, timeline) - } - WorkItem::CopyPostLtoArtifacts(module) => { - execute_copy_from_cache_work_item(cgcx, module, module_config, timeline) - } - WorkItem::LTO(module) => { - execute_lto_work_item(cgcx, module, module_config, timeline) - } - } -} - -fn execute_optimize_work_item(cgcx: &CodegenContext, - module: ModuleCodegen, - module_config: &ModuleConfig, - timeline: &mut Timeline) - -> Result -{ - let diag_handler = cgcx.create_diag_handler(); - - unsafe { - optimize(cgcx, &diag_handler, &module, module_config, timeline)?; - } - - let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); - - // After we've done the initial round of optimizations we need to - // decide whether to synchronously codegen this module or ship it - // back to the coordinator thread for further LTO processing (which - // has to wait for all the initial modules to be optimized). - // - // Here we dispatch based on the `cgcx.lto` and kind of module we're - // codegenning... - let needs_lto = match cgcx.lto { - Lto::No => false, - - // If the linker does LTO, we don't have to do it. Note that we - // keep doing full LTO, if it is requested, as not to break the - // assumption that the output will be a single module. - Lto::Thin | Lto::ThinLocal if linker_does_lto => false, - - // Here we've got a full crate graph LTO requested. We ignore - // this, however, if the crate type is only an rlib as there's - // no full crate graph to process, that'll happen later. - // - // This use case currently comes up primarily for targets that - // require LTO so the request for LTO is always unconditionally - // passed down to the backend, but we don't actually want to do - // anything about it yet until we've got a final product. - Lto::Fat | Lto::Thin => { - cgcx.crate_types.len() != 1 || - cgcx.crate_types[0] != config::CrateType::Rlib - } - - // When we're automatically doing ThinLTO for multi-codegen-unit - // builds we don't actually want to LTO the allocator modules if - // it shows up. This is due to various linker shenanigans that - // we'll encounter later. - // - // Additionally here's where we also factor in the current LLVM - // version. If it doesn't support ThinLTO we skip this. 
- Lto::ThinLocal => { - module.kind != ModuleKind::Allocator && - unsafe { llvm::LLVMRustThinLTOAvailable() } - } - }; - - // Metadata modules never participate in LTO regardless of the lto - // settings. - let needs_lto = needs_lto && module.kind != ModuleKind::Metadata; - - if needs_lto { - Ok(WorkItemResult::NeedsLTO(module)) - } else { - let module = unsafe { - codegen(cgcx, &diag_handler, module, module_config, timeline)? - }; - Ok(WorkItemResult::Compiled(module)) - } -} - -fn execute_copy_from_cache_work_item(cgcx: &CodegenContext, - module: CachedModuleCodegen, - module_config: &ModuleConfig, - _: &mut Timeline) - -> Result -{ - let incr_comp_session_dir = cgcx.incr_comp_session_dir - .as_ref() - .unwrap(); - let mut object = None; - let mut bytecode = None; - let mut bytecode_compressed = None; - for (kind, saved_file) in &module.source.saved_files { - let obj_out = match kind { - WorkProductFileKind::Object => { - let path = cgcx.output_filenames.temp_path(OutputType::Object, - Some(&module.name)); - object = Some(path.clone()); - path - } - WorkProductFileKind::Bytecode => { - let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, - Some(&module.name)); - bytecode = Some(path.clone()); - path - } - WorkProductFileKind::BytecodeCompressed => { - let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, - Some(&module.name)) - .with_extension(RLIB_BYTECODE_EXTENSION); - bytecode_compressed = Some(path.clone()); - path - } - }; - let source_file = in_incr_comp_dir(&incr_comp_session_dir, - &saved_file); - debug!("copying pre-existing module `{}` from {:?} to {}", - module.name, - source_file, - obj_out.display()); - match link_or_copy(&source_file, &obj_out) { - Ok(_) => { } - Err(err) => { - let diag_handler = cgcx.create_diag_handler(); - diag_handler.err(&format!("unable to copy {} to {}: {}", - source_file.display(), - obj_out.display(), - err)); - } - } - } - - assert_eq!(object.is_some(), module_config.emit_obj); - assert_eq!(bytecode.is_some(), module_config.emit_bc); - assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed); - - Ok(WorkItemResult::Compiled(CompiledModule { - name: module.name, - kind: ModuleKind::Regular, - object, - bytecode, - bytecode_compressed, - })) -} - -fn execute_lto_work_item(cgcx: &CodegenContext, - mut module: lto::LtoModuleCodegen, - module_config: &ModuleConfig, - timeline: &mut Timeline) - -> Result -{ - let diag_handler = cgcx.create_diag_handler(); - - unsafe { - let module = module.optimize(cgcx, timeline)?; - let module = codegen(cgcx, &diag_handler, module, module_config, timeline)?; - Ok(WorkItemResult::Compiled(module)) - } -} - -enum Message { - Token(io::Result), - NeedsLTO { - result: ModuleCodegen, - worker_id: usize, - }, - Done { - result: Result, - worker_id: usize, - }, - CodegenDone { - llvm_work_item: WorkItem, - cost: u64, - }, - AddImportOnlyModule { - module_data: SerializedModule, - work_product: WorkProduct, - }, - CodegenComplete, - CodegenItem, -} - -struct Diagnostic { - msg: String, - code: Option, - lvl: Level, -} - -#[derive(PartialEq, Clone, Copy, Debug)] -enum MainThreadWorkerState { - Idle, - Codegenning, - LLVMing, -} - -fn start_executing_work(tcx: TyCtxt, - crate_info: &CrateInfo, - shared_emitter: SharedEmitter, - codegen_worker_send: Sender, - coordinator_receive: Receiver>, - total_cgus: usize, - jobserver: Client, - time_graph: Option, - modules_config: Arc, - metadata_config: Arc, - allocator_config: Arc) - -> thread::JoinHandle> { - let coordinator_send = 
tcx.tx_to_llvm_workers.lock().clone(); - let sess = tcx.sess; - - // Compute the set of symbols we need to retain when doing LTO (if we need to) - let exported_symbols = { - let mut exported_symbols = FxHashMap::default(); - - let copy_symbols = |cnum| { - let symbols = tcx.exported_symbols(cnum) - .iter() - .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl)) - .collect(); - Arc::new(symbols) - }; - - match sess.lto() { - Lto::No => None, - Lto::ThinLocal => { - exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); - Some(Arc::new(exported_symbols)) - } - Lto::Fat | Lto::Thin => { - exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); - for &cnum in tcx.crates().iter() { - exported_symbols.insert(cnum, copy_symbols(cnum)); - } - Some(Arc::new(exported_symbols)) - } - } - }; - - // First up, convert our jobserver into a helper thread so we can use normal - // mpsc channels to manage our messages and such. - // After we've requested tokens then we'll, when we can, - // get tokens on `coordinator_receive` which will - // get managed in the main loop below. - let coordinator_send2 = coordinator_send.clone(); - let helper = jobserver.into_helper_thread(move |token| { - drop(coordinator_send2.send(Box::new(Message::Token(token)))); - }).expect("failed to spawn helper thread"); - - let mut each_linked_rlib_for_lto = Vec::new(); - drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { - if link::ignored_for_lto(sess, crate_info, cnum) { - return - } - each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); - })); - - let assembler_cmd = if modules_config.no_integrated_as { - // HACK: currently we use linker (gcc) as our assembler - let (linker, flavor) = link::linker_and_flavor(sess); - - let (name, mut cmd) = get_linker(sess, &linker, flavor); - cmd.args(&sess.target.target.options.asm_args); - Some(Arc::new(AssemblerCommand { - name, - cmd, - })) - } else { - None - }; - - let cgcx = CodegenContext { - crate_types: sess.crate_types.borrow().clone(), - each_linked_rlib_for_lto, - lto: sess.lto(), - no_landing_pads: sess.no_landing_pads(), - fewer_names: sess.fewer_names(), - save_temps: sess.opts.cg.save_temps, - opts: Arc::new(sess.opts.clone()), - time_passes: sess.time_passes(), - exported_symbols, - plugin_passes: sess.plugin_llvm_passes.borrow().clone(), - remark: sess.opts.cg.remark.clone(), - worker: 0, - incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), - cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), - coordinator_send, - diag_emitter: shared_emitter.clone(), - time_graph, - output_filenames: tcx.output_filenames(LOCAL_CRATE), - regular_module_config: modules_config, - metadata_module_config: metadata_config, - allocator_module_config: allocator_config, - tm_factory: target_machine_factory(tcx.sess, false), - total_cgus, - msvc_imps_needed: msvc_imps_needed(tcx), - target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), - debuginfo: tcx.sess.opts.debuginfo, - assembler_cmd, - }; - - // This is the "main loop" of parallel work happening for parallel codegen. - // It's here that we manage parallelism, schedule work, and work with - // messages coming from clients. - // - // There are a few environmental pre-conditions that shape how the system - // is set up: - // - // - Error reporting only can happen on the main thread because that's the - // only place where we have access to the compiler `Session`. - // - LLVM work can be done on any thread. - // - Codegen can only happen on the main thread. 
- // - Each thread doing substantial work most be in possession of a `Token` - // from the `Jobserver`. - // - The compiler process always holds one `Token`. Any additional `Tokens` - // have to be requested from the `Jobserver`. - // - // Error Reporting - // =============== - // The error reporting restriction is handled separately from the rest: We - // set up a `SharedEmitter` the holds an open channel to the main thread. - // When an error occurs on any thread, the shared emitter will send the - // error message to the receiver main thread (`SharedEmitterMain`). The - // main thread will periodically query this error message queue and emit - // any error messages it has received. It might even abort compilation if - // has received a fatal error. In this case we rely on all other threads - // being torn down automatically with the main thread. - // Since the main thread will often be busy doing codegen work, error - // reporting will be somewhat delayed, since the message queue can only be - // checked in between to work packages. - // - // Work Processing Infrastructure - // ============================== - // The work processing infrastructure knows three major actors: - // - // - the coordinator thread, - // - the main thread, and - // - LLVM worker threads - // - // The coordinator thread is running a message loop. It instructs the main - // thread about what work to do when, and it will spawn off LLVM worker - // threads as open LLVM WorkItems become available. - // - // The job of the main thread is to codegen CGUs into LLVM work package - // (since the main thread is the only thread that can do this). The main - // thread will block until it receives a message from the coordinator, upon - // which it will codegen one CGU, send it to the coordinator and block - // again. This way the coordinator can control what the main thread is - // doing. - // - // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is - // available, it will spawn off a new LLVM worker thread and let it process - // that a WorkItem. When a LLVM worker thread is done with its WorkItem, - // it will just shut down, which also frees all resources associated with - // the given LLVM module, and sends a message to the coordinator that the - // has been completed. - // - // Work Scheduling - // =============== - // The scheduler's goal is to minimize the time it takes to complete all - // work there is, however, we also want to keep memory consumption low - // if possible. These two goals are at odds with each other: If memory - // consumption were not an issue, we could just let the main thread produce - // LLVM WorkItems at full speed, assuring maximal utilization of - // Tokens/LLVM worker threads. However, since codegen usual is faster - // than LLVM processing, the queue of LLVM WorkItems would fill up and each - // WorkItem potentially holds on to a substantial amount of memory. - // - // So the actual goal is to always produce just enough LLVM WorkItems as - // not to starve our LLVM worker threads. That means, once we have enough - // WorkItems in our queue, we can block the main thread, so it does not - // produce more until we need them. - // - // Doing LLVM Work on the Main Thread - // ---------------------------------- - // Since the main thread owns the compiler processes implicit `Token`, it is - // wasteful to keep it blocked without doing any work. Therefore, what we do - // in this case is: We spawn off an additional LLVM worker thread that helps - // reduce the queue. 
The work it is doing corresponds to the implicit - // `Token`. The coordinator will mark the main thread as being busy with - // LLVM work. (The actual work happens on another OS thread but we just care - // about `Tokens`, not actual threads). - // - // When any LLVM worker thread finishes while the main thread is marked as - // "busy with LLVM work", we can do a little switcheroo: We give the Token - // of the just finished thread to the LLVM worker thread that is working on - // behalf of the main thread's implicit Token, thus freeing up the main - // thread again. The coordinator can then again decide what the main thread - // should do. This allows the coordinator to make decisions at more points - // in time. - // - // Striking a Balance between Throughput and Memory Consumption - // ------------------------------------------------------------ - // Since our two goals, (1) use as many Tokens as possible and (2) keep - // memory consumption as low as possible, are in conflict with each other, - // we have to find a trade off between them. Right now, the goal is to keep - // all workers busy, which means that no worker should find the queue empty - // when it is ready to start. - // How do we do achieve this? Good question :) We actually never know how - // many `Tokens` are potentially available so it's hard to say how much to - // fill up the queue before switching the main thread to LLVM work. Also we - // currently don't have a means to estimate how long a running LLVM worker - // will still be busy with it's current WorkItem. However, we know the - // maximal count of available Tokens that makes sense (=the number of CPU - // cores), so we can take a conservative guess. The heuristic we use here - // is implemented in the `queue_full_enough()` function. - // - // Some Background on Jobservers - // ----------------------------- - // It's worth also touching on the management of parallelism here. We don't - // want to just spawn a thread per work item because while that's optimal - // parallelism it may overload a system with too many threads or violate our - // configuration for the maximum amount of cpu to use for this process. To - // manage this we use the `jobserver` crate. - // - // Job servers are an artifact of GNU make and are used to manage - // parallelism between processes. A jobserver is a glorified IPC semaphore - // basically. Whenever we want to run some work we acquire the semaphore, - // and whenever we're done with that work we release the semaphore. In this - // manner we can ensure that the maximum number of parallel workers is - // capped at any one point in time. - // - // LTO and the coordinator thread - // ------------------------------ - // - // The final job the coordinator thread is responsible for is managing LTO - // and how that works. When LTO is requested what we'll to is collect all - // optimized LLVM modules into a local vector on the coordinator. Once all - // modules have been codegened and optimized we hand this to the `lto` - // module for further optimization. The `lto` module will return back a list - // of more modules to work on, which the coordinator will continue to spawn - // work for. - // - // Each LLVM module is automatically sent back to the coordinator for LTO if - // necessary. There's already optimizations in place to avoid sending work - // back to the coordinator if LTO isn't requested. 
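// --- Editorial aside (illustrative sketch, not part of the patch) ---
// The comment above describes the jobserver as "a glorified IPC semaphore":
// acquire a token before doing a unit of work, release it (by dropping it)
// when done. A minimal usage sketch of the `jobserver` crate along those
// lines; the token limit and the "work" are made up for the example, and in
// rustc the client is shared with the parent build tool rather than created
// fresh.

use jobserver::Client;

fn main() -> std::io::Result<()> {
    // Create a jobserver with two tokens.
    let client = Client::new(2)?;

    // Block until a token is available, then do some work while holding it.
    let token = client.acquire()?;
    println!("doing work while holding a token");

    // Dropping the token returns it to the jobserver so another worker can
    // start.
    drop(token);
    Ok(())
}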
- return thread::spawn(move || { - // We pretend to be within the top-level LLVM time-passes task here: - set_time_depth(1); - - let max_workers = ::num_cpus::get(); - let mut worker_id_counter = 0; - let mut free_worker_ids = Vec::new(); - let mut get_worker_id = |free_worker_ids: &mut Vec| { - if let Some(id) = free_worker_ids.pop() { - id - } else { - let id = worker_id_counter; - worker_id_counter += 1; - id - } - }; - - // This is where we collect codegen units that have gone all the way - // through codegen and LLVM. - let mut compiled_modules = vec![]; - let mut compiled_metadata_module = None; - let mut compiled_allocator_module = None; - let mut needs_lto = Vec::new(); - let mut lto_import_only_modules = Vec::new(); - let mut started_lto = false; - - // This flag tracks whether all items have gone through codegens - let mut codegen_done = false; - - // This is the queue of LLVM work items that still need processing. - let mut work_items = Vec::<(WorkItem, u64)>::new(); - - // This are the Jobserver Tokens we currently hold. Does not include - // the implicit Token the compiler process owns no matter what. - let mut tokens = Vec::new(); - - let mut main_thread_worker_state = MainThreadWorkerState::Idle; - let mut running = 0; - - let mut llvm_start_time = None; - - // Run the message loop while there's still anything that needs message - // processing: - while !codegen_done || - work_items.len() > 0 || - running > 0 || - needs_lto.len() > 0 || - lto_import_only_modules.len() > 0 || - main_thread_worker_state != MainThreadWorkerState::Idle { - - // While there are still CGUs to be codegened, the coordinator has - // to decide how to utilize the compiler processes implicit Token: - // For codegenning more CGU or for running them through LLVM. - if !codegen_done { - if main_thread_worker_state == MainThreadWorkerState::Idle { - if !queue_full_enough(work_items.len(), running, max_workers) { - // The queue is not full enough, codegen more items: - if let Err(_) = codegen_worker_send.send(Message::CodegenItem) { - panic!("Could not send Message::CodegenItem to main thread") - } - main_thread_worker_state = MainThreadWorkerState::Codegenning; - } else { - // The queue is full enough to not let the worker - // threads starve. Use the implicit Token to do some - // LLVM work too. - let (item, _) = work_items.pop() - .expect("queue empty - queue_full_enough() broken?"); - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); - } - } - } else { - // If we've finished everything related to normal codegen - // then it must be the case that we've got some LTO work to do. 
- // Perform the serial work here of figuring out what we're - // going to LTO and then push a bunch of work items onto our - // queue to do LTO - if work_items.len() == 0 && - running == 0 && - main_thread_worker_state == MainThreadWorkerState::Idle { - assert!(!started_lto); - assert!(needs_lto.len() + lto_import_only_modules.len() > 0); - started_lto = true; - let modules = mem::replace(&mut needs_lto, Vec::new()); - let import_only_modules = - mem::replace(&mut lto_import_only_modules, Vec::new()); - for (work, cost) in generate_lto_work(&cgcx, modules, import_only_modules) { - let insertion_index = work_items - .binary_search_by_key(&cost, |&(_, cost)| cost) - .unwrap_or_else(|e| e); - work_items.insert(insertion_index, (work, cost)); - if !cgcx.opts.debugging_opts.no_parallel_llvm { - helper.request_token(); - } - } - } - - // In this branch, we know that everything has been codegened, - // so it's just a matter of determining whether the implicit - // Token is free to use for LLVM work. - match main_thread_worker_state { - MainThreadWorkerState::Idle => { - if let Some((item, _)) = work_items.pop() { - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - main_thread_worker_state = MainThreadWorkerState::LLVMing; - spawn_work(cgcx, item); - } else { - // There is no unstarted work, so let the main thread - // take over for a running worker. Otherwise the - // implicit token would just go to waste. - // We reduce the `running` counter by one. The - // `tokens.truncate()` below will take care of - // giving the Token back. - debug_assert!(running > 0); - running -= 1; - main_thread_worker_state = MainThreadWorkerState::LLVMing; - } - } - MainThreadWorkerState::Codegenning => { - bug!("codegen worker should not be codegenning after \ - codegen was already completed") - } - MainThreadWorkerState::LLVMing => { - // Already making good use of that token - } - } - } - - // Spin up what work we can, only doing this while we've got available - // parallelism slots and work left to spawn. - while work_items.len() > 0 && running < tokens.len() { - let (item, _) = work_items.pop().unwrap(); - - maybe_start_llvm_timer(cgcx.config(item.module_kind()), - &mut llvm_start_time); - - let cgcx = CodegenContext { - worker: get_worker_id(&mut free_worker_ids), - .. cgcx.clone() - }; - - spawn_work(cgcx, item); - running += 1; - } - - // Relinquish accidentally acquired extra tokens - tokens.truncate(running); - - let msg = coordinator_receive.recv().unwrap(); - match *msg.downcast::().ok().unwrap() { - // Save the token locally and the next turn of the loop will use - // this to spawn a new unit of work, or it may get dropped - // immediately if we have no more work to spawn. - Message::Token(token) => { - match token { - Ok(token) => { - tokens.push(token); - - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - // If the main thread token is used for LLVM work - // at the moment, we turn that thread into a regular - // LLVM worker thread, so the main thread is free - // to react to codegen demand. 
- main_thread_worker_state = MainThreadWorkerState::Idle; - running += 1; - } - } - Err(e) => { - let msg = &format!("failed to acquire jobserver token: {}", e); - shared_emitter.fatal(msg); - // Exit the coordinator thread - panic!("{}", msg) - } - } - } - - Message::CodegenDone { llvm_work_item, cost } => { - // We keep the queue sorted by estimated processing cost, - // so that more expensive items are processed earlier. This - // is good for throughput as it gives the main thread more - // time to fill up the queue and it avoids scheduling - // expensive items to the end. - // Note, however, that this is not ideal for memory - // consumption, as LLVM module sizes are not evenly - // distributed. - let insertion_index = - work_items.binary_search_by_key(&cost, |&(_, cost)| cost); - let insertion_index = match insertion_index { - Ok(idx) | Err(idx) => idx - }; - work_items.insert(insertion_index, (llvm_work_item, cost)); - - if !cgcx.opts.debugging_opts.no_parallel_llvm { - helper.request_token(); - } - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - - Message::CodegenComplete => { - codegen_done = true; - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - - // If a thread exits successfully then we drop a token associated - // with that worker and update our `running` count. We may later - // re-acquire a token to continue running more work. We may also not - // actually drop a token here if the worker was running with an - // "ephemeral token" - // - // Note that if the thread failed that means it panicked, so we - // abort immediately. - Message::Done { result: Ok(compiled_module), worker_id } => { - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - main_thread_worker_state = MainThreadWorkerState::Idle; - } else { - running -= 1; - } - - free_worker_ids.push(worker_id); - - match compiled_module.kind { - ModuleKind::Regular => { - compiled_modules.push(compiled_module); - } - ModuleKind::Metadata => { - assert!(compiled_metadata_module.is_none()); - compiled_metadata_module = Some(compiled_module); - } - ModuleKind::Allocator => { - assert!(compiled_allocator_module.is_none()); - compiled_allocator_module = Some(compiled_module); - } - } - } - Message::NeedsLTO { result, worker_id } => { - assert!(!started_lto); - if main_thread_worker_state == MainThreadWorkerState::LLVMing { - main_thread_worker_state = MainThreadWorkerState::Idle; - } else { - running -= 1; - } - free_worker_ids.push(worker_id); - needs_lto.push(result); - } - Message::AddImportOnlyModule { module_data, work_product } => { - assert!(!started_lto); - assert!(!codegen_done); - assert_eq!(main_thread_worker_state, - MainThreadWorkerState::Codegenning); - lto_import_only_modules.push((module_data, work_product)); - main_thread_worker_state = MainThreadWorkerState::Idle; - } - Message::Done { result: Err(()), worker_id: _ } => { - bug!("worker thread panicked"); - } - Message::CodegenItem => { - bug!("the coordinator should not receive codegen requests") - } - } - } - - if let Some(llvm_start_time) = llvm_start_time { - let total_llvm_time = Instant::now().duration_since(llvm_start_time); - // This is the top-level timing for all of LLVM, set the time-depth - // to zero. 
- set_time_depth(0); - print_time_passes_entry(cgcx.time_passes, - "LLVM passes", - total_llvm_time); - } - - // Regardless of what order these modules completed in, report them to - // the backend in the same order every time to ensure that we're handing - // out deterministic results. - compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); - - let compiled_metadata_module = compiled_metadata_module - .expect("Metadata module not compiled?"); - - Ok(CompiledModules { - modules: compiled_modules, - metadata_module: compiled_metadata_module, - allocator_module: compiled_allocator_module, - }) - }); - - // A heuristic that determines if we have enough LLVM WorkItems in the - // queue so that the main thread can do LLVM work instead of codegen - fn queue_full_enough(items_in_queue: usize, - workers_running: usize, - max_workers: usize) -> bool { - // Tune me, plz. - items_in_queue > 0 && - items_in_queue >= max_workers.saturating_sub(workers_running / 2) - } - - fn maybe_start_llvm_timer(config: &ModuleConfig, - llvm_start_time: &mut Option) { - // We keep track of the -Ztime-passes output manually, - // since the closure-based interface does not fit well here. - if config.time_passes { - if llvm_start_time.is_none() { - *llvm_start_time = Some(Instant::now()); - } - } - } -} - -pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; -pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = - time_graph::TimelineId(CODEGEN_WORKER_ID); -pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); -const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = - time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); - -fn spawn_work(cgcx: CodegenContext, work: WorkItem) { - let depth = time_depth(); - - thread::spawn(move || { - set_time_depth(depth); - - // Set up a destructor which will fire off a message that we're done as - // we exit. - struct Bomb { - coordinator_send: Sender>, - result: Option, - worker_id: usize, - } - impl Drop for Bomb { - fn drop(&mut self) { - let worker_id = self.worker_id; - let msg = match self.result.take() { - Some(WorkItemResult::Compiled(m)) => { - Message::Done { result: Ok(m), worker_id } - } - Some(WorkItemResult::NeedsLTO(m)) => { - Message::NeedsLTO { result: m, worker_id } - } - None => Message::Done { result: Err(()), worker_id } - }; - drop(self.coordinator_send.send(Box::new(msg))); - } - } - - let mut bomb = Bomb { - coordinator_send: cgcx.coordinator_send.clone(), - result: None, - worker_id: cgcx.worker, - }; - // Execute the work itself, and if it finishes successfully then flag - // ourselves as a success as well. - // - // Note that we ignore any `FatalError` coming out of `execute_work_item`, - // as a diagnostic was already sent off to the main thread - just - // surface that there was an error in this worker. 
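// --- Editorial aside (illustrative sketch, not part of the patch) ---
// Two small pieces of the coordinator logic above, extracted so they can be
// read (and run) in isolation: keeping the work queue sorted by estimated
// cost via `binary_search_by_key`, and the `queue_full_enough` heuristic that
// decides when the main thread should switch from codegen to LLVM work. The
// item names and the sample numbers are made up for the example.

fn insert_by_cost(queue: &mut Vec<(&'static str, u64)>, item: (&'static str, u64)) {
    // Same insertion scheme as the `Message::CodegenDone` handling above:
    // keep the queue ascending by cost so `pop()` yields the most expensive
    // item first.
    let idx = queue
        .binary_search_by_key(&item.1, |&(_, cost)| cost)
        .unwrap_or_else(|idx| idx);
    queue.insert(idx, item);
}

fn queue_full_enough(items_in_queue: usize, workers_running: usize, max_workers: usize) -> bool {
    // Same shape as the heuristic above ("Tune me, plz.").
    items_in_queue > 0 &&
        items_in_queue >= max_workers.saturating_sub(workers_running / 2)
}

fn main() {
    let mut queue = Vec::new();
    insert_by_cost(&mut queue, ("cgu.0", 40));
    insert_by_cost(&mut queue, ("cgu.1", 10));
    insert_by_cost(&mut queue, ("cgu.2", 25));
    assert_eq!(queue.pop(), Some(("cgu.0", 40))); // most expensive first

    // With 8 CPUs and 3 busy workers, 2 queued items are not yet "enough",
    // but 7 are.
    assert!(!queue_full_enough(2, 3, 8));
    assert!(queue_full_enough(7, 3, 8));
}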
- bomb.result = { - let timeline = cgcx.time_graph.as_ref().map(|tg| { - tg.start(time_graph::TimelineId(cgcx.worker), - LLVM_WORK_PACKAGE_KIND, - &work.name()) - }); - let mut timeline = timeline.unwrap_or(Timeline::noop()); - execute_work_item(&cgcx, work, &mut timeline).ok() - }; - }); -} - -pub fn run_assembler(cgcx: &CodegenContext, handler: &Handler, assembly: &Path, object: &Path) { - let assembler = cgcx.assembler_cmd - .as_ref() - .expect("cgcx.assembler_cmd is missing?"); - - let pname = &assembler.name; - let mut cmd = assembler.cmd.clone(); - cmd.arg("-c").arg("-o").arg(object).arg(assembly); - debug!("{:?}", cmd); - - match cmd.output() { - Ok(prog) => { - if !prog.status.success() { - let mut note = prog.stderr.clone(); - note.extend_from_slice(&prog.stdout); - - handler.struct_err(&format!("linking with `{}` failed: {}", - pname.display(), - prog.status)) - .note(&format!("{:?}", &cmd)) - .note(str::from_utf8(¬e[..]).unwrap()) - .emit(); - handler.abort_if_errors(); - } - }, - Err(e) => { - handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e)); - handler.abort_if_errors(); - } - } -} pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, config: &ModuleConfig, @@ -2190,7 +703,7 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, // reasonable defaults and prepare it to actually populate the pass // manager. let builder = llvm::LLVMPassManagerBuilderCreate(); - let opt_size = config.opt_size.unwrap_or(llvm::CodeGenOptSizeNone); + let opt_size = config.opt_size.map(get_llvm_opt_size).unwrap_or(llvm::CodeGenOptSizeNone); let inline_threshold = config.inline_threshold; let pgo_gen_path = config.pgo_gen.as_ref().map(|s| { @@ -2259,276 +772,16 @@ pub unsafe fn with_llvm_pmb(llmod: &llvm::Module, } -enum SharedEmitterMessage { - Diagnostic(Diagnostic), - InlineAsmError(u32, String), - AbortIfErrors, - Fatal(String), -} - -#[derive(Clone)] -pub struct SharedEmitter { - sender: Sender, -} - -pub struct SharedEmitterMain { - receiver: Receiver, -} - -impl SharedEmitter { - pub fn new() -> (SharedEmitter, SharedEmitterMain) { - let (sender, receiver) = channel(); - - (SharedEmitter { sender }, SharedEmitterMain { receiver }) - } - - fn inline_asm_error(&self, cookie: u32, msg: String) { - drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); - } - - fn fatal(&self, msg: &str) { - drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); - } -} - -impl Emitter for SharedEmitter { - fn emit(&mut self, db: &DiagnosticBuilder) { - drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: db.message(), - code: db.code.clone(), - lvl: db.level, - }))); - for child in &db.children { - drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { - msg: child.message(), - code: None, - lvl: child.level, - }))); - } - drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); - } -} - -impl SharedEmitterMain { - pub fn check(&self, sess: &Session, blocking: bool) { - loop { - let message = if blocking { - match self.receiver.recv() { - Ok(message) => Ok(message), - Err(_) => Err(()), - } - } else { - match self.receiver.try_recv() { - Ok(message) => Ok(message), - Err(_) => Err(()), - } - }; - - match message { - Ok(SharedEmitterMessage::Diagnostic(diag)) => { - let handler = sess.diagnostic(); - match diag.code { - Some(ref code) => { - handler.emit_with_code(&MultiSpan::new(), - &diag.msg, - code.clone(), - diag.lvl); - } - None => { - handler.emit(&MultiSpan::new(), - &diag.msg, - diag.lvl); - } - } - } - 
Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { - match Mark::from_u32(cookie).expn_info() { - Some(ei) => sess.span_err(ei.call_site, &msg), - None => sess.err(&msg), - } - } - Ok(SharedEmitterMessage::AbortIfErrors) => { - sess.abort_if_errors(); - } - Ok(SharedEmitterMessage::Fatal(msg)) => { - sess.fatal(&msg); - } - Err(_) => { - break; - } - } - - } - } -} - -pub struct OngoingCodegen { - crate_name: Symbol, - crate_hash: Svh, - metadata: EncodedMetadata, - windows_subsystem: Option, - linker_info: LinkerInfo, - crate_info: CrateInfo, - time_graph: Option, - coordinator_send: Sender>, - codegen_worker_receive: Receiver, - shared_emitter_main: SharedEmitterMain, - future: thread::JoinHandle>, - output_filenames: Arc, -} - -impl OngoingCodegen { - pub(crate) fn join( - self, - sess: &Session - ) -> (CodegenResults, FxHashMap) { - self.shared_emitter_main.check(sess, true); - let compiled_modules = match self.future.join() { - Ok(Ok(compiled_modules)) => compiled_modules, - Ok(Err(())) => { - sess.abort_if_errors(); - panic!("expected abort due to worker thread errors") - }, - Err(_) => { - bug!("panic during codegen/LLVM phase"); - } - }; - - sess.cgu_reuse_tracker.check_expected_reuse(sess); - - sess.abort_if_errors(); - - if let Some(time_graph) = self.time_graph { - time_graph.dump(&format!("{}-timings", self.crate_name)); - } - - let work_products = - copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, - &compiled_modules); - produce_final_output_artifacts(sess, - &compiled_modules, - &self.output_filenames); - - // FIXME: time_llvm_passes support - does this use a global context or - // something? - if sess.codegen_units() == 1 && sess.time_llvm_passes() { - unsafe { llvm::LLVMRustPrintPassTimings(); } - } - - (CodegenResults { - crate_name: self.crate_name, - crate_hash: self.crate_hash, - metadata: self.metadata, - windows_subsystem: self.windows_subsystem, - linker_info: self.linker_info, - crate_info: self.crate_info, - - modules: compiled_modules.modules, - allocator_module: compiled_modules.allocator_module, - metadata_module: compiled_modules.metadata_module, - }, work_products) - } - - pub(crate) fn submit_pre_codegened_module_to_llvm(&self, - tcx: TyCtxt, - module: ModuleCodegen) { - self.wait_for_signal_to_codegen_item(); - self.check_for_errors(tcx.sess); - - // These are generally cheap and won't through off scheduling. - let cost = 0; - submit_codegened_module_to_llvm(tcx, module, cost); - } - - pub fn codegen_finished(&self, tcx: TyCtxt) { - self.wait_for_signal_to_codegen_item(); - self.check_for_errors(tcx.sess); - drop(self.coordinator_send.send(Box::new(Message::CodegenComplete))); - } - - pub fn check_for_errors(&self, sess: &Session) { - self.shared_emitter_main.check(sess, false); - } - - pub fn wait_for_signal_to_codegen_item(&self) { - match self.codegen_worker_receive.recv() { - Ok(Message::CodegenItem) => { - // Nothing to do - } - Ok(_) => panic!("unexpected message"), - Err(_) => { - // One of the LLVM threads must have panicked, fall through so - // error handling can be reached. 
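A minimal sketch of the `SharedEmitter`/`SharedEmitterMain` pair above, using only std: worker threads cannot touch the `Session`, so they push diagnostics over an mpsc channel and the main thread drains and replays them (blocking at join time, non-blocking while codegen is still in flight). The message type here is a simplified stand-in for the real one.

use std::sync::mpsc::{channel, Receiver, Sender};

enum Msg { Diagnostic(String), AbortIfErrors }

struct SharedEmitter { sender: Sender<Msg> }
struct SharedEmitterMain { receiver: Receiver<Msg> }

impl SharedEmitterMain {
    fn check(&self, blocking: bool) {
        loop {
            let msg = if blocking { self.receiver.recv().ok() } else { self.receiver.try_recv().ok() };
            match msg {
                Some(Msg::Diagnostic(text)) => eprintln!("error: {}", text),
                Some(Msg::AbortIfErrors) => { /* sess.abort_if_errors() in the real code */ }
                None => break, // channel empty or all senders gone
            }
        }
    }
}

fn main() {
    let (sender, receiver) = channel();
    let emitter = SharedEmitter { sender };
    let main_side = SharedEmitterMain { receiver };
    emitter.sender.send(Msg::Diagnostic("linking failed".to_string())).unwrap();
    drop(emitter); // the worker is done; dropping its sender ends the blocking drain
    main_side.check(true);
}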
- } - } - } -} - -pub(crate) fn submit_codegened_module_to_llvm(tcx: TyCtxt, - module: ModuleCodegen, - cost: u64) { - let llvm_work_item = WorkItem::Optimize(module); - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { - llvm_work_item, - cost, - }))); -} - -pub(crate) fn submit_post_lto_module_to_llvm(tcx: TyCtxt, - module: CachedModuleCodegen) { - let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module); - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::CodegenDone { - llvm_work_item, - cost: 0, - }))); -} - -pub(crate) fn submit_pre_lto_module_to_llvm(tcx: TyCtxt, - module: CachedModuleCodegen) { - let filename = pre_lto_bitcode_filename(&module.name); - let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename); - let file = fs::File::open(&bc_path).unwrap_or_else(|e| { - panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e) - }); - - let mmap = unsafe { - memmap::Mmap::map(&file).unwrap_or_else(|e| { - panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e) - }) - }; - - // Schedule the module to be loaded - drop(tcx.tx_to_llvm_workers.lock().send(Box::new(Message::AddImportOnlyModule { - module_data: SerializedModule::FromUncompressedFile(mmap), - work_product: module.source, - }))); -} - -pub(super) fn pre_lto_bitcode_filename(module_name: &str) -> String { - format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) -} - -fn msvc_imps_needed(tcx: TyCtxt) -> bool { - // This should never be true (because it's not supported). If it is true, - // something is wrong with commandline arg validation. - assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && - tcx.sess.target.target.options.is_like_msvc && - tcx.sess.opts.cg.prefer_dynamic)); - - tcx.sess.target.target.options.is_like_msvc && - tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() -} - // Create a `__imp_ = &symbol` global for every public static `symbol`. // This is required to satisfy `dllimport` references to static data in .rlibs // when using MSVC linker. We do this only for data, as linker can fix up // code references on its own. // See #26591, #27438 -fn create_msvc_imps(cgcx: &CodegenContext, llcx: &llvm::Context, llmod: &llvm::Module) { +fn create_msvc_imps( + cgcx: &CodegenContext, + llcx: &llvm::Context, + llmod: &llvm::Module +) { if !cgcx.msvc_imps_needed { return } diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs index 614a562846e86..eeab1ab51350f 100644 --- a/src/librustc_codegen_llvm/base.rs +++ b/src/librustc_codegen_llvm/base.rs @@ -24,589 +24,40 @@ //! int) and rec(x=int, y=int, z=int) will have the same llvm::Type. 
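The `__imp_` workaround above, reduced to its naming scheme: for every exported static `FOO`, an extra global named `__imp_FOO` holding the address of `FOO` is emitted, which is what MSVC-style `dllimport` references resolve against. This sketch only shows the name mangling; the real `create_msvc_imps` works on the LLVM module directly, handles platform symbol-prefix details, and only runs when `msvc_imps_needed` is true.

fn imp_symbol_name(symbol: &str) -> String {
    format!("__imp_{}", symbol)
}

fn main() {
    assert_eq!(imp_symbol_name("MY_STATIC"), "__imp_MY_STATIC");
}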
use super::ModuleLlvm; -use super::ModuleCodegen; -use super::ModuleKind; -use super::CachedModuleCodegen; +use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; +use rustc_codegen_ssa::base::maybe_create_entry_wrapper; +use super::LlvmCodegenBackend; -use abi; -use back::write::{self, OngoingCodegen}; -use llvm::{self, TypeKind, get_param}; +use llvm; use metadata; -use rustc::dep_graph::cgu_reuse_tracker::CguReuse; -use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; -use rustc::middle::lang_items::StartFnLangItem; -use rustc::middle::weak_lang_items; -use rustc::mir::mono::{Linkage, Visibility, Stats, CodegenUnitNameBuilder}; +use rustc::mir::mono::{Linkage, Visibility, Stats}; use rustc::middle::cstore::{EncodedMetadata}; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; -use rustc::ty::query::Providers; -use rustc::middle::cstore::{self, LinkagePreference}; +use rustc::ty::TyCtxt; use rustc::middle::exported_symbols; -use rustc::util::common::{time, print_time_passes_entry}; -use rustc::util::profiling::ProfileCategory; -use rustc::session::config::{self, DebugInfo, EntryFnType, Lto}; -use rustc::session::Session; -use rustc_incremental; -use allocator; -use mir::place::PlaceRef; -use attributes; -use builder::{Builder, MemFlags}; -use callee; -use common::{C_bool, C_bytes_in_context, C_i32, C_usize}; -use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; -use rustc_mir::monomorphize::item::DefPathBasedNames; -use common::{self, C_struct_in_context, C_array, val_ty}; -use consts; +use rustc::session::config::{self, DebugInfo}; +use builder::Builder; +use common; use context::CodegenCx; -use debuginfo; -use declare; -use meth; -use mir; -use monomorphize::Instance; -use monomorphize::partitioning::{self, PartitioningStrategy, CodegenUnit, CodegenUnitExt}; -use rustc_codegen_utils::symbol_names_test; -use time_graph; -use mono_item::{MonoItem, BaseMonoItemExt, MonoItemExt}; -use type_::Type; -use type_of::LayoutLlvmExt; -use rustc::util::nodemap::{FxHashMap, DefIdSet}; -use CrateInfo; +use monomorphize::partitioning::CodegenUnitExt; +use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_data_structures::small_c_str::SmallCStr; -use rustc_data_structures::sync::Lrc; -use std::any::Any; +use rustc_codegen_ssa::interfaces::*; +use rustc_codegen_ssa::back::write::submit_codegened_module_to_llvm; + use std::ffi::CString; -use std::sync::Arc; -use std::time::{Instant, Duration}; -use std::i32; -use std::cmp; -use std::sync::mpsc; -use syntax_pos::Span; +use std::time::Instant; use syntax_pos::symbol::InternedString; -use syntax::attr; -use rustc::hir::{self, CodegenFnAttrs}; +use rustc::hir::CodegenFnAttrs; use value::Value; -use mir::operand::OperandValue; - -use rustc_codegen_utils::check_for_rustc_errors_attr; - -pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll> { - cx: &'a CodegenCx<'ll, 'tcx>, - name: Option, - istart: usize, -} - -impl StatRecorder<'a, 'll, 'tcx> { - pub fn new(cx: &'a CodegenCx<'ll, 'tcx>, name: String) -> Self { - let istart = cx.stats.borrow().n_llvm_insns; - StatRecorder { - cx, - name: Some(name), - istart, - } - } -} - -impl Drop for StatRecorder<'a, 'll, 'tcx> { - fn drop(&mut self) { - if self.cx.sess().codegen_stats() { - let mut stats = self.cx.stats.borrow_mut(); - let iend = stats.n_llvm_insns; - stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); - stats.n_fns += 1; - // Reset LLVM insn count to avoid compound costs. 
- stats.n_llvm_insns = self.istart; - } - } -} -pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, - signed: bool) - -> llvm::IntPredicate { - match op { - hir::BinOpKind::Eq => llvm::IntEQ, - hir::BinOpKind::Ne => llvm::IntNE, - hir::BinOpKind::Lt => if signed { llvm::IntSLT } else { llvm::IntULT }, - hir::BinOpKind::Le => if signed { llvm::IntSLE } else { llvm::IntULE }, - hir::BinOpKind::Gt => if signed { llvm::IntSGT } else { llvm::IntUGT }, - hir::BinOpKind::Ge => if signed { llvm::IntSGE } else { llvm::IntUGE }, - op => { - bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ - found {:?}", - op) - } - } -} -pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> llvm::RealPredicate { - match op { - hir::BinOpKind::Eq => llvm::RealOEQ, - hir::BinOpKind::Ne => llvm::RealUNE, - hir::BinOpKind::Lt => llvm::RealOLT, - hir::BinOpKind::Le => llvm::RealOLE, - hir::BinOpKind::Gt => llvm::RealOGT, - hir::BinOpKind::Ge => llvm::RealOGE, - op => { - bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ - found {:?}", - op); - } - } -} - -pub fn compare_simd_types( - bx: &Builder<'a, 'll, 'tcx>, - lhs: &'ll Value, - rhs: &'ll Value, - t: Ty<'tcx>, - ret_ty: &'ll Type, - op: hir::BinOpKind -) -> &'ll Value { - let signed = match t.sty { - ty::Float(_) => { - let cmp = bin_op_to_fcmp_predicate(op); - return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty); - }, - ty::Uint(_) => false, - ty::Int(_) => true, - _ => bug!("compare_simd_types: invalid SIMD type"), - }; - - let cmp = bin_op_to_icmp_predicate(op, signed); - // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension - // to get the correctly sized type. This will compile to a single instruction - // once the IR is converted to assembly if the SIMD instruction is supported - // by the target architecture. - bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty) -} - -/// Retrieve the information we are losing (making dynamic) in an unsizing -/// adjustment. -/// -/// The `old_info` argument is a bit funny. It is intended for use -/// in an upcast, where the new vtable for an object will be derived -/// from the old one. -pub fn unsized_info( - cx: &CodegenCx<'ll, 'tcx>, - source: Ty<'tcx>, - target: Ty<'tcx>, - old_info: Option<&'ll Value>, -) -> &'ll Value { - let (source, target) = cx.tcx.struct_lockstep_tails(source, target); - match (&source.sty, &target.sty) { - (&ty::Array(_, len), &ty::Slice(_)) => { - C_usize(cx, len.unwrap_usize(cx.tcx)) - } - (&ty::Dynamic(..), &ty::Dynamic(..)) => { - // For now, upcasts are limited to changes in marker - // traits, and hence never actually require an actual - // change to the vtable. - old_info.expect("unsized_info: missing old info for trait upcast") - } - (_, &ty::Dynamic(ref data, ..)) => { - let vtable_ptr = cx.layout_of(cx.tcx.mk_mut_ptr(target)) - .field(cx, abi::FAT_PTR_EXTRA); - consts::ptrcast(meth::get_vtable(cx, source, data.principal()), - vtable_ptr.llvm_type(cx)) - } - _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", - source, - target), - } -} - -/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. -pub fn unsize_thin_ptr( - bx: &Builder<'a, 'll, 'tcx>, - src: &'ll Value, - src_ty: Ty<'tcx>, - dst_ty: Ty<'tcx> -) -> (&'ll Value, &'ll Value) { - debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); - match (&src_ty.sty, &dst_ty.sty) { - (&ty::Ref(_, a, _), - &ty::Ref(_, b, _)) | - (&ty::Ref(_, a, _), - &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | - (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), - &ty::RawPtr(ty::TypeAndMut { ty: b, .. 
})) => { - assert!(bx.cx.type_is_sized(a)); - let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { - let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); - assert!(bx.cx.type_is_sized(a)); - let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to(); - (bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None)) - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); - - let src_layout = bx.cx.layout_of(src_ty); - let dst_layout = bx.cx.layout_of(dst_ty); - let mut result = None; - for i in 0..src_layout.fields.count() { - let src_f = src_layout.field(bx.cx, i); - assert_eq!(src_layout.fields.offset(i).bytes(), 0); - assert_eq!(dst_layout.fields.offset(i).bytes(), 0); - if src_f.is_zst() { - continue; - } - assert_eq!(src_layout.size, src_f.size); - - let dst_f = dst_layout.field(bx.cx, i); - assert_ne!(src_f.ty, dst_f.ty); - assert_eq!(result, None); - result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); - } - let (lldata, llextra) = result.unwrap(); - // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - (bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0, true)), - bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1, true))) - } - _ => bug!("unsize_thin_ptr: called on bad types"), - } -} - -/// Coerce `src`, which is a reference to a value of type `src_ty`, -/// to a value of type `dst_ty` and store the result in `dst` -pub fn coerce_unsized_into( - bx: &Builder<'a, 'll, 'tcx>, - src: PlaceRef<'ll, 'tcx>, - dst: PlaceRef<'ll, 'tcx> -) { - let src_ty = src.layout.ty; - let dst_ty = dst.layout.ty; - let coerce_ptr = || { - let (base, info) = match src.load(bx).val { - OperandValue::Pair(base, info) => { - // fat-ptr to fat-ptr unsize preserves the vtable - // i.e. &'a fmt::Debug+Send => &'a fmt::Debug - // So we need to pointercast the base to ensure - // the types match up. - let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR); - (bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info) - } - OperandValue::Immediate(base) => { - unsize_thin_ptr(bx, base, src_ty, dst_ty) - } - OperandValue::Ref(..) 
=> bug!() - }; - OperandValue::Pair(base, info).store(bx, dst); - }; - match (&src_ty.sty, &dst_ty.sty) { - (&ty::Ref(..), &ty::Ref(..)) | - (&ty::Ref(..), &ty::RawPtr(..)) | - (&ty::RawPtr(..), &ty::RawPtr(..)) => { - coerce_ptr() - } - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { - coerce_ptr() - } - - (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { - assert_eq!(def_a, def_b); - - for i in 0..def_a.variants[0].fields.len() { - let src_f = src.project_field(bx, i); - let dst_f = dst.project_field(bx, i); - - if dst_f.layout.is_zst() { - continue; - } - - if src_f.layout.ty == dst_f.layout.ty { - memcpy_ty(bx, dst_f.llval, src_f.llval, src_f.layout, - src_f.align.min(dst_f.align), MemFlags::empty()); - } else { - coerce_unsized_into(bx, src_f, dst_f); - } - } - } - _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", - src_ty, - dst_ty), - } -} - -pub fn cast_shift_expr_rhs( - cx: &Builder<'_, 'll, '_>, op: hir::BinOpKind, lhs: &'ll Value, rhs: &'ll Value -) -> &'ll Value { - cast_shift_rhs(op, lhs, rhs, |a, b| cx.trunc(a, b), |a, b| cx.zext(a, b)) -} - -fn cast_shift_rhs<'ll, F, G>(op: hir::BinOpKind, - lhs: &'ll Value, - rhs: &'ll Value, - trunc: F, - zext: G) - -> &'ll Value - where F: FnOnce(&'ll Value, &'ll Type) -> &'ll Value, - G: FnOnce(&'ll Value, &'ll Type) -> &'ll Value -{ - // Shifts may have any size int on the rhs - if op.is_shift() { - let mut rhs_llty = val_ty(rhs); - let mut lhs_llty = val_ty(lhs); - if rhs_llty.kind() == TypeKind::Vector { - rhs_llty = rhs_llty.element_type() - } - if lhs_llty.kind() == TypeKind::Vector { - lhs_llty = lhs_llty.element_type() - } - let rhs_sz = rhs_llty.int_width(); - let lhs_sz = lhs_llty.int_width(); - if lhs_sz < rhs_sz { - trunc(rhs, lhs_llty) - } else if lhs_sz > rhs_sz { - // FIXME (#1877: If in the future shifting by negative - // values is no longer undefined then this is wrong. - zext(rhs, lhs_llty) - } else { - rhs - } - } else { - rhs - } -} - -/// Returns whether this session's target will use SEH-based unwinding. -/// -/// This is only true for MSVC targets, and even then the 64-bit MSVC target -/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as -/// 64-bit MinGW) instead of "full SEH". -pub fn wants_msvc_seh(sess: &Session) -> bool { - sess.target.target.options.is_like_msvc -} - -pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) { - let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume"); - bx.call(assume_intrinsic, &[val], None); -} - -pub fn from_immediate(bx: &Builder<'_, 'll, '_>, val: &'ll Value) -> &'ll Value { - if val_ty(val) == Type::i1(bx.cx) { - bx.zext(val, Type::i8(bx.cx)) - } else { - val - } -} - -pub fn to_immediate( - bx: &Builder<'_, 'll, '_>, - val: &'ll Value, - layout: layout::TyLayout, -) -> &'ll Value { - if let layout::Abi::Scalar(ref scalar) = layout.abi { - return to_immediate_scalar(bx, val, scalar); - } - val -} - -pub fn to_immediate_scalar( - bx: &Builder<'_, 'll, '_>, - val: &'ll Value, - scalar: &layout::Scalar, -) -> &'ll Value { - if scalar.is_bool() { - return bx.trunc(val, Type::i1(bx.cx)); - } - val -} - -pub fn call_memcpy( - bx: &Builder<'_, 'll, '_>, - dst: &'ll Value, - src: &'ll Value, - n_bytes: &'ll Value, - align: Align, - flags: MemFlags, -) { - if flags.contains(MemFlags::NONTEMPORAL) { - // HACK(nox): This is inefficient but there is no nontemporal memcpy. 
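What the unsizing helpers above (`unsized_info`, `unsize_thin_ptr`, `coerce_unsized_into`) compute, stated at the source level: the extra word of a fat pointer is a length when unsizing an array to a slice, and a vtable pointer when unsizing to a trait object. A small plain-Rust illustration of that convention:

use std::fmt::Debug;

fn main() {
    let array = [1u32, 2, 3];
    let slice: &[u32] = &array;      // extra word = length, taken from the array type
    assert_eq!(slice.len(), 3);

    let value = 42u32;
    let object: &dyn Debug = &value; // extra word = vtable pointer for u32's Debug impl
    println!("{:?}", object);
}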
- let val = bx.load(src, align); - let ptr = bx.pointercast(dst, val_ty(val).ptr_to()); - bx.store_with_flags(val, ptr, align, flags); - return; - } - let cx = bx.cx; - let ptr_width = &cx.sess().target.target.target_pointer_width; - let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); - let memcpy = cx.get_intrinsic(&key); - let src_ptr = bx.pointercast(src, Type::i8p(cx)); - let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); - let size = bx.intcast(n_bytes, cx.isize_ty, false); - let align = C_i32(cx, align.abi() as i32); - let volatile = C_bool(cx, flags.contains(MemFlags::VOLATILE)); - bx.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); -} - -pub fn memcpy_ty( - bx: &Builder<'_, 'll, 'tcx>, - dst: &'ll Value, - src: &'ll Value, - layout: TyLayout<'tcx>, - align: Align, - flags: MemFlags, -) { - let size = layout.size.bytes(); - if size == 0 { - return; - } - - call_memcpy(bx, dst, src, C_usize(bx.cx, size), align, flags); -} - -pub fn call_memset( - bx: &Builder<'_, 'll, '_>, - ptr: &'ll Value, - fill_byte: &'ll Value, - size: &'ll Value, - align: &'ll Value, - volatile: bool, -) -> &'ll Value { - let ptr_width = &bx.cx.sess().target.target.target_pointer_width; - let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); - let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key); - let volatile = C_bool(bx.cx, volatile); - bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) -} - -pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) { - let _s = if cx.sess().codegen_stats() { - let mut instance_name = String::new(); - DefPathBasedNames::new(cx.tcx, true, true) - .push_def_path(instance.def_id(), &mut instance_name); - Some(StatRecorder::new(cx, instance_name)) - } else { - None - }; - - // this is an info! to allow collecting monomorphization statistics - // and to allow finding the last function before LLVM aborts from - // release builds. - info!("codegen_instance({})", instance); - - let fn_ty = instance.ty(cx.tcx); - let sig = common::ty_fn_sig(cx, fn_ty); - let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - - let lldecl = cx.instances.borrow().get(&instance).cloned().unwrap_or_else(|| - bug!("Instance `{:?}` not already declared", instance)); - - cx.stats.borrow_mut().n_closures += 1; - - let mir = cx.tcx.instance_mir(instance.def); - mir::codegen_mir(cx, lldecl, &mir, instance, sig); -} - -pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { - let sect = match attrs.link_section { - Some(name) => name, - None => return, - }; - unsafe { - let buf = SmallCStr::new(§.as_str()); - llvm::LLVMSetSection(llval, buf.as_ptr()); - } -} - -/// Create the `main` function which will initialize the rust runtime and call -/// users main function. -fn maybe_create_entry_wrapper(cx: &CodegenCx) { - let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { - Some((id, span, _)) => { - (cx.tcx.hir.local_def_id(id), span) - } - None => return, - }; - - let instance = Instance::mono(cx.tcx, main_def_id); - - if !cx.codegen_unit.contains_item(&MonoItem::Fn(instance)) { - // We want to create the wrapper in the same codegen unit as Rust's main - // function. 
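The intrinsic lookup key built in `call_memcpy` (and analogously in `call_memset`) is parameterized by the target's pointer width, exactly as the `format!` above shows: a 64-bit target asks for `llvm.memcpy.p0i8.p0i8.i64`, a 32-bit target for the `i32` variant.

fn memcpy_intrinsic_key(target_pointer_width: &str) -> String {
    format!("llvm.memcpy.p0i8.p0i8.i{}", target_pointer_width)
}

fn main() {
    assert_eq!(memcpy_intrinsic_key("64"), "llvm.memcpy.p0i8.p0i8.i64");
    assert_eq!(memcpy_intrinsic_key("32"), "llvm.memcpy.p0i8.p0i8.i32");
}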
- return; - } - - let main_llfn = callee::get_fn(cx, instance); - - let et = cx.sess().entry_fn.get().map(|e| e.2); - match et { - Some(EntryFnType::Main) => create_entry_fn(cx, span, main_llfn, main_def_id, true), - Some(EntryFnType::Start) => create_entry_fn(cx, span, main_llfn, main_def_id, false), - None => {} // Do nothing. - } - - fn create_entry_fn( - cx: &CodegenCx<'ll, '_>, - sp: Span, - rust_main: &'ll Value, - rust_main_def_id: DefId, - use_start_lang_item: bool, - ) { - let llfty = Type::func(&[Type::c_int(cx), Type::i8p(cx).ptr_to()], Type::c_int(cx)); - - let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output(); - // Given that `main()` has no arguments, - // then its return type cannot have - // late-bound regions, since late-bound - // regions must appear in the argument - // listing. - let main_ret_ty = cx.tcx.erase_regions( - &main_ret_ty.no_late_bound_regions().unwrap(), - ); - - if declare::get_defined_value(cx, "main").is_some() { - // FIXME: We should be smart and show a better diagnostic here. - cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") - .help("did you use #[no_mangle] on `fn main`? Use #[start] instead") - .emit(); - cx.sess().abort_if_errors(); - bug!(); - } - let llfn = declare::declare_cfn(cx, "main", llfty); - - // `main` should respect same config for frame pointer elimination as rest of code - attributes::set_frame_pointer_elimination(cx, llfn); - attributes::apply_target_cpu_attr(cx, llfn); - - let bx = Builder::new_block(cx, llfn, "top"); - - debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(&bx); - - // Params from native main() used as args for rust start function - let param_argc = get_param(llfn, 0); - let param_argv = get_param(llfn, 1); - let arg_argc = bx.intcast(param_argc, cx.isize_ty, true); - let arg_argv = param_argv; - - let (start_fn, args) = if use_start_lang_item { - let start_def_id = cx.tcx.require_lang_item(StartFnLangItem); - let start_fn = callee::resolve_and_get_fn( - cx, - start_def_id, - cx.tcx.intern_substs(&[main_ret_ty.into()]), - ); - (start_fn, vec![bx.pointercast(rust_main, Type::i8p(cx).ptr_to()), - arg_argc, arg_argv]) - } else { - debug!("using user-defined start fn"); - (rust_main, vec![arg_argc, arg_argv]) - }; - - let result = bx.call(start_fn, &args, None); - bx.ret(bx.intcast(result, Type::c_int(cx), true)); - } -} - -fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, - llvm_module: &ModuleLlvm) - -> EncodedMetadata { +pub fn write_metadata<'a, 'gcx>( + tcx: TyCtxt<'a, 'gcx, 'gcx>, + llvm_module: &ModuleLlvm +) -> EncodedMetadata { use std::io::Write; use flate2::Compression; use flate2::write::DeflateEncoder; @@ -647,12 +98,12 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>, DeflateEncoder::new(&mut compressed, Compression::fast()) .write_all(&metadata.raw_data).unwrap(); - let llmeta = C_bytes_in_context(metadata_llcx, &compressed); - let llconst = C_struct_in_context(metadata_llcx, &[llmeta], false); + let llmeta = common::bytes_in_context(metadata_llcx, &compressed); + let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false); let name = exported_symbols::metadata_symbol_name(tcx); let buf = CString::new(name).unwrap(); let llglobal = unsafe { - llvm::LLVMAddGlobal(metadata_llmod, val_ty(llconst), buf.as_ptr()) + llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr()) }; unsafe { llvm::LLVMSetInitializer(llglobal, llconst); @@ -696,491 +147,7 @@ pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> { } 
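Conceptually, the wrapper emitted by `create_entry_fn` above behaves like the following self-contained sketch: take the C-style `(argc, argv)`, hand them to the `start` lang item together with the user's `main`, and cast the result back to a C int. `lang_start` here is a plain stand-in function, not the real lang item, and the real wrapper is of course emitted directly as LLVM IR rather than compiled from source.

use std::os::raw::c_int;

fn user_main() {}

fn lang_start(main: fn(), _argc: isize, _argv: *const *const u8) -> isize {
    main();
    0
}

extern "C" fn c_main(argc: c_int, argv: *const *const u8) -> c_int {
    lang_start(user_main, argc as isize, argv) as c_int
}

fn main() {
    let argv: [*const u8; 1] = [std::ptr::null()];
    assert_eq!(c_main(1, argv.as_ptr()), 0);
}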
} -fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - cgu: &CodegenUnit<'tcx>) - -> CguReuse { - if !tcx.dep_graph.is_fully_enabled() { - return CguReuse::No - } - - let work_product_id = &cgu.work_product_id(); - if tcx.dep_graph.previous_work_product(work_product_id).is_none() { - // We don't have anything cached for this CGU. This can happen - // if the CGU did not exist in the previous session. - return CguReuse::No - } - - // Try to mark the CGU as green. If it we can do so, it means that nothing - // affecting the LLVM module has changed and we can re-use a cached version. - // If we compile with any kind of LTO, this means we can re-use the bitcode - // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only - // know that later). If we are not doing LTO, there is only one optimized - // version of each module, so we re-use that. - let dep_node = cgu.codegen_dep_node(tcx); - assert!(!tcx.dep_graph.dep_node_exists(&dep_node), - "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", - cgu.name()); - - if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() { - // We can re-use either the pre- or the post-thinlto state - if tcx.sess.lto() != Lto::No { - CguReuse::PreLto - } else { - CguReuse::PostLto - } - } else { - CguReuse::No - } -} - -pub fn codegen_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, - rx: mpsc::Receiver>) - -> OngoingCodegen -{ - check_for_rustc_errors_attr(tcx); - - if let Some(true) = tcx.sess.opts.debugging_opts.thinlto { - if unsafe { !llvm::LLVMRustThinLTOAvailable() } { - tcx.sess.fatal("this compiler's LLVM does not support ThinLTO"); - } - } - - if (tcx.sess.opts.debugging_opts.pgo_gen.is_some() || - !tcx.sess.opts.debugging_opts.pgo_use.is_empty()) && - unsafe { !llvm::LLVMRustPGOAvailable() } - { - tcx.sess.fatal("this compiler's LLVM does not support PGO"); - } - - let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); - - // Codegen the metadata. - tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); - - let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, - &["crate"], - Some("metadata")).as_str() - .to_string(); - let metadata_llvm_module = ModuleLlvm::new(tcx.sess, &metadata_cgu_name); - let metadata = time(tcx.sess, "write metadata", || { - write_metadata(tcx, &metadata_llvm_module) - }); - tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); - - let metadata_module = ModuleCodegen { - name: metadata_cgu_name, - module_llvm: metadata_llvm_module, - kind: ModuleKind::Metadata, - }; - - let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { - Some(time_graph::TimeGraph::new()) - } else { - None - }; - - // Skip crate items and just output metadata in -Z no-codegen mode. - if tcx.sess.opts.debugging_opts.no_codegen || - !tcx.sess.opts.output_types.should_codegen() { - let ongoing_codegen = write::start_async_codegen( - tcx, - time_graph.clone(), - metadata, - rx, - 1); - - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); - ongoing_codegen.codegen_finished(tcx); - - assert_and_save_dep_graph(tcx); - - ongoing_codegen.check_for_errors(tcx.sess); - - return ongoing_codegen; - } - - // Run the monomorphization collector and partition the collected items into - // codegen units. - let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1; - let codegen_units = (*codegen_units).clone(); - - // Force all codegen_unit queries so they are already either red or green - // when compile_codegen_unit accesses them. 
We are not able to re-execute - // the codegen_unit query from just the DepNode, so an unknown color would - // lead to having to re-execute compile_codegen_unit, possibly - // unnecessarily. - if tcx.dep_graph.is_fully_enabled() { - for cgu in &codegen_units { - tcx.codegen_unit(cgu.name().clone()); - } - } - - let ongoing_codegen = write::start_async_codegen( - tcx, - time_graph.clone(), - metadata, - rx, - codegen_units.len()); - - // Codegen an allocator shim, if necessary. - // - // If the crate doesn't have an `allocator_kind` set then there's definitely - // no shim to generate. Otherwise we also check our dependency graph for all - // our output crate types. If anything there looks like its a `Dynamic` - // linkage, then it's already got an allocator shim and we'll be using that - // one instead. If nothing exists then it's our job to generate the - // allocator! - let any_dynamic_crate = tcx.sess.dependency_formats.borrow() - .iter() - .any(|(_, list)| { - use rustc::middle::dependency_format::Linkage; - list.iter().any(|&linkage| linkage == Linkage::Dynamic) - }); - let allocator_module = if any_dynamic_crate { - None - } else if let Some(kind) = *tcx.sess.allocator_kind.get() { - let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, - &["crate"], - Some("allocator")).as_str() - .to_string(); - let modules = ModuleLlvm::new(tcx.sess, &llmod_id); - time(tcx.sess, "write allocator module", || { - unsafe { - allocator::codegen(tcx, &modules, kind) - } - }); - - Some(ModuleCodegen { - name: llmod_id, - module_llvm: modules, - kind: ModuleKind::Allocator, - }) - } else { - None - }; - - if let Some(allocator_module) = allocator_module { - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module); - } - - ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); - - // We sort the codegen units by size. This way we can schedule work for LLVM - // a bit more efficiently. - let codegen_units = { - let mut codegen_units = codegen_units; - codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); - codegen_units - }; - - let mut total_codegen_time = Duration::new(0, 0); - let mut all_stats = Stats::default(); - - for cgu in codegen_units.into_iter() { - ongoing_codegen.wait_for_signal_to_codegen_item(); - ongoing_codegen.check_for_errors(tcx.sess); - - let cgu_reuse = determine_cgu_reuse(tcx, &cgu); - tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); - - match cgu_reuse { - CguReuse::No => { - let _timing_guard = time_graph.as_ref().map(|time_graph| { - time_graph.start(write::CODEGEN_WORKER_TIMELINE, - write::CODEGEN_WORK_PACKAGE_KIND, - &format!("codegen {}", cgu.name())) - }); - let start_time = Instant::now(); - let stats = compile_codegen_unit(tcx, *cgu.name()); - all_stats.extend(stats); - total_codegen_time += start_time.elapsed(); - false - } - CguReuse::PreLto => { - write::submit_pre_lto_module_to_llvm(tcx, CachedModuleCodegen { - name: cgu.name().to_string(), - source: cgu.work_product(tcx), - }); - true - } - CguReuse::PostLto => { - write::submit_post_lto_module_to_llvm(tcx, CachedModuleCodegen { - name: cgu.name().to_string(), - source: cgu.work_product(tcx), - }); - true - } - }; - } - - ongoing_codegen.codegen_finished(tcx); - - // Since the main thread is sometimes blocked during codegen, we keep track - // -Ztime-passes output manually. 
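The reuse decision driving the loop above (`determine_cgu_reuse`), distilled into a self-contained sketch: a CGU is only reusable when incremental compilation is fully enabled, a cached work product exists, and the CGU's dep-node can be marked green; whether the pre- or post-LTO artifact is reused depends on whether any form of LTO is active. Simplified boolean parameters stand in for the real dep-graph queries.

#[derive(Debug, PartialEq)]
enum CguReuse { No, PreLto, PostLto }

fn determine_cgu_reuse(incremental: bool, has_cached_work_product: bool, marked_green: bool, lto_enabled: bool) -> CguReuse {
    if !incremental || !has_cached_work_product || !marked_green {
        CguReuse::No
    } else if lto_enabled {
        CguReuse::PreLto   // reuse the pre-LTO bitcode; LTO still runs on it
    } else {
        CguReuse::PostLto  // reuse the fully optimized artifact as-is
    }
}

fn main() {
    assert_eq!(determine_cgu_reuse(true, true, true, false), CguReuse::PostLto);
    assert_eq!(determine_cgu_reuse(true, true, true, true), CguReuse::PreLto);
    assert_eq!(determine_cgu_reuse(true, true, false, true), CguReuse::No);
}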
- print_time_passes_entry(tcx.sess.time_passes(), - "codegen to LLVM IR", - total_codegen_time); - - rustc_incremental::assert_module_sources::assert_module_sources(tcx); - - symbol_names_test::report_symbol_names(tcx); - - if tcx.sess.codegen_stats() { - println!("--- codegen stats ---"); - println!("n_glues_created: {}", all_stats.n_glues_created); - println!("n_null_glues: {}", all_stats.n_null_glues); - println!("n_real_glues: {}", all_stats.n_real_glues); - - println!("n_fns: {}", all_stats.n_fns); - println!("n_inlines: {}", all_stats.n_inlines); - println!("n_closures: {}", all_stats.n_closures); - println!("fn stats:"); - all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); - for &(ref name, insns) in all_stats.fn_stats.iter() { - println!("{} insns, {}", insns, *name); - } - } - - if tcx.sess.count_llvm_insns() { - for (k, v) in all_stats.llvm_insns.iter() { - println!("{:7} {}", *v, *k); - } - } - - ongoing_codegen.check_for_errors(tcx.sess); - - assert_and_save_dep_graph(tcx); - ongoing_codegen -} - -fn assert_and_save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) { - time(tcx.sess, - "assert dep graph", - || rustc_incremental::assert_dep_graph(tcx)); - - time(tcx.sess, - "serialize dep graph", - || rustc_incremental::save_dep_graph(tcx)); -} - -fn collect_and_partition_mono_items<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, - cnum: CrateNum, -) -> (Arc, Arc>>>) -{ - assert_eq!(cnum, LOCAL_CRATE); - - let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items { - Some(ref s) => { - let mode_string = s.to_lowercase(); - let mode_string = mode_string.trim(); - if mode_string == "eager" { - MonoItemCollectionMode::Eager - } else { - if mode_string != "lazy" { - let message = format!("Unknown codegen-item collection mode '{}'. 
\ - Falling back to 'lazy' mode.", - mode_string); - tcx.sess.warn(&message); - } - - MonoItemCollectionMode::Lazy - } - } - None => { - if tcx.sess.opts.cg.link_dead_code { - MonoItemCollectionMode::Eager - } else { - MonoItemCollectionMode::Lazy - } - } - }; - - let (items, inlining_map) = - time(tcx.sess, "monomorphization collection", || { - collector::collect_crate_mono_items(tcx, collection_mode) - }); - - tcx.sess.abort_if_errors(); - - ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, items.iter()); - - let strategy = if tcx.sess.opts.incremental.is_some() { - PartitioningStrategy::PerModule - } else { - PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units()) - }; - - let codegen_units = time(tcx.sess, "codegen unit partitioning", || { - partitioning::partition(tcx, - items.iter().cloned(), - strategy, - &inlining_map) - .into_iter() - .map(Arc::new) - .collect::>() - }); - - let mono_items: DefIdSet = items.iter().filter_map(|mono_item| { - match *mono_item { - MonoItem::Fn(ref instance) => Some(instance.def_id()), - MonoItem::Static(def_id) => Some(def_id), - _ => None, - } - }).collect(); - - if tcx.sess.opts.debugging_opts.print_mono_items.is_some() { - let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default(); - - for cgu in &codegen_units { - for (&mono_item, &linkage) in cgu.items() { - item_to_cgus.entry(mono_item) - .or_default() - .push((cgu.name().clone(), linkage)); - } - } - - let mut item_keys: Vec<_> = items - .iter() - .map(|i| { - let mut output = i.to_string(tcx); - output.push_str(" @@"); - let mut empty = Vec::new(); - let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); - cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone()); - cgus.dedup(); - for &(ref cgu_name, (linkage, _)) in cgus.iter() { - output.push_str(" "); - output.push_str(&cgu_name.as_str()); - - let linkage_abbrev = match linkage { - Linkage::External => "External", - Linkage::AvailableExternally => "Available", - Linkage::LinkOnceAny => "OnceAny", - Linkage::LinkOnceODR => "OnceODR", - Linkage::WeakAny => "WeakAny", - Linkage::WeakODR => "WeakODR", - Linkage::Appending => "Appending", - Linkage::Internal => "Internal", - Linkage::Private => "Private", - Linkage::ExternalWeak => "ExternalWeak", - Linkage::Common => "Common", - }; - - output.push_str("["); - output.push_str(linkage_abbrev); - output.push_str("]"); - } - output - }) - .collect(); - - item_keys.sort(); - - for item in item_keys { - println!("MONO_ITEM {}", item); - } - } - - (Arc::new(mono_items), Arc::new(codegen_units)) -} - -impl CrateInfo { - pub fn new(tcx: TyCtxt) -> CrateInfo { - let mut info = CrateInfo { - panic_runtime: None, - compiler_builtins: None, - profiler_runtime: None, - sanitizer_runtime: None, - is_no_builtins: Default::default(), - native_libraries: Default::default(), - used_libraries: tcx.native_libraries(LOCAL_CRATE), - link_args: tcx.link_args(LOCAL_CRATE), - crate_name: Default::default(), - used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), - used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), - used_crate_source: Default::default(), - wasm_imports: Default::default(), - lang_item_to_crate: Default::default(), - missing_lang_items: Default::default(), - }; - let lang_items = tcx.lang_items(); - - let load_wasm_items = tcx.sess.crate_types.borrow() - .iter() - .any(|c| *c != config::CrateType::Rlib) && - tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; - - if load_wasm_items { - 
info.load_wasm_imports(tcx, LOCAL_CRATE); - } - - let crates = tcx.crates(); - - let n_crates = crates.len(); - info.native_libraries.reserve(n_crates); - info.crate_name.reserve(n_crates); - info.used_crate_source.reserve(n_crates); - info.missing_lang_items.reserve(n_crates); - - for &cnum in crates.iter() { - info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); - info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); - info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); - if tcx.is_panic_runtime(cnum) { - info.panic_runtime = Some(cnum); - } - if tcx.is_compiler_builtins(cnum) { - info.compiler_builtins = Some(cnum); - } - if tcx.is_profiler_runtime(cnum) { - info.profiler_runtime = Some(cnum); - } - if tcx.is_sanitizer_runtime(cnum) { - info.sanitizer_runtime = Some(cnum); - } - if tcx.is_no_builtins(cnum) { - info.is_no_builtins.insert(cnum); - } - if load_wasm_items { - info.load_wasm_imports(tcx, cnum); - } - let missing = tcx.missing_lang_items(cnum); - for &item in missing.iter() { - if let Ok(id) = lang_items.require(item) { - info.lang_item_to_crate.insert(item, id.krate); - } - } - - // No need to look for lang items that are whitelisted and don't - // actually need to exist. - let missing = missing.iter() - .cloned() - .filter(|&l| !weak_lang_items::whitelisted(tcx, l)) - .collect(); - info.missing_lang_items.insert(cnum, missing); - } - - return info - } - - fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { - self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| { - let instance = Instance::mono(tcx, id); - let import_name = tcx.symbol_name(instance); - - (import_name.to_string(), module.clone()) - })); - } -} - -fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { - let (all_mono_items, _) = - tcx.collect_and_partition_mono_items(LOCAL_CRATE); - all_mono_items.contains(&id) -} - -fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, +pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) -> Stats { let start_time = Instant::now(); @@ -1197,69 +164,52 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64; - write::submit_codegened_module_to_llvm(tcx, - module, - cost); + submit_codegened_module_to_llvm(&LlvmCodegenBackend(()), tcx, module, cost); return stats; - fn module_codegen<'a, 'tcx>( - tcx: TyCtxt<'a, 'tcx, 'tcx>, + fn module_codegen<'ll, 'tcx>( + tcx: TyCtxt<'ll, 'tcx, 'tcx>, cgu_name: InternedString) - -> (Stats, ModuleCodegen) + -> (Stats, ModuleCodegen) { + let backend = LlvmCodegenBackend(()); let cgu = tcx.codegen_unit(cgu_name); - // Instantiate monomorphizations without filling out definitions yet... - let llvm_module = ModuleLlvm::new(tcx.sess, &cgu_name.as_str()); + let llvm_module = backend.new_metadata(tcx.sess, &cgu_name.as_str()); let stats = { let cx = CodegenCx::new(tcx, cgu, &llvm_module); let mono_items = cx.codegen_unit .items_in_deterministic_order(cx.tcx); for &(mono_item, (linkage, visibility)) in &mono_items { - mono_item.predefine(&cx, linkage, visibility); + mono_item.predefine::>(&cx, linkage, visibility); } // ... and now that we have everything pre-defined, fill out those definitions. 
for &(mono_item, _) in &mono_items { - mono_item.define(&cx); + mono_item.define::>(&cx); } // If this codegen unit contains the main function, also create the // wrapper here - maybe_create_entry_wrapper(&cx); + maybe_create_entry_wrapper::>(&cx); // Run replace-all-uses-with for statics that need it - for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() { - unsafe { - let bitcast = llvm::LLVMConstPointerCast(new_g, val_ty(old_g)); - llvm::LLVMReplaceAllUsesWith(old_g, bitcast); - llvm::LLVMDeleteGlobal(old_g); - } + for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() { + cx.static_replace_all_uses(old_g, new_g) } // Create the llvm.used variable // This variable has type [N x i8*] and is stored in the llvm.metadata section - if !cx.used_statics.borrow().is_empty() { - let name = const_cstr!("llvm.used"); - let section = const_cstr!("llvm.metadata"); - let array = C_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow()); - - unsafe { - let g = llvm::LLVMAddGlobal(cx.llmod, - val_ty(array), - name.as_ptr()); - llvm::LLVMSetInitializer(g, array); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage); - llvm::LLVMSetSection(g, section.as_ptr()); - } + if !cx.used_statics().borrow().is_empty() { + cx.create_used_variable() } // Finalize debuginfo if cx.sess().opts.debuginfo != DebugInfo::None { - debuginfo::finalize(&cx); + cx.debuginfo_finalize(); } - cx.stats.into_inner() + cx.consume_stats().into_inner() }; (stats, ModuleCodegen { @@ -1270,52 +220,15 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, } } -pub fn provide(providers: &mut Providers) { - providers.collect_and_partition_mono_items = - collect_and_partition_mono_items; - - providers.is_codegened_item = is_codegened_item; - - providers.codegen_unit = |tcx, name| { - let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); - all.iter() - .find(|cgu| *cgu.name() == name) - .cloned() - .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name)) - }; - - provide_extern(providers); -} - -pub fn provide_extern(providers: &mut Providers) { - providers.dllimport_foreign_items = |tcx, krate| { - let module_map = tcx.foreign_modules(krate); - let module_map = module_map.iter() - .map(|lib| (lib.def_id, lib)) - .collect::>(); - - let dllimports = tcx.native_libraries(krate) - .iter() - .filter(|lib| { - if lib.kind != cstore::NativeLibraryKind::NativeUnknown { - return false - } - let cfg = match lib.cfg { - Some(ref cfg) => cfg, - None => return true, - }; - attr::cfg_matches(cfg, &tcx.sess.parse_sess, None) - }) - .filter_map(|lib| lib.foreign_module) - .map(|id| &module_map[&id]) - .flat_map(|module| module.foreign_items.iter().cloned()) - .collect(); - Lrc::new(dllimports) - }; - - providers.is_dllimport_foreign_item = |tcx, def_id| { - tcx.dllimport_foreign_items(def_id.krate).contains(&def_id) +pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) { + let sect = match attrs.link_section { + Some(name) => name, + None => return, }; + unsafe { + let buf = SmallCStr::new(§.as_str()); + llvm::LLVMSetSection(llval, buf.as_ptr()); + } } pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage { @@ -1341,25 +254,3 @@ pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility { Visibility::Protected => llvm::Visibility::Protected, } } - -// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement -// the HashStable trait. 
Normally DepGraph::with_task() calls are -// hidden behind queries, but CGU creation is a special case in two -// ways: (1) it's not a query and (2) CGU are output nodes, so their -// Fingerprints are not actually needed. It remains to be clarified -// how exactly this case will be handled in the red/green system but -// for now we content ourselves with providing a no-op HashStable -// implementation for CGUs. -mod temp_stable_hash_impls { - use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, - HashStable}; - use ModuleCodegen; - - impl HashStable for ModuleCodegen { - fn hash_stable(&self, - _: &mut HCX, - _: &mut StableHasher) { - // do nothing - } - } -} diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index 169bd9a8466a0..34a8ae6b862de 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -9,29 +9,35 @@ // except according to those terms. use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect}; -use llvm::{IntPredicate, RealPredicate, False, OperandBundleDef}; -use llvm::{self, BasicBlock}; -use common::*; +use llvm::{self, False, OperandBundleDef, BasicBlock}; +use rustc_codegen_ssa::common::{IntPredicate, TypeKind, RealPredicate}; +use rustc_codegen_ssa::{self, MemFlags}; +use context::CodegenCx; use type_::Type; +use type_of::LayoutLlvmExt; use value::Value; use libc::{c_uint, c_char}; use rustc::ty::TyCtxt; -use rustc::ty::layout::{Align, Size}; -use rustc::session::{config, Session}; +use rustc::ty::layout::{self, Align, Size}; +use rustc::session::config; use rustc_data_structures::small_c_str::SmallCStr; - +use rustc_codegen_ssa::interfaces::*; +use syntax; +use rustc_codegen_ssa::base::to_immediate; +use rustc_codegen_ssa::mir::operand::{OperandValue, OperandRef}; +use rustc_codegen_ssa::mir::place::PlaceRef; use std::borrow::Cow; use std::ops::Range; use std::ptr; // All Builders must have an llfn associated with them #[must_use] -pub struct Builder<'a, 'll: 'a, 'tcx: 'll> { +pub struct Builder<'a, 'll: 'a, 'tcx: 'll, V : 'll> { pub llbuilder: &'ll mut llvm::Builder<'ll>, - pub cx: &'a CodegenCx<'ll, 'tcx>, + pub cx: &'a CodegenCx<'ll, 'tcx, V>, } -impl Drop for Builder<'a, 'll, 'tcx> { +impl Drop for Builder<'a, 'll, 'tcx, V> { fn drop(&mut self) { unsafe { llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _)); @@ -46,17 +52,19 @@ fn noname() -> *const c_char { &CNULL } -bitflags! { - pub struct MemFlags: u8 { - const VOLATILE = 1 << 0; - const NONTEMPORAL = 1 << 1; - const UNALIGNED = 1 << 2; - } +impl HasCodegen<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { + type CodegenCx = CodegenCx<'ll, 'tcx, &'ll Value>; } -impl Builder<'a, 'll, 'tcx> { - pub fn new_block<'b>(cx: &'a CodegenCx<'ll, 'tcx>, llfn: &'ll Value, name: &'b str) -> Self { - let bx = Builder::with_cx(cx); +impl BuilderMethods<'a, 'll, 'tcx> + for Builder<'a, 'll, 'tcx, &'ll Value> { + + fn new_block<'b>( + cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>, + llfn: &'ll Value, + name: &'b str + ) -> Self { + let mut bx = Builder::with_cx(cx); let llbb = unsafe { let name = SmallCStr::new(name); llvm::LLVMAppendBasicBlockInContext( @@ -69,7 +77,7 @@ impl Builder<'a, 'll, 'tcx> { bx } - pub fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { + fn with_cx(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> Self { // Create a fresh builder from the crate context. 
let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) @@ -80,85 +88,81 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn build_sibling_block<'b>(&self, name: &'b str) -> Builder<'a, 'll, 'tcx> { + fn build_sibling_block<'b>(&self, name: &'b str) -> Self { Builder::new_block(self.cx, self.llfn(), name) } - pub fn sess(&self) -> &Session { - self.cx.sess() - } - - pub fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx> { self.cx.tcx } - pub fn llfn(&self) -> &'ll Value { + fn llfn(&self) -> &'ll Value { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } - pub fn llbb(&self) -> &'ll BasicBlock { + fn llbb(&self) -> &'ll BasicBlock { unsafe { llvm::LLVMGetInsertBlock(self.llbuilder) } } fn count_insn(&self, category: &str) { - if self.cx.sess().codegen_stats() { - self.cx.stats.borrow_mut().n_llvm_insns += 1; + if self.cx().sess().codegen_stats() { + self.cx().stats.borrow_mut().n_llvm_insns += 1; } - if self.cx.sess().count_llvm_insns() { - *self.cx.stats - .borrow_mut() - .llvm_insns - .entry(category.to_string()) - .or_insert(0) += 1; + if self.cx().sess().count_llvm_insns() { + *self.cx().stats + .borrow_mut() + .llvm_insns + .entry(category.to_string()) + .or_insert(0) += 1; } } - pub fn set_value_name(&self, value: &'ll Value, name: &str) { + fn set_value_name(&mut self, value: &'ll Value, name: &str) { let cname = SmallCStr::new(name); unsafe { llvm::LLVMSetValueName(value, cname.as_ptr()); } } - pub fn position_at_end(&self, llbb: &'ll BasicBlock) { + fn position_at_end(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMPositionBuilderAtEnd(self.llbuilder, llbb); } } - pub fn position_at_start(&self, llbb: &'ll BasicBlock) { + fn position_at_start(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); } } - pub fn ret_void(&self) { + fn ret_void(&mut self) { self.count_insn("retvoid"); unsafe { llvm::LLVMBuildRetVoid(self.llbuilder); } } - pub fn ret(&self, v: &'ll Value) { + fn ret(&mut self, v: &'ll Value) { self.count_insn("ret"); unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } } - pub fn br(&self, dest: &'ll BasicBlock) { + fn br(&mut self, dest: &'ll BasicBlock) { self.count_insn("br"); unsafe { llvm::LLVMBuildBr(self.llbuilder, dest); } } - pub fn cond_br( - &self, + fn cond_br( + &mut self, cond: &'ll Value, then_llbb: &'ll BasicBlock, else_llbb: &'ll BasicBlock, @@ -169,8 +173,8 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn switch( - &self, + fn switch( + &mut self, v: &'ll Value, else_llbb: &'ll BasicBlock, num_cases: usize, @@ -180,12 +184,14 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn invoke(&self, - llfn: &'ll Value, - args: &[&'ll Value], - then: &'ll BasicBlock, - catch: &'ll BasicBlock, - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { + fn invoke( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + then: &'ll BasicBlock, + catch: &'ll BasicBlock, + bundle: Option<&rustc_codegen_ssa::common::OperandBundleDef<'ll, &'ll Value>> + ) -> &'ll Value { self.count_insn("invoke"); debug!("Invoke {:?} with args ({:?})", @@ -193,21 +199,38 @@ impl Builder<'a, 'll, 'tcx> { args); let args = self.check_call("invoke", llfn, args); - let bundle = bundle.map(|b| &*b.raw); - - unsafe { - llvm::LLVMRustBuildInvoke(self.llbuilder, - llfn, - args.as_ptr(), - args.len() as c_uint, - then, - catch, - bundle, - noname()) + match bundle { + Some(b) => { + let llvm_bundle = OperandBundleDef::from_generic(b); + unsafe { + llvm::LLVMRustBuildInvoke( + self.llbuilder, + llfn, + 
args.as_ptr(), + args.len() as c_uint, + then, + catch, + Some(&*(llvm_bundle.raw)), + noname() + ) + } + } + None => unsafe { + llvm::LLVMRustBuildInvoke( + self.llbuilder, + llfn, + args.as_ptr(), + args.len() as c_uint, + then, + catch, + None, + noname() + ) + } } } - pub fn unreachable(&self) { + fn unreachable(&mut self) { self.count_insn("unreachable"); unsafe { llvm::LLVMBuildUnreachable(self.llbuilder); @@ -215,21 +238,21 @@ impl Builder<'a, 'll, 'tcx> { } /* Arithmetic */ - pub fn add(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn add(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("add"); unsafe { llvm::LLVMBuildAdd(self.llbuilder, lhs, rhs, noname()) } } - pub fn fadd(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()) } } - pub fn fadd_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fadd"); unsafe { let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, noname()); @@ -238,21 +261,21 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn sub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sub"); unsafe { llvm::LLVMBuildSub(self.llbuilder, lhs, rhs, noname()) } } - pub fn fsub(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()) } } - pub fn fsub_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fsub"); unsafe { let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, noname()); @@ -261,21 +284,21 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn mul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn mul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("mul"); unsafe { llvm::LLVMBuildMul(self.llbuilder, lhs, rhs, noname()) } } - pub fn fmul(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()) } } - pub fn fmul_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fmul"); unsafe { let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, noname()); @@ -285,42 +308,42 @@ impl Builder<'a, 'll, 'tcx> { } - pub fn udiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn udiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("udiv"); unsafe { llvm::LLVMBuildUDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn exactudiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactudiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactudiv"); unsafe { llvm::LLVMBuildExactUDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn sdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn sdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("sdiv"); unsafe { llvm::LLVMBuildSDiv(self.llbuilder, lhs, rhs, noname()) } } - 
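The shape of the refactoring running through these builder changes, reduced to a toy: the shared crate writes code against a trait whose methods take `&mut self`, and each backend instantiates it with its own value type (an `i32` here, an `&'ll Value` in the LLVM backend). The trait and names below are illustrative stand-ins, not the real `rustc_codegen_ssa` interfaces.

use std::fmt::Debug;

trait BuilderMethods {
    type Value: Copy + Debug;
    fn const_i32(&mut self, v: i32) -> Self::Value;
    fn add(&mut self, a: Self::Value, b: Self::Value) -> Self::Value;
}

struct ToyBuilder;

impl BuilderMethods for ToyBuilder {
    type Value = i32; // a real backend would use its IR value handle here
    fn const_i32(&mut self, v: i32) -> i32 { v }
    fn add(&mut self, a: i32, b: i32) -> i32 { a + b }
}

// Backend-agnostic code: it only ever talks to the trait.
fn build_sum<B: BuilderMethods>(bx: &mut B) -> B::Value {
    let a = bx.const_i32(2);
    let b = bx.const_i32(3);
    bx.add(a, b)
}

fn main() {
    let mut bx = ToyBuilder;
    println!("{:?}", build_sum(&mut bx)); // prints 5
}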
pub fn exactsdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn exactsdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("exactsdiv"); unsafe { llvm::LLVMBuildExactSDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn fdiv(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()) } } - pub fn fdiv_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fdiv"); unsafe { let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, noname()); @@ -329,28 +352,28 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn urem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn urem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("urem"); unsafe { llvm::LLVMBuildURem(self.llbuilder, lhs, rhs, noname()) } } - pub fn srem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn srem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("srem"); unsafe { llvm::LLVMBuildSRem(self.llbuilder, lhs, rhs, noname()) } } - pub fn frem(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()) } } - pub fn frem_fast(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("frem"); unsafe { let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, noname()); @@ -359,78 +382,78 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn shl(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn shl(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("shl"); unsafe { llvm::LLVMBuildShl(self.llbuilder, lhs, rhs, noname()) } } - pub fn lshr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn lshr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("lshr"); unsafe { llvm::LLVMBuildLShr(self.llbuilder, lhs, rhs, noname()) } } - pub fn ashr(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn ashr(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("ashr"); unsafe { llvm::LLVMBuildAShr(self.llbuilder, lhs, rhs, noname()) } } - pub fn and(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn and(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("and"); unsafe { llvm::LLVMBuildAnd(self.llbuilder, lhs, rhs, noname()) } } - pub fn or(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn or(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("or"); unsafe { llvm::LLVMBuildOr(self.llbuilder, lhs, rhs, noname()) } } - pub fn xor(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn xor(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("xor"); unsafe { llvm::LLVMBuildXor(self.llbuilder, lhs, rhs, noname()) } } - pub fn neg(&self, v: &'ll Value) -> &'ll Value { + fn neg(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("neg"); unsafe { llvm::LLVMBuildNeg(self.llbuilder, v, noname()) } } - pub fn fneg(&self, v: &'ll Value) -> &'ll Value { + fn fneg(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("fneg"); unsafe { 
llvm::LLVMBuildFNeg(self.llbuilder, v, noname()) } } - pub fn not(&self, v: &'ll Value) -> &'ll Value { + fn not(&mut self, v: &'ll Value) -> &'ll Value { self.count_insn("not"); unsafe { llvm::LLVMBuildNot(self.llbuilder, v, noname()) } } - pub fn alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { - let bx = Builder::with_cx(self.cx); + fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); bx.dynamic_alloca(ty, name, align) } - pub fn dynamic_alloca(&self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { self.count_insn("alloca"); unsafe { let alloca = if name.is_empty() { @@ -445,7 +468,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn array_alloca(&self, + fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, name: &str, @@ -464,7 +487,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn load(&self, ptr: &'ll Value, align: Align) -> &'ll Value { + fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value { self.count_insn("load"); unsafe { let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -473,7 +496,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn volatile_load(&self, ptr: &'ll Value) -> &'ll Value { + fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value { self.count_insn("load.volatile"); unsafe { let insn = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname()); @@ -482,10 +505,20 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn atomic_load(&self, ptr: &'ll Value, order: AtomicOrdering, align: Align) -> &'ll Value { + fn atomic_load( + &mut self, + ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, + align: Align + ) -> &'ll Value { self.count_insn("load.atomic"); unsafe { - let load = llvm::LLVMRustBuildAtomicLoad(self.llbuilder, ptr, noname(), order); + let load = llvm::LLVMRustBuildAtomicLoad( + self.llbuilder, + ptr, + noname(), + AtomicOrdering::from_generic(order) + ); // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? // However, 64-bit atomic loads on `i686-apple-darwin` appear to // require `___atomic_load` with ABI-alignment, so it's staying. @@ -494,9 +527,80 @@ impl Builder<'a, 'll, 'tcx> { } } + fn load_ref( + &mut self, + ptr: &PlaceRef<'tcx, &'ll Value> + ) -> OperandRef<'tcx, &'ll Value> { + debug!("PlaceRef::load: {:?}", ptr); + + assert_eq!(ptr.llextra.is_some(), ptr.layout.is_unsized()); - pub fn range_metadata(&self, load: &'ll Value, range: Range) { - if self.sess().target.target.arch == "amdgpu" { + if ptr.layout.is_zst() { + return OperandRef::new_zst(self.cx(), ptr.layout); + } + + fn scalar_load_metadata<'a, 'll, 'tcx>( + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + load: &'ll Value, + scalar: &layout::Scalar + ) { + let vr = scalar.valid_range.clone(); + match scalar.value { + layout::Int(..) 
=> { + let range = scalar.valid_range_exclusive(bx.cx()); + if range.start != range.end { + bx.range_metadata(load, range); + } + } + layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { + bx.nonnull_metadata(load); + } + _ => {} + } + } + + let val = if let Some(llextra) = ptr.llextra { + OperandValue::Ref(ptr.llval, Some(llextra), ptr.align) + } else if ptr.layout.is_llvm_immediate() { + let mut const_llval = None; + unsafe { + if let Some(global) = llvm::LLVMIsAGlobalVariable(ptr.llval) { + if llvm::LLVMIsGlobalConstant(global) == llvm::True { + const_llval = llvm::LLVMGetInitializer(global); + } + } + } + let llval = const_llval.unwrap_or_else(|| { + let load = self.load(ptr.llval, ptr.align); + if let layout::Abi::Scalar(ref scalar) = ptr.layout.abi { + scalar_load_metadata(self, load, scalar); + } + load + }); + OperandValue::Immediate(to_immediate(self, llval, ptr.layout)) + } else if let layout::Abi::ScalarPair(ref a, ref b) = ptr.layout.abi { + let mut load = |i, scalar: &layout::Scalar| { + let llptr = self.struct_gep(ptr.llval, i as u64); + let load = self.load(llptr, ptr.align); + scalar_load_metadata(self, load, scalar); + if scalar.is_bool() { + self.trunc(load, self.cx().type_i1()) + } else { + load + } + }; + OperandValue::Pair(load(0, a), load(1, b)) + } else { + OperandValue::Ref(ptr.llval, None, ptr.align) + }; + + OperandRef { val, layout: ptr.layout } + } + + + + fn range_metadata(&mut self, load: &'ll Value, range: Range) { + if self.cx().sess().target.target.arch == "amdgpu" { // amdgpu/LLVM does something weird and thinks a i64 value is // split into a v2i32, halving the bitwidth LLVM expects, // tripping an assertion. So, for now, just disable this @@ -505,10 +609,10 @@ impl Builder<'a, 'll, 'tcx> { } unsafe { - let llty = val_ty(load); + let llty = self.cx.val_ty(load); let v = [ - C_uint_big(llty, range.start), - C_uint_big(llty, range.end) + self.cx.const_uint_big(llty, range.start), + self.cx.const_uint_big(llty, range.end) ]; llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint, @@ -518,19 +622,37 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn nonnull_metadata(&self, load: &'ll Value) { + fn nonnull_metadata(&mut self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint, llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - pub fn store(&self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { + fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value { self.store_with_flags(val, ptr, align, MemFlags::empty()) } - pub fn store_with_flags( - &self, + fn atomic_store(&mut self, val: &'ll Value, ptr: &'ll Value, + order: rustc_codegen_ssa::common::AtomicOrdering, align: Align) { + debug!("Store {:?} -> {:?}", val, ptr); + self.count_insn("store.atomic"); + let ptr = self.check_store(val, ptr); + unsafe { + let store = llvm::LLVMRustBuildAtomicStore( + self.llbuilder, + val, + ptr, + AtomicOrdering::from_generic(order) + ); + // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? + // Also see `atomic_load` for more context. + llvm::LLVMSetAlignment(store, align.pref() as c_uint); + } + } + + fn store_with_flags( + &mut self, val: &'ll Value, ptr: &'ll Value, align: Align, @@ -555,7 +677,7 @@ impl Builder<'a, 'll, 'tcx> { // *always* point to a metadata value of the integer 1. 
// // [1]: http://llvm.org/docs/LangRef.html#store-instruction - let one = C_i32(self.cx, 1); + let one = self.cx.const_i32(1); let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1); llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node); } @@ -563,20 +685,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn atomic_store(&self, val: &'ll Value, ptr: &'ll Value, - order: AtomicOrdering, align: Align) { - debug!("Store {:?} -> {:?}", val, ptr); - self.count_insn("store.atomic"); - let ptr = self.check_store(val, ptr); - unsafe { - let store = llvm::LLVMRustBuildAtomicStore(self.llbuilder, val, ptr, order); - // FIXME(eddyb) Isn't it UB to use `pref` instead of `abi` here? - // Also see `atomic_load` for more context. - llvm::LLVMSetAlignment(store, align.pref() as c_uint); - } - } - - pub fn gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("gep"); unsafe { llvm::LLVMBuildGEP(self.llbuilder, ptr, indices.as_ptr(), @@ -584,7 +693,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn inbounds_gep(&self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { + fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value { self.count_insn("inboundsgep"); unsafe { llvm::LLVMBuildInBoundsGEP( @@ -592,122 +701,109 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn struct_gep(&self, ptr: &'ll Value, idx: u64) -> &'ll Value { - self.count_insn("structgep"); - assert_eq!(idx as c_uint as u64, idx); - unsafe { - llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) - } - } - /* Casts */ - pub fn trunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn trunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("trunc"); unsafe { llvm::LLVMBuildTrunc(self.llbuilder, val, dest_ty, noname()) } } - pub fn zext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("zext"); - unsafe { - llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) - } - } - - pub fn sext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sext"); unsafe { llvm::LLVMBuildSExt(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptoui(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptoui(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptoui"); unsafe { llvm::LLVMBuildFPToUI(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptosi(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptosi(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptosi"); unsafe { llvm::LLVMBuildFPToSI(self.llbuilder, val, dest_ty,noname()) } } - pub fn uitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn uitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("uitofp"); unsafe { llvm::LLVMBuildUIToFP(self.llbuilder, val, dest_ty, noname()) } } - pub fn sitofp(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn sitofp(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("sitofp"); unsafe { llvm::LLVMBuildSIToFP(self.llbuilder, val, dest_ty, noname()) } } - pub fn fptrunc(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fptrunc(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fptrunc"); unsafe { 
llvm::LLVMBuildFPTrunc(self.llbuilder, val, dest_ty, noname()) } } - pub fn fpext(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn fpext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("fpext"); unsafe { llvm::LLVMBuildFPExt(self.llbuilder, val, dest_ty, noname()) } } - pub fn ptrtoint(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn ptrtoint(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("ptrtoint"); unsafe { llvm::LLVMBuildPtrToInt(self.llbuilder, val, dest_ty, noname()) } } - pub fn inttoptr(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn inttoptr(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("inttoptr"); unsafe { llvm::LLVMBuildIntToPtr(self.llbuilder, val, dest_ty, noname()) } } - pub fn bitcast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { self.count_insn("bitcast"); unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, noname()) } } - pub fn pointercast(&self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { - self.count_insn("pointercast"); + + fn intcast(&mut self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { + self.count_insn("intcast"); unsafe { - llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) + llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) } } - pub fn intcast(&self, val: &'ll Value, dest_ty: &'ll Type, is_signed: bool) -> &'ll Value { - self.count_insn("intcast"); + fn pointercast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("pointercast"); unsafe { - llvm::LLVMRustBuildIntCast(self.llbuilder, val, dest_ty, is_signed) + llvm::LLVMBuildPointerCast(self.llbuilder, val, dest_ty, noname()) } } /* Comparisons */ - pub fn icmp(&self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn icmp(&mut self, op: IntPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("icmp"); + let op = llvm::IntPredicate::from_generic(op); unsafe { llvm::LLVMBuildICmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) } } - pub fn fcmp(&self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn fcmp(&mut self, op: RealPredicate, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("fcmp"); unsafe { llvm::LLVMBuildFCmp(self.llbuilder, op as c_uint, lhs, rhs, noname()) @@ -715,14 +811,14 @@ impl Builder<'a, 'll, 'tcx> { } /* Miscellaneous instructions */ - pub fn empty_phi(&self, ty: &'ll Type) -> &'ll Value { + fn empty_phi(&mut self, ty: &'ll Type) -> &'ll Value { self.count_insn("emptyphi"); unsafe { llvm::LLVMBuildPhi(self.llbuilder, ty, noname()) } } - pub fn phi(&self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { + fn phi(&mut self, ty: &'ll Type, vals: &[&'ll Value], bbs: &[&'ll BasicBlock]) -> &'ll Value { assert_eq!(vals.len(), bbs.len()); let phi = self.empty_phi(ty); self.count_insn("addincoming"); @@ -734,10 +830,10 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn inline_asm_call(&self, asm: *const c_char, cons: *const c_char, - inputs: &[&'ll Value], output: &'ll Type, - volatile: bool, alignstack: bool, - dia: AsmDialect) -> Option<&'ll Value> { + fn inline_asm_call(&mut self, asm: *const c_char, cons: *const c_char, + inputs: &[&'ll Value], output: &'ll Type, + volatile: bool, alignstack: bool, + dia: syntax::ast::AsmDialect) -> Option<&'ll 
Value> { self.count_insn("inlineasm"); let volatile = if volatile { llvm::True } @@ -747,18 +843,18 @@ impl Builder<'a, 'll, 'tcx> { let argtys = inputs.iter().map(|v| { debug!("Asm Input Type: {:?}", *v); - val_ty(*v) + self.cx.val_ty(*v) }).collect::>(); debug!("Asm Output Type: {:?}", output); - let fty = Type::func(&argtys[..], output); + let fty = &self.cx().type_func(&argtys[..], output); unsafe { // Ask LLVM to verify that the constraints are well-formed. let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons); debug!("Constraint verification result: {:?}", constraints_ok); if constraints_ok { let v = llvm::LLVMRustInlineAsm( - fty, asm, cons, volatile, alignstack, dia); + fty, asm, cons, volatile, alignstack, AsmDialect::from_generic(dia)); Some(self.call(v, inputs, None)) } else { // LLVM has detected an issue with our constaints, bail out @@ -767,31 +863,14 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn call(&self, llfn: &'ll Value, args: &[&'ll Value], - bundle: Option<&OperandBundleDef<'ll>>) -> &'ll Value { - self.count_insn("call"); - - debug!("Call {:?} with args ({:?})", - llfn, - args); - - let args = self.check_call("call", llfn, args); - let bundle = bundle.map(|b| &*b.raw); - - unsafe { - llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(), - args.len() as c_uint, bundle, noname()) - } - } - - pub fn minnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("minnum"); unsafe { let instr = llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs); instr.expect("LLVMRustBuildMinNum is not available in LLVM version < 6.0") } } - pub fn maxnum(&self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { + fn maxnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { self.count_insn("maxnum"); unsafe { let instr = llvm::LLVMRustBuildMaxNum(self.llbuilder, lhs, rhs); @@ -799,8 +878,8 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn select( - &self, cond: &'ll Value, + fn select( + &mut self, cond: &'ll Value, then_val: &'ll Value, else_val: &'ll Value, ) -> &'ll Value { @@ -811,22 +890,22 @@ impl Builder<'a, 'll, 'tcx> { } #[allow(dead_code)] - pub fn va_arg(&self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { + fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { self.count_insn("vaarg"); unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, noname()) } } - pub fn extract_element(&self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { + fn extract_element(&mut self, vec: &'ll Value, idx: &'ll Value) -> &'ll Value { self.count_insn("extractelement"); unsafe { llvm::LLVMBuildExtractElement(self.llbuilder, vec, idx, noname()) } } - pub fn insert_element( - &self, vec: &'ll Value, + fn insert_element( + &mut self, vec: &'ll Value, elt: &'ll Value, idx: &'ll Value, ) -> &'ll Value { @@ -836,24 +915,24 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn shuffle_vector(&self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { + fn shuffle_vector(&mut self, v1: &'ll Value, v2: &'ll Value, mask: &'ll Value) -> &'ll Value { self.count_insn("shufflevector"); unsafe { llvm::LLVMBuildShuffleVector(self.llbuilder, v1, v2, mask, noname()) } } - pub fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value { + fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value { unsafe { - let elt_ty = val_ty(elt); - let undef = llvm::LLVMGetUndef(Type::vector(elt_ty, num_elts as u64)); - let vec = self.insert_element(undef, elt, 
C_i32(self.cx, 0)); - let vec_i32_ty = Type::vector(Type::i32(self.cx), num_elts as u64); - self.shuffle_vector(vec, undef, C_null(vec_i32_ty)) + let elt_ty = self.cx.val_ty(elt); + let undef = llvm::LLVMGetUndef(&self.cx().type_vector(elt_ty, num_elts as u64)); + let vec = self.insert_element(undef, elt, self.cx.const_i32(0)); + let vec_i32_ty = &self.cx().type_vector(&self.cx().type_i32(), num_elts as u64); + self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty)) } } - pub fn vector_reduce_fadd_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fadd_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fadd_fast"); unsafe { // FIXME: add a non-fast math version once @@ -865,7 +944,7 @@ impl Builder<'a, 'll, 'tcx> { instr } } - pub fn vector_reduce_fmul_fast(&self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmul_fast(&mut self, acc: &'ll Value, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmul_fast"); unsafe { // FIXME: add a non-fast math version once @@ -877,56 +956,56 @@ impl Builder<'a, 'll, 'tcx> { instr } } - pub fn vector_reduce_add(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_add(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.add"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceAdd(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceAdd is not available in LLVM version < 5.0") } } - pub fn vector_reduce_mul(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_mul(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.mul"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMul(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceMul is not available in LLVM version < 5.0") } } - pub fn vector_reduce_and(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_and(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.and"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceAnd(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceAnd is not available in LLVM version < 5.0") } } - pub fn vector_reduce_or(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_or(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.or"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceOr(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceOr is not available in LLVM version < 5.0") } } - pub fn vector_reduce_xor(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_xor(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.xor"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceXor(self.llbuilder, src); instr.expect("LLVMRustBuildVectorReduceXor is not available in LLVM version < 5.0") } } - pub fn vector_reduce_fmin(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMin is not available in LLVM version < 5.0") } } - pub fn vector_reduce_fmax(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ false); instr.expect("LLVMRustBuildVectorReduceFMax is not available in LLVM 
version < 5.0") } } - pub fn vector_reduce_fmin_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmin_fast(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmin_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMin(self.llbuilder, src, /*NoNaNs:*/ true) @@ -935,7 +1014,7 @@ impl Builder<'a, 'll, 'tcx> { instr } } - pub fn vector_reduce_fmax_fast(&self, src: &'ll Value) -> &'ll Value { + fn vector_reduce_fmax_fast(&mut self, src: &'ll Value) -> &'ll Value { self.count_insn("vector.reduce.fmax_fast"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceFMax(self.llbuilder, src, /*NoNaNs:*/ true) @@ -944,14 +1023,14 @@ impl Builder<'a, 'll, 'tcx> { instr } } - pub fn vector_reduce_min(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_min(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.min"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMin(self.llbuilder, src, is_signed); instr.expect("LLVMRustBuildVectorReduceMin is not available in LLVM version < 5.0") } } - pub fn vector_reduce_max(&self, src: &'ll Value, is_signed: bool) -> &'ll Value { + fn vector_reduce_max(&mut self, src: &'ll Value, is_signed: bool) -> &'ll Value { self.count_insn("vector.reduce.max"); unsafe { let instr = llvm::LLVMRustBuildVectorReduceMax(self.llbuilder, src, is_signed); @@ -959,7 +1038,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn extract_value(&self, agg_val: &'ll Value, idx: u64) -> &'ll Value { + fn extract_value(&mut self, agg_val: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("extractvalue"); assert_eq!(idx as c_uint as u64, idx); unsafe { @@ -967,7 +1046,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn insert_value(&self, agg_val: &'ll Value, elt: &'ll Value, + fn insert_value(&mut self, agg_val: &'ll Value, elt: &'ll Value, idx: u64) -> &'ll Value { self.count_insn("insertvalue"); assert_eq!(idx as c_uint as u64, idx); @@ -977,7 +1056,7 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn landing_pad(&self, ty: &'ll Type, pers_fn: &'ll Value, + fn landing_pad(&mut self, ty: &'ll Type, pers_fn: &'ll Value, num_clauses: usize) -> &'ll Value { self.count_insn("landingpad"); unsafe { @@ -986,27 +1065,27 @@ impl Builder<'a, 'll, 'tcx> { } } - pub fn add_clause(&self, landing_pad: &'ll Value, clause: &'ll Value) { + fn add_clause(&mut self, landing_pad: &'ll Value, clause: &'ll Value) { unsafe { llvm::LLVMAddClause(landing_pad, clause); } } - pub fn set_cleanup(&self, landing_pad: &'ll Value) { + fn set_cleanup(&mut self, landing_pad: &'ll Value) { self.count_insn("setcleanup"); unsafe { llvm::LLVMSetCleanup(landing_pad, llvm::True); } } - pub fn resume(&self, exn: &'ll Value) -> &'ll Value { + fn resume(&mut self, exn: &'ll Value) -> &'ll Value { self.count_insn("resume"); unsafe { llvm::LLVMBuildResume(self.llbuilder, exn) } } - pub fn cleanup_pad(&self, + fn cleanup_pad(&mut self, parent: Option<&'ll Value>, args: &[&'ll Value]) -> &'ll Value { self.count_insn("cleanuppad"); @@ -1021,8 +1100,8 @@ impl Builder<'a, 'll, 'tcx> { ret.expect("LLVM does not have support for cleanuppad") } - pub fn cleanup_ret( - &self, cleanup: &'ll Value, + fn cleanup_ret( + &mut self, cleanup: &'ll Value, unwind: Option<&'ll BasicBlock>, ) -> &'ll Value { self.count_insn("cleanupret"); @@ -1032,7 +1111,7 @@ impl Builder<'a, 'll, 'tcx> { ret.expect("LLVM does not have support for cleanupret") } - pub fn catch_pad(&self, + fn catch_pad(&mut self, parent: &'ll Value, args: &[&'ll Value]) -> &'ll Value { 
self.count_insn("catchpad"); @@ -1045,7 +1124,7 @@ impl Builder<'a, 'll, 'tcx> { ret.expect("LLVM does not have support for catchpad") } - pub fn catch_ret(&self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { + fn catch_ret(&mut self, pad: &'ll Value, unwind: &'ll BasicBlock) -> &'ll Value { self.count_insn("catchret"); let ret = unsafe { llvm::LLVMRustBuildCatchRet(self.llbuilder, pad, unwind) @@ -1053,8 +1132,8 @@ impl Builder<'a, 'll, 'tcx> { ret.expect("LLVM does not have support for catchret") } - pub fn catch_switch( - &self, + fn catch_switch( + &mut self, parent: Option<&'ll Value>, unwind: Option<&'ll BasicBlock>, num_handlers: usize, @@ -1069,80 +1148,101 @@ impl Builder<'a, 'll, 'tcx> { ret.expect("LLVM does not have support for catchswitch") } - pub fn add_handler(&self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { + fn add_handler(&mut self, catch_switch: &'ll Value, handler: &'ll BasicBlock) { unsafe { llvm::LLVMRustAddHandler(catch_switch, handler); } } - pub fn set_personality_fn(&self, personality: &'ll Value) { + fn set_personality_fn(&mut self, personality: &'ll Value) { unsafe { llvm::LLVMSetPersonalityFn(self.llfn(), personality); } } // Atomic Operations - pub fn atomic_cmpxchg( - &self, + fn atomic_cmpxchg( + &mut self, dst: &'ll Value, cmp: &'ll Value, src: &'ll Value, - order: AtomicOrdering, - failure_order: AtomicOrdering, - weak: llvm::Bool, + order: rustc_codegen_ssa::common::AtomicOrdering, + failure_order: rustc_codegen_ssa::common::AtomicOrdering, + weak: bool, ) -> &'ll Value { - unsafe { - llvm::LLVMRustBuildAtomicCmpXchg(self.llbuilder, dst, cmp, src, - order, failure_order, weak) + let weak = if weak { llvm::True } else { llvm::False }; + unsafe { + llvm::LLVMRustBuildAtomicCmpXchg( + self.llbuilder, + dst, + cmp, + src, + AtomicOrdering::from_generic(order), + AtomicOrdering::from_generic(failure_order), + weak + ) } } - pub fn atomic_rmw( - &self, - op: AtomicRmwBinOp, + fn atomic_rmw( + &mut self, + op: rustc_codegen_ssa::common::AtomicRmwBinOp, dst: &'ll Value, src: &'ll Value, - order: AtomicOrdering, + order: rustc_codegen_ssa::common::AtomicOrdering, ) -> &'ll Value { unsafe { - llvm::LLVMBuildAtomicRMW(self.llbuilder, op, dst, src, order, False) + llvm::LLVMBuildAtomicRMW( + self.llbuilder, + AtomicRmwBinOp::from_generic(op), + dst, + src, + AtomicOrdering::from_generic(order), + False) } } - pub fn atomic_fence(&self, order: AtomicOrdering, scope: SynchronizationScope) { + fn atomic_fence( + &mut self, + order: rustc_codegen_ssa::common::AtomicOrdering, + scope: rustc_codegen_ssa::common::SynchronizationScope + ) { unsafe { - llvm::LLVMRustBuildAtomicFence(self.llbuilder, order, scope); + llvm::LLVMRustBuildAtomicFence( + self.llbuilder, + AtomicOrdering::from_generic(order), + SynchronizationScope::from_generic(scope) + ); } } - pub fn add_case(&self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { + fn add_case(&mut self, s: &'ll Value, on_val: &'ll Value, dest: &'ll BasicBlock) { unsafe { llvm::LLVMAddCase(s, on_val, dest) } } - pub fn add_incoming_to_phi(&self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { + fn add_incoming_to_phi(&mut self, phi: &'ll Value, val: &'ll Value, bb: &'ll BasicBlock) { self.count_insn("addincoming"); unsafe { llvm::LLVMAddIncoming(phi, &val, &bb, 1 as c_uint); } } - pub fn set_invariant_load(&self, load: &'ll Value) { + fn set_invariant_load(&mut self, load: &'ll Value) { unsafe { llvm::LLVMSetMetadata(load, llvm::MD_invariant_load as c_uint, 
llvm::LLVMMDNodeInContext(self.cx.llcx, ptr::null(), 0)); } } - /// Returns the ptr value that should be used for storing `val`. - fn check_store<'b>(&self, + fn check_store<'b>(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value { - let dest_ptr_ty = val_ty(ptr); - let stored_ty = val_ty(val); - let stored_ptr_ty = stored_ty.ptr_to(); + let dest_ptr_ty = self.cx.val_ty(ptr); + let stored_ty = self.cx.val_ty(val); + let stored_ptr_ty = self.cx.type_ptr_to(stored_ty); - assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer); + assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer); if dest_ptr_ty == stored_ptr_ty { ptr @@ -1154,24 +1254,23 @@ impl Builder<'a, 'll, 'tcx> { } } - /// Returns the args that should be used for a call to `llfn`. - fn check_call<'b>(&self, + fn check_call<'b>(&mut self, typ: &str, llfn: &'ll Value, args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> { - let mut fn_ty = val_ty(llfn); + let mut fn_ty = self.cx.val_ty(llfn); // Strip off pointers - while fn_ty.kind() == llvm::TypeKind::Pointer { - fn_ty = fn_ty.element_type(); + while self.cx.type_kind(fn_ty) == TypeKind::Pointer { + fn_ty = self.cx.element_type(fn_ty); } - assert!(fn_ty.kind() == llvm::TypeKind::Function, + assert!(self.cx.type_kind(fn_ty) == TypeKind::Function, "builder::{} not passed a function, but {:?}", typ, fn_ty); - let param_tys = fn_ty.func_params(); + let param_tys = self.cx.func_params_types(fn_ty); let all_args_match = param_tys.iter() - .zip(args.iter().map(|&v| val_ty(v))) + .zip(args.iter().map(|&v| self.cx().val_ty(v))) .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); if all_args_match { @@ -1182,7 +1281,7 @@ impl Builder<'a, 'll, 'tcx> { .zip(args.iter()) .enumerate() .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = val_ty(actual_val); + let actual_ty = self.cx().val_ty(actual_val); if expected_ty != actual_ty { debug!("Type mismatch in function call of {:?}. \ Expected {:?} for param {}, got {:?}; injecting bitcast", @@ -1197,23 +1296,15 @@ impl Builder<'a, 'll, 'tcx> { Cow::Owned(casted_args) } - pub fn lifetime_start(&self, ptr: &'ll Value, size: Size) { + fn lifetime_start(&mut self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.start", ptr, size); } - pub fn lifetime_end(&self, ptr: &'ll Value, size: Size) { + fn lifetime_end(&mut self, ptr: &'ll Value, size: Size) { self.call_lifetime_intrinsic("llvm.lifetime.end", ptr, size); } - /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations - /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` - /// and the intrinsic for `lt` and passes them to `emit`, which is in - /// charge of generating code to call the passed intrinsic on whatever - /// block of generated code is targeted for the intrinsic. - /// - /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations - /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). 
- fn call_lifetime_intrinsic(&self, intrinsic: &str, ptr: &'ll Value, size: Size) { + fn call_lifetime_intrinsic(&mut self, intrinsic: &str, ptr: &'ll Value, size: Size) { if self.cx.sess().opts.optimize == config::OptLevel::No { return; } @@ -1225,7 +1316,116 @@ impl Builder<'a, 'll, 'tcx> { let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic); - let ptr = self.pointercast(ptr, Type::i8p(self.cx)); - self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None); + let ptr = self.pointercast(ptr, self.cx.type_i8p()); + self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None); + } + + fn call( + &mut self, + llfn: &'ll Value, + args: &[&'ll Value], + bundle: Option<&rustc_codegen_ssa::common::OperandBundleDef<'ll, &'ll Value>> + ) -> &'ll Value { + self.count_insn("call"); + + debug!("Call {:?} with args ({:?})", + llfn, + args); + + let args = self.check_call("call", llfn, args); + match bundle { + Some(b) => { + let bundle = OperandBundleDef::from_generic(b); + unsafe { + llvm::LLVMRustBuildCall( + self.llbuilder, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + Some(&*(bundle.raw)), noname() + ) + } + } + None => unsafe { + llvm::LLVMRustBuildCall( + self.llbuilder, + llfn, + args.as_ptr() as *const &llvm::Value, + args.len() as c_uint, + None, noname() + ) + } + } + } + + fn call_memcpy( + &mut self, + dst: &'ll Value, + src: &'ll Value, + n_bytes: &'ll Value, + align: Align, + flags: MemFlags, + ) { + if flags.contains(MemFlags::NONTEMPORAL) { + // HACK(nox): This is inefficient but there is no nontemporal memcpy. + let val = &self.load(src, align); + let ptr = &self.pointercast(dst, &self.cx().type_ptr_to(&self.cx().val_ty(val))); + &self.store_with_flags(val, ptr, align, flags); + return; + } + let cx = &self.cx(); + let ptr_width = &self.cx().sess().target.target.target_pointer_width; + let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width); + let memcpy = cx.get_intrinsic(&key); + let src_ptr = &self.pointercast(src, cx.type_i8p()); + let dst_ptr = &self.pointercast(dst, cx.type_i8p()); + let size = &self.intcast(n_bytes, cx.type_isize(), false); + let align = cx.const_i32(align.abi() as i32); + let volatile = cx.const_bool(flags.contains(MemFlags::VOLATILE)); + &self.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None); + } + + fn call_memset( + &mut self, + ptr: &'ll Value, + fill_byte: &'ll Value, + size: &'ll Value, + align: &'ll Value, + volatile: bool, + ) -> &'ll Value { + let ptr_width = &self.cx().sess().target.target.target_pointer_width; + let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width); + let llintrinsicfn = &self.cx().get_intrinsic(&intrinsic_key); + let volatile = &self.cx().const_bool(volatile); + &self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None) + } + + fn zext(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { + self.count_insn("zext"); + unsafe { + llvm::LLVMBuildZExt(self.llbuilder, val, dest_ty, noname()) + } + } + + fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value { + self.count_insn("structgep"); + assert_eq!(idx as c_uint as u64, idx); + unsafe { + llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, noname()) + } + } + + fn cx(&self) -> &'a CodegenCx<'ll, 'tcx, &'ll Value> { + &self.cx + } + + fn delete_basic_block(&mut self, bb: &'ll BasicBlock) { + unsafe { + llvm::LLVMDeleteBasicBlock(bb); + } + } + + fn do_not_inline(&mut self, llret: &'ll Value) { + 
llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); } } diff --git a/src/librustc_codegen_llvm/callee.rs b/src/librustc_codegen_llvm/callee.rs index 4b4ccb3b600b3..74a532b50fc57 100644 --- a/src/librustc_codegen_llvm/callee.rs +++ b/src/librustc_codegen_llvm/callee.rs @@ -15,18 +15,15 @@ //! closure. use attributes; -use common::{self, CodegenCx}; -use consts; -use declare; +use rustc_codegen_ssa::common; use llvm; use monomorphize::Instance; -use type_of::LayoutLlvmExt; +use context::CodegenCx; use value::Value; +use rustc_codegen_ssa::interfaces::*; -use rustc::hir::def_id::DefId; -use rustc::ty::{self, TypeFoldable}; +use rustc::ty::TypeFoldable; use rustc::ty::layout::LayoutOf; -use rustc::ty::subst::Substs; /// Codegens a reference to a fn/method item, monomorphizing and /// inlining as it goes. @@ -36,10 +33,10 @@ use rustc::ty::subst::Substs; /// - `cx`: the crate context /// - `instance`: the instance to be instantiated pub fn get_fn( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, instance: Instance<'tcx>, ) -> &'ll Value { - let tcx = cx.tcx; + let tcx = cx.tcx(); debug!("get_fn(instance={:?})", instance); @@ -47,8 +44,8 @@ pub fn get_fn( assert!(!instance.substs.has_escaping_regions()); assert!(!instance.substs.has_param_types()); - let fn_ty = instance.ty(cx.tcx); - if let Some(&llfn) = cx.instances.borrow().get(&instance) { + let fn_ty = instance.ty(*cx.tcx()); + if let Some(&llfn) = cx.instances().borrow().get(&instance) { return llfn; } @@ -57,9 +54,9 @@ pub fn get_fn( // Create a fn pointer with the substituted signature. let fn_ptr_ty = tcx.mk_fn_ptr(common::ty_fn_sig(cx, fn_ty)); - let llptrty = cx.layout_of(fn_ptr_ty).llvm_type(cx); + let llptrty = cx.backend_type(&cx.layout_of(fn_ptr_ty)); - let llfn = if let Some(llfn) = declare::get_declared_value(cx, &sym) { + let llfn = if let Some(llfn) = cx.get_declared_value(&sym) { // This is subtle and surprising, but sometimes we have to bitcast // the resulting fn pointer. The reason has to do with external // functions. If you have two crates that both bind the same C @@ -83,19 +80,19 @@ pub fn get_fn( // This can occur on either a crate-local or crate-external // reference. It also occurs when testing libcore and in some // other weird situations. Annoying. - if common::val_ty(llfn) != llptrty { + if cx.val_ty(llfn) != llptrty { debug!("get_fn: casting {:?} to {:?}", llfn, llptrty); - consts::ptrcast(llfn, llptrty) + cx.static_ptrcast(llfn, llptrty) } else { debug!("get_fn: not casting pointer!"); llfn } } else { - let llfn = declare::declare_fn(cx, &sym, fn_ty); - assert_eq!(common::val_ty(llfn), llptrty); + let llfn = cx.declare_fn(&sym, fn_ty); + assert_eq!(cx.val_ty(llfn), llptrty); debug!("get_fn: not casting pointer!"); - if instance.def.is_inline(tcx) { + if instance.def.is_inline(*tcx) { attributes::inline(cx, llfn, attributes::InlineAttr::Hint); } attributes::from_fn_attrs(cx, llfn, Some(instance.def.def_id())); @@ -204,19 +201,3 @@ pub fn get_fn( llfn } - -pub fn resolve_and_get_fn( - cx: &CodegenCx<'ll, 'tcx>, - def_id: DefId, - substs: &'tcx Substs<'tcx>, -) -> &'ll Value { - get_fn( - cx, - ty::Instance::resolve( - cx.tcx, - ty::ParamEnv::reveal_all(), - def_id, - substs - ).unwrap() - ) -} diff --git a/src/librustc_codegen_llvm/common.rs b/src/librustc_codegen_llvm/common.rs index c08937fa9b916..6f84e96e8e338 100644 --- a/src/librustc_codegen_llvm/common.rs +++ b/src/librustc_codegen_llvm/common.rs @@ -12,44 +12,26 @@ //! 
Code that is useful in various codegen modules. -use llvm::{self, TypeKind}; -use llvm::{True, False, Bool, OperandBundleDef}; -use rustc::hir::def_id::DefId; -use rustc::middle::lang_items::LangItem; +use llvm::{self, True, False, Bool, BasicBlock}; use abi; -use base; -use builder::Builder; use consts; -use declare; use type_::Type; use type_of::LayoutLlvmExt; use value::Value; +use rustc_codegen_ssa::interfaces::*; -use rustc::ty::{self, Ty, TyCtxt}; -use rustc::ty::layout::{HasDataLayout, LayoutOf}; -use rustc::hir; +use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size}; +use rustc::mir::interpret::{Scalar, AllocType, Allocation}; +use consts::const_alloc_to_llvm; +use rustc_codegen_ssa::mir::place::PlaceRef; use libc::{c_uint, c_char}; -use std::iter; -use rustc_target::spec::abi::Abi; use syntax::symbol::LocalInternedString; -use syntax_pos::{Span, DUMMY_SP}; +use syntax::ast::Mutability; pub use context::CodegenCx; -pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) -} - -pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) -} - -pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { - ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) -} - /* * A note on nomenclature of linking: "extern", "foreign", and "upcall". * @@ -76,380 +58,305 @@ pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bo * */ -/// A structure representing an active landing pad for the duration of a basic -/// block. -/// -/// Each `Block` may contain an instance of this, indicating whether the block -/// is part of a landing pad or not. This is used to make decision about whether -/// to emit `invoke` instructions (e.g. in a landing pad we don't continue to -/// use `invoke`) and also about various function call metadata. -/// -/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is -/// just a bunch of `None` instances (not too interesting), but for MSVC -/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data. -/// When inside of a landing pad, each function call in LLVM IR needs to be -/// annotated with which landing pad it's a part of. This is accomplished via -/// the `OperandBundleDef` value created for MSVC landing pads. 
-pub struct Funclet<'ll> {
-    cleanuppad: &'ll Value,
-    operand: OperandBundleDef<'ll>,
+impl Backend<'ll> for CodegenCx<'ll, 'tcx, &'ll Value> {
+    type Value = &'ll Value;
+    type BasicBlock = &'ll BasicBlock;
+    type Type = &'ll Type;
+    type Context = &'ll llvm::Context;
 }
-impl Funclet<'ll> {
-    pub fn new(cleanuppad: &'ll Value) -> Self {
-        Funclet {
-            cleanuppad,
-            operand: OperandBundleDef::new("funclet", &[cleanuppad]),
+impl<'ll, 'tcx : 'll> ConstMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {
+
+    fn const_null(&self, t: &'ll Type) -> &'ll Value {
+        unsafe {
+            llvm::LLVMConstNull(t)
         }
     }
-    pub fn cleanuppad(&self) -> &'ll Value {
-        self.cleanuppad
+    fn const_undef(&self, t: &'ll Type) -> &'ll Value {
+        unsafe {
+            llvm::LLVMGetUndef(t)
+        }
     }
-    pub fn bundle(&self) -> &OperandBundleDef<'ll> {
-        &self.operand
+    fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
+        unsafe {
+            llvm::LLVMConstInt(t, i as u64, True)
+        }
     }
-}
-pub fn val_ty(v: &'ll Value) -> &'ll Type {
-    unsafe {
-        llvm::LLVMTypeOf(v)
+    fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
+        unsafe {
+            llvm::LLVMConstInt(t, i, False)
+        }
     }
-}
-// LLVM constant constructors.
-pub fn C_null(t: &'ll Type) -> &'ll Value {
-    unsafe {
-        llvm::LLVMConstNull(t)
+    fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
+        unsafe {
+            let words = [u as u64, (u >> 64) as u64];
+            llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
+        }
     }
-}
-pub fn C_undef(t: &'ll Type) -> &'ll Value {
-    unsafe {
-        llvm::LLVMGetUndef(t)
+    fn const_bool(&self, val: bool) -> &'ll Value {
+        &self.const_uint(&self.type_i1(), val as u64)
     }
-}
-pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value {
-    unsafe {
-        llvm::LLVMConstInt(t, i as u64, True)
+    fn const_i32(&self, i: i32) -> &'ll Value {
+        &self.const_int(&self.type_i32(), i as i64)
     }
-}
-pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value {
-    unsafe {
-        llvm::LLVMConstInt(t, i, False)
+    fn const_u32(&self, i: u32) -> &'ll Value {
+        &self.const_uint(&self.type_i32(), i as u64)
     }
-}
-pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value {
-    unsafe {
-        let words = [u as u64, (u >> 64) as u64];
-        llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
+    fn const_u64(&self, i: u64) -> &'ll Value {
+        &self.const_uint(&self.type_i64(), i)
     }
-}
-
-pub fn C_bool(cx: &CodegenCx<'ll, '_>, val: bool) -> &'ll Value {
-    C_uint(Type::i1(cx), val as u64)
-}
-
-pub fn C_i32(cx: &CodegenCx<'ll, '_>, i: i32) -> &'ll Value {
-    C_int(Type::i32(cx), i as i64)
-}
-
-pub fn C_u32(cx: &CodegenCx<'ll, '_>, i: u32) -> &'ll Value {
-    C_uint(Type::i32(cx), i as u64)
-}
-pub fn C_u64(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value {
-    C_uint(Type::i64(cx), i)
-}
+    fn const_usize(&self, i: u64) -> &'ll Value {
+        let bit_size = self.data_layout().pointer_size.bits();
+        if bit_size < 64 {
+            // make sure it doesn't overflow
+            assert!(i < (1<<bit_size));
+        }
+
+        &self.const_uint(&self.isize_ty, i)
+    }
-pub fn C_usize(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value {
-    let bit_size = cx.data_layout().pointer_size.bits();
-    if bit_size < 64 {
-        // make sure it doesn't overflow
-        assert!(i < (1<<bit_size));
-    }
-
-    C_uint(cx.isize_ty, i)
-}
-
-pub fn C_u8(cx: &CodegenCx<'ll, '_>, i: u8) -> &'ll Value {
-    C_uint(Type::i8(cx), i as u64)
-}
+    fn const_u8(&self, i: u8) -> &'ll Value {
+        &self.const_uint(&self.type_i8(), i as u64)
+    }
+    fn const_cstr(
+        &self,
+        s: LocalInternedString,
+        null_terminated: bool,
+    ) -> &'ll Value {
+        unsafe {
+            if let Some(&llval) = &self.const_cstr_cache.borrow().get(&s) {
+                return llval;
+            }
-// This is a 'c-like' raw string, which differs from
-// our boxed-and-length-annotated strings.
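Note: the hunk above folds the old free constant constructors (C_null, C_uint, C_usize, ...) into trait methods implemented on CodegenCx, so shared codegen code can write cx.const_usize(n) instead of C_usize(cx, n). The sketch below only illustrates that calling-convention change; the trait, type, and method names are simplified stand-ins, not the actual rustc_codegen_ssa definitions.

// Simplified sketch of the trait-method pattern used above; names are
// illustrative stand-ins, not the real rustc_codegen_ssa interfaces.
trait ConstMethods {
    // Opaque backend value, e.g. `&'ll Value` for the LLVM backend.
    type Value: Copy;

    fn const_u64(&self, i: u64) -> Self::Value;
    fn const_bool(&self, b: bool) -> Self::Value;
}

// A toy "backend context" whose values are plain integers.
struct ToyCx;

impl ConstMethods for ToyCx {
    type Value = u64;

    fn const_u64(&self, i: u64) -> u64 { i }
    fn const_bool(&self, b: bool) -> u64 { b as u64 }
}

// Backend-agnostic code now calls `cx.const_u64(n)` rather than the old
// free-function style `C_u64(cx, n)`.
fn emit_flag<Cx: ConstMethods>(cx: &Cx, enabled: bool) -> Cx::Value {
    if enabled { cx.const_u64(1) } else { cx.const_bool(false) }
}

fn main() {
    assert_eq!(emit_flag(&ToyCx, true), 1);
}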
-pub fn C_cstr( - cx: &CodegenCx<'ll, '_>, - s: LocalInternedString, - null_terminated: bool, -) -> &'ll Value { - unsafe { - if let Some(&llval) = cx.const_cstr_cache.borrow().get(&s) { - return llval; + let sc = llvm::LLVMConstStringInContext(&self.llcx, + s.as_ptr() as *const c_char, + s.len() as c_uint, + !null_terminated as Bool); + let sym = &self.generate_local_symbol_name("str"); + let g = &self.define_global(&sym[..], &self.val_ty(sc)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", sym); + }); + llvm::LLVMSetInitializer(g, sc); + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); + + &self.const_cstr_cache.borrow_mut().insert(s, g); + g } - - let sc = llvm::LLVMConstStringInContext(cx.llcx, - s.as_ptr() as *const c_char, - s.len() as c_uint, - !null_terminated as Bool); - let sym = cx.generate_local_symbol_name("str"); - let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{ - bug!("symbol `{}` is already defined", sym); - }); - llvm::LLVMSetInitializer(g, sc); - llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage); - - cx.const_cstr_cache.borrow_mut().insert(s, g); - g } -} - -// NB: Do not use `do_spill_noroot` to make this into a constant string, or -// you will be kicked off fast isel. See issue #4352 for an example of this. -pub fn C_str_slice(cx: &CodegenCx<'ll, '_>, s: LocalInternedString) -> &'ll Value { - let len = s.len(); - let cs = consts::ptrcast(C_cstr(cx, s, false), - cx.layout_of(cx.tcx.mk_str()).llvm_type(cx).ptr_to()); - C_fat_ptr(cx, cs, C_usize(cx, len as u64)) -} -pub fn C_fat_ptr(cx: &CodegenCx<'ll, '_>, ptr: &'ll Value, meta: &'ll Value) -> &'ll Value { - assert_eq!(abi::FAT_PTR_ADDR, 0); - assert_eq!(abi::FAT_PTR_EXTRA, 1); - C_struct(cx, &[ptr, meta], false) -} - -pub fn C_struct(cx: &CodegenCx<'ll, '_>, elts: &[&'ll Value], packed: bool) -> &'ll Value { - C_struct_in_context(cx.llcx, elts, packed) -} + fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value { + let len = s.len(); + let cs = consts::ptrcast(&self.const_cstr(s, false), + &self.type_ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(self))); + &self.const_fat_ptr(cs, &self.const_usize(len as u64)) + } -pub fn C_struct_in_context( - llcx: &'ll llvm::Context, - elts: &[&'ll Value], - packed: bool, -) -> &'ll Value { - unsafe { - llvm::LLVMConstStructInContext(llcx, - elts.as_ptr(), elts.len() as c_uint, - packed as Bool) + fn const_fat_ptr( + &self, + ptr: &'ll Value, + meta: &'ll Value + ) -> &'ll Value { + assert_eq!(abi::FAT_PTR_ADDR, 0); + assert_eq!(abi::FAT_PTR_EXTRA, 1); + &self.const_struct(&[ptr, meta], false) } -} -pub fn C_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + fn const_struct( + &self, + elts: &[&'ll Value], + packed: bool + ) -> &'ll Value { + struct_in_context(&self.llcx, elts, packed) } -} -pub fn C_vector(elts: &[&'ll Value]) -> &'ll Value { - unsafe { - return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint); + } } -} -pub fn C_bytes(cx: &CodegenCx<'ll, '_>, bytes: &[u8]) -> &'ll Value { - C_bytes_in_context(cx.llcx, bytes) -} + fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value { + unsafe { + return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint); + } 
+ } -pub fn C_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { - unsafe { - let ptr = bytes.as_ptr() as *const c_char; - return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + fn const_bytes(&self, bytes: &[u8]) -> &'ll Value { + bytes_in_context(&self.llcx, bytes) } -} -pub fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value { - unsafe { - assert_eq!(idx as c_uint as u64, idx); - let us = &[idx as c_uint]; - let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); + fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value { + unsafe { + assert_eq!(idx as c_uint as u64, idx); + let us = &[idx as c_uint]; + let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint); - debug!("const_get_elt(v={:?}, idx={}, r={:?})", - v, idx, r); + debug!("const_get_elt(v={:?}, idx={}, r={:?})", + v, idx, r); - r + r + } } -} -pub fn const_get_real(v: &'ll Value) -> Option<(f64, bool)> { - unsafe { - if is_const_real(v) { - let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); - let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); - let loses_info = if loses_info == 1 { true } else { false }; - Some((r, loses_info)) - } else { - None + fn const_get_real(&self, v: &'ll Value) -> Option<(f64, bool)> { + unsafe { + if self.is_const_real(v) { + let mut loses_info: llvm::Bool = ::std::mem::uninitialized(); + let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info); + let loses_info = if loses_info == 1 { true } else { false }; + Some((r, loses_info)) + } else { + None + } } } -} -pub fn const_to_uint(v: &'ll Value) -> u64 { - unsafe { - llvm::LLVMConstIntGetZExtValue(v) + fn const_to_uint(&self, v: &'ll Value) -> u64 { + unsafe { + llvm::LLVMConstIntGetZExtValue(v) + } } -} -pub fn is_const_integral(v: &'ll Value) -> bool { - unsafe { - llvm::LLVMIsAConstantInt(v).is_some() + fn is_const_integral(&self, v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantInt(v).is_some() + } } -} -pub fn is_const_real(v: &'ll Value) -> bool { - unsafe { - llvm::LLVMIsAConstantFP(v).is_some() + fn is_const_real(&self, v: &'ll Value) -> bool { + unsafe { + llvm::LLVMIsAConstantFP(v).is_some() + } } -} - -#[inline] -fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { - ((hi as u128) << 64) | (lo as u128) -} - -pub fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option { - unsafe { - if is_const_integral(v) { - let (mut lo, mut hi) = (0u64, 0u64); - let success = llvm::LLVMRustConstInt128Get(v, sign_ext, - &mut hi, &mut lo); - if success { - Some(hi_lo_to_u128(lo, hi)) + fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option { + unsafe { + if self.is_const_integral(v) { + let (mut lo, mut hi) = (0u64, 0u64); + let success = llvm::LLVMRustConstInt128Get(v, sign_ext, + &mut hi, &mut lo); + if success { + Some(hi_lo_to_u128(lo, hi)) + } else { + None + } } else { None } - } else { - None } } -} -pub fn langcall(tcx: TyCtxt, - span: Option, - msg: &str, - li: LangItem) - -> DefId { - tcx.lang_items().require(li).unwrap_or_else(|s| { - let msg = format!("{} {}", msg, s); - match span { - Some(span) => tcx.sess.span_fatal(span, &msg[..]), - None => tcx.sess.fatal(&msg[..]), + fn scalar_to_backend( + &self, + cv: Scalar, + layout: &layout::Scalar, + llty: &'ll Type, + ) -> &'ll Value { + let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() }; + match cv { + Scalar::Bits { size: 0, .. 
} => { + assert_eq!(0, layout.value.size(self).bytes()); + self.const_undef(self.type_ix(0)) + }, + Scalar::Bits { bits, size } => { + assert_eq!(size as u64, layout.value.size(self).bytes()); + let llval = self.const_uint_big(self.type_ix(bitsize), bits); + if layout.value == layout::Pointer { + unsafe { llvm::LLVMConstIntToPtr(llval, llty) } + } else { + self.static_bitcast(llval, llty) + } + }, + Scalar::Ptr(ptr) => { + let alloc_type = self.tcx.alloc_map.lock().get(ptr.alloc_id); + let base_addr = match alloc_type { + Some(AllocType::Memory(alloc)) => { + let init = const_alloc_to_llvm(self, alloc); + if alloc.mutability == Mutability::Mutable { + self.static_addr_of_mut(init, alloc.align, None) + } else { + self.static_addr_of(init, alloc.align, None) + } + } + Some(AllocType::Function(fn_instance)) => { + self.get_fn(fn_instance) + } + Some(AllocType::Static(def_id)) => { + assert!(self.tcx.is_static(def_id).is_some()); + self.get_static(def_id) + } + None => bug!("missing allocation {:?}", ptr.alloc_id), + }; + let llval = unsafe { llvm::LLVMConstInBoundsGEP( + self.static_bitcast(base_addr, self.type_i8p()), + &self.const_usize(ptr.offset.bytes()), + 1, + ) }; + if layout.value != layout::Pointer { + unsafe { llvm::LLVMConstPtrToInt(llval, llty) } + } else { + self.static_bitcast(llval, llty) + } + } } - }) -} + } -// To avoid UB from LLVM, these two functions mask RHS with an -// appropriate mask unconditionally (i.e. the fallback behavior for -// all shifts). For 32- and 64-bit types, this matches the semantics -// of Java. (See related discussion on #1877 and #10183.) - -pub fn build_unchecked_lshift( - bx: &Builder<'a, 'll, 'tcx>, - lhs: &'ll Value, - rhs: &'ll Value -) -> &'ll Value { - let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bx, rhs); - bx.shl(lhs, rhs) + fn from_const_alloc( + &self, + layout: TyLayout<'tcx>, + alloc: &Allocation, + offset: Size, + ) -> PlaceRef<'tcx, &'ll Value> { + let init = const_alloc_to_llvm(self, alloc); + let base_addr = self.static_addr_of(init, layout.align, None); + + let llval = unsafe { llvm::LLVMConstInBoundsGEP( + self.static_bitcast(base_addr, self.type_i8p()), + &self.const_usize(offset.bytes()), + 1, + )}; + let llval = self.static_bitcast(llval, self.type_ptr_to(layout.llvm_type(self))); + PlaceRef::new_sized(llval, layout, alloc.align) + } } -pub fn build_unchecked_rshift( - bx: &Builder<'a, 'll, 'tcx>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value -) -> &'ll Value { - let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); - // #1877, #10183: Ensure that input is always valid - let rhs = shift_mask_rhs(bx, rhs); - let is_signed = lhs_t.is_signed(); - if is_signed { - bx.ashr(lhs, rhs) - } else { - bx.lshr(lhs, rhs) +pub fn val_ty(v: &'ll Value) -> &'ll Type { + unsafe { + llvm::LLVMTypeOf(v) } } -fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: &'ll Value) -> &'ll Value { - let rhs_llty = val_ty(rhs); - bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false)) +pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value { + unsafe { + let ptr = bytes.as_ptr() as *const c_char; + return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True); + } } -pub fn shift_mask_val( - bx: &Builder<'a, 'll, 'tcx>, - llty: &'ll Type, - mask_llty: &'ll Type, - invert: bool -) -> &'ll Value { - let kind = llty.kind(); - match kind { - TypeKind::Integer => { - // i8/u8 can shift by at 
most 7, i16/u16 by at most 15, etc. - let val = llty.int_width() - 1; - if invert { - C_int(mask_llty, !val as i64) - } else { - C_uint(mask_llty, val) - } - }, - TypeKind::Vector => { - let mask = shift_mask_val(bx, llty.element_type(), mask_llty.element_type(), invert); - bx.vector_splat(mask_llty.vector_length(), mask) - }, - _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), +pub fn struct_in_context( + llcx: &'a llvm::Context, + elts: &[&'a Value], + packed: bool, +) -> &'a Value { + unsafe { + llvm::LLVMConstStructInContext(llcx, + elts.as_ptr(), elts.len() as c_uint, + packed as Bool) } } -pub fn ty_fn_sig<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - ty: Ty<'tcx>) - -> ty::PolyFnSig<'tcx> -{ - match ty.sty { - ty::FnDef(..) | - // Shims currently have type FnPtr. Not sure this should remain. - ty::FnPtr(_) => ty.fn_sig(cx.tcx), - ty::Closure(def_id, substs) => { - let tcx = cx.tcx; - let sig = substs.closure_sig(def_id, tcx); - - let env_ty = tcx.closure_env_ty(def_id, substs).unwrap(); - sig.map_bound(|sig| tcx.mk_fn_sig( - iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()), - sig.output(), - sig.variadic, - sig.unsafety, - sig.abi - )) - } - ty::Generator(def_id, substs, _) => { - let tcx = cx.tcx; - let sig = substs.poly_sig(def_id, cx.tcx); - - let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); - let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); - - sig.map_bound(|sig| { - let state_did = tcx.lang_items().gen_state().unwrap(); - let state_adt_ref = tcx.adt_def(state_did); - let state_substs = tcx.intern_substs(&[ - sig.yield_ty.into(), - sig.return_ty.into(), - ]); - let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); - - tcx.mk_fn_sig(iter::once(env_ty), - ret_ty, - false, - hir::Unsafety::Normal, - Abi::Rust - ) - }) - } - _ => bug!("unexpected type {:?} to ty_fn_sig", ty) - } +#[inline] +fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 { + ((hi as u128) << 64) | (lo as u128) } diff --git a/src/librustc_codegen_llvm/consts.rs b/src/librustc_codegen_llvm/consts.rs index 9228870bf3a5c..38d3276faa2e9 100644 --- a/src/librustc_codegen_llvm/consts.rs +++ b/src/librustc_codegen_llvm/consts.rs @@ -11,39 +11,87 @@ use libc::c_uint; use llvm::{self, SetUnnamedAddr, True}; use rustc::hir::def_id::DefId; +use rustc::mir::interpret::{ConstValue, Allocation, read_target_uint, + Pointer, ConstEvalErr, GlobalId}; use rustc::hir::Node; use debuginfo; -use base; use monomorphize::MonoItem; -use common::{CodegenCx, val_ty}; -use declare; +use common::CodegenCx; use monomorphize::Instance; use syntax_pos::Span; +use rustc_target::abi::HasDataLayout; use syntax_pos::symbol::LocalInternedString; +use base; use type_::Type; use type_of::LayoutLlvmExt; +use rustc_data_structures::sync::Lrc; use value::Value; use rustc::ty::{self, Ty}; +use rustc_codegen_ssa::interfaces::*; -use rustc::ty::layout::{Align, LayoutOf}; +use rustc::ty::layout::{self, Size, Align, LayoutOf}; use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags}; use std::ffi::{CStr, CString}; -pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMConstPointerCast(val, ty) + +pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_, &'ll Value>, alloc: &Allocation) -> &'ll Value { + let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); + let layout = cx.data_layout(); + let pointer_size = layout.pointer_size.bytes() as usize; + + let mut next_offset = 0; + for &(offset, ((), alloc_id)) in alloc.relocations.iter() { + let offset = offset.bytes(); + 
assert_eq!(offset as usize as u64, offset);
+        let offset = offset as usize;
+        if offset > next_offset {
+            llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset]));
+        }
+        let ptr_offset = read_target_uint(
+            layout.endian,
+            &alloc.bytes[offset..(offset + pointer_size)],
+        ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
+        llvals.push(cx.scalar_to_backend(
+            Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
+            &layout::Scalar {
+                value: layout::Primitive::Pointer,
+                valid_range: 0..=!0
+            },
+            cx.type_i8p()
+        ));
+        next_offset = offset + pointer_size;
+    }
+    if alloc.bytes.len() >= next_offset {
+        llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..]));
     }
+
+    cx.const_struct(&llvals, true)
 }
 
-pub fn bitcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-    unsafe {
-        llvm::LLVMConstBitCast(val, ty)
-    }
+pub fn codegen_static_initializer(
+    cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
+    def_id: DefId,
+) -> Result<(&'ll Value, &'tcx Allocation), Lrc<ConstEvalErr<'tcx>>> {
+    let instance = ty::Instance::mono(cx.tcx, def_id);
+    let cid = GlobalId {
+        instance,
+        promoted: None,
+    };
+    let param_env = ty::ParamEnv::reveal_all();
+    let static_ = cx.tcx.const_eval(param_env.and(cid))?;
+
+    let alloc = match static_.val {
+        ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc,
+        _ => bug!("static const eval returned {:#?}", static_),
+    };
+    Ok((const_alloc_to_llvm(cx, alloc), alloc))
 }
 
-fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
+
+
+fn set_global_alignment(cx: &CodegenCx<'ll, '_, &'ll Value>,
                         gv: &'ll Value,
                         mut align: Align) {
     // The target may require greater alignment for globals than the type does.
@@ -62,179 +110,8 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
     }
 }
 
-pub fn addr_of_mut(
-    cx: &CodegenCx<'ll, '_>,
-    cv: &'ll Value,
-    align: Align,
-    kind: Option<&str>,
-) -> &'ll Value {
-    unsafe {
-        let gv = match kind {
-            Some(kind) if !cx.tcx.sess.fewer_names() => {
-                let name = cx.generate_local_symbol_name(kind);
-                let gv = declare::define_global(cx, &name[..], val_ty(cv)).unwrap_or_else(||{
-                    bug!("symbol `{}` is already defined", name);
-                });
-                llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
-                gv
-            },
-            _ => declare::define_private_global(cx, val_ty(cv)),
-        };
-        llvm::LLVMSetInitializer(gv, cv);
-        set_global_alignment(cx, gv, align);
-        SetUnnamedAddr(gv, true);
-        gv
-    }
-}
-
-pub fn addr_of(
-    cx: &CodegenCx<'ll, '_>,
-    cv: &'ll Value,
-    align: Align,
-    kind: Option<&str>,
-) -> &'ll Value {
-    if let Some(&gv) = cx.const_globals.borrow().get(&cv) {
-        unsafe {
-            // Upgrade the alignment in cases where the same constant is used with different
-            // alignment requirements
-            let llalign = align.abi() as u32;
-            if llalign > llvm::LLVMGetAlignment(gv) {
-                llvm::LLVMSetAlignment(gv, llalign);
-            }
-        }
-        return gv;
-    }
-    let gv = addr_of_mut(cx, cv, align, kind);
-    unsafe {
-        llvm::LLVMSetGlobalConstant(gv, True);
-    }
-    cx.const_globals.borrow_mut().insert(cv, gv);
-    gv
-}
-
-pub fn get_static(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll Value {
-    let instance = Instance::mono(cx.tcx, def_id);
-    if let Some(&g) = cx.instances.borrow().get(&instance) {
-        return g;
-    }
-
-    let defined_in_current_codegen_unit = cx.codegen_unit
-        .items()
-        .contains_key(&MonoItem::Static(def_id));
-    assert!(!defined_in_current_codegen_unit,
-            "consts::get_static() should always hit the cache for \
-             statics defined in the same CGU, but did not for `{:?}`",
-            def_id);
-
-    let ty = instance.ty(cx.tcx);
-    let sym = cx.tcx.symbol_name(instance).as_str();
-
debug!("get_static: sym={} instance={:?}", sym, instance); - - let g = if let Some(id) = cx.tcx.hir.as_local_node_id(def_id) { - - let llty = cx.layout_of(ty).llvm_type(cx); - let (g, attrs) = match cx.tcx.hir.get(id) { - Node::Item(&hir::Item { - ref attrs, span, node: hir::ItemKind::Static(..), .. - }) => { - if declare::get_declared_value(cx, &sym[..]).is_some() { - span_bug!(span, "Conflicting symbol names for static?"); - } - - let g = declare::define_global(cx, &sym[..], llty).unwrap(); - - if !cx.tcx.is_reachable_non_generic(def_id) { - unsafe { - llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); - } - } - - (g, attrs) - } - - Node::ForeignItem(&hir::ForeignItem { - ref attrs, span, node: hir::ForeignItemKind::Static(..), .. - }) => { - let fn_attrs = cx.tcx.codegen_fn_attrs(def_id); - (check_and_apply_linkage(cx, &fn_attrs, ty, sym, Some(span)), attrs) - } - - item => bug!("get_static: expected static, found {:?}", item) - }; - - debug!("get_static: sym={} attrs={:?}", sym, attrs); - - for attr in attrs { - if attr.check_name("thread_local") { - llvm::set_thread_local_mode(g, cx.tls_model); - } - } - - g - } else { - // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? - debug!("get_static: sym={} item_attr={:?}", sym, cx.tcx.item_attrs(def_id)); - - let attrs = cx.tcx.codegen_fn_attrs(def_id); - let g = check_and_apply_linkage(cx, &attrs, ty, sym, None); - - // Thread-local statics in some other crate need to *always* be linked - // against in a thread-local fashion, so we need to be sure to apply the - // thread-local attribute locally if it was present remotely. If we - // don't do this then linker errors can be generated where the linker - // complains that one object files has a thread local version of the - // symbol and another one doesn't. - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, cx.tls_model); - } - - let needs_dll_storage_attr = - cx.use_dll_storage_attrs && !cx.tcx.is_foreign_item(def_id) && - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the attrs. Instead we make them unnecessary by disallowing - // dynamic linking when cross-language LTO is enabled. - !cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); - - // If this assertion triggers, there's something wrong with commandline - // argument validation. - debug_assert!(!(cx.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && - cx.tcx.sess.target.target.options.is_like_msvc && - cx.tcx.sess.opts.cg.prefer_dynamic)); - - if needs_dll_storage_attr { - // This item is external but not foreign, i.e. it originates from an external Rust - // crate. Since we don't know whether this crate will be linked dynamically or - // statically in the final application, we always mark such symbols as 'dllimport'. - // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs to - // make things work. - // - // However, in some scenarios we defer emission of statics to downstream - // crates, so there are cases where a static with an upstream DefId - // is actually present in the current crate. We can find out via the - // is_codegened_item query. - if !cx.tcx.is_codegened_item(def_id) { - unsafe { - llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); - } - } - } - g - }; - - if cx.use_dll_storage_attrs && cx.tcx.is_dllimport_foreign_item(def_id) { - // For foreign (native) libs we know the exact storage type to use. 
- unsafe { - llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); - } - } - - cx.instances.borrow_mut().insert(instance, g); - g -} - fn check_and_apply_linkage( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, attrs: &CodegenFnAttrs, ty: Ty<'tcx>, sym: LocalInternedString, @@ -260,7 +137,7 @@ fn check_and_apply_linkage( }; unsafe { // Declare a symbol `foo` with the desired linkage. - let g1 = declare::declare_global(cx, &sym, llty2); + let g1 = cx.declare_global(&sym, llty2); llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage)); // Declare an internal global `extern_with_linkage_foo` which @@ -271,7 +148,7 @@ fn check_and_apply_linkage( // zero. let mut real_name = "_rust_extern_with_linkage_".to_string(); real_name.push_str(&sym); - let g2 = declare::define_global(cx, &real_name, llty).unwrap_or_else(||{ + let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{ if let Some(span) = span { cx.sess().span_fatal( span, @@ -288,150 +165,348 @@ fn check_and_apply_linkage( } else { // Generate an external declaration. // FIXME(nagisa): investigate whether it can be changed into define_global - declare::declare_global(cx, &sym, llty) + cx.declare_global(&sym, llty) } } -pub fn codegen_static<'a, 'tcx>( - cx: &CodegenCx<'a, 'tcx>, - def_id: DefId, - is_mutable: bool, -) { +pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { - let attrs = cx.tcx.codegen_fn_attrs(def_id); + llvm::LLVMConstPointerCast(val, ty) + } +} - let (v, alloc) = match ::mir::codegen_static_initializer(cx, def_id) { - Ok(v) => v, - // Error has already been reported - Err(_) => return, - }; +impl StaticMethods<'ll> for CodegenCx<'ll, 'tcx, &'ll Value> { - let g = get_static(cx, def_id); + fn static_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + ptrcast(val, ty) + } - // boolean SSA values are i1, but they have to be stored in i8 slots, - // otherwise some LLVM optimization passes don't work as expected - let mut val_llty = val_ty(v); - let v = if val_llty == Type::i1(cx) { - val_llty = Type::i8(cx); - llvm::LLVMConstZExt(v, val_llty) - } else { - v - }; + fn static_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMConstBitCast(val, ty) + } + } + + fn static_addr_of_mut( + &self, + cv: &'ll Value, + align: Align, + kind: Option<&str>, + ) -> &'ll Value { + unsafe { + let gv = match kind { + Some(kind) if !&self.tcx.sess.fewer_names() => { + let name = self.generate_local_symbol_name(kind); + let gv = self.define_global(&name[..], + &self.val_ty(cv)).unwrap_or_else(||{ + bug!("symbol `{}` is already defined", name); + }); + llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage); + gv + }, + _ => self.define_private_global(&self.val_ty(cv)), + }; + llvm::LLVMSetInitializer(gv, cv); + set_global_alignment(&self, gv, align); + SetUnnamedAddr(gv, true); + gv + } + } + + fn static_addr_of( + &self, + cv: &'ll Value, + align: Align, + kind: Option<&str>, + ) -> &'ll Value { + if let Some(&gv) = &self.const_globals.borrow().get(&cv) { + unsafe { + // Upgrade the alignment in cases where the same constant is used with different + // alignment requirements + let llalign = align.abi() as u32; + if llalign > llvm::LLVMGetAlignment(gv) { + llvm::LLVMSetAlignment(gv, llalign); + } + } + return gv; + } + let gv = &self.static_addr_of_mut(cv, align, kind); + unsafe { + llvm::LLVMSetGlobalConstant(gv, True); + } + &self.const_globals.borrow_mut().insert(cv, gv); + gv + } + + fn get_static(&self, def_id: DefId) -> 
&'ll Value { + let instance = Instance::mono(self.tcx, def_id); + if let Some(&g) = &self.instances.borrow().get(&instance) { + return g; + } + + let defined_in_current_codegen_unit = &self.codegen_unit + .items() + .contains_key(&MonoItem::Static(def_id)); + assert!(!defined_in_current_codegen_unit, + "consts::get_static() should always hit the cache for \ + statics defined in the same CGU, but did not for `{:?}`", + def_id); + + let ty = instance.ty(self.tcx); + let sym = self.tcx.symbol_name(instance).as_str(); + + debug!("get_static: sym={} instance={:?}", sym, instance); + + let g = if let Some(id) = self.tcx.hir.as_local_node_id(def_id) { + + let llty = &self.layout_of(ty).llvm_type(self); + let (g, attrs) = match &self.tcx.hir.get(id) { + Node::Item(&hir::Item { + ref attrs, span, node: hir::ItemKind::Static(..), .. + }) => { + if self.get_declared_value(&sym[..]).is_some() { + span_bug!(span, "Conflicting symbol names for static?"); + } + + let g = self.define_global(&sym[..], llty).unwrap(); + + if !&self.tcx.is_reachable_non_generic(def_id) { + unsafe { + llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden); + } + } + + (g, attrs) + } + + Node::ForeignItem(&hir::ForeignItem { + ref attrs, span, node: hir::ForeignItemKind::Static(..), .. + }) => { + let fn_attrs = &self.tcx.codegen_fn_attrs(def_id); + (check_and_apply_linkage(&self, &fn_attrs, ty, sym, Some(span)), attrs) + } + + item => bug!("get_static: expected static, found {:?}", item) + }; + + debug!("get_static: sym={} attrs={:?}", sym, attrs); + + for attr in attrs { + if attr.check_name("thread_local") { + llvm::set_thread_local_mode(g, self.tls_model); + } + } - let instance = Instance::mono(cx.tcx, def_id); - let ty = instance.ty(cx.tcx); - let llty = cx.layout_of(ty).llvm_type(cx); - let g = if val_llty == llty { g } else { - // If we created the global with the wrong type, - // correct the type. - let empty_string = const_cstr!(""); - let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); - let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); - llvm::LLVMSetValueName(g, empty_string.as_ptr()); - - let linkage = llvm::LLVMRustGetLinkage(g); - let visibility = llvm::LLVMRustGetVisibility(g); - - let new_g = llvm::LLVMRustGetOrInsertGlobal( - cx.llmod, name_string.as_ptr(), val_llty); - - llvm::LLVMRustSetLinkage(new_g, linkage); - llvm::LLVMRustSetVisibility(new_g, visibility); - - // To avoid breaking any invariants, we leave around the old - // global for the moment; we'll replace all references to it - // with the new global later. (See base::codegen_backend.) - cx.statics_to_rauw.borrow_mut().push((g, new_g)); - new_g + // FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow? + debug!("get_static: sym={} item_attr={:?}", sym, &self.tcx.item_attrs(def_id)); + + let attrs = &self.tcx.codegen_fn_attrs(def_id); + let g = check_and_apply_linkage(&self, &attrs, ty, sym, None); + + // Thread-local statics in some other crate need to *always* be linked + // against in a thread-local fashion, so we need to be sure to apply the + // thread-local attribute locally if it was present remotely. If we + // don't do this then linker errors can be generated where the linker + // complains that one object files has a thread local version of the + // symbol and another one doesn't. 
+ if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + } + + let needs_dll_storage_attr = + self.use_dll_storage_attrs && !&self.tcx.is_foreign_item(def_id) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the attrs. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. + !&self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled(); + + // If this assertion triggers, there's something wrong with commandline + // argument validation. + debug_assert!(!(self.tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + self.tcx.sess.target.target.options.is_like_msvc && + self.tcx.sess.opts.cg.prefer_dynamic)); + + if needs_dll_storage_attr { + // This item is external but not foreign, i.e. it originates from an external Rust + // crate. Since we don't know whether this crate will be linked dynamically or + // statically in the final application, we always mark such symbols as 'dllimport'. + // If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs + // to make things work. + // + // However, in some scenarios we defer emission of statics to downstream + // crates, so there are cases where a static with an upstream DefId + // is actually present in the current crate. We can find out via the + // is_codegened_item query. + if !&self.tcx.is_codegened_item(def_id) { + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); + } + } + } + g }; - set_global_alignment(cx, g, cx.align_of(ty)); - llvm::LLVMSetInitializer(g, v); - - // As an optimization, all shared statics which do not have interior - // mutability are placed into read-only memory. - if !is_mutable { - if cx.type_is_freeze(ty) { - llvm::LLVMSetGlobalConstant(g, llvm::True); + + if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) { + // For foreign (native) libs we know the exact storage type to use. + unsafe { + llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport); } } - debuginfo::create_global_var_metadata(cx, def_id, g); - - if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { - llvm::set_thread_local_mode(g, cx.tls_model); - - // Do not allow LLVM to change the alignment of a TLS on macOS. - // - // By default a global's alignment can be freely increased. - // This allows LLVM to generate more performant instructions - // e.g. using load-aligned into a SIMD register. - // - // However, on macOS 10.10 or below, the dynamic linker does not - // respect any alignment given on the TLS (radar 24221680). - // This will violate the alignment assumption, and causing segfault at runtime. - // - // This bug is very easy to trigger. In `println!` and `panic!`, - // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, - // which the values would be `mem::replace`d on initialization. - // The implementation of `mem::replace` will use SIMD - // whenever the size is 32 bytes or higher. LLVM notices SIMD is used - // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, - // which macOS's dyld disregarded and causing crashes - // (see issues #51794, #51758, #50867, #48866 and #44056). - // - // To workaround the bug, we trick LLVM into not increasing - // the global's alignment by explicitly assigning a section to it - // (equivalent to automatically generating a `#[link_section]` attribute). 
- // See the comment in the `GlobalValue::canIncreaseAlignment()` function - // of `lib/IR/Globals.cpp` for why this works. - // - // When the alignment is not increased, the optimized `mem::replace` - // will use load-unaligned instructions instead, and thus avoiding the crash. - // - // We could remove this hack whenever we decide to drop macOS 10.10 support. - if cx.tcx.sess.target.target.options.is_like_osx { - let sect_name = if alloc.bytes.iter().all(|b| *b == 0) { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") - } else { - CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") - }; - llvm::LLVMSetSection(g, sect_name.as_ptr()); + &self.instances.borrow_mut().insert(instance, g); + g + } + + fn codegen_static( + &self, + def_id: DefId, + is_mutable: bool, + ) { + unsafe { + let attrs = &self.tcx.codegen_fn_attrs(def_id); + + let (v, alloc) = match codegen_static_initializer(&self, def_id) { + Ok(v) => v, + // Error has already been reported + Err(_) => return, + }; + + let g = &self.get_static(def_id); + + // boolean SSA values are i1, but they have to be stored in i8 slots, + // otherwise some LLVM optimization passes don't work as expected + let mut val_llty = self.val_ty(v); + let v = if val_llty == self.type_i1() { + val_llty = self.type_i8(); + llvm::LLVMConstZExt(v, val_llty) + } else { + v + }; + + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx); + let llty = self.layout_of(ty).llvm_type(self); + let g = if val_llty == llty { + g + } else { + // If we created the global with the wrong type, + // correct the type. + let empty_string = const_cstr!(""); + let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); + let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); + llvm::LLVMSetValueName(g, empty_string.as_ptr()); + + let linkage = llvm::LLVMRustGetLinkage(g); + let visibility = llvm::LLVMRustGetVisibility(g); + + let new_g = llvm::LLVMRustGetOrInsertGlobal( + &self.llmod, name_string.as_ptr(), val_llty); + + llvm::LLVMRustSetLinkage(new_g, linkage); + llvm::LLVMRustSetVisibility(new_g, visibility); + + // To avoid breaking any invariants, we leave around the old + // global for the moment; we'll replace all references to it + // with the new global later. (See base::codegen_backend.) + &self.statics_to_rauw.borrow_mut().push((g, new_g)); + new_g + }; + set_global_alignment(&self, g, self.align_of(ty)); + llvm::LLVMSetInitializer(g, v); + + // As an optimization, all shared statics which do not have interior + // mutability are placed into read-only memory. + if !is_mutable { + if self.type_is_freeze(ty) { + llvm::LLVMSetGlobalConstant(g, llvm::True); + } + } + + debuginfo::create_global_var_metadata(&self, def_id, g); + + if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) { + llvm::set_thread_local_mode(g, self.tls_model); + + // Do not allow LLVM to change the alignment of a TLS on macOS. + // + // By default a global's alignment can be freely increased. + // This allows LLVM to generate more performant instructions + // e.g. using load-aligned into a SIMD register. + // + // However, on macOS 10.10 or below, the dynamic linker does not + // respect any alignment given on the TLS (radar 24221680). + // This will violate the alignment assumption, and causing segfault at runtime. + // + // This bug is very easy to trigger. In `println!` and `panic!`, + // the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS, + // which the values would be `mem::replace`d on initialization. 
+ // The implementation of `mem::replace` will use SIMD + // whenever the size is 32 bytes or higher. LLVM notices SIMD is used + // and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary, + // which macOS's dyld disregarded and causing crashes + // (see issues #51794, #51758, #50867, #48866 and #44056). + // + // To workaround the bug, we trick LLVM into not increasing + // the global's alignment by explicitly assigning a section to it + // (equivalent to automatically generating a `#[link_section]` attribute). + // See the comment in the `GlobalValue::canIncreaseAlignment()` function + // of `lib/IR/Globals.cpp` for why this works. + // + // When the alignment is not increased, the optimized `mem::replace` + // will use load-unaligned instructions instead, and thus avoiding the crash. + // + // We could remove this hack whenever we decide to drop macOS 10.10 support. + if self.tcx.sess.target.target.options.is_like_osx { + let sect_name = if alloc.bytes.iter().all(|b| *b == 0) { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0") + } else { + CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0") + }; + llvm::LLVMSetSection(g, sect_name.as_ptr()); + } } - } - // Wasm statics with custom link sections get special treatment as they - // go into custom sections of the wasm executable. - if cx.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { - if let Some(section) = attrs.link_section { - let section = llvm::LLVMMDStringInContext( - cx.llcx, - section.as_str().as_ptr() as *const _, - section.as_str().len() as c_uint, - ); - let alloc = llvm::LLVMMDStringInContext( - cx.llcx, - alloc.bytes.as_ptr() as *const _, - alloc.bytes.len() as c_uint, - ); - let data = [section, alloc]; - let meta = llvm::LLVMMDNodeInContext(cx.llcx, data.as_ptr(), 2); - llvm::LLVMAddNamedMetadataOperand( - cx.llmod, - "wasm.custom_sections\0".as_ptr() as *const _, - meta, - ); + // Wasm statics with custom link sections get special treatment as they + // go into custom sections of the wasm executable. 
+ if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") { + if let Some(section) = attrs.link_section { + let section = llvm::LLVMMDStringInContext( + &self.llcx, + section.as_str().as_ptr() as *const _, + section.as_str().len() as c_uint, + ); + let alloc = llvm::LLVMMDStringInContext( + &self.llcx, + alloc.bytes.as_ptr() as *const _, + alloc.bytes.len() as c_uint, + ); + let data = [section, alloc]; + let meta = llvm::LLVMMDNodeInContext(&self.llcx, data.as_ptr(), 2); + llvm::LLVMAddNamedMetadataOperand( + &self.llmod, + "wasm.custom_sections\0".as_ptr() as *const _, + meta, + ); + } + } else { + base::set_link_section(g, &attrs); } - } else { - base::set_link_section(g, &attrs); - } - if attrs.flags.contains(CodegenFnAttrFlags::USED) { - // This static will be stored in the llvm.used variable which is an array of i8* - let cast = llvm::LLVMConstPointerCast(g, Type::i8p(cx)); - cx.used_statics.borrow_mut().push(cast); + if attrs.flags.contains(CodegenFnAttrFlags::USED) { + // This static will be stored in the llvm.used variable which is an array of i8* + let cast = llvm::LLVMConstPointerCast(g, &self.type_i8p()); + &self.used_statics.borrow_mut().push(cast); + } + } + } + fn static_replace_all_uses(&self, old_g: &'ll Value, new_g: &'ll Value) { + unsafe { + let bitcast = llvm::LLVMConstPointerCast(new_g, self.val_ty(old_g)); + llvm::LLVMReplaceAllUsesWith(old_g, bitcast); + llvm::LLVMDeleteGlobal(old_g); } } } diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs index 826df82193a31..dc37d351b403f 100644 --- a/src/librustc_codegen_llvm/context.rs +++ b/src/librustc_codegen_llvm/context.rs @@ -9,20 +9,18 @@ // except according to those terms. use attributes; -use common; use llvm; use rustc::dep_graph::DepGraphSafe; use rustc::hir; use debuginfo; -use callee; -use base; -use declare; use monomorphize::Instance; use value::Value; use monomorphize::partitioning::CodegenUnit; use type_::Type; use type_of::PointeeInfo; +use rustc_codegen_ssa::interfaces::*; +use libc::c_uint; use rustc_data_structures::base_n; use rustc_data_structures::small_c_str::SmallCStr; @@ -33,36 +31,39 @@ use rustc::ty::layout::{LayoutError, LayoutOf, Size, TyLayout}; use rustc::ty::{self, Ty, TyCtxt}; use rustc::util::nodemap::FxHashMap; use rustc_target::spec::{HasTargetSpec, Target}; +use rustc_codegen_ssa::callee::resolve_and_get_fn; +use rustc_codegen_ssa::base::wants_msvc_seh; +use callee::get_fn; use std::ffi::CStr; use std::cell::{Cell, RefCell}; use std::iter; use std::str; use std::sync::Arc; +use std::hash::Hash; use syntax::symbol::LocalInternedString; use abi::Abi; /// There is one `CodegenCx` per compilation unit. Each one has its own LLVM /// `llvm::Context` so that several compilation units may be optimized in parallel. /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`. 
-pub struct CodegenCx<'a, 'tcx: 'a> {
-    pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
+pub struct CodegenCx<'ll, 'tcx: 'll, V> {
+    pub tcx: TyCtxt<'ll, 'tcx, 'tcx>,
     pub check_overflow: bool,
     pub use_dll_storage_attrs: bool,
     pub tls_model: llvm::ThreadLocalMode,
 
-    pub llmod: &'a llvm::Module,
-    pub llcx: &'a llvm::Context,
+    pub llmod: &'ll llvm::Module,
+    pub llcx: &'ll llvm::Context,
     pub stats: RefCell<Stats>,
     pub codegen_unit: Arc<CodegenUnit<'tcx>>,
 
     /// Cache instances of monomorphic and polymorphic items
-    pub instances: RefCell<FxHashMap<Instance<'tcx>, &'a Value>>,
+    pub instances: RefCell<FxHashMap<Instance<'tcx>, V>>,
     /// Cache generated vtables
-    pub vtables: RefCell<FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>),
-                                   &'a Value>>,
+    pub vtables: RefCell<FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), V>>,
     /// Cache of constant strings,
-    pub const_cstr_cache: RefCell<FxHashMap<LocalInternedString, &'a Value>>,
+    pub const_cstr_cache: RefCell<FxHashMap<LocalInternedString, V>>,
 
     /// Reverse-direction for const ptrs cast from globals.
     /// Key is a Value holding a *T,
@@ -72,40 +73,39 @@ pub struct CodegenCx<'a, 'tcx: 'a> {
     /// when we ptrcast, and we have to ptrcast during codegen
     /// of a [T] const because we form a slice, a (*T,usize) pair, not
     /// a pointer to an LLVM array type. Similar for trait objects.
-    pub const_unsized: RefCell<FxHashMap<&'a Value, &'a Value>>,
+    pub const_unsized: RefCell<FxHashMap<V, V>>,
 
     /// Cache of emitted const globals (value -> global)
-    pub const_globals: RefCell<FxHashMap<&'a Value, &'a Value>>,
+    pub const_globals: RefCell<FxHashMap<V, V>>,
 
     /// List of globals for static variables which need to be passed to the
     /// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
     /// (We have to make sure we don't invalidate any Values referring
     /// to constants.)
-    pub statics_to_rauw: RefCell<Vec<(&'a Value, &'a Value)>>,
+    pub statics_to_rauw: RefCell<Vec<(V, V)>>,
 
     /// Statics that will be placed in the llvm.used variable
    /// See http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable for details
-    pub used_statics: RefCell<Vec<&'a Value>>,
+    pub used_statics: RefCell<Vec<V>>,
 
-    pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), &'a Type>>,
-    pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'a Type>>,
+    pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<usize>), &'ll Type>>,
+    pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
     pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
-    pub isize_ty: &'a Type,
+    pub isize_ty: &'ll Type,
 
-    pub dbg_cx: Option<debuginfo::CrateDebugContext<'a, 'tcx>>,
+    pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>,
 
-    eh_personality: Cell<Option<&'a Value>>,
-    eh_unwind_resume: Cell<Option<&'a Value>>,
-    pub rust_try_fn: Cell<Option<&'a Value>>,
+    eh_personality: Cell<Option<V>>,
+    eh_unwind_resume: Cell<Option<V>>,
+    pub rust_try_fn: Cell<Option<V>>,
 
-    intrinsics: RefCell<FxHashMap<&'static str, &'a Value>>,
+    intrinsics: RefCell<FxHashMap<&'static str, V>>,
 
     /// A counter that is used for generating local symbol names
     local_gen_sym_counter: Cell<usize>,
 }
 
-impl<'a, 'tcx> DepGraphSafe for CodegenCx<'a, 'tcx> {
-}
+impl<'ll, 'tcx, Value> DepGraphSafe for CodegenCx<'ll, 'tcx, Value> {}
 
 pub fn get_reloc_model(sess: &Session) -> llvm::RelocMode {
     let reloc_model_arg = match sess.opts.cg.relocation_model {
@@ -218,11 +218,11 @@ pub unsafe fn create_module(
     llmod
 }
 
-impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
-    crate fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
+impl<'ll, 'tcx, Value : Eq+Hash> CodegenCx<'ll, 'tcx, Value> {
+    crate fn new(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
                  codegen_unit: Arc<CodegenUnit<'tcx>>,
-                 llvm_module: &'a ::ModuleLlvm)
-                 -> CodegenCx<'a, 'tcx> {
+                 llvm_module: &'ll ::ModuleLlvm)
+                 -> CodegenCx<'ll, 'tcx, Value> {
         // An interesting part of Windows which MSVC forces our hand on (and
         // apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
         // attributes in LLVM IR as well as native dependencies (in C these
@@ -316,34 +316,26 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
     }
 }
 
-impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
-    pub fn sess<'a>(&'a self) -> &'a Session {
-        &self.tcx.sess
+impl MiscMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {
+    fn vtables(&self) ->
&RefCell<
+        FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), &'ll Value>
+    > {
+        &self.vtables
     }
 
-    pub fn get_intrinsic(&self, key: &str) -> &'b Value {
-        if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
-            return v;
-        }
+    fn instances(&self) -> &RefCell<FxHashMap<Instance<'tcx>, &'ll Value>> {
+        &self.instances
+    }
 
-        declare_intrinsic(self, key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
+    fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
+        get_fn(&&self,instance)
     }
 
-    /// Generate a new symbol name with the given prefix. This symbol name must
-    /// only be used for definitions with `internal` or `private` linkage.
-    pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
-        let idx = self.local_gen_sym_counter.get();
-        self.local_gen_sym_counter.set(idx + 1);
-        // Include a '.' character, so there can be no accidental conflicts with
-        // user defined names
-        let mut name = String::with_capacity(prefix.len() + 6);
-        name.push_str(prefix);
-        name.push_str(".");
-        base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
-        name
+    fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value {
+        llvm::get_param(llfn, index)
     }
 
-    pub fn eh_personality(&self) -> &'b Value {
+    fn eh_personality(&self) -> &'ll Value {
         // The exception handling personality function.
         //
         // If our compilation unit has the `eh_personality` lang item somewhere
@@ -369,17 +361,17 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
         }
         let tcx = self.tcx;
         let llfn = match tcx.lang_items().eh_personality() {
-            Some(def_id) if !base::wants_msvc_seh(self.sess()) => {
-                callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
+            Some(def_id) if !wants_msvc_seh(self.sess()) => {
+                resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
             }
             _ => {
-                let name = if base::wants_msvc_seh(self.sess()) {
+                let name = if wants_msvc_seh(self.sess()) {
                     "__CxxFrameHandler3"
                 } else {
                     "rust_eh_personality"
                 };
-                let fty = Type::variadic_func(&[], Type::i32(self));
-                declare::declare_cfn(self, name, fty)
+                let fty = &self.type_variadic_func(&[], &self.type_i32());
+                self.declare_cfn(name, fty)
             }
         };
         attributes::apply_target_cpu_attr(self, llfn);
@@ -389,7 +381,7 @@
 
     // Returns a Value of the "eh_unwind_resume" lang item if one is defined,
    // otherwise declares it as an external function.
-    pub fn eh_unwind_resume(&self) -> &'b Value {
+    fn eh_unwind_resume(&self) -> &'ll Value {
         use attributes;
         let unwresume = &self.eh_unwind_resume;
         if let Some(llfn) = unwresume.get() {
@@ -399,7 +391,7 @@
         let tcx = self.tcx;
         assert!(self.sess().target.target.options.custom_unwind_resume);
         if let Some(def_id) = tcx.lang_items().eh_unwind_resume() {
-            let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
+            let llfn = resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
             unwresume.set(Some(llfn));
             return llfn;
         }
@@ -412,59 +404,424 @@
             Abi::C
         )));
-        let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", ty);
+        let llfn = self.declare_fn("rust_eh_unwind_resume", ty);
         attributes::unwind(llfn, true);
         attributes::apply_target_cpu_attr(self, llfn);
         unwresume.set(Some(llfn));
         llfn
     }
 
-    pub fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool {
-        common::type_needs_drop(self.tcx, ty)
+    fn sess(&self) -> &Session {
+        &self.tcx.sess
+    }
+
+    fn check_overflow(&self) -> bool {
+        self.check_overflow
+    }
+
+    fn stats(&self) -> &RefCell<Stats> {
+        &self.stats
+    }
+
+    fn consume_stats(self) -> RefCell<Stats> {
+        self.stats
+    }
+
+    fn codegen_unit(&self) -> &Arc<CodegenUnit<'tcx>> {
+        &self.codegen_unit
+    }
+
+    fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
+        &self.statics_to_rauw
     }
 
-    pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
-        common::type_is_sized(self.tcx, ty)
+    fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
+        &self.used_statics
     }
 
-    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
-        common::type_is_freeze(self.tcx, ty)
+    fn set_frame_pointer_elimination(&self, llfn: &'ll Value) {
+        attributes::set_frame_pointer_elimination(self, llfn)
    }
 
-    pub fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool {
-        use syntax_pos::DUMMY_SP;
-        if ty.is_sized(self.tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) {
-            return false;
+    fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
+        attributes::apply_target_cpu_attr(self, llfn)
+    }
+
+    fn env_alloca_allowed(&self) -> bool {
+        unsafe { llvm::LLVMRustVersionMajor() < 6 }
+    }
+
+    fn create_used_variable(&self) {
+        let name = const_cstr!("llvm.used");
+        let section = const_cstr!("llvm.metadata");
+        let array = self.const_array(
+            &self.type_ptr_to(self.type_i8()),
+            &*self.used_statics.borrow()
+        );
+
+        unsafe {
+            let g = llvm::LLVMAddGlobal(self.llmod,
+                                        self.val_ty(array),
+                                        name.as_ptr());
+            llvm::LLVMSetInitializer(g, array);
+            llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
+            llvm::LLVMSetSection(g, section.as_ptr());
         }
+    }
+}
+
+impl<'a, 'll: 'a, 'tcx: 'll> CodegenMethods<'a, 'll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {}
 
-        let tail = self.tcx.struct_tail(ty);
-        match tail.sty {
-            ty::Foreign(..) => false,
-            ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
-            _ => bug!("unexpected unsized tail: {:?}", tail.sty),
+impl IntrinsicDeclarationMethods<'b> for CodegenCx<'b, 'tcx, &'b Value> {
+    fn get_intrinsic(&self, key: &str) -> &'b Value {
+        if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
+            return v;
+        }
+
+        self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
+    }
+
+    fn declare_intrinsic(
+        &self,
+        key: &str
+    ) -> Option<&'b Value> {
+        macro_rules! ifn {
+            ($name:expr, fn() -> $ret:expr) => (
+                if key == $name {
+                    let f = &self.declare_cfn($name, &self.type_func(&[], $ret));
+                    llvm::SetUnnamedAddr(f, false);
+                    &self.intrinsics.borrow_mut().insert($name, f.clone());
+                    return Some(f);
+                }
+            );
+            ($name:expr, fn(...)
-> $ret:expr) => ( + if key == $name { + let f = &self.declare_cfn($name, &self.type_variadic_func(&[], $ret)); + llvm::SetUnnamedAddr(f, false); + &self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( + if key == $name { + let f = &self.declare_cfn($name, &self.type_func(&[$($arg),*], $ret)); + llvm::SetUnnamedAddr(f, false); + &self.intrinsics.borrow_mut().insert($name, f.clone()); + return Some(f); + } + ); + } + macro_rules! mk_struct { + ($($field_ty:expr),*) => (&self.type_struct( &[$($field_ty),*], false)) } + + let i8p = &self.type_i8p(); + let void = &self.type_void(); + let i1 = &self.type_i1(); + let t_i8 = &self.type_i8(); + let t_i16 = &self.type_i16(); + let t_i32 = &self.type_i32(); + let t_i64 = &self.type_i64(); + let t_i128 = &self.type_i128(); + let t_f32 = &self.type_f32(); + let t_f64 = &self.type_f64(); + + let t_v2f32 = &self.type_vector(t_f32, 2); + let t_v4f32 = &self.type_vector(t_f32, 4); + let t_v8f32 = &self.type_vector(t_f32, 8); + let t_v16f32 = &self.type_vector(t_f32, 16); + + let t_v2f64 = &self.type_vector(t_f64, 2); + let t_v4f64 = &self.type_vector(t_f64, 4); + let t_v8f64 = &self.type_vector(t_f64, 8); + + ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); + ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); + ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); + ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); + ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); + + ifn!("llvm.trap", fn() -> void); + ifn!("llvm.debugtrap", fn() -> void); + ifn!("llvm.frameaddress", fn(t_i32) -> i8p); + + ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); + ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32); + ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32); + ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32); + ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32); + ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); + ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64); + ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64); + ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64); + + ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64); + + 
ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); + ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); + ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); + ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); + ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); + ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); + ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); + ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); + ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32); + ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); + ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); + ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); + ifn!("llvm.fma.v2f64", 
fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); + ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); + ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); + + ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); + ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); + ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); + ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); + ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); + ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32); + ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32); + ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32); + ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32); + ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); + ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64); + ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64); + ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64); + + ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); + ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); + ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); + ifn!("llvm.round.f32", fn(t_f32) -> t_f32); + ifn!("llvm.round.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); + ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); + ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); + + ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); + ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); + ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); + ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); + ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); + ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); + ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); + ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); + ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128); + + ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8); + ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16); + ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32); + ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64); + ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128); + + ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + 
ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); + ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); + ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); + ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); + ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); + + ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); + ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); + + ifn!("llvm.expect.i1", fn(i1, i1) -> i1); + ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); + ifn!("llvm.localescape", fn(...) -> void); + ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); + ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); + + ifn!("llvm.assume", fn(i1) -> void); + ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); + + if self.sess().opts.debuginfo != DebugInfo::None { + ifn!("llvm.dbg.declare", fn(&self.type_metadata(), &self.type_metadata()) -> void); + ifn!("llvm.dbg.value", fn(&self.type_metadata(), t_i64, &self.type_metadata()) -> void); + } + return None; } } -impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx> { +impl<'b, 'tcx> CodegenCx<'b, 'tcx, &'b Value> { + /// Generate a new symbol name with the given prefix. This symbol name must + /// only be used for definitions with `internal` or `private` linkage. + pub fn generate_local_symbol_name(&self, prefix: &str) -> String { + let idx = self.local_gen_sym_counter.get(); + self.local_gen_sym_counter.set(idx + 1); + // Include a '.' 
character, so there can be no accidental conflicts with + // user defined names + let mut name = String::with_capacity(prefix.len() + 6); + name.push_str(prefix); + name.push_str("."); + base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name); + name + } +} + +impl ty::layout::HasDataLayout for &'a CodegenCx<'ll, 'tcx, &'ll Value> { fn data_layout(&self) -> &ty::layout::TargetDataLayout { &self.tcx.data_layout } } -impl HasTargetSpec for &'a CodegenCx<'ll, 'tcx> { +impl HasTargetSpec for &'a CodegenCx<'ll, 'tcx, &'ll Value> { fn target_spec(&self) -> &Target { &self.tcx.sess.target.target } } -impl ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'ll, 'tcx> { +impl ty::layout::HasTyCtxt<'tcx> for &'a CodegenCx<'ll, 'tcx, &'ll Value> { fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> { self.tcx } } -impl LayoutOf for &'a CodegenCx<'ll, 'tcx> { +impl LayoutOf for &'a CodegenCx<'ll, 'tcx, &'ll Value> { type Ty = Ty<'tcx>; type TyLayout = TyLayout<'tcx>; @@ -477,307 +834,3 @@ impl LayoutOf for &'a CodegenCx<'ll, 'tcx> { }) } } - -/// Declare any llvm intrinsics that you might need -fn declare_intrinsic(cx: &CodegenCx<'ll, '_>, key: &str) -> Option<&'ll Value> { - macro_rules! ifn { - ($name:expr, fn() -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, fn(...) -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, Type::variadic_func(&[], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - ($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( - if key == $name { - let f = declare::declare_cfn(cx, $name, Type::func(&[$($arg),*], $ret)); - llvm::SetUnnamedAddr(f, false); - cx.intrinsics.borrow_mut().insert($name, f.clone()); - return Some(f); - } - ); - } - macro_rules! 
mk_struct { - ($($field_ty:expr),*) => (Type::struct_(cx, &[$($field_ty),*], false)) - } - - let i8p = Type::i8p(cx); - let void = Type::void(cx); - let i1 = Type::i1(cx); - let t_i8 = Type::i8(cx); - let t_i16 = Type::i16(cx); - let t_i32 = Type::i32(cx); - let t_i64 = Type::i64(cx); - let t_i128 = Type::i128(cx); - let t_f32 = Type::f32(cx); - let t_f64 = Type::f64(cx); - - let t_v2f32 = Type::vector(t_f32, 2); - let t_v4f32 = Type::vector(t_f32, 4); - let t_v8f32 = Type::vector(t_f32, 8); - let t_v16f32 = Type::vector(t_f32, 16); - - let t_v2f64 = Type::vector(t_f64, 2); - let t_v4f64 = Type::vector(t_f64, 4); - let t_v8f64 = Type::vector(t_f64, 8); - - ifn!("llvm.memcpy.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); - ifn!("llvm.memcpy.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); - ifn!("llvm.memcpy.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i16", fn(i8p, i8p, t_i16, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i32", fn(i8p, i8p, t_i32, t_i32, i1) -> void); - ifn!("llvm.memmove.p0i8.p0i8.i64", fn(i8p, i8p, t_i64, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void); - ifn!("llvm.memset.p0i8.i64", fn(i8p, t_i8, t_i64, t_i32, i1) -> void); - - ifn!("llvm.trap", fn() -> void); - ifn!("llvm.debugtrap", fn() -> void); - ifn!("llvm.frameaddress", fn(t_i32) -> i8p); - - ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32); - ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32); - ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32); - ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32); - ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32); - ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64); - ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64); - ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64); - ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64); - - ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32); - ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32); - ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32); - ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32); - ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64); - ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64); - ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64); - ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64); - - ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64); - ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.sin.f32", fn(t_f32) -> t_f32); - ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.sin.f64", fn(t_f64) -> t_f64); - ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.cos.f32", fn(t_f32) -> t_f32); - ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32); - 
ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.cos.f64", fn(t_f64) -> t_f64); - ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.exp.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.exp.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32); - ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64); - ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log10.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log10.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.log2.f32", fn(t_f32) -> t_f32); - ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.log2.f64", fn(t_f64) -> t_f64); - ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32); - ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32); - ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32); - ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32); - ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32); - ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64); - ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64); - ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64); - ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64); - - ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32); - ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64); - ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64); - 
ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.floor.f32", fn(t_f32) -> t_f32); - ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.floor.f64", fn(t_f64) -> t_f64); - ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32); - ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32); - ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32); - ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32); - ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32); - ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64); - ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64); - ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64); - ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64); - - ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32); - ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32); - ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64); - ifn!("llvm.round.f32", fn(t_f32) -> t_f32); - ifn!("llvm.round.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.rint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.rint.f64", fn(t_f64) -> t_f64); - ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32); - ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64); - - ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8); - ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16); - ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32); - ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64); - ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.ctlz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64); - ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128); - - ifn!("llvm.cttz.i8", fn(t_i8 , i1) -> t_i8); - ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16); - ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32); - ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64); - ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128); - - ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16); - ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32); - ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64); - ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8); - ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16); - ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32); - ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64); - ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128); - - ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - 
ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct!{t_i8, i1}); - ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct!{t_i16, i1}); - ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct!{t_i32, i1}); - ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct!{t_i64, i1}); - ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct!{t_i128, i1}); - - ifn!("llvm.lifetime.start", fn(t_i64,i8p) -> void); - ifn!("llvm.lifetime.end", fn(t_i64, i8p) -> void); - - ifn!("llvm.expect.i1", fn(i1, i1) -> i1); - ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32); - ifn!("llvm.localescape", fn(...) -> void); - ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p); - ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p); - - ifn!("llvm.assume", fn(i1) -> void); - ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void); - - if cx.sess().opts.debuginfo != DebugInfo::None { - ifn!("llvm.dbg.declare", fn(Type::metadata(cx), Type::metadata(cx)) -> void); - ifn!("llvm.dbg.value", fn(Type::metadata(cx), t_i64, Type::metadata(cx)) -> void); - } - - None -} diff --git a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs index 56352ae963f20..31fce4d2655b2 100644 --- a/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs +++ b/src/librustc_codegen_llvm/debuginfo/create_scope_map.rs @@ -8,14 +8,15 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use super::{FunctionDebugContext, FunctionDebugContextData}; +use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, FunctionDebugContextData, MirDebugScope}; use super::metadata::file_metadata; use super::utils::{DIB, span_start}; use llvm; -use llvm::debuginfo::DIScope; +use llvm::debuginfo::{DIScope, DISubprogram}; use common::CodegenCx; use rustc::mir::{Mir, SourceScope}; +use value::Value; use libc::c_uint; @@ -26,28 +27,13 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use syntax_pos::BytePos; -#[derive(Clone, Copy, Debug)] -pub struct MirDebugScope<'ll> { - pub scope_metadata: Option<&'ll DIScope>, - // Start and end offsets of the file to which this DIScope belongs. - // These are used to quickly determine whether some span refers to the same file. 
- pub file_start_pos: BytePos, - pub file_end_pos: BytePos, -} - -impl MirDebugScope<'ll> { - pub fn is_valid(&self) -> bool { - self.scope_metadata.is_some() - } -} - /// Produce DIScope DIEs for each MIR Scope which has variables defined in it. /// If debuginfo is disabled, the returned vector is empty. pub fn create_mir_scopes( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, mir: &Mir, - debug_context: &FunctionDebugContext<'ll>, -) -> IndexVec> { + debug_context: &FunctionDebugContext<&'ll DISubprogram>, +) -> IndexVec> { let null_scope = MirDebugScope { scope_metadata: None, file_start_pos: BytePos(0), @@ -79,12 +65,12 @@ pub fn create_mir_scopes( scopes } -fn make_mir_scope(cx: &CodegenCx<'ll, '_>, +fn make_mir_scope(cx: &CodegenCx<'ll, '_, &'ll Value>, mir: &Mir, has_variables: &BitSet, - debug_context: &FunctionDebugContextData<'ll>, + debug_context: &FunctionDebugContextData<&'ll DISubprogram>, scope: SourceScope, - scopes: &mut IndexVec>) { + scopes: &mut IndexVec>) { if scopes[scope].is_valid() { return; } diff --git a/src/librustc_codegen_llvm/debuginfo/gdb.rs b/src/librustc_codegen_llvm/debuginfo/gdb.rs index f6faddb894ffd..d617ad2b8e112 100644 --- a/src/librustc_codegen_llvm/debuginfo/gdb.rs +++ b/src/librustc_codegen_llvm/debuginfo/gdb.rs @@ -12,24 +12,25 @@ use llvm; -use common::{C_bytes, CodegenCx, C_i32}; +use common::CodegenCx; use builder::Builder; -use declare; use rustc::session::config::DebugInfo; -use type_::Type; use value::Value; +use rustc_codegen_ssa::interfaces::*; use syntax::attr; /// Inserts a side-effect free instruction sequence that makes sure that the /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker. -pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) { - if needs_gdb_debug_scripts_section(bx.cx) { - let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx); +pub fn insert_reference_to_gdb_debug_scripts_section_global( + bx: &mut Builder<'_, 'll, '_, &'ll Value> +) { + if needs_gdb_debug_scripts_section(bx.cx()) { + let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx()); // Load just the first byte as that's all that's necessary to force // LLVM to keep around the reference to the global. - let indices = [C_i32(bx.cx, 0), C_i32(bx.cx, 0)]; + let indices = [bx.cx().const_i32(0), bx.cx().const_i32(0)]; let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices); let volative_load_instruction = bx.volatile_load(element); unsafe { @@ -40,7 +41,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) { /// Allocates the global variable responsible for the .debug_gdb_scripts binary /// section. 
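For orientation on the create_scope_map.rs hunk above: `MirDebugScope` is no longer defined in that file but imported from `rustc_codegen_ssa::debuginfo`, and `create_mir_scopes` now returns an `IndexVec` of these keyed by `mir::SourceScope`. A sketch of the generic form the struct presumably takes there, with the fields copied from the removed definition (`D` stands in for the backend's scope-metadata type, `&'ll DIScope` on the LLVM side):

```rust
use syntax_pos::BytePos;

// Assumed generic form of the moved type.
#[derive(Clone, Copy, Debug)]
pub struct MirDebugScope<D> {
    pub scope_metadata: Option<D>,
    // Start and end offsets of the file to which this scope belongs.
    // These are used to quickly determine whether some span refers to the same file.
    pub file_start_pos: BytePos,
    pub file_end_pos: BytePos,
}

impl<D> MirDebugScope<D> {
    pub fn is_valid(&self) -> bool {
        self.scope_metadata.is_some()
    }
}
```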
-pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) +pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll Value { let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0"; let section_var_name = &c_section_var_name[..c_section_var_name.len()-1]; @@ -55,15 +56,15 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0"; unsafe { - let llvm_type = Type::array(Type::i8(cx), + let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64); - let section_var = declare::define_global(cx, section_var_name, + let section_var = cx.define_global(section_var_name, llvm_type).unwrap_or_else(||{ bug!("symbol `{}` is already defined", section_var_name) }); llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _); - llvm::LLVMSetInitializer(section_var, C_bytes(cx, section_contents)); + llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents)); llvm::LLVMSetGlobalConstant(section_var, llvm::True); llvm::LLVMSetUnnamedAddr(section_var, llvm::True); llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage); @@ -75,7 +76,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) }) } -pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx) -> bool { +pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'ll, '_, &'ll Value>) -> bool { let omit_gdb_pretty_printer_section = attr::contains_name(&cx.tcx.hir.krate_attrs(), "omit_gdb_pretty_printer_section"); diff --git a/src/librustc_codegen_llvm/debuginfo/metadata.rs b/src/librustc_codegen_llvm/debuginfo/metadata.rs index 846d505641103..cd728f3801480 100644 --- a/src/librustc_codegen_llvm/debuginfo/metadata.rs +++ b/src/librustc_codegen_llvm/debuginfo/metadata.rs @@ -17,6 +17,7 @@ use super::utils::{debug_context, DIB, span_start, use super::namespace::mangled_name_of_instance; use super::type_names::compute_debuginfo_type_name; use super::{CrateDebugContext}; +use rustc_codegen_ssa::interfaces::*; use abi; use value::Value; @@ -152,7 +153,7 @@ impl TypeMap<'ll, 'tcx> { // Get the UniqueTypeId for the given type. If the UniqueTypeId for the given // type has been requested before, this is just a table lookup. Otherwise an // ID will be generated and stored for later lookup. - fn get_unique_type_id_of_type<'a>(&mut self, cx: &CodegenCx<'a, 'tcx>, + fn get_unique_type_id_of_type<'a>(&mut self, cx: &CodegenCx<'a, 'tcx, &'a Value>, type_: Ty<'tcx>) -> UniqueTypeId { // Let's see if we already have something in the cache if let Some(unique_type_id) = self.type_to_unique_id.get(&type_).cloned() { @@ -182,7 +183,7 @@ impl TypeMap<'ll, 'tcx> { // types of their own, so they need special handling. We still need a // UniqueTypeId for them, since to debuginfo they *are* real types. 
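The gdb.rs changes above replace the `declare::define_global`/`C_bytes` free functions with trait methods on the context. A hypothetical helper showing the same pattern (the helper's name and signature are illustrative and not part of the PR; the `cx.type_array`, `cx.type_i8`, `cx.define_global` and `cx.const_bytes` calls are the ones used in the hunk above, and the usual module imports are assumed to be in scope):

```rust
// Sketch: define a constant byte-array global through the context traits,
// without naming LLVM types directly at the call site.
fn define_byte_array_global<'ll>(
    cx: &CodegenCx<'ll, '_, &'ll Value>,
    name: &str,
    bytes: &[u8],
) -> &'ll Value {
    let llty = cx.type_array(cx.type_i8(), bytes.len() as u64);
    let global = cx.define_global(name, llty)
        .unwrap_or_else(|| bug!("symbol `{}` is already defined", name));
    unsafe {
        llvm::LLVMSetInitializer(global, cx.const_bytes(bytes));
        llvm::LLVMSetGlobalConstant(global, llvm::True);
    }
    global
}
```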
fn get_unique_type_id_of_enum_variant<'a>(&mut self, - cx: &CodegenCx<'a, 'tcx>, + cx: &CodegenCx<'a, 'tcx, &'a Value>, enum_type: Ty<'tcx>, variant_name: &str) -> UniqueTypeId { @@ -211,7 +212,7 @@ enum RecursiveTypeDescription<'ll, 'tcx> { } fn create_and_register_recursive_type_forward_declaration( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, unfinished_type: Ty<'tcx>, unique_type_id: UniqueTypeId, metadata_stub: &'ll DICompositeType, @@ -235,7 +236,7 @@ impl RecursiveTypeDescription<'ll, 'tcx> { // Finishes up the description of the type in question (mostly by providing // descriptions of the fields of the given type) and returns the final type // metadata. - fn finalize(&self, cx: &CodegenCx<'ll, 'tcx>) -> MetadataCreationResult<'ll> { + fn finalize(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> MetadataCreationResult<'ll> { match *self { FinalMetadata(metadata) => MetadataCreationResult::new(metadata, false), UnfinishedMetadata { @@ -288,7 +289,7 @@ macro_rules! return_if_metadata_created_in_meantime { } fn fixed_vec_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, unique_type_id: UniqueTypeId, array_or_slice_type: Ty<'tcx>, element_type: Ty<'tcx>, @@ -325,7 +326,7 @@ fn fixed_vec_metadata( } fn vec_slice_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, slice_ptr_type: Ty<'tcx>, element_type: Ty<'tcx>, unique_type_id: UniqueTypeId, @@ -375,7 +376,7 @@ fn vec_slice_metadata( } fn subroutine_type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, unique_type_id: UniqueTypeId, signature: ty::PolyFnSig<'tcx>, span: Span, @@ -417,7 +418,7 @@ fn subroutine_type_metadata( // of a DST struct, there is no trait_object_type and the results of this // function will be a little bit weird. 
fn trait_pointer_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, trait_type: Ty<'tcx>, trait_object_type: Option>, unique_type_id: UniqueTypeId, @@ -480,7 +481,7 @@ fn trait_pointer_metadata( } pub fn type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, t: Ty<'tcx>, usage_site_span: Span, ) -> &'ll DIType { @@ -700,7 +701,7 @@ pub fn type_metadata( metadata } -pub fn file_metadata(cx: &CodegenCx<'ll, '_>, +pub fn file_metadata(cx: &CodegenCx<'ll, '_, &'ll Value>, file_name: &FileName, defining_crate: CrateNum) -> &'ll DIFile { debug!("file_metadata: file_name: {}, defining_crate: {}", @@ -718,11 +719,11 @@ pub fn file_metadata(cx: &CodegenCx<'ll, '_>, file_metadata_raw(cx, &file_name.to_string(), &directory.to_string_lossy()) } -pub fn unknown_file_metadata(cx: &CodegenCx<'ll, '_>) -> &'ll DIFile { +pub fn unknown_file_metadata(cx: &CodegenCx<'ll, '_, &'ll Value>) -> &'ll DIFile { file_metadata_raw(cx, "", "") } -fn file_metadata_raw(cx: &CodegenCx<'ll, '_>, +fn file_metadata_raw(cx: &CodegenCx<'ll, '_, &'ll Value>, file_name: &str, directory: &str) -> &'ll DIFile { @@ -748,7 +749,7 @@ fn file_metadata_raw(cx: &CodegenCx<'ll, '_>, file_metadata } -fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { +fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx, &'ll Value>, t: Ty<'tcx>) -> &'ll DIType { debug!("basic_type_metadata: {:?}", t); let (name, encoding) = match t.sty { @@ -784,7 +785,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType { } fn foreign_type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, t: Ty<'tcx>, unique_type_id: UniqueTypeId, ) -> &'ll DIType { @@ -795,7 +796,7 @@ fn foreign_type_metadata( } fn pointer_type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, pointer_type: Ty<'tcx>, pointee_type_metadata: &'ll DIType, ) -> &'ll DIType { @@ -929,7 +930,7 @@ enum MemberDescriptionFactory<'ll, 'tcx> { } impl MemberDescriptionFactory<'ll, 'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { match *self { StructMDF(ref this) => { @@ -963,7 +964,7 @@ struct StructMemberDescriptionFactory<'tcx> { } impl<'tcx> StructMemberDescriptionFactory<'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { let layout = cx.layout_of(self.ty); self.variant.fields.iter().enumerate().map(|(i, f)| { @@ -988,7 +989,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> { fn prepare_struct_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, struct_type: Ty<'tcx>, unique_type_id: UniqueTypeId, span: Span, @@ -1033,7 +1034,7 @@ struct TupleMemberDescriptionFactory<'tcx> { } impl<'tcx> TupleMemberDescriptionFactory<'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { let layout = cx.layout_of(self.ty); self.component_types.iter().enumerate().map(|(i, &component_type)| { @@ -1051,7 +1052,7 @@ impl<'tcx> TupleMemberDescriptionFactory<'tcx> { } fn prepare_tuple_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, tuple_type: Ty<'tcx>, component_types: &[Ty<'tcx>], unique_type_id: UniqueTypeId, @@ -1087,7 +1088,7 @@ struct UnionMemberDescriptionFactory<'tcx> { 
} impl<'tcx> UnionMemberDescriptionFactory<'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { self.variant.fields.iter().enumerate().map(|(i, f)| { let field = self.layout.field(cx, i); @@ -1105,7 +1106,7 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> { } fn prepare_union_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, union_type: Ty<'tcx>, unique_type_id: UniqueTypeId, span: Span, @@ -1156,7 +1157,7 @@ struct EnumMemberDescriptionFactory<'ll, 'tcx> { } impl EnumMemberDescriptionFactory<'ll, 'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { let adt = &self.enum_type.ty_adt_def().unwrap(); match self.layout.variants { @@ -1241,7 +1242,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> { // of discriminant instead of us having to recover its path. // Right now it's not even going to work for `niche_start > 0`, // and for multiple niche variants it only supports the first. - fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, + fn compute_field_path<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, name: &mut String, layout: TyLayout<'tcx>, offset: Size, @@ -1291,7 +1292,7 @@ struct VariantMemberDescriptionFactory<'ll, 'tcx> { } impl VariantMemberDescriptionFactory<'ll, 'tcx> { - fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx>) + fn create_member_descriptions(&self, cx: &CodegenCx<'ll, 'tcx, &'ll Value>) -> Vec> { self.args.iter().enumerate().map(|(i, &(ref name, ty))| { let (size, align) = cx.size_and_align_of(ty); @@ -1322,7 +1323,7 @@ enum EnumDiscriminantInfo<'ll> { // descriptions of the fields of the variant. This is a rudimentary version of a // full RecursiveTypeDescription. fn describe_enum_variant( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, layout: layout::TyLayout<'tcx>, variant: &'tcx ty::VariantDef, discriminant_info: EnumDiscriminantInfo<'ll>, @@ -1384,7 +1385,7 @@ fn describe_enum_variant( } fn prepare_enum_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, enum_type: Ty<'tcx>, enum_def_id: DefId, unique_type_id: UniqueTypeId, @@ -1502,7 +1503,7 @@ fn prepare_enum_metadata( }), ); - fn get_enum_discriminant_name(cx: &CodegenCx, + fn get_enum_discriminant_name(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> InternedString { cx.tcx.item_name(def_id) @@ -1514,7 +1515,7 @@ fn prepare_enum_metadata( /// /// Examples of Rust types to use this are: structs, tuples, boxes, vecs, and enums. fn composite_type_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, composite_type: Ty<'tcx>, composite_type_name: &str, composite_type_unique_id: UniqueTypeId, @@ -1540,7 +1541,7 @@ fn composite_type_metadata( composite_type_metadata } -fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>, +fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_, &'ll Value>, composite_type_metadata: &'ll DICompositeType, member_descriptions: Vec>) { // In some rare cases LLVM metadata uniquing would lead to an existing type @@ -1591,7 +1592,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>, // any caching, does not add any fields to the struct. This can be done later // with set_members_of_composite_type(). 
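The comments above describe a two-phase scheme for (possibly recursive) composite types: `create_struct_stub` produces an empty forward declaration that gets registered, and `set_members_of_composite_type` attaches the member descriptions later. A self-contained toy version of that idea, outside rustc, to make the termination argument concrete:

```rust
use std::collections::HashMap;

// Register a placeholder before visiting the fields, so a self-referential
// type stops recursing when it reaches its own stub.
fn type_metadata(
    defs: &HashMap<&'static str, Vec<&'static str>>,     // type -> field types
    done: &mut HashMap<&'static str, Vec<&'static str>>, // "metadata" built so far
    name: &'static str,
) {
    if done.contains_key(name) {
        return; // already created (possibly still just a stub)
    }
    done.insert(name, Vec::new()); // the create_struct_stub + register step
    let fields = defs.get(name).cloned().unwrap_or_default();
    for &f in &fields {
        type_metadata(defs, done, f); // recursion hits the stub and stops
    }
    done.insert(name, fields); // the set_members_of_composite_type step
}

fn main() {
    let mut defs = HashMap::new();
    defs.insert("Node", vec!["i32", "Node"]); // self-referential type
    let mut done = HashMap::new();
    type_metadata(&defs, &mut done, "Node"); // terminates thanks to the stub
    assert_eq!(done["Node"], vec!["i32", "Node"]);
}
```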
fn create_struct_stub( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, struct_type: Ty<'tcx>, struct_type_name: &str, unique_type_id: UniqueTypeId, @@ -1629,7 +1630,7 @@ fn create_struct_stub( } fn create_union_stub( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, union_type: Ty<'tcx>, union_type_name: &str, unique_type_id: UniqueTypeId, @@ -1668,7 +1669,7 @@ fn create_union_stub( /// /// Adds the created metadata nodes directly to the crate's IR. pub fn create_global_var_metadata( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId, global: &'ll Value, ) { @@ -1728,28 +1729,12 @@ pub fn create_global_var_metadata( } } -// Creates an "extension" of an existing DIScope into another file. -pub fn extend_scope_to_file( - cx: &CodegenCx<'ll, '_>, - scope_metadata: &'ll DIScope, - file: &syntax_pos::SourceFile, - defining_crate: CrateNum, -) -> &'ll DILexicalBlock { - let file_metadata = file_metadata(cx, &file.name, defining_crate); - unsafe { - llvm::LLVMRustDIBuilderCreateLexicalBlockFile( - DIB(cx), - scope_metadata, - file_metadata) - } -} - /// Creates debug information for the given vtable, which is for the /// given type. /// /// Adds the created metadata nodes directly to the crate's IR. pub fn create_vtable_metadata( - cx: &CodegenCx<'ll, 'tcx>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, ty: ty::Ty<'tcx>, vtable: &'ll Value, ) { @@ -1799,3 +1784,19 @@ pub fn create_vtable_metadata( 0); } } + +// Creates an "extension" of an existing DIScope into another file. +pub fn extend_scope_to_file( + cx: &CodegenCx<'ll, '_, &'ll Value>, + scope_metadata: &'ll DIScope, + file: &syntax_pos::SourceFile, + defining_crate: CrateNum, +) -> &'ll DILexicalBlock { + let file_metadata = file_metadata(cx, &file.name, defining_crate); + unsafe { + llvm::LLVMRustDIBuilderCreateLexicalBlockFile( + DIB(cx), + scope_metadata, + file_metadata) + } +} diff --git a/src/librustc_codegen_llvm/debuginfo/mod.rs b/src/librustc_codegen_llvm/debuginfo/mod.rs index 042e72e921ece..f2ee7576431da 100644 --- a/src/librustc_codegen_llvm/debuginfo/mod.rs +++ b/src/librustc_codegen_llvm/debuginfo/mod.rs @@ -11,8 +11,8 @@ // See doc.rs for documentation. 
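For orientation before the debuginfo/mod.rs changes: `FunctionDebugContext`, `FunctionDebugContextData`, `VariableAccess` and `VariableKind` (removed from that module further down) are now imported from `rustc_codegen_ssa::debuginfo`, presumably in a form that is generic over the backend's debug-scope and value types. A sketch of those assumed shapes, with names taken from the removed definitions (`D` and `V` are placeholders; in this PR they are instantiated as `&'ll DISubprogram` and `&'ll Value`):

```rust
use std::cell::Cell;
use rustc::hir::def_id::CrateNum;

pub enum FunctionDebugContext<D> {
    RegularContext(FunctionDebugContextData<D>),
    DebugInfoDisabled,
    FunctionWithoutDebugInfo,
}

pub struct FunctionDebugContextData<D> {
    pub fn_metadata: D,
    pub source_locations_enabled: Cell<bool>, // `Cell<bool>` in the removed code
    pub defining_crate: CrateNum,
}

pub enum VariableAccess<'a, V> {
    // The llptr given is an alloca containing the variable's value.
    DirectVariable { alloca: V },
    // The llptr given is an alloca containing the start of some pointer chain
    // leading to the variable's content.
    IndirectVariable { alloca: V, address_operations: &'a [i64] },
}

pub enum VariableKind {
    ArgumentVariable(usize /* index */),
    LocalVariable,
}
```

The `get_ref` helper and its error messages presumably move along with the enum unchanged, which is why the builder impl below can still call `dbg_context.get_ref(span)`.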
mod doc; -use self::VariableAccess::*; -use self::VariableKind::*; +use rustc_codegen_ssa::debuginfo::VariableAccess::*; +use rustc_codegen_ssa::debuginfo::VariableKind::*; use self::utils::{DIB, span_start, create_DIArray, is_node_local_to_unit}; use self::namespace::mangled_name_of_instance; @@ -21,7 +21,8 @@ use self::metadata::{type_metadata, file_metadata, TypeMap}; use self::source_loc::InternalDebugLocation::{self, UnknownLocation}; use llvm; -use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags}; +use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DIArray, DIFlags, + DILexicalBlock, DISubprogram}; use rustc::hir::CodegenFnAttrFlags; use rustc::hir::def_id::{DefId, CrateNum}; use rustc::ty::subst::{Substs, UnpackedKind}; @@ -35,7 +36,10 @@ use rustc::mir; use rustc::session::config::{self, DebugInfo}; use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet}; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_data_structures::indexed_vec::IndexVec; use value::Value; +use rustc_codegen_ssa::debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, + VariableKind, FunctionDebugContextData}; use libc::c_uint; use std::cell::{Cell, RefCell}; @@ -45,6 +49,7 @@ use syntax_pos::{self, Span, Pos}; use syntax::ast; use syntax::symbol::{Symbol, InternedString}; use rustc::ty::layout::{self, LayoutOf}; +use rustc_codegen_ssa::interfaces::*; pub mod gdb; mod utils; @@ -54,10 +59,8 @@ pub mod metadata; mod create_scope_map; mod source_loc; -pub use self::create_scope_map::{create_mir_scopes, MirDebugScope}; -pub use self::source_loc::start_emitting_source_locations; +pub use self::create_scope_map::{create_mir_scopes}; pub use self::metadata::create_global_var_metadata; -pub use self::metadata::create_vtable_metadata; pub use self::metadata::extend_scope_to_file; pub use self::source_loc::set_source_location; @@ -109,56 +112,9 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> { } } -pub enum FunctionDebugContext<'ll> { - RegularContext(FunctionDebugContextData<'ll>), - DebugInfoDisabled, - FunctionWithoutDebugInfo, -} - -impl FunctionDebugContext<'ll> { - pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<'ll> { - match *self { - FunctionDebugContext::RegularContext(ref data) => data, - FunctionDebugContext::DebugInfoDisabled => { - span_bug!(span, "{}", FunctionDebugContext::debuginfo_disabled_message()); - } - FunctionDebugContext::FunctionWithoutDebugInfo => { - span_bug!(span, "{}", FunctionDebugContext::should_be_ignored_message()); - } - } - } - - fn debuginfo_disabled_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!" - } - - fn should_be_ignored_message() -> &'static str { - "debuginfo: Error trying to access FunctionDebugContext for function that should be \ - ignored by debug info!" - } -} - -pub struct FunctionDebugContextData<'ll> { - fn_metadata: &'ll DISubprogram, - source_locations_enabled: Cell, - pub defining_crate: CrateNum, -} - -pub enum VariableAccess<'a, 'll> { - // The llptr given is an alloca containing the variable's value - DirectVariable { alloca: &'ll Value }, - // The llptr given is an alloca containing the start of some pointer chain - // leading to the variable's content. 
- IndirectVariable { alloca: &'ll Value, address_operations: &'a [i64] } -} - -pub enum VariableKind { - ArgumentVariable(usize /*index*/), - LocalVariable, -} /// Create any deferred debug metadata nodes -pub fn finalize(cx: &CodegenCx) { +pub fn finalize(cx: &CodegenCx<'ll, '_, &'ll Value>) { if cx.dbg_cx.is_none() { return; } @@ -202,348 +158,405 @@ pub fn finalize(cx: &CodegenCx) { }; } -/// Creates the function-specific debug context. -/// -/// Returns the FunctionDebugContext for the function which holds state needed -/// for debug info creation. The function may also return another variant of the -/// FunctionDebugContext enum which indicates why no debuginfo should be created -/// for the function. -pub fn create_function_debug_context( - cx: &CodegenCx<'ll, 'tcx>, - instance: Instance<'tcx>, - sig: ty::FnSig<'tcx>, - llfn: &'ll Value, - mir: &mir::Mir, -) -> FunctionDebugContext<'ll> { - if cx.sess().opts.debuginfo == DebugInfo::None { - return FunctionDebugContext::DebugInfoDisabled; +impl<'a, 'll: 'a, 'tcx: 'll> DebugInfoBuilderMethods<'a, 'll, 'tcx> + for Builder<'a, 'll, 'tcx, &'ll Value> +{ + fn declare_local( + &mut self, + dbg_context: &FunctionDebugContext<&'ll DISubprogram>, + variable_name: ast::Name, + variable_type: Ty<'tcx>, + scope_metadata: &'ll DIScope, + variable_access: VariableAccess<'_, &'ll Value>, + variable_kind: VariableKind, + span: Span, + ) { + assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); + let cx = self.cx(); + + let file = span_start(cx, span).file; + let file_metadata = file_metadata(cx, + &file.name, + dbg_context.get_ref(span).defining_crate); + + let loc = span_start(cx, span); + let type_metadata = type_metadata(cx, variable_type, span); + + let (argument_index, dwarf_tag) = match variable_kind { + ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), + LocalVariable => (0, DW_TAG_auto_variable) + }; + let align = cx.align_of(variable_type); + + let name = SmallCStr::new(&variable_name.as_str()); + match (variable_access, &[][..]) { + (DirectVariable { alloca }, address_operations) | + (IndirectVariable {alloca, address_operations}, _) => { + let metadata = unsafe { + llvm::LLVMRustDIBuilderCreateVariable( + DIB(cx), + dwarf_tag, + scope_metadata, + name.as_ptr(), + file_metadata, + loc.line as c_uint, + type_metadata, + cx.sess().opts.optimize != config::OptLevel::No, + DIFlags::FlagZero, + argument_index, + align.abi() as u32, + ) + }; + source_loc::set_debug_location(self, + InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); + unsafe { + let debug_loc = llvm::LLVMGetCurrentDebugLocation(self.llbuilder); + let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( + DIB(cx), + alloca, + metadata, + address_operations.as_ptr(), + address_operations.len() as c_uint, + debug_loc, + self.llbb()); + + llvm::LLVMSetInstDebugLocation(self.llbuilder, instr); + } + source_loc::set_debug_location(self, UnknownLocation); + } + } + } + + fn set_source_location( + &mut self, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, + scope: Option<&'ll DIScope>, + span: Span, + ) { + set_source_location(debug_context, &self, scope, span) + } + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) { + gdb::insert_reference_to_gdb_debug_scripts_section_global(self) } +} - if let InstanceDef::Item(def_id) = instance.def { - if cx.tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) { - return FunctionDebugContext::FunctionWithoutDebugInfo; +impl<'ll, 'tcx: 'll> 
DebugInfoMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { + + type DIScope = &'ll DIScope; + + fn create_function_debug_context( + &self, + instance: Instance<'tcx>, + sig: ty::FnSig<'tcx>, + llfn: &'ll Value, + mir: &mir::Mir, + ) -> FunctionDebugContext<&'ll DISubprogram> { + if self.sess().opts.debuginfo == DebugInfo::None { + return FunctionDebugContext::DebugInfoDisabled; + } + + if let InstanceDef::Item(def_id) = instance.def { + if self.tcx().codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) { + return FunctionDebugContext::FunctionWithoutDebugInfo; + } } - } - let span = mir.span; + let span = mir.span; - // This can be the case for functions inlined from another crate - if span.is_dummy() { - // FIXME(simulacrum): Probably can't happen; remove. - return FunctionDebugContext::FunctionWithoutDebugInfo; - } + // This can be the case for functions inlined from another crate + if span.is_dummy() { + // FIXME(simulacrum): Probably can't happen; remove. + return FunctionDebugContext::FunctionWithoutDebugInfo; + } - let def_id = instance.def_id(); - let containing_scope = get_containing_scope(cx, instance); - let loc = span_start(cx, span); - let file_metadata = file_metadata(cx, &loc.file.name, def_id.krate); + let def_id = instance.def_id(); + let containing_scope = get_containing_scope(&self, instance); + let loc = span_start(&self, span); + let file_metadata = file_metadata(&self, &loc.file.name, def_id.krate); - let function_type_metadata = unsafe { - let fn_signature = get_function_signature(cx, sig); - llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature) - }; + let function_type_metadata = unsafe { + let fn_signature = get_function_signature(&self, sig); + llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(&self), file_metadata, fn_signature) + }; - // Find the enclosing function, in case this is a closure. - let def_key = cx.tcx.def_key(def_id); - let mut name = def_key.disambiguated_data.data.to_string(); + // Find the enclosing function, in case this is a closure. + let def_key = self.tcx().def_key(def_id); + let mut name = def_key.disambiguated_data.data.to_string(); - let enclosing_fn_def_id = cx.tcx.closure_base_def_id(def_id); + let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id); - // Get_template_parameters() will append a `<...>` clause to the function - // name if necessary. - let generics = cx.tcx.generics_of(enclosing_fn_def_id); - let substs = instance.substs.truncate_to(cx.tcx, generics); - let template_parameters = get_template_parameters(cx, - &generics, - substs, - file_metadata, - &mut name); + // Get_template_parameters() will append a `<...>` clause to the function + // name if necessary. 
+ let generics = self.tcx().generics_of(enclosing_fn_def_id); + let substs = instance.substs.truncate_to(*self.tcx(), generics); + let template_parameters = get_template_parameters(&self, + &generics, + substs, + file_metadata, + &mut name); - // Get the linkage_name, which is just the symbol name - let linkage_name = mangled_name_of_instance(cx, instance); + // Get the linkage_name, which is just the symbol name + let linkage_name = mangled_name_of_instance(&self, instance); - let scope_line = span_start(cx, span).line; - let is_local_to_unit = is_node_local_to_unit(cx, def_id); + let scope_line = span_start(&self, span).line; + let is_local_to_unit = is_node_local_to_unit(&self, def_id); - let function_name = CString::new(name).unwrap(); - let linkage_name = SmallCStr::new(&linkage_name.as_str()); + let function_name = CString::new(name).unwrap(); + let linkage_name = SmallCStr::new(&linkage_name.as_str()); - let mut flags = DIFlags::FlagPrototyped; + let mut flags = DIFlags::FlagPrototyped; - let local_id = cx.tcx.hir.as_local_node_id(def_id); - if let Some((id, _, _)) = *cx.sess().entry_fn.borrow() { + let local_id = self.tcx().hir.as_local_node_id(def_id); + if let Some((id, _, _)) = *self.sess().entry_fn.borrow() { if local_id == Some(id) { flags |= DIFlags::FlagMainSubprogram; } } - if cx.layout_of(sig.output()).abi.is_uninhabited() { + if self.layout_of(sig.output()).abi.is_uninhabited() { flags |= DIFlags::FlagNoReturn; } - let fn_metadata = unsafe { - llvm::LLVMRustDIBuilderCreateFunction( - DIB(cx), - containing_scope, - function_name.as_ptr(), - linkage_name.as_ptr(), - file_metadata, - loc.line as c_uint, - function_type_metadata, - is_local_to_unit, - true, - scope_line as c_uint, - flags, - cx.sess().opts.optimize != config::OptLevel::No, - llfn, - template_parameters, - None) - }; + let fn_metadata = unsafe { + llvm::LLVMRustDIBuilderCreateFunction( + DIB(&self), + containing_scope, + function_name.as_ptr(), + linkage_name.as_ptr(), + file_metadata, + loc.line as c_uint, + function_type_metadata, + is_local_to_unit, + true, + scope_line as c_uint, + flags, + self.sess().opts.optimize != config::OptLevel::No, + llfn, + template_parameters, + None) + }; - // Initialize fn debug context (including scope map and namespace map) - let fn_debug_context = FunctionDebugContextData { - fn_metadata, - source_locations_enabled: Cell::new(false), - defining_crate: def_id.krate, - }; + // Initialize fn debug context (including scope map and namespace map) + let fn_debug_context = FunctionDebugContextData { + fn_metadata, + source_locations_enabled: Cell::new(false), + defining_crate: def_id.krate, + }; - return FunctionDebugContext::RegularContext(fn_debug_context); + return FunctionDebugContext::RegularContext(fn_debug_context); - fn get_function_signature( - cx: &CodegenCx<'ll, 'tcx>, - sig: ty::FnSig<'tcx>, - ) -> &'ll DIArray { - if cx.sess().opts.debuginfo == DebugInfo::Limited { - return create_DIArray(DIB(cx), &[]); - } + fn get_function_signature<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + sig: ty::FnSig<'tcx>, + ) -> &'ll DIArray { + if cx.sess().opts.debuginfo == DebugInfo::Limited { + return create_DIArray(DIB(cx), &[]); + } - let mut signature = Vec::with_capacity(sig.inputs().len() + 1); + let mut signature = Vec::with_capacity(sig.inputs().len() + 1); - // Return type -- llvm::DIBuilder wants this at index 0 - signature.push(match sig.output().sty { - ty::Tuple(ref tys) if tys.is_empty() => None, - _ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)) 
- }); + // Return type -- llvm::DIBuilder wants this at index 0 + signature.push(match sig.output().sty { + ty::Tuple(ref tys) if tys.is_empty() => None, + _ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP)) + }); - let inputs = if sig.abi == Abi::RustCall { - &sig.inputs()[..sig.inputs().len() - 1] - } else { - sig.inputs() - }; + let inputs = if sig.abi == Abi::RustCall { + &sig.inputs()[..sig.inputs().len() - 1] + } else { + sig.inputs() + }; - // Arguments types - if cx.sess().target.target.options.is_like_msvc { - // FIXME(#42800): - // There is a bug in MSDIA that leads to a crash when it encounters - // a fixed-size array of `u8` or something zero-sized in a - // function-type (see #40477). - // As a workaround, we replace those fixed-size arrays with a - // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would - // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, - // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. - // This transformed type is wrong, but these function types are - // already inaccurate due to ABI adjustments (see #42800). - signature.extend(inputs.iter().map(|&t| { - let t = match t.sty { - ty::Array(ct, _) - if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => { - cx.tcx.mk_imm_ptr(ct) - } - _ => t - }; - Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) - })); - } else { - signature.extend(inputs.iter().map(|t| { - Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) - })); - } + // Arguments types + if cx.sess().target.target.options.is_like_msvc { + // FIXME(#42800): + // There is a bug in MSDIA that leads to a crash when it encounters + // a fixed-size array of `u8` or something zero-sized in a + // function-type (see #40477). + // As a workaround, we replace those fixed-size arrays with a + // pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would + // appear as `fn foo(a: u8, b: *const u8)` in debuginfo, + // and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`. + // This transformed type is wrong, but these function types are + // already inaccurate due to ABI adjustments (see #42800). 
+ signature.extend(inputs.iter().map(|&t| { + let t = match t.sty { + ty::Array(ct, _) + if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => { + cx.tcx.mk_imm_ptr(ct) + } + _ => t + }; + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } else { + signature.extend(inputs.iter().map(|t| { + Some(type_metadata(cx, t, syntax_pos::DUMMY_SP)) + })); + } - if sig.abi == Abi::RustCall && !sig.inputs().is_empty() { - if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty { - signature.extend( - args.iter().map(|argument_type| { - Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)) - }) - ); + if sig.abi == Abi::RustCall && !sig.inputs().is_empty() { + if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].sty { + signature.extend( + args.iter().map(|argument_type| { + Some(type_metadata(cx, argument_type, syntax_pos::DUMMY_SP)) + }) + ); + } } + + create_DIArray(DIB(cx), &signature[..]) } - create_DIArray(DIB(cx), &signature[..]) - } + fn get_template_parameters<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + generics: &ty::Generics, + substs: &Substs<'tcx>, + file_metadata: &'ll DIFile, + name_to_append_suffix_to: &mut String, + ) -> &'ll DIArray { + if substs.types().next().is_none() { + return create_DIArray(DIB(cx), &[]); + } - fn get_template_parameters( - cx: &CodegenCx<'ll, 'tcx>, - generics: &ty::Generics, - substs: &Substs<'tcx>, - file_metadata: &'ll DIFile, - name_to_append_suffix_to: &mut String, - ) -> &'ll DIArray { - if substs.types().next().is_none() { - return create_DIArray(DIB(cx), &[]); - } + name_to_append_suffix_to.push('<'); + for (i, actual_type) in substs.types().enumerate() { + if i != 0 { + name_to_append_suffix_to.push_str(","); + } - name_to_append_suffix_to.push('<'); - for (i, actual_type) in substs.types().enumerate() { - if i != 0 { - name_to_append_suffix_to.push_str(","); + let actual_type = cx.tcx.normalize_erasing_regions( + ParamEnv::reveal_all(), actual_type + ); + // Add actual type name to <...> clause of function name + let actual_type_name = compute_debuginfo_type_name(cx, + actual_type, + true); + name_to_append_suffix_to.push_str(&actual_type_name[..]); } + name_to_append_suffix_to.push('>'); + + // Again, only create type information if full debuginfo is enabled + let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { + let names = get_parameter_names(cx, generics); + substs.iter().zip(names).filter_map(|(kind, name)| { + if let UnpackedKind::Type(ty) = kind.unpack() { + let actual_type = cx.tcx.normalize_erasing_regions( + ParamEnv::reveal_all(), ty + ); + let actual_type_metadata = + type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); + let name = SmallCStr::new(&name.as_str()); + Some(unsafe { + Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( + DIB(cx), + None, + name.as_ptr(), + actual_type_metadata, + file_metadata, + 0, + 0, + )) + }) + } else { + None + } + }).collect() + } else { + vec![] + }; + + return create_DIArray(DIB(cx), &template_params[..]); + } - let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type); - // Add actual type name to <...> clause of function name - let actual_type_name = compute_debuginfo_type_name(cx, - actual_type, - true); - name_to_append_suffix_to.push_str(&actual_type_name[..]); + fn get_parameter_names<'ll>(cx: &CodegenCx<'ll, '_, &'ll Value>, + generics: &ty::Generics) + -> Vec { + let mut names = generics.parent.map_or(vec![], |def_id| { + get_parameter_names(cx, cx.tcx.generics_of(def_id)) + }); + 
names.extend(generics.params.iter().map(|param| param.name)); + names } - name_to_append_suffix_to.push('>'); - - // Again, only create type information if full debuginfo is enabled - let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full { - let names = get_parameter_names(cx, generics); - substs.iter().zip(names).filter_map(|(kind, name)| { - if let UnpackedKind::Type(ty) = kind.unpack() { - let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty); - let actual_type_metadata = - type_metadata(cx, actual_type, syntax_pos::DUMMY_SP); - let name = SmallCStr::new(&name.as_str()); - Some(unsafe { - Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter( - DIB(cx), - None, - name.as_ptr(), - actual_type_metadata, - file_metadata, - 0, - 0, - )) - }) + + fn get_containing_scope<'ll, 'tcx>( + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, + instance: Instance<'tcx>, + ) -> &'ll DIScope { + // First, let's see if this is a method within an inherent impl. Because + // if yes, we want to make the result subroutine DIE a child of the + // subroutine's self-type. + let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { + // If the method does *not* belong to a trait, proceed + if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { + let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( + instance.substs, + ty::ParamEnv::reveal_all(), + &cx.tcx.type_of(impl_def_id), + ); + + // Only "class" methods are generally understood by LLVM, + // so avoid methods on other types (e.g. `<*mut T>::null`). + match impl_self_ty.sty { + ty::Adt(def, ..) if !def.is_box() => { + Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) + } + _ => None + } } else { + // For trait method impls we still use the "parallel namespace" + // strategy None } - }).collect() - } else { - vec![] - }; + }); + + self_type.unwrap_or_else(|| { + namespace::item_namespace(cx, DefId { + krate: instance.def_id().krate, + index: cx.tcx + .def_key(instance.def_id()) + .parent + .expect("get_containing_scope: missing parent?") + }) + }) + } + } - create_DIArray(DIB(cx), &template_params[..]) + fn create_vtable_metadata( + &self, + ty: Ty<'tcx>, + vtable: Self::Value, + ) { + metadata::create_vtable_metadata(&self, ty, vtable) } - fn get_parameter_names(cx: &CodegenCx, - generics: &ty::Generics) - -> Vec { - let mut names = generics.parent.map_or(vec![], |def_id| { - get_parameter_names(cx, cx.tcx.generics_of(def_id)) - }); - names.extend(generics.params.iter().map(|param| param.name)); - names + fn create_mir_scopes( + &self, + mir: &mir::Mir, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, + ) -> IndexVec> { + create_scope_map::create_mir_scopes(&self, mir, debug_context) } - fn get_containing_scope( - cx: &CodegenCx<'ll, 'tcx>, - instance: Instance<'tcx>, - ) -> &'ll DIScope { - // First, let's see if this is a method within an inherent impl. Because - // if yes, we want to make the result subroutine DIE a child of the - // subroutine's self-type. 
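With `CodegenCx` implementing `DebugInfoMethods` and `Builder` implementing `DebugInfoBuilderMethods`, per-function debuginfo setup can be driven from backend-independent code purely through these traits. An illustrative fragment, not literal rustc_codegen_ssa code, assuming `cx`, `instance`, `sig`, `llfn` and `mir` are in scope:

```rust
// Create the per-function context, then the scope map derived from the MIR.
let debug_context = cx.create_function_debug_context(instance, sig, llfn, mir);
let scopes = cx.create_mir_scopes(mir, &debug_context);
// Per-statement locations and variable declarations then go through the
// builder-side trait (set_source_location, declare_local), and the crate-wide
// metadata is flushed once at the end via cx.debuginfo_finalize().
```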
- let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| { - // If the method does *not* belong to a trait, proceed - if cx.tcx.trait_id_of_impl(impl_def_id).is_none() { - let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions( - instance.substs, - ty::ParamEnv::reveal_all(), - &cx.tcx.type_of(impl_def_id), - ); + fn extend_scope_to_file( + &self, + scope_metadata: &'ll DIScope, + file: &syntax_pos::SourceFile, + defining_crate: CrateNum, + ) -> &'ll DILexicalBlock { + metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate) + } - // Only "class" methods are generally understood by LLVM, - // so avoid methods on other types (e.g. `<*mut T>::null`). - match impl_self_ty.sty { - ty::Adt(def, ..) if !def.is_box() => { - Some(type_metadata(cx, impl_self_ty, syntax_pos::DUMMY_SP)) - } - _ => None - } - } else { - // For trait method impls we still use the "parallel namespace" - // strategy - None - } - }); - - self_type.unwrap_or_else(|| { - namespace::item_namespace(cx, DefId { - krate: instance.def_id().krate, - index: cx.tcx - .def_key(instance.def_id()) - .parent - .expect("get_containing_scope: missing parent?") - }) - }) + fn debuginfo_finalize(&self) { + finalize(self) } -} -pub fn declare_local( - bx: &Builder<'a, 'll, 'tcx>, - dbg_context: &FunctionDebugContext<'ll>, - variable_name: ast::Name, - variable_type: Ty<'tcx>, - scope_metadata: &'ll DIScope, - variable_access: VariableAccess<'_, 'll>, - variable_kind: VariableKind, - span: Span, -) { - assert!(!dbg_context.get_ref(span).source_locations_enabled.get()); - let cx = bx.cx; - - let file = span_start(cx, span).file; - let file_metadata = file_metadata(cx, - &file.name, - dbg_context.get_ref(span).defining_crate); - - let loc = span_start(cx, span); - let type_metadata = type_metadata(cx, variable_type, span); - - let (argument_index, dwarf_tag) = match variable_kind { - ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable), - LocalVariable => (0, DW_TAG_auto_variable) - }; - let align = cx.align_of(variable_type); - - let name = SmallCStr::new(&variable_name.as_str()); - match (variable_access, &[][..]) { - (DirectVariable { alloca }, address_operations) | - (IndirectVariable {alloca, address_operations}, _) => { - let metadata = unsafe { - llvm::LLVMRustDIBuilderCreateVariable( - DIB(cx), - dwarf_tag, - scope_metadata, - name.as_ptr(), - file_metadata, - loc.line as c_uint, - type_metadata, - cx.sess().opts.optimize != config::OptLevel::No, - DIFlags::FlagZero, - argument_index, - align.abi() as u32, - ) - }; - source_loc::set_debug_location(bx, - InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize())); - unsafe { - let debug_loc = llvm::LLVMGetCurrentDebugLocation(bx.llbuilder); - let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd( - DIB(cx), - alloca, - metadata, - address_operations.as_ptr(), - address_operations.len() as c_uint, - debug_loc, - bx.llbb()); - - llvm::LLVMSetInstDebugLocation(bx.llbuilder, instr); - } - source_loc::set_debug_location(bx, UnknownLocation); + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4] { + unsafe { + [llvm::LLVMRustDIBuilderCreateOpDeref(), + llvm::LLVMRustDIBuilderCreateOpPlusUconst(), + byte_offset_of_var_in_env as i64, + llvm::LLVMRustDIBuilderCreateOpDeref()] } } } diff --git a/src/librustc_codegen_llvm/debuginfo/namespace.rs b/src/librustc_codegen_llvm/debuginfo/namespace.rs index 06f8a4b131b60..173a297f52eba 100644 --- 
a/src/librustc_codegen_llvm/debuginfo/namespace.rs +++ b/src/librustc_codegen_llvm/debuginfo/namespace.rs @@ -20,18 +20,19 @@ use llvm::debuginfo::DIScope; use rustc::hir::def_id::DefId; use rustc::hir::map::DefPathData; use common::CodegenCx; +use value::Value; use rustc_data_structures::small_c_str::SmallCStr; pub fn mangled_name_of_instance<'a, 'tcx>( - cx: &CodegenCx<'a, 'tcx>, + cx: &CodegenCx<'a, 'tcx, &'a Value>, instance: Instance<'tcx>, ) -> ty::SymbolName { let tcx = cx.tcx; tcx.symbol_name(instance) } -pub fn item_namespace(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { +pub fn item_namespace(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> &'ll DIScope { if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) { return scope; } diff --git a/src/librustc_codegen_llvm/debuginfo/source_loc.rs b/src/librustc_codegen_llvm/debuginfo/source_loc.rs index 60ebcb888166f..166636b901ca4 100644 --- a/src/librustc_codegen_llvm/debuginfo/source_loc.rs +++ b/src/librustc_codegen_llvm/debuginfo/source_loc.rs @@ -12,21 +12,23 @@ use self::InternalDebugLocation::*; use super::utils::{debug_context, span_start}; use super::metadata::UNKNOWN_COLUMN_NUMBER; -use super::FunctionDebugContext; +use rustc_codegen_ssa::debuginfo::FunctionDebugContext; use llvm; -use llvm::debuginfo::DIScope; +use llvm::debuginfo::{DIScope, DISubprogram}; use builder::Builder; +use rustc_codegen_ssa::interfaces::*; use libc::c_uint; use syntax_pos::{Span, Pos}; +use value::Value; /// Sets the current debug location at the beginning of the span. /// /// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...). pub fn set_source_location( - debug_context: &FunctionDebugContext<'ll>, - bx: &Builder<'_, 'll, '_>, + debug_context: &FunctionDebugContext<&'ll DISubprogram>, + bx: &Builder<'_, 'll, '_, &'ll Value>, scope: Option<&'ll DIScope>, span: Span, ) { @@ -40,8 +42,8 @@ pub fn set_source_location( }; let dbg_loc = if function_debug_context.source_locations_enabled.get() { - debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span)); - let loc = span_start(bx.cx, span); + debug!("set_source_location: {}", bx.cx().sess().source_map().span_to_string(span)); + let loc = span_start(bx.cx(), span); InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize()) } else { UnknownLocation @@ -49,18 +51,6 @@ pub fn set_source_location( set_debug_location(bx, dbg_loc); } -/// Enables emitting source locations for the given functions. -/// -/// Since we don't want source locations to be emitted for the function prelude, -/// they are disabled when beginning to codegen a new function. This functions -/// switches source location emitting on and must therefore be called before the -/// first real statement/expression of the function is codegened. -pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext<'ll>) { - if let FunctionDebugContext::RegularContext(ref data) = *dbg_context { - data.source_locations_enabled.set(true); - } -} - #[derive(Copy, Clone, PartialEq)] pub enum InternalDebugLocation<'ll> { @@ -78,13 +68,16 @@ impl InternalDebugLocation<'ll> { } } -pub fn set_debug_location(bx: &Builder<'_, 'll, '_>, debug_location: InternalDebugLocation<'ll>) { +pub fn set_debug_location( + bx: &Builder<'_, 'll, '_, &'ll Value>, + debug_location: InternalDebugLocation<'ll> +) { let metadata_node = match debug_location { KnownLocation { scope, line, col } => { // For MSVC, set the column number to zero. // Otherwise, emit it. This mimics clang behaviour. 
// See discussion in https://github.com/rust-lang/rust/issues/42921 - let col_used = if bx.cx.sess().target.target.options.is_like_msvc { + let col_used = if bx.cx().sess().target.target.options.is_like_msvc { UNKNOWN_COLUMN_NUMBER } else { col as c_uint @@ -93,7 +86,7 @@ pub fn set_debug_location(bx: &Builder<'_, 'll, '_>, debug_location: InternalDeb unsafe { Some(llvm::LLVMRustDIBuilderCreateDebugLocation( - debug_context(bx.cx).llcontext, + debug_context(bx.cx()).llcontext, line as c_uint, col_used, scope, diff --git a/src/librustc_codegen_llvm/debuginfo/type_names.rs b/src/librustc_codegen_llvm/debuginfo/type_names.rs index f5abb527e430f..92ca4aac9f9c4 100644 --- a/src/librustc_codegen_llvm/debuginfo/type_names.rs +++ b/src/librustc_codegen_llvm/debuginfo/type_names.rs @@ -14,6 +14,8 @@ use common::CodegenCx; use rustc::hir::def_id::DefId; use rustc::ty::subst::Substs; use rustc::ty::{self, Ty}; +use value::Value; +use rustc_codegen_ssa::interfaces::*; use rustc::hir; @@ -21,7 +23,7 @@ use rustc::hir; // any caching, i.e. calling the function twice with the same type will also do // the work twice. The `qualified` parameter only affects the first level of the // type name, further levels (i.e. type parameters) are always fully qualified. -pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, t: Ty<'tcx>, qualified: bool) -> String { @@ -32,7 +34,7 @@ pub fn compute_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, // Pushes the name of the type as it should be stored in debuginfo on the // `output` String. See also compute_debuginfo_type_name(). -pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, t: Ty<'tcx>, qualified: bool, output: &mut String) { @@ -181,7 +183,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } } - fn push_item_name(cx: &CodegenCx, + fn push_item_name(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId, qualified: bool, output: &mut String) { @@ -201,7 +203,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, // reconstructed for items from non-local crates. For local crates, this // would be possible but with inlining and LTO we have to use the least // common denominator - otherwise we would run into conflicts. - fn push_type_params<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, + fn push_type_params<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, substs: &Substs<'tcx>, output: &mut String) { if substs.types().next().is_none() { diff --git a/src/librustc_codegen_llvm/debuginfo/utils.rs b/src/librustc_codegen_llvm/debuginfo/utils.rs index 19bc4ac39d308..e69ab0f102cae 100644 --- a/src/librustc_codegen_llvm/debuginfo/utils.rs +++ b/src/librustc_codegen_llvm/debuginfo/utils.rs @@ -19,10 +19,12 @@ use rustc::ty::DefIdTree; use llvm; use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray}; use common::{CodegenCx}; +use value::Value; +use rustc_codegen_ssa::interfaces::*; use syntax_pos::{self, Span}; -pub fn is_node_local_to_unit(cx: &CodegenCx, def_id: DefId) -> bool +pub fn is_node_local_to_unit(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> bool { // The is_local_to_unit flag indicates whether a function is local to the // current compilation unit (i.e. if it is *static* in the C-sense). 
The @@ -46,22 +48,22 @@ pub fn create_DIArray( } /// Return syntax_pos::Loc corresponding to the beginning of the span -pub fn span_start(cx: &CodegenCx, span: Span) -> syntax_pos::Loc { +pub fn span_start(cx: &CodegenCx<'ll, '_, &'ll Value>, span: Span) -> syntax_pos::Loc { cx.sess().source_map().lookup_char_pos(span.lo()) } #[inline] -pub fn debug_context(cx: &'a CodegenCx<'ll, 'tcx>) -> &'a CrateDebugContext<'ll, 'tcx> { +pub fn debug_context(cx: &'a CodegenCx<'ll, 'tcx, &'ll Value>) -> &'a CrateDebugContext<'ll, 'tcx> { cx.dbg_cx.as_ref().unwrap() } #[inline] #[allow(non_snake_case)] -pub fn DIB(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> { +pub fn DIB(cx: &'a CodegenCx<'ll, '_, &'ll Value>) -> &'a DIBuilder<'ll> { cx.dbg_cx.as_ref().unwrap().builder } -pub fn get_namespace_for_item(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope { +pub fn get_namespace_for_item(cx: &CodegenCx<'ll, '_, &'ll Value>, def_id: DefId) -> &'ll DIScope { item_namespace(cx, cx.tcx.parent(def_id) .expect("get_namespace_for_item: missing parent?")) } diff --git a/src/librustc_codegen_llvm/declare.rs b/src/librustc_codegen_llvm/declare.rs index 26969e24f0883..25c691b1c5e40 100644 --- a/src/librustc_codegen_llvm/declare.rs +++ b/src/librustc_codegen_llvm/declare.rs @@ -30,30 +30,17 @@ use rustc_target::spec::PanicStrategy; use abi::{Abi, FnType, FnTypeExt}; use attributes; use context::CodegenCx; -use common; +use rustc_codegen_ssa::common; use type_::Type; +use rustc_codegen_ssa::interfaces::*; use value::Value; - -/// Declare a global value. -/// -/// If there’s a value with the same name already declared, the function will -/// return its Value instead. -pub fn declare_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> &'ll Value { - debug!("declare_global(name={:?})", name); - let namebuf = SmallCStr::new(name); - unsafe { - llvm::LLVMRustGetOrInsertGlobal(cx.llmod, namebuf.as_ptr(), ty) - } -} - - /// Declare a function. /// /// If there’s a value with the same name already declared, the function will /// update the declaration and return existing Value instead. fn declare_raw_fn( - cx: &CodegenCx<'ll, '_>, + cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str, callconv: llvm::CallConv, ty: &'ll Type, @@ -109,124 +96,109 @@ fn declare_raw_fn( llfn } +impl DeclareMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { -/// Declare a C ABI function. -/// -/// Only use this for foreign function ABIs and glue. For Rust functions use -/// `declare_fn` instead. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing Value instead. -pub fn declare_cfn(cx: &CodegenCx<'ll, '_>, name: &str, fn_type: &'ll Type) -> &'ll Value { - declare_raw_fn(cx, name, llvm::CCallConv, fn_type) -} - - -/// Declare a Rust function. -/// -/// If there’s a value with the same name already declared, the function will -/// update the declaration and return existing Value instead. 
-pub fn declare_fn( - cx: &CodegenCx<'ll, 'tcx>, - name: &str, - fn_type: Ty<'tcx>, -) -> &'ll Value { - debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); - let sig = common::ty_fn_sig(cx, fn_type); - let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - debug!("declare_rust_fn (after region erasure) sig={:?}", sig); - - let fty = FnType::new(cx, sig, &[]); - let llfn = declare_raw_fn(cx, name, fty.llvm_cconv(), fty.llvm_type(cx)); - - if cx.layout_of(sig.output()).abi.is_uninhabited() { - llvm::Attribute::NoReturn.apply_llfn(Function, llfn); + fn declare_global( + &self, + name: &str, ty: &'ll Type + ) -> &'ll Value { + debug!("declare_global(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { + llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty) + } } - if sig.abi != Abi::Rust && sig.abi != Abi::RustCall { - attributes::unwind(llfn, false); + fn declare_cfn( + &self, + name: &str, + fn_type: &'ll Type + ) -> &'ll Value { + declare_raw_fn(self, name, llvm::CCallConv, fn_type) } - fty.apply_attrs_llfn(llfn); + fn declare_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> &'ll Value { + debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type); + let sig = common::ty_fn_sig(self, fn_type); + let sig = self.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + debug!("declare_rust_fn (after region erasure) sig={:?}", sig); + + let fty = FnType::new(self, sig, &[]); + let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self)); + + if self.layout_of(sig.output()).abi.is_uninhabited() { + llvm::Attribute::NoReturn.apply_llfn(Function, llfn); + } - llfn -} + if sig.abi != Abi::Rust && sig.abi != Abi::RustCall { + attributes::unwind(llfn, false); + } + fty.apply_attrs_llfn(llfn); -/// Declare a global with an intention to define it. -/// -/// Use this function when you intend to define a global. This function will -/// return None if the name already has a definition associated with it. In that -/// case an error should be reported to the user, because it usually happens due -/// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). -pub fn define_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> Option<&'ll Value> { - if get_defined_value(cx, name).is_some() { - None - } else { - Some(declare_global(cx, name, ty)) + llfn } -} -/// Declare a private global -/// -/// Use this function when you intend to define a global without a name. -pub fn define_private_global(cx: &CodegenCx<'ll, '_>, ty: &'ll Type) -> &'ll Value { - unsafe { - llvm::LLVMRustInsertPrivateGlobal(cx.llmod, ty) + fn define_global( + &self, + name: &str, + ty: &'ll Type + ) -> Option<&'ll Value> { + if self.get_defined_value(name).is_some() { + None + } else { + Some(self.declare_global(name, ty)) + } } -} -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return panic if the name already has a definition associated with it. This -/// can happen with #[no_mangle] or #[export_name], for example. 
-pub fn define_fn( - cx: &CodegenCx<'ll, 'tcx>, - name: &str, - fn_type: Ty<'tcx>, -) -> &'ll Value { - if get_defined_value(cx, name).is_some() { - cx.sess().fatal(&format!("symbol `{}` already defined", name)) - } else { - declare_fn(cx, name, fn_type) + fn define_private_global(&self, ty: &'ll Type) -> &'ll Value { + unsafe { + llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) + } } -} -/// Declare a Rust function with an intention to define it. -/// -/// Use this function when you intend to define a function. This function will -/// return panic if the name already has a definition associated with it. This -/// can happen with #[no_mangle] or #[export_name], for example. -pub fn define_internal_fn( - cx: &CodegenCx<'ll, 'tcx>, - name: &str, - fn_type: Ty<'tcx>, -) -> &'ll Value { - let llfn = define_fn(cx, name, fn_type); - unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; - llfn -} + fn define_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> &'ll Value { + if self.get_defined_value(name).is_some() { + self.sess().fatal(&format!("symbol `{}` already defined", name)) + } else { + self.declare_fn(name, fn_type) + } + } + fn define_internal_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> &'ll Value { + let llfn = self.define_fn(name, fn_type); + unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) }; + llfn + } -/// Get declared value by name. -pub fn get_declared_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { - debug!("get_declared_value(name={:?})", name); - let namebuf = SmallCStr::new(name); - unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) } -} + fn get_declared_value(&self, name: &str) -> Option<&'ll Value> { + debug!("get_declared_value(name={:?})", name); + let namebuf = SmallCStr::new(name); + unsafe { llvm::LLVMRustGetNamedValue(self.llmod, namebuf.as_ptr()) } + } -/// Get defined or externally defined (AvailableExternally linkage) value by -/// name. -pub fn get_defined_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { - get_declared_value(cx, name).and_then(|val|{ - let declaration = unsafe { - llvm::LLVMIsDeclaration(val) != 0 - }; - if !declaration { - Some(val) - } else { - None - } - }) + fn get_defined_value(&self, name: &str) -> Option<&'ll Value> { + self.get_declared_value(name).and_then(|val|{ + let declaration = unsafe { + llvm::LLVMIsDeclaration(val) != 0 + }; + if !declaration { + Some(val) + } else { + None + } + }) + } } diff --git a/src/librustc_codegen_llvm/diagnostics.rs b/src/librustc_codegen_llvm/diagnostics.rs index 5721938c9c0a7..5ceb53a40b407 100644 --- a/src/librustc_codegen_llvm/diagnostics.rs +++ b/src/librustc_codegen_llvm/diagnostics.rs @@ -45,39 +45,6 @@ extern "platform-intrinsic" { unsafe { simd_add(i32x2(0, 0), i32x2(1, 2)); } // ok! ``` -"##, - -E0668: r##" -Malformed inline assembly rejected by LLVM. - -LLVM checks the validity of the constraints and the assembly string passed to -it. This error implies that LLVM seems something wrong with the inline -assembly call. - -In particular, it can happen if you forgot the closing bracket of a register -constraint (see issue #51430): -```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) -#![feature(asm)] - -fn main() { - let rax: u64; - unsafe { - asm!("" :"={rax"(rax)); - println!("Accumulator is: {}", rax); - } -} -``` -"##, - -E0669: r##" -Cannot convert inline assembly operand to a single LLVM value. 
- -This error usually happens when trying to pass in a value to an input inline -assembly operand that is actually a pair of values. In particular, this can -happen when trying to pass in a slice, for instance a `&str`. In Rust, these -values are represented internally as a pair of values, the pointer and its -length. When passed as an input operand, this pair of values can not be -coerced into a register and thus we must fail with an error. -"##, +"## } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index 272196afa6f92..1ae4093c6a1e3 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -12,31 +12,33 @@ use attributes; use intrinsics::{self, Intrinsic}; -use llvm::{self, TypeKind}; +use llvm; use abi::{Abi, FnType, LlvmType, PassMode}; -use mir::place::PlaceRef; -use mir::operand::{OperandRef, OperandValue}; -use base::*; -use common::*; -use declare; -use glue; +use rustc_codegen_ssa::mir::place::PlaceRef; +use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; +use rustc_codegen_ssa::glue; +use rustc_codegen_ssa::base::{to_immediate, wants_msvc_seh, compare_simd_types}; +use context::CodegenCx; use type_::Type; use type_of::LayoutLlvmExt; use rustc::ty::{self, Ty}; use rustc::ty::layout::{HasDataLayout, LayoutOf}; +use rustc_codegen_ssa::common::TypeKind; use rustc::hir; use syntax::ast; use syntax::symbol::Symbol; use builder::Builder; use value::Value; +use rustc_codegen_ssa::interfaces::*; + use rustc::session::Session; use syntax_pos::Span; use std::cmp::Ordering; use std::iter; -fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> { +fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_, &'ll Value>, name: &str) -> Option<&'ll Value> { let llvm_name = match name { "sqrtf32" => "llvm.sqrt.f32", "sqrtf64" => "llvm.sqrt.f64", @@ -83,603 +85,643 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Valu Some(cx.get_intrinsic(&llvm_name)) } -/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, -/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, -/// add them to librustc_codegen_llvm/context.rs -pub fn codegen_intrinsic_call( - bx: &Builder<'a, 'll, 'tcx>, - callee_ty: Ty<'tcx>, - fn_ty: &FnType<'tcx, Ty<'tcx>>, - args: &[OperandRef<'ll, 'tcx>], - llresult: &'ll Value, - span: Span, -) { - let cx = bx.cx; - let tcx = cx.tcx; +impl IntrinsicCallMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx, &'ll Value> { - let (def_id, substs) = match callee_ty.sty { - ty::FnDef(def_id, substs) => (def_id, substs), - _ => bug!("expected fn item type, found {}", callee_ty) - }; + fn codegen_intrinsic_call( + &mut self, + callee_ty: Ty<'tcx>, + fn_ty: &FnType<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, &'ll Value>], + llresult: &'ll Value, + span: Span, + ) { + let cx = self.cx(); + let tcx = cx.tcx; - let sig = callee_ty.fn_sig(tcx); - let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); - let arg_tys = sig.inputs(); - let ret_ty = sig.output(); - let name = &*tcx.item_name(def_id).as_str(); - - let llret_ty = cx.layout_of(ret_ty).llvm_type(cx); - let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align); - - let simple = get_simple_intrinsic(cx, name); - let llval = match name { - _ if simple.is_some() => { - bx.call(simple.unwrap(), - &args.iter().map(|arg| arg.immediate()).collect::>(), - None) - } - "unreachable" => { - return; - }, - "likely" => 
{ - let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None) - } - "unlikely" => { - let expect = cx.get_intrinsic(&("llvm.expect.i1")); - bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None) - } - "try" => { - try_intrinsic(bx, cx, - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - llresult); - return; - } - "breakpoint" => { - let llfn = cx.get_intrinsic(&("llvm.debugtrap")); - bx.call(llfn, &[], None) - } - "size_of" => { - let tp_ty = substs.type_at(0); - C_usize(cx, cx.size_of(tp_ty).bytes()) - } - "size_of_val" => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (llsize, _) = - glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); - llsize - } else { - C_usize(cx, cx.size_of(tp_ty).bytes()) + let (def_id, substs) = match callee_ty.sty { + ty::FnDef(def_id, substs) => (def_id, substs), + _ => bug!("expected fn item type, found {}", callee_ty) + }; + + let sig = callee_ty.fn_sig(tcx); + let sig = tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + let arg_tys = sig.inputs(); + let ret_ty = sig.output(); + let name = &*tcx.item_name(def_id).as_str(); + + let llret_ty = cx.layout_of(ret_ty).llvm_type(cx); + let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align); + + let simple = get_simple_intrinsic(cx, name); + let llval = match name { + _ if simple.is_some() => { + self.call(simple.unwrap(), + &args.iter().map(|arg| arg.immediate()).collect::>(), + None) } - } - "min_align_of" => { - let tp_ty = substs.type_at(0); - C_usize(cx, cx.align_of(tp_ty).abi()) - } - "min_align_of_val" => { - let tp_ty = substs.type_at(0); - if let OperandValue::Pair(_, meta) = args[0].val { - let (_, llalign) = - glue::size_and_align_of_dst(bx, tp_ty, Some(meta)); - llalign - } else { - C_usize(cx, cx.align_of(tp_ty).abi()) + "unreachable" => { + return; + }, + "likely" => { + let expect = cx.get_intrinsic(&("llvm.expect.i1")); + self.call(expect, &[args[0].immediate(), cx.const_bool(true)], None) } - } - "pref_align_of" => { - let tp_ty = substs.type_at(0); - C_usize(cx, cx.align_of(tp_ty).pref()) - } - "type_name" => { - let tp_ty = substs.type_at(0); - let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); - C_str_slice(cx, ty_name) - } - "type_id" => { - C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0))) - } - "init" => { - let ty = substs.type_at(0); - if !cx.layout_of(ty).is_zst() { - // Just zero out the stack slot. - // If we store a zero constant, LLVM will drown in vreg allocation for large data - // structures, and the generated code will be awful. (A telltale sign of this is - // large quantities of `mov [byte ptr foo],0` in the generated code.) 
- memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1)); + "unlikely" => { + let expect = cx.get_intrinsic(&("llvm.expect.i1")); + self.call(expect, &[args[0].immediate(), cx.const_bool(false)], None) } - return; - } - // Effectively no-ops - "uninit" => { - return; - } - "needs_drop" => { - let tp_ty = substs.type_at(0); - - C_bool(cx, bx.cx.type_needs_drop(tp_ty)) - } - "offset" => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - bx.inbounds_gep(ptr, &[offset]) - } - "arith_offset" => { - let ptr = args[0].immediate(); - let offset = args[1].immediate(); - bx.gep(ptr, &[offset]) - } - - "copy_nonoverlapping" => { - copy_intrinsic(bx, false, false, substs.type_at(0), - args[1].immediate(), args[0].immediate(), args[2].immediate()) - } - "copy" => { - copy_intrinsic(bx, true, false, substs.type_at(0), - args[1].immediate(), args[0].immediate(), args[2].immediate()) - } - "write_bytes" => { - memset_intrinsic(bx, false, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - - "volatile_copy_nonoverlapping_memory" => { - copy_intrinsic(bx, false, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_copy_memory" => { - copy_intrinsic(bx, true, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_set_memory" => { - memset_intrinsic(bx, true, substs.type_at(0), - args[0].immediate(), args[1].immediate(), args[2].immediate()) - } - "volatile_load" | "unaligned_volatile_load" => { - let tp_ty = substs.type_at(0); - let mut ptr = args[0].immediate(); - if let PassMode::Cast(ty) = fn_ty.ret.mode { - ptr = bx.pointercast(ptr, ty.llvm_type(cx).ptr_to()); + "try" => { + try_intrinsic(self, cx, + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + llresult); + return; } - let load = bx.volatile_load(ptr); - let align = if name == "unaligned_volatile_load" { - 1 - } else { - cx.align_of(tp_ty).abi() as u32 - }; - unsafe { - llvm::LLVMSetAlignment(load, align); + "breakpoint" => { + let llfn = cx.get_intrinsic(&("llvm.debugtrap")); + self.call(llfn, &[], None) } - to_immediate(bx, load, cx.layout_of(tp_ty)) - }, - "volatile_store" => { - let dst = args[0].deref(bx.cx); - args[1].val.volatile_store(bx, dst); - return; - }, - "unaligned_volatile_store" => { - let dst = args[0].deref(bx.cx); - args[1].val.unaligned_volatile_store(bx, dst); - return; - }, - "prefetch_read_data" | "prefetch_write_data" | - "prefetch_read_instruction" | "prefetch_write_instruction" => { - let expect = cx.get_intrinsic(&("llvm.prefetch")); - let (rw, cache_type) = match name { - "prefetch_read_data" => (0, 1), - "prefetch_write_data" => (1, 1), - "prefetch_read_instruction" => (0, 0), - "prefetch_write_instruction" => (1, 0), - _ => bug!() - }; - bx.call(expect, &[ - args[0].immediate(), - C_i32(cx, rw), - args[1].immediate(), - C_i32(cx, cache_type) - ], None) - }, - "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | - "bitreverse" | "add_with_overflow" | "sub_with_overflow" | - "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | - "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => { - let ty = arg_tys[0]; - match int_type_width_signed(ty, cx) { - Some((width, signed)) => - match name { - "ctlz" | "cttz" => { - let y = C_bool(bx.cx, false); - let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); - bx.call(llfn, &[args[0].immediate(), 
y], None) - } - "ctlz_nonzero" | "cttz_nonzero" => { - let y = C_bool(bx.cx, true); - let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); - let llfn = cx.get_intrinsic(llvm_name); - bx.call(llfn, &[args[0].immediate(), y], None) - } - "ctpop" => bx.call(cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), - &[args[0].immediate()], None), - "bswap" => { - if width == 8 { - args[0].immediate() // byte swap a u8/i8 is just a no-op - } else { - bx.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)), - &[args[0].immediate()], None) - } - } - "bitreverse" => { - bx.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)), - &[args[0].immediate()], None) - } - "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { - let intrinsic = format!("llvm.{}{}.with.overflow.i{}", - if signed { 's' } else { 'u' }, - &name[..3], width); - let llfn = bx.cx.get_intrinsic(&intrinsic); - - // Convert `i1` to a `bool`, and write it to the out parameter - let pair = bx.call(llfn, &[ - args[0].immediate(), - args[1].immediate() - ], None); - let val = bx.extract_value(pair, 0); - let overflow = bx.zext(bx.extract_value(pair, 1), Type::bool(cx)); - - let dest = result.project_field(bx, 0); - bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); - bx.store(overflow, dest.llval, dest.align); - - return; - }, - "overflowing_add" => bx.add(args[0].immediate(), args[1].immediate()), - "overflowing_sub" => bx.sub(args[0].immediate(), args[1].immediate()), - "overflowing_mul" => bx.mul(args[0].immediate(), args[1].immediate()), - "exact_div" => - if signed { - bx.exactsdiv(args[0].immediate(), args[1].immediate()) - } else { - bx.exactudiv(args[0].immediate(), args[1].immediate()) - }, - "unchecked_div" => - if signed { - bx.sdiv(args[0].immediate(), args[1].immediate()) - } else { - bx.udiv(args[0].immediate(), args[1].immediate()) - }, - "unchecked_rem" => - if signed { - bx.srem(args[0].immediate(), args[1].immediate()) - } else { - bx.urem(args[0].immediate(), args[1].immediate()) - }, - "unchecked_shl" => bx.shl(args[0].immediate(), args[1].immediate()), - "unchecked_shr" => - if signed { - bx.ashr(args[0].immediate(), args[1].immediate()) - } else { - bx.lshr(args[0].immediate(), args[1].immediate()) - }, - _ => bug!(), - }, - None => { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, ty)); - return; + "size_of" => { + let tp_ty = substs.type_at(0); + cx.const_usize(cx.size_of(tp_ty).bytes()) + } + "size_of_val" => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (llsize, _) = + glue::size_and_align_of_dst(self, tp_ty, Some(meta)); + llsize + } else { + cx.const_usize(cx.size_of(tp_ty).bytes()) } } - }, - "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { - let sty = &arg_tys[0].sty; - match float_type_width(sty) { - Some(_width) => - match name { - "fadd_fast" => bx.fadd_fast(args[0].immediate(), args[1].immediate()), - "fsub_fast" => bx.fsub_fast(args[0].immediate(), args[1].immediate()), - "fmul_fast" => bx.fmul_fast(args[0].immediate(), args[1].immediate()), - "fdiv_fast" => bx.fdiv_fast(args[0].immediate(), args[1].immediate()), - "frem_fast" => bx.frem_fast(args[0].immediate(), args[1].immediate()), - _ => bug!(), - }, - None => { - span_invalid_monomorphization_error( - tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic float type, 
found `{}`", name, sty)); - return; + "min_align_of" => { + let tp_ty = substs.type_at(0); + cx.const_usize(cx.align_of(tp_ty).abi()) + } + "min_align_of_val" => { + let tp_ty = substs.type_at(0); + if let OperandValue::Pair(_, meta) = args[0].val { + let (_, llalign) = + glue::size_and_align_of_dst(self, tp_ty, Some(meta)); + llalign + } else { + cx.const_usize(cx.align_of(tp_ty).abi()) } } + "pref_align_of" => { + let tp_ty = substs.type_at(0); + cx.const_usize(cx.align_of(tp_ty).pref()) + } + "type_name" => { + let tp_ty = substs.type_at(0); + let ty_name = Symbol::intern(&tp_ty.to_string()).as_str(); + cx.const_str_slice(ty_name) + } + "type_id" => { + cx.const_u64(cx.tcx.type_id_hash(substs.type_at(0))) + } + "init" => { + let ty = substs.type_at(0); + if !cx.layout_of(ty).is_zst() { + // Just zero out the stack slot. + // If we store a zero constant, LLVM will drown in vreg allocation for large + // data structures, and the generated code will be awful. (A telltale sign of + // this is large quantities of `mov [byte ptr foo],0` in the generated code.) + memset_intrinsic( + self, + false, + ty, + llresult, + cx.const_u8(0), + cx.const_usize(1) + ); + } + return; + } + // Effectively no-ops + "uninit" => { + return; + } + "needs_drop" => { + let tp_ty = substs.type_at(0); - }, - - "discriminant_value" => { - args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty) - } - - name if name.starts_with("simd_") => { - match generic_simd_intrinsic(bx, name, - callee_ty, - args, - ret_ty, llret_ty, - span) { - Ok(llval) => llval, - Err(()) => return + cx.const_bool(cx.type_needs_drop(tp_ty)) + } + "offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + &self.inbounds_gep(ptr, &[offset]) + } + "arith_offset" => { + let ptr = args[0].immediate(); + let offset = args[1].immediate(); + &self.gep(ptr, &[offset]) } - } - // This requires that atomic intrinsics follow a specific naming pattern: - // "atomic_[_]", and no ordering means SeqCst - name if name.starts_with("atomic_") => { - use llvm::AtomicOrdering::*; - - let split: Vec<&str> = name.split('_').collect(); - - let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; - let (order, failorder) = match split.len() { - 2 => (SequentiallyConsistent, SequentiallyConsistent), - 3 => match split[2] { - "unordered" => (Unordered, Unordered), - "relaxed" => (Monotonic, Monotonic), - "acq" => (Acquire, Acquire), - "rel" => (Release, Monotonic), - "acqrel" => (AcquireRelease, Acquire), - "failrelaxed" if is_cxchg => - (SequentiallyConsistent, Monotonic), - "failacq" if is_cxchg => - (SequentiallyConsistent, Acquire), - _ => cx.sess().fatal("unknown ordering in atomic intrinsic") - }, - 4 => match (split[2], split[3]) { - ("acq", "failrelaxed") if is_cxchg => - (Acquire, Monotonic), - ("acqrel", "failrelaxed") if is_cxchg => - (AcquireRelease, Monotonic), - _ => cx.sess().fatal("unknown ordering in atomic intrinsic") - }, - _ => cx.sess().fatal("Atomic intrinsic not in correct format"), - }; - let invalid_monomorphization = |ty| { - span_invalid_monomorphization_error(tcx.sess, span, - &format!("invalid monomorphization of `{}` intrinsic: \ - expected basic integer type, found `{}`", name, ty)); - }; + "copy_nonoverlapping" => { + copy_intrinsic(self, false, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) + } + "copy" => { + copy_intrinsic(self, true, false, substs.type_at(0), + args[1].immediate(), args[0].immediate(), args[2].immediate()) + } + "write_bytes" => { + 
memset_intrinsic(self, false, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } - match split[1] { - "cxchg" | "cxchgweak" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False }; - let pair = bx.atomic_cmpxchg( - args[0].immediate(), - args[1].immediate(), - args[2].immediate(), - order, - failorder, - weak); - let val = bx.extract_value(pair, 0); - let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx)); - - let dest = result.project_field(bx, 0); - bx.store(val, dest.llval, dest.align); - let dest = result.project_field(bx, 1); - bx.store(success, dest.llval, dest.align); - return; - } else { - return invalid_monomorphization(ty); - } + "volatile_copy_nonoverlapping_memory" => { + copy_intrinsic(self, false, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + "volatile_copy_memory" => { + copy_intrinsic(self, true, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + "volatile_set_memory" => { + memset_intrinsic(self, true, substs.type_at(0), + args[0].immediate(), args[1].immediate(), args[2].immediate()) + } + "volatile_load" | "unaligned_volatile_load" => { + let tp_ty = substs.type_at(0); + let mut ptr = args[0].immediate(); + if let PassMode::Cast(ty) = fn_ty.ret.mode { + ptr = &self.pointercast(ptr, cx.type_ptr_to(ty.llvm_type(cx))); } - - "load" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let align = cx.align_of(ty); - bx.atomic_load(args[0].immediate(), order, align) - } else { - return invalid_monomorphization(ty); + let load = &self.volatile_load(ptr); + let align = if name == "unaligned_volatile_load" { + 1 + } else { + cx.align_of(tp_ty).abi() as u32 + }; + unsafe { + llvm::LLVMSetAlignment(load, align); + } + to_immediate(self, load, cx.layout_of(tp_ty)) + }, + "volatile_store" => { + let dst = args[0].deref(cx); + args[1].val.volatile_store(self, dst); + return; + }, + "unaligned_volatile_store" => { + let dst = args[0].deref(cx); + args[1].val.unaligned_volatile_store(self, dst); + return; + }, + "prefetch_read_data" | "prefetch_write_data" | + "prefetch_read_instruction" | "prefetch_write_instruction" => { + let expect = cx.get_intrinsic(&("llvm.prefetch")); + let (rw, cache_type) = match name { + "prefetch_read_data" => (0, 1), + "prefetch_write_data" => (1, 1), + "prefetch_read_instruction" => (0, 0), + "prefetch_write_instruction" => (1, 0), + _ => bug!() + }; + &self.call(expect, &[ + args[0].immediate(), + cx.const_i32(rw), + args[1].immediate(), + cx.const_i32(cache_type) + ], None) + }, + "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" | + "bitreverse" | "add_with_overflow" | "sub_with_overflow" | + "mul_with_overflow" | "overflowing_add" | "overflowing_sub" | "overflowing_mul" | + "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" => { + let ty = arg_tys[0]; + match int_type_width_signed(ty, cx) { + Some((width, signed)) => + match name { + "ctlz" | "cttz" => { + let y = cx.const_bool(false); + let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width)); + self.call(llfn, &[args[0].immediate(), y], None) + } + "ctlz_nonzero" | "cttz_nonzero" => { + let y = cx.const_bool(true); + let llvm_name = &format!("llvm.{}.i{}", &name[..4], width); + let llfn = cx.get_intrinsic(llvm_name); + self.call(llfn, &[args[0].immediate(), y], None) 
+ } + "ctpop" => self.call( + cx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), + &[args[0].immediate()], + None + ), + "bswap" => { + if width == 8 { + args[0].immediate() // byte swap a u8/i8 is just a no-op + } else { + self.call(cx.get_intrinsic(&format!("llvm.bswap.i{}", width)), + &[args[0].immediate()], None) + } + } + "bitreverse" => { + self.call(cx.get_intrinsic(&format!("llvm.bitreverse.i{}", width)), + &[args[0].immediate()], None) + } + "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { + let intrinsic = format!("llvm.{}{}.with.overflow.i{}", + if signed { 's' } else { 'u' }, + &name[..3], width); + let llfn = cx.get_intrinsic(&intrinsic); + + // Convert `i1` to a `bool`, and write it to the out parameter + let pair = &self.call(llfn, &[ + args[0].immediate(), + args[1].immediate() + ], None); + let val = &self.extract_value(pair, 0); + let val1 = &self.extract_value(pair, 1); + let overflow = &self.zext( + val1, + cx.type_bool() + ); + + let dest = result.project_field(self, 0); + &self.store(val, dest.llval, dest.align); + let dest = result.project_field(self, 1); + &self.store(overflow, dest.llval, dest.align); + + return; + }, + "overflowing_add" => self.add(args[0].immediate(), args[1].immediate()), + "overflowing_sub" => self.sub(args[0].immediate(), args[1].immediate()), + "overflowing_mul" => self.mul(args[0].immediate(), args[1].immediate()), + "exact_div" => + if signed { + self.exactsdiv(args[0].immediate(), args[1].immediate()) + } else { + self.exactudiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_div" => + if signed { + self.sdiv(args[0].immediate(), args[1].immediate()) + } else { + self.udiv(args[0].immediate(), args[1].immediate()) + }, + "unchecked_rem" => + if signed { + self.srem(args[0].immediate(), args[1].immediate()) + } else { + self.urem(args[0].immediate(), args[1].immediate()) + }, + "unchecked_shl" => self.shl(args[0].immediate(), args[1].immediate()), + "unchecked_shr" => + if signed { + self.ashr(args[0].immediate(), args[1].immediate()) + } else { + self.lshr(args[0].immediate(), args[1].immediate()) + }, + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, ty)); + return; } } - "store" => { - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - let align = cx.align_of(ty); - bx.atomic_store(args[1].immediate(), args[0].immediate(), order, align); + }, + "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => { + let sty = &arg_tys[0].sty; + match float_type_width(sty) { + Some(_width) => + match name { + "fadd_fast" => self.fadd_fast(args[0].immediate(), args[1].immediate()), + "fsub_fast" => self.fsub_fast(args[0].immediate(), args[1].immediate()), + "fmul_fast" => self.fmul_fast(args[0].immediate(), args[1].immediate()), + "fdiv_fast" => self.fdiv_fast(args[0].immediate(), args[1].immediate()), + "frem_fast" => self.frem_fast(args[0].immediate(), args[1].immediate()), + _ => bug!(), + }, + None => { + span_invalid_monomorphization_error( + tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic float type, found `{}`", name, sty)); return; - } else { - return invalid_monomorphization(ty); } } - "fence" => { - bx.atomic_fence(order, llvm::SynchronizationScope::CrossThread); - return; - } + }, - "singlethreadfence" => { - bx.atomic_fence(order, llvm::SynchronizationScope::SingleThread); - 
return; - } + "discriminant_value" => { + args[0].deref(cx).codegen_get_discr(self, ret_ty) + } - // These are all AtomicRMW ops - op => { - let atom_op = match op { - "xchg" => llvm::AtomicXchg, - "xadd" => llvm::AtomicAdd, - "xsub" => llvm::AtomicSub, - "and" => llvm::AtomicAnd, - "nand" => llvm::AtomicNand, - "or" => llvm::AtomicOr, - "xor" => llvm::AtomicXor, - "max" => llvm::AtomicMax, - "min" => llvm::AtomicMin, - "umax" => llvm::AtomicUMax, - "umin" => llvm::AtomicUMin, - _ => cx.sess().fatal("unknown atomic operation") - }; - - let ty = substs.type_at(0); - if int_type_width_signed(ty, cx).is_some() { - bx.atomic_rmw(atom_op, args[0].immediate(), args[1].immediate(), order) - } else { - return invalid_monomorphization(ty); - } + name if name.starts_with("simd_") => { + match generic_simd_intrinsic(self, name, + callee_ty, + args, + ret_ty, llret_ty, + span) { + Ok(llval) => llval, + Err(()) => return } } - } - - "nontemporal_store" => { - let dst = args[0].deref(bx.cx); - args[1].val.nontemporal_store(bx, dst); - return; - } + // This requires that atomic intrinsics follow a specific naming pattern: + // "atomic_[_]", and no ordering means SeqCst + name if name.starts_with("atomic_") => { + use rustc_codegen_ssa::common::AtomicOrdering::*; + use rustc_codegen_ssa::common:: + {SynchronizationScope, AtomicRmwBinOp}; + + let split: Vec<&str> = name.split('_').collect(); + + let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak"; + let (order, failorder) = match split.len() { + 2 => (SequentiallyConsistent, SequentiallyConsistent), + 3 => match split[2] { + "unordered" => (Unordered, Unordered), + "relaxed" => (Monotonic, Monotonic), + "acq" => (Acquire, Acquire), + "rel" => (Release, Monotonic), + "acqrel" => (AcquireRelease, Acquire), + "failrelaxed" if is_cxchg => + (SequentiallyConsistent, Monotonic), + "failacq" if is_cxchg => + (SequentiallyConsistent, Acquire), + _ => cx.sess().fatal("unknown ordering in atomic intrinsic") + }, + 4 => match (split[2], split[3]) { + ("acq", "failrelaxed") if is_cxchg => + (Acquire, Monotonic), + ("acqrel", "failrelaxed") if is_cxchg => + (AcquireRelease, Monotonic), + _ => cx.sess().fatal("unknown ordering in atomic intrinsic") + }, + _ => cx.sess().fatal("Atomic intrinsic not in correct format"), + }; - _ => { - let intr = Intrinsic::find(&name).unwrap_or_else(|| - bug!("unknown intrinsic '{}'", name)); + let invalid_monomorphization = |ty| { + span_invalid_monomorphization_error(tcx.sess, span, + &format!("invalid monomorphization of `{}` intrinsic: \ + expected basic integer type, found `{}`", name, ty)); + }; - fn one(x: Vec) -> T { - assert_eq!(x.len(), 1); - x.into_iter().next().unwrap() - } - fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> { - use intrinsics::Type::*; - match *t { - Void => vec![Type::void(cx)], - Integer(_signed, _width, llvm_width) => { - vec![Type::ix(cx, llvm_width as u64)] + match split[1] { + "cxchg" | "cxchgweak" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + let weak = split[1] == "cxchgweak"; + let pair = &self.atomic_cmpxchg( + args[0].immediate(), + args[1].immediate(), + args[2].immediate(), + order, + failorder, + weak); + let val = &self.extract_value(pair, 0); + let val1 = &self.extract_value(pair, 1); + let success = &self.zext( + val1, + &self.cx().type_bool() + ); + + let dest = result.project_field(self, 0); + &self.store(val, dest.llval, dest.align); + let dest = result.project_field(self, 1); + &self.store(success, dest.llval, 
dest.align); + return; + } else { + return invalid_monomorphization(ty); + } } - Float(x) => { - match x { - 32 => vec![Type::f32(cx)], - 64 => vec![Type::f64(cx)], - _ => bug!() + + "load" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + let align = cx.align_of(ty); + self.atomic_load(args[0].immediate(), order, align) + } else { + return invalid_monomorphization(ty); } } - Pointer(ref t, ref llvm_elem, _const) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![elem.ptr_to()] + + "store" => { + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + let align = cx.align_of(ty); + self.atomic_store( + args[1].immediate(), + args[0].immediate(), + order, + align + ); + return; + } else { + return invalid_monomorphization(ty); + } } - Vector(ref t, ref llvm_elem, length) => { - let t = llvm_elem.as_ref().unwrap_or(t); - let elem = one(ty_to_type(cx, t)); - vec![Type::vector(elem, length as u64)] + + "fence" => { + &self.atomic_fence(order, SynchronizationScope::CrossThread); + return; } - Aggregate(false, ref contents) => { - let elems = contents.iter() - .map(|t| one(ty_to_type(cx, t))) - .collect::>(); - vec![Type::struct_(cx, &elems, false)] + + "singlethreadfence" => { + &self.atomic_fence(order, SynchronizationScope::SingleThread); + return; } - Aggregate(true, ref contents) => { - contents.iter() - .flat_map(|t| ty_to_type(cx, t)) - .collect() + + // These are all AtomicRMW ops + op => { + let atom_op = match op { + "xchg" => AtomicRmwBinOp::AtomicXchg, + "xadd" => AtomicRmwBinOp::AtomicAdd, + "xsub" => AtomicRmwBinOp::AtomicSub, + "and" => AtomicRmwBinOp::AtomicAnd, + "nand" => AtomicRmwBinOp::AtomicNand, + "or" => AtomicRmwBinOp::AtomicOr, + "xor" => AtomicRmwBinOp::AtomicXor, + "max" => AtomicRmwBinOp::AtomicMax, + "min" => AtomicRmwBinOp::AtomicMin, + "umax" => AtomicRmwBinOp::AtomicUMax, + "umin" => AtomicRmwBinOp::AtomicUMin, + _ => cx.sess().fatal("unknown atomic operation") + }; + + let ty = substs.type_at(0); + if int_type_width_signed(ty, cx).is_some() { + self.atomic_rmw( + atom_op, + args[0].immediate(), + args[1].immediate(), + order + ) + } else { + return invalid_monomorphization(ty); + } } } } - // This allows an argument list like `foo, (bar, baz), - // qux` to be converted into `foo, bar, baz, qux`, integer - // arguments to be truncated as needed and pointers to be - // cast. - fn modify_as_needed( - bx: &Builder<'a, 'll, 'tcx>, - t: &intrinsics::Type, - arg: &OperandRef<'ll, 'tcx>, - ) -> Vec<&'ll Value> { - match *t { - intrinsics::Type::Aggregate(true, ref contents) => { - // We found a tuple that needs squishing! So - // run over the tuple and load each field. - // - // This assumes the type is "simple", i.e. no - // destructors, and the contents are SIMD - // etc. 
- assert!(!bx.cx.type_needs_drop(arg.layout.ty)); - let (ptr, align) = match arg.val { - OperandValue::Ref(ptr, None, align) => (ptr, align), - _ => bug!() - }; - let arg = PlaceRef::new_sized(ptr, arg.layout, align); - (0..contents.len()).map(|i| { - arg.project_field(bx, i).load(bx).immediate() - }).collect() - } - intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { - let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); - vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())] - } - intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { - let llvm_elem = one(ty_to_type(bx.cx, llvm_elem)); - vec![bx.bitcast(arg.immediate(), Type::vector(llvm_elem, length as u64))] + "nontemporal_store" => { + let dst = args[0].deref(cx); + args[1].val.nontemporal_store(self, dst); + return; + } + + _ => { + let intr = match Intrinsic::find(&name) { + Some(intr) => intr, + None => bug!("unknown intrinsic '{}'", name), + }; + fn one(x: Vec) -> T { + assert_eq!(x.len(), 1); + x.into_iter().next().unwrap() + } + fn ty_to_type<'ll>( + cx: &CodegenCx<'ll, '_, &'ll Value>, + t: &intrinsics::Type + ) -> Vec<&'ll Type> { + use intrinsics::Type::*; + match *t { + Void => vec![cx.type_void()], + Integer(_signed, _width, llvm_width) => { + vec![cx.type_ix( llvm_width as u64)] + } + Float(x) => { + match x { + 32 => vec![cx.type_f32()], + 64 => vec![cx.type_f64()], + _ => bug!() + } + } + Pointer(ref t, ref llvm_elem, _const) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![cx.type_ptr_to(elem)] + } + Vector(ref t, ref llvm_elem, length) => { + let t = llvm_elem.as_ref().unwrap_or(t); + let elem = one(ty_to_type(cx, t)); + vec![cx.type_vector(elem, length as u64)] + } + Aggregate(false, ref contents) => { + let elems = contents.iter() + .map(|t| one(ty_to_type(cx, t))) + .collect::>(); + vec![cx.type_struct( &elems, false)] + } + Aggregate(true, ref contents) => { + contents.iter() + .flat_map(|t| ty_to_type(cx, t)) + .collect() + } } - intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { - // the LLVM intrinsic uses a smaller integer - // size than the C intrinsic's signature, so - // we have to trim it down here. - vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))] + } + + // This allows an argument list like `foo, (bar, baz), + // qux` to be converted into `foo, bar, baz, qux`, integer + // arguments to be truncated as needed and pointers to be + // cast. + fn modify_as_needed<'a, 'll: 'a, 'tcx: 'll>( + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + t: &intrinsics::Type, + arg: &OperandRef<'tcx, &'ll Value>, + ) -> Vec<&'ll Value> { + match *t { + intrinsics::Type::Aggregate(true, ref contents) => { + // We found a tuple that needs squishing! So + // run over the tuple and load each field. + // + // This assumes the type is "simple", i.e. no + // destructors, and the contents are SIMD + // etc. 
+ assert!(!bx.cx().type_needs_drop(arg.layout.ty)); + let (ptr, align) = match arg.val { + OperandValue::Ref(ptr, None, align) => (ptr, align), + _ => bug!() + }; + let arg = PlaceRef::new_sized(ptr, arg.layout, align); + (0..contents.len()).map(|i| { + let field = arg.project_field(bx, i); + bx.load_ref(&field).immediate() + }).collect() + } + intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => { + let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); + vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))] + } + intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => { + let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem)); + vec![ + bx.bitcast(arg.immediate(), + bx.cx().type_vector(llvm_elem, length as u64)) + ] + } + intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => { + // the LLVM intrinsic uses a smaller integer + // size than the C intrinsic's signature, so + // we have to trim it down here. + vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))] + } + _ => vec![arg.immediate()], } - _ => vec![arg.immediate()], } - } - let inputs = intr.inputs.iter() - .flat_map(|t| ty_to_type(cx, t)) - .collect::>(); + let inputs = intr.inputs.iter() + .flat_map(|t| ty_to_type(cx, t)) + .collect::>(); - let outputs = one(ty_to_type(cx, &intr.output)); + let outputs = one(ty_to_type(cx, &intr.output)); - let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { - modify_as_needed(bx, t, arg) - }).collect(); - assert_eq!(inputs.len(), llargs.len()); + let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| { + modify_as_needed(self, t, arg) + }).collect(); + assert_eq!(inputs.len(), llargs.len()); - let val = match intr.definition { - intrinsics::IntrinsicDef::Named(name) => { - let f = declare::declare_cfn(cx, - name, - Type::func(&inputs, outputs)); - bx.call(f, &llargs, None) - } - }; + let val = match intr.definition { + intrinsics::IntrinsicDef::Named(name) => { + let f = cx.declare_cfn( + name, + cx.type_func(&inputs, outputs) + ); + self.call(f, &llargs, None) + } + }; - match *intr.output { - intrinsics::Type::Aggregate(flatten, ref elems) => { - // the output is a tuple so we need to munge it properly - assert!(!flatten); + match *intr.output { + intrinsics::Type::Aggregate(flatten, ref elems) => { + // the output is a tuple so we need to munge it properly + assert!(!flatten); - for i in 0..elems.len() { - let dest = result.project_field(bx, i); - let val = bx.extract_value(val, i as u64); - bx.store(val, dest.llval, dest.align); + for i in 0..elems.len() { + let dest = result.project_field(self, i); + let val = &self.extract_value(val, i as u64); + &self.store(val, dest.llval, dest.align); + } + return; } - return; + _ => val, } - _ => val, } - } - }; + }; - if !fn_ty.ret.is_ignore() { - if let PassMode::Cast(ty) = fn_ty.ret.mode { - let ptr = bx.pointercast(result.llval, ty.llvm_type(cx).ptr_to()); - bx.store(llval, ptr, result.align); - } else { - OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) - .val.store(bx, result); + if !fn_ty.ret.is_ignore() { + if let PassMode::Cast(ty) = fn_ty.ret.mode { + let ptr = &self.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx))); + &self.store(llval, ptr, result.align); + } else { + OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) + .val.store(self, result); + } } } } fn copy_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, allow_overlap: bool, volatile: bool, ty: Ty<'tcx>, @@ 
-687,10 +729,10 @@ fn copy_intrinsic( src: &'ll Value, count: &'ll Value, ) -> &'ll Value { - let cx = bx.cx; + let cx = bx.cx(); let (size, align) = cx.size_and_align_of(ty); - let size = C_usize(cx, size.bytes()); - let align = C_i32(cx, align.abi() as i32); + let size = cx.const_usize(size.bytes()); + let align = cx.const_i32(align.abi() as i32); let operation = if allow_overlap { "memmove" @@ -701,48 +743,50 @@ fn copy_intrinsic( let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, cx.data_layout().pointer_size.bits()); - let dst_ptr = bx.pointercast(dst, Type::i8p(cx)); - let src_ptr = bx.pointercast(src, Type::i8p(cx)); + let dst_ptr = bx.pointercast(dst, cx.type_i8p()); + let src_ptr = bx.pointercast(src, cx.type_i8p()); let llfn = cx.get_intrinsic(&name); - + let mul = bx.mul(size, count); + let volatile_const = cx.const_bool(volatile); bx.call(llfn, &[dst_ptr, src_ptr, - bx.mul(size, count), + mul, align, - C_bool(cx, volatile)], + volatile_const], None) } fn memset_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, volatile: bool, ty: Ty<'tcx>, dst: &'ll Value, val: &'ll Value, count: &'ll Value ) -> &'ll Value { - let cx = bx.cx; + let cx = bx.cx(); let (size, align) = cx.size_and_align_of(ty); - let size = C_usize(cx, size.bytes()); - let align = C_i32(cx, align.abi() as i32); - let dst = bx.pointercast(dst, Type::i8p(cx)); - call_memset(bx, dst, val, bx.mul(size, count), align, volatile) + let size = cx.const_usize(size.bytes()); + let align = cx.const_i32(align.abi() as i32); + let dst = bx.pointercast(dst, cx.type_i8p()); + let mul = bx.mul(size, count); + bx.call_memset(dst, val, mul, align, volatile) } fn try_intrinsic( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - if bx.sess().no_landing_pads() { + if bx.cx().sess().no_landing_pads() { bx.call(func, &[data], None); let ptr_align = bx.tcx().data_layout.pointer_align; - bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align); - } else if wants_msvc_seh(bx.sess()) { + bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align); + } else if wants_msvc_seh(bx.cx().sess()) { codegen_msvc_try(bx, cx, func, data, local_ptr, dest); } else { codegen_gnu_try(bx, cx, func, data, local_ptr, dest); @@ -757,22 +801,22 @@ fn try_intrinsic( // writing, however, LLVM does not recommend the usage of these new instructions // as the old ones are still more optimized. 
fn codegen_msvc_try( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(cx, &mut |bx| { - let cx = bx.cx; + let llfn = get_rust_try_fn(cx, &mut |mut bx| { + let cx = bx.cx(); - bx.set_personality_fn(bx.cx.eh_personality()); + bx.set_personality_fn(bx.cx().eh_personality()); - let normal = bx.build_sibling_block("normal"); - let catchswitch = bx.build_sibling_block("catchswitch"); - let catchpad = bx.build_sibling_block("catchpad"); - let caught = bx.build_sibling_block("caught"); + let mut normal = bx.build_sibling_block("normal"); + let mut catchswitch = bx.build_sibling_block("catchswitch"); + let mut catchpad = bx.build_sibling_block("catchpad"); + let mut caught = bx.build_sibling_block("caught"); let func = llvm::get_param(bx.llfn(), 0); let data = llvm::get_param(bx.llfn(), 1); @@ -817,34 +861,36 @@ fn codegen_msvc_try( // } // // More information can be found in libstd's seh.rs implementation. - let i64p = Type::i64(cx).ptr_to(); + let i64p = cx.type_ptr_to(cx.type_i64()); let ptr_align = bx.tcx().data_layout.pointer_align; let slot = bx.alloca(i64p, "slot", ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); - normal.ret(C_i32(cx, 0)); + normal.ret(cx.const_i32(0)); let cs = catchswitch.catch_switch(None, None, 1); catchswitch.add_handler(cs, catchpad.llbb()); let tcx = cx.tcx; let tydesc = match tcx.lang_items().msvc_try_filter() { - Some(did) => ::consts::get_static(cx, did), + Some(did) => cx.get_static(did), None => bug!("msvc_try_filter not defined"), }; - let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]); + let tok = catchpad.catch_pad(cs, &[tydesc, cx.const_i32(0), slot]); let addr = catchpad.load(slot, ptr_align); let i64_align = bx.tcx().data_layout.i64_align; let arg1 = catchpad.load(addr, i64_align); - let val1 = C_i32(cx, 1); - let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align); + let val1 = cx.const_i32(1); + let gep1 = catchpad.inbounds_gep(addr, &[val1]); + let arg2 = catchpad.load(gep1, i64_align); let local_ptr = catchpad.bitcast(local_ptr, i64p); + let gep2 = catchpad.inbounds_gep(local_ptr, &[val1]); catchpad.store(arg1, local_ptr, i64_align); - catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align); + catchpad.store(arg2, gep2, i64_align); catchpad.catch_ret(tok, caught.llbb()); - caught.ret(C_i32(cx, 1)); + caught.ret(cx.const_i32(1)); }); // Note that no invoke is used here because by definition this function @@ -866,15 +912,15 @@ fn codegen_msvc_try( // functions in play. By calling a shim we're guaranteed that our shim will have // the right personality function. fn codegen_gnu_try( - bx: &Builder<'a, 'll, 'tcx>, - cx: &CodegenCx<'ll, 'tcx>, + bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>, + cx: &CodegenCx<'ll, 'tcx, &'ll Value>, func: &'ll Value, data: &'ll Value, local_ptr: &'ll Value, dest: &'ll Value, ) { - let llfn = get_rust_try_fn(cx, &mut |bx| { - let cx = bx.cx; + let llfn = get_rust_try_fn(cx, &mut |mut bx| { + let cx = bx.cx(); // Codegens the shims described above: // @@ -893,14 +939,14 @@ fn codegen_gnu_try( // expected to be `*mut *mut u8` for this to actually work, but that's // managed by the standard library. 
-        let then = bx.build_sibling_block("then");
-        let catch = bx.build_sibling_block("catch");
+        let mut then = bx.build_sibling_block("then");
+        let mut catch = bx.build_sibling_block("catch");
         let func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
         let local_ptr = llvm::get_param(bx.llfn(), 2);
         bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
-        then.ret(C_i32(cx, 0));
+        then.ret(cx.const_i32(0));
         // Type indicator for the exception being thrown.
         //
@@ -908,13 +954,14 @@ fn codegen_gnu_try(
         // being thrown. The second value is a "selector" indicating which of
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
-        let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false);
-        let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
-        catch.add_clause(vals, C_null(Type::i8p(cx)));
+        let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false);
+        let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
+        catch.add_clause(vals, bx.cx().const_null(cx.type_i8p()));
         let ptr = catch.extract_value(vals, 0);
         let ptr_align = bx.tcx().data_layout.pointer_align;
-        catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align);
-        catch.ret(C_i32(cx, 1));
+        let bitcast = catch.bitcast(local_ptr, cx.type_ptr_to(cx.type_i8p()));
+        catch.store(ptr, bitcast, ptr_align);
+        catch.ret(cx.const_i32(1));
     });
     // Note that no invoke is used here because by definition this function
@@ -927,11 +974,11 @@ fn codegen_gnu_try(
 // Helper function to give a Block to a closure to codegen a shim function.
 // This is currently primarily used for the `try` intrinsic functions above.
 fn gen_fn<'ll, 'tcx>(
-    cx: &CodegenCx<'ll, 'tcx>,
+    cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
     name: &str,
     inputs: Vec<Ty<'tcx>>,
     output: Ty<'tcx>,
-    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx, &'ll Value>),
 ) -> &'ll Value {
     let rust_fn_ty = cx.tcx.mk_fn_ptr(ty::Binder::bind(cx.tcx.mk_fn_sig(
         inputs.into_iter(),
@@ -940,7 +987,7 @@ fn gen_fn<'ll, 'tcx>(
         hir::Unsafety::Unsafe,
         Abi::Rust
     )));
-    let llfn = declare::define_internal_fn(cx, name, rust_fn_ty);
+    let llfn = cx.define_internal_fn(name, rust_fn_ty);
     attributes::from_fn_attrs(cx, llfn, None);
     let bx = Builder::new_block(cx, llfn, "entry-block");
     codegen(bx);
@@ -952,8 +999,8 @@ fn gen_fn<'ll, 'tcx>(
 //
 // This function is only generated once and is then cached.
 fn get_rust_try_fn<'ll, 'tcx>(
-    cx: &CodegenCx<'ll, 'tcx>,
-    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx>),
+    cx: &CodegenCx<'ll, 'tcx, &'ll Value>,
+    codegen: &mut dyn FnMut(Builder<'_, 'll, 'tcx, &'ll Value>),
 ) -> &'ll Value {
     if let Some(llfn) = cx.rust_try_fn.get() {
         return llfn;
@@ -980,10 +1027,10 @@ fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
 }
 fn generic_simd_intrinsic(
-    bx: &Builder<'a, 'll, 'tcx>,
+    bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>,
     name: &str,
     callee_ty: Ty<'tcx>,
-    args: &[OperandRef<'ll, 'tcx>],
+    args: &[OperandRef<'tcx, &'ll Value>],
     ret_ty: Ty<'tcx>,
     llret_ty: &'ll Type,
     span: Span
@@ -995,7 +1042,7 @@ fn generic_simd_intrinsic(
         };
         ($msg: tt, $($fmt: tt)*) => {
             span_invalid_monomorphization_error(
-                bx.sess(), span,
+                bx.cx().sess(), span,
                 &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                          name, $($fmt)*));
         }
@@ -1056,7 +1103,7 @@ fn generic_simd_intrinsic(
              found `{}` with length {}",
             in_len, in_ty,
            ret_ty, out_len);
-    require!(llret_ty.element_type().kind() == TypeKind::Integer,
+    require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
             "expected return type with integer elements, found `{}` with non-integer `{}`",
             ret_ty,
             ret_ty.simd_type(tcx));
@@ -1092,8 +1139,8 @@ fn generic_simd_intrinsic(
         let indices: Option<Vec<&'ll Value>> = (0..n)
             .map(|i| {
                 let arg_idx = i;
-                let val = const_get_elt(vector, i as u64);
-                match const_to_opt_u128(val, true) {
+                let val = bx.cx().const_get_elt(vector, i as u64);
+                match bx.cx().const_to_opt_u128(val, true) {
                     None => {
                         emit_error!("shuffle index #{} is not a constant", arg_idx);
                         None
@@ -1103,18 +1150,18 @@ fn generic_simd_intrinsic(
                                     arg_idx, total_len);
                         None
                     }
-                    Some(idx) => Some(C_i32(bx.cx, idx as i32)),
+                    Some(idx) => Some(bx.cx().const_i32(idx as i32)),
                 }
             })
             .collect();
         let indices = match indices {
             Some(i) => i,
-            None => return Ok(C_null(llret_ty))
+            None => return Ok(bx.cx().const_null(llret_ty))
         };
         return Ok(bx.shuffle_vector(args[0].immediate(),
                                     args[1].immediate(),
-                                    C_vector(&indices)))
+                                    bx.cx().const_vector(&indices)))
     }
     if name == "simd_insert" {
@@ -1145,8 +1192,8 @@ fn generic_simd_intrinsic(
             _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
         }
         // truncate the mask to a vector of i1s
-        let i1 = Type::i1(bx.cx);
-        let i1xn = Type::vector(i1, m_len as u64);
+        let i1 = bx.cx().type_i1();
+        let i1xn = bx.cx().type_vector(i1, m_len as u64);
         let m_i1s = bx.trunc(args[0].immediate(), i1xn);
         return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
     }
@@ -1156,9 +1203,9 @@ fn generic_simd_intrinsic(
         in_elem: &::rustc::ty::TyS,
         in_ty: &::rustc::ty::TyS,
         in_len: usize,
-        bx: &Builder<'a, 'll, 'tcx>,
+        bx: &mut Builder<'a, 'll, 'tcx, &'ll Value>,
         span: Span,
-        args: &[OperandRef<'ll, 'tcx>],
+        args: &[OperandRef<'tcx, &'ll Value>],
     ) -> Result<&'ll Value, ()> {
         macro_rules!
emit_error { ($msg: tt) => { @@ -1166,7 +1213,7 @@ fn generic_simd_intrinsic( }; ($msg: tt, $($fmt: tt)*) => { span_invalid_monomorphization_error( - bx.sess(), span, + bx.cx().sess(), span, &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg), name, $($fmt)*)); } @@ -1207,7 +1254,7 @@ fn generic_simd_intrinsic( }; let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety); - let intrinsic = bx.cx.get_intrinsic(&llvm_name); + let intrinsic = bx.cx().get_intrinsic(&llvm_name); let c = bx.call(intrinsic, &args.iter().map(|arg| arg.immediate()).collect::>(), None); @@ -1274,20 +1321,20 @@ fn generic_simd_intrinsic( } } - fn llvm_vector_ty(cx: &CodegenCx<'ll, '_>, elem_ty: ty::Ty, vec_len: usize, + fn llvm_vector_ty(cx: &CodegenCx<'ll, '_, &'ll Value>, elem_ty: ty::Ty, vec_len: usize, mut no_pointers: usize) -> &'ll Type { // FIXME: use cx.layout_of(ty).llvm_type() ? let mut elem_ty = match elem_ty.sty { - ty::Int(v) => Type::int_from_ty(cx, v), - ty::Uint(v) => Type::uint_from_ty(cx, v), - ty::Float(v) => Type::float_from_ty(cx, v), + ty::Int(v) => cx.type_int_from_ty( v), + ty::Uint(v) => cx.type_uint_from_ty( v), + ty::Float(v) => cx.type_float_from_ty( v), _ => unreachable!(), }; while no_pointers > 0 { - elem_ty = elem_ty.ptr_to(); + elem_ty = cx.type_ptr_to(elem_ty); no_pointers -= 1; } - Type::vector(elem_ty, vec_len as u64) + cx.type_vector(elem_ty, vec_len as u64) } @@ -1364,29 +1411,32 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx); - let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + let alignment_ty = bx.cx().type_i32(); + let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, mask_ty) = { - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.gather.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func(&[llvm_pointer_vec_ty, alignment_ty, mask_ty, - llvm_elem_vec_ty], llvm_elem_vec_ty)); + let f = bx.cx().declare_cfn(&llvm_intrinsic, + bx.cx().type_func(&[ + llvm_pointer_vec_ty, + alignment_ty, + mask_ty, + llvm_elem_vec_ty], llvm_elem_vec_ty)); llvm::SetUnnamedAddr(f, false); let v = bx.call(f, &[args[1].immediate(), alignment, mask, args[0].immediate()], None); @@ -1461,30 +1511,30 @@ fn generic_simd_intrinsic( } // Alignment of T, must be a constant integer value: - let alignment_ty = Type::i32(bx.cx); - let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32); + let alignment_ty = bx.cx().type_i32(); + let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32); // Truncate the mask vector to a vector of i1s: let (mask, 
mask_ty) = { - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, in_len as u64); (bx.trunc(args[2].immediate(), i1xn), i1xn) }; - let ret_t = Type::void(bx.cx); + let ret_t = bx.cx().type_void(); // Type of the vector of pointers: - let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count); + let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count); let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count); // Type of the vector of elements: - let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1); + let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1); let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1); let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}", llvm_elem_vec_str, llvm_pointer_vec_str); - let f = declare::declare_cfn(bx.cx, &llvm_intrinsic, - Type::func(&[llvm_elem_vec_ty, + let f = bx.cx().declare_cfn(&llvm_intrinsic, + bx.cx().type_func(&[llvm_elem_vec_ty, llvm_pointer_vec_ty, alignment_ty, mask_ty], ret_t)); @@ -1524,7 +1574,7 @@ fn generic_simd_intrinsic( // code is generated // * if the accumulator of the fmul isn't 1, incorrect // code is generated - match const_get_real(acc) { + match bx.cx().const_get_real(acc) { None => return_error!("accumulator of {} is not a constant", $name), Some((v, loses_info)) => { if $name.contains("mul") && v != 1.0_f64 { @@ -1540,8 +1590,8 @@ fn generic_simd_intrinsic( } else { // unordered arithmetic reductions do not: match f.bit_width() { - 32 => C_undef(Type::f32(bx.cx)), - 64 => C_undef(Type::f64(bx.cx)), + 32 => bx.cx().const_undef(bx.cx().type_f32()), + 64 => bx.cx().const_undef(bx.cx().type_f64()), v => { return_error!(r#" unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, @@ -1618,8 +1668,8 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, } // boolean reductions operate on vectors of i1s: - let i1 = Type::i1(bx.cx); - let i1xn = Type::vector(i1, in_len as u64); + let i1 = bx.cx().type_i1(); + let i1xn = bx.cx().type_vector(i1, in_len as u64); bx.trunc(args[0].immediate(), i1xn) }; return match in_elem.sty { @@ -1629,7 +1679,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, if !$boolean { r } else { - bx.zext(r, Type::bool(bx.cx)) + bx.zext(r, bx.cx().type_bool()) } ) }, @@ -1755,7 +1805,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#, // Returns None if the type is not an integer // FIXME: there’s multiple of this functions, investigate using some of the already existing // stuffs. 
-fn int_type_width_signed(ty: Ty, cx: &CodegenCx) -> Option<(u64, bool)> { +fn int_type_width_signed(ty: Ty, cx: &CodegenCx<'ll, '_, &'ll Value>) -> Option<(u64, bool)> { match ty.sty { ty::Int(t) => Some((match t { ast::IntTy::Isize => cx.tcx.sess.target.isize_ty.bit_width().unwrap() as u64, diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs index 63a8ab077e5ae..e2ca821ecb2a5 100644 --- a/src/librustc_codegen_llvm/lib.rs +++ b/src/librustc_codegen_llvm/lib.rs @@ -37,11 +37,10 @@ #![feature(static_nobundle)] use back::write::create_target_machine; -use rustc::dep_graph::WorkProduct; use syntax_pos::symbol::Symbol; -#[macro_use] extern crate bitflags; extern crate flate2; +#[macro_use] extern crate bitflags; extern crate libc; #[macro_use] extern crate rustc; extern crate jobserver; @@ -56,6 +55,7 @@ extern crate rustc_incremental; extern crate rustc_llvm; extern crate rustc_platform_intrinsics as intrinsics; extern crate rustc_codegen_utils; +extern crate rustc_codegen_ssa; extern crate rustc_fs_util; #[macro_use] extern crate log; @@ -67,28 +67,30 @@ extern crate cc; // Used to locate MSVC extern crate tempfile; extern crate memmap; -use back::bytecode::RLIB_BYTECODE_EXTENSION; - +use rustc_codegen_ssa::interfaces::*; +use rustc_codegen_ssa::back::write::{CodegenContext, ModuleConfig, DiagnosticHandlers}; +use rustc_codegen_ssa::back::lto::{SerializedModule, LtoModuleCodegen, ThinModule}; +use rustc_codegen_ssa::CompiledModule; +use errors::{FatalError, Handler}; +use rustc::dep_graph::WorkProduct; +use rustc::util::time_graph::Timeline; +use syntax_pos::symbol::InternedString; +use rustc::mir::mono::Stats; pub use llvm_util::target_features; use std::any::Any; -use std::path::{PathBuf}; -use std::sync::mpsc; -use rustc_data_structures::sync::Lrc; +use std::sync::{mpsc, Arc}; use rustc::dep_graph::DepGraph; -use rustc::hir::def_id::CrateNum; -use rustc::middle::cstore::MetadataLoader; -use rustc::middle::cstore::{NativeLibrary, CrateSource, LibSource}; -use rustc::middle::lang_items::LangItem; +use rustc::middle::allocator::AllocatorKind; +use rustc::middle::cstore::{EncodedMetadata, MetadataLoader}; use rustc::session::{Session, CompileIncomplete}; use rustc::session::config::{OutputFilenames, OutputType, PrintRequest}; use rustc::ty::{self, TyCtxt}; use rustc::util::time_graph; -use rustc::util::nodemap::{FxHashSet, FxHashMap}; use rustc::util::profiling::ProfileCategory; use rustc_mir::monomorphize; +use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_utils::codegen_backend::CodegenBackend; -use rustc_data_structures::svh::Svh; mod diagnostics; @@ -96,11 +98,8 @@ mod back { pub use rustc_codegen_utils::symbol_names; mod archive; pub mod bytecode; - mod command; - pub mod linker; pub mod link; pub mod lto; - pub mod symbol_export; pub mod write; mod rpath; pub mod wasm; @@ -111,29 +110,144 @@ mod allocator; mod asm; mod attributes; mod base; -mod builder; mod callee; +mod builder; mod common; mod consts; mod context; mod debuginfo; mod declare; -mod glue; mod intrinsic; -pub mod llvm; +mod mono_item; + +// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912. 
+#[path = "llvm/mod.rs"] mod llvm_; pub mod llvm { pub use super::llvm_::*; } + mod llvm_util; mod metadata; -mod meth; -mod mir; -mod mono_item; mod type_; mod type_of; mod value; +#[derive(Clone)] pub struct LlvmCodegenBackend(()); -impl !Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis -impl !Sync for LlvmCodegenBackend {} +impl ExtraBackendMethods for LlvmCodegenBackend { + fn thin_lto_available(&self) -> bool { + unsafe { !llvm::LLVMRustThinLTOAvailable() } + } + fn pgo_available(&self) -> bool { + unsafe { !llvm::LLVMRustPGOAvailable() } + } + fn new_metadata(&self, sess: &Session, mod_name: &str) -> ModuleLlvm { + ModuleLlvm::new(sess, mod_name) + } + fn write_metadata<'b, 'gcx>( + &self, + tcx: TyCtxt<'b, 'gcx, 'gcx>, + metadata: &ModuleLlvm + ) -> EncodedMetadata { + base::write_metadata(tcx, metadata) + } + fn codegen_allocator(&self, tcx: TyCtxt, mods: &ModuleLlvm, kind: AllocatorKind) { + unsafe { allocator::codegen(tcx, mods, kind) } + } + fn compile_codegen_unit<'ll, 'tcx: 'll>( + &self, + tcx: TyCtxt<'ll, 'tcx, 'tcx>, + cgu_name: InternedString + ) -> Stats { + base::compile_codegen_unit(tcx, cgu_name) + } + fn target_machine_factory( + &self, + sess: &Session, + find_features: bool + ) -> Arc + Result<&'static mut llvm::TargetMachine, String> + Send + Sync> { + back::write::target_machine_factory(sess, find_features) + } + fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str { + llvm_util::target_cpu(sess) + } +} + +impl Clone for &'static mut llvm::TargetMachine { + fn clone(&self) -> Self { + // This method should never be called. It is put here because in + // rustc_codegen_ssa::back::write::CodegenContext, the TargetMachine is contained in a + // closure returned by a function under an Arc. The clone-deriving algorithm works when the + // struct contains the original LLVM TargetMachine type but not any more when supplied with + // a generic type. Hence this dummy Clone implementation. 
+ panic!() + } +} + +impl WriteBackendMethods for LlvmCodegenBackend { + type Module = ModuleLlvm; + type ModuleBuffer = back::lto::ModuleBuffer; + type Context = llvm::Context; + type TargetMachine = &'static mut llvm::TargetMachine; + type ThinData = back::lto::ThinData; + type ThinBuffer = back::lto::ThinBuffer; + fn print_pass_timings(&self) { + unsafe { llvm::LLVMRustPrintPassTimings(); } + } + fn run_lto( + cgcx: &CodegenContext, + modules: Vec>, + cached_modules: Vec<(SerializedModule, WorkProduct)>, + timeline: &mut Timeline + ) -> Result<(Vec>, Vec), FatalError> { + back::lto::run(cgcx, modules, cached_modules, timeline) + } + fn new_diagnostic_handlers<'a>( + cgcx: &'a CodegenContext, + handler: &'a Handler, + llcx: &'a Self::Context + ) -> DiagnosticHandlers<'a, Self> { + back::write::new_diagnostic_handlers(cgcx, handler, llcx) + } + fn drop_diagnostic_handlers<'a>(diag : &mut DiagnosticHandlers<'a, Self>) { + back::write::drop_diagnostic_handlers(diag) + } + unsafe fn optimize( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: &ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result<(), FatalError> { + back::write::optimize(cgcx, diag_handler, module, config, timeline) + } + unsafe fn optimize_thin( + cgcx: &CodegenContext, + thin: &mut ThinModule, + timeline: &mut Timeline + ) -> Result, FatalError> { + back::lto::optimize_thin_module(thin, cgcx, timeline) + } + unsafe fn codegen( + cgcx: &CodegenContext, + diag_handler: &Handler, + module: ModuleCodegen, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result { + back::write::codegen(cgcx, diag_handler, module, config, timeline) + } + fn run_lto_pass_manager( + cgcx: &CodegenContext, + module: &ModuleCodegen, + config: &ModuleConfig, + thin: bool + ) { + back::lto::run_pass_manager(cgcx, module, config, thin) + } +} + +unsafe impl<'a> Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis +unsafe impl<'a> Sync for LlvmCodegenBackend {} impl LlvmCodegenBackend { pub fn new() -> Box { @@ -141,7 +255,7 @@ impl LlvmCodegenBackend { } } -impl CodegenBackend for LlvmCodegenBackend { +impl<'a> CodegenBackend for LlvmCodegenBackend { fn init(&self, sess: &Session) { llvm_util::init(sess); // Make sure llvm is inited } @@ -195,23 +309,23 @@ impl CodegenBackend for LlvmCodegenBackend { fn provide(&self, providers: &mut ty::query::Providers) { back::symbol_names::provide(providers); - back::symbol_export::provide(providers); - base::provide(providers); + rustc_codegen_ssa::back::symbol_export::provide(providers); + rustc_codegen_ssa::base::provide(providers); attributes::provide(providers); } fn provide_extern(&self, providers: &mut ty::query::Providers) { - back::symbol_export::provide_extern(providers); - base::provide_extern(providers); + rustc_codegen_ssa::back::symbol_export::provide_extern(providers); + rustc_codegen_ssa::base::provide_extern(providers); attributes::provide_extern(providers); } - fn codegen_crate<'a, 'tcx>( + fn codegen_crate<'b, 'tcx>( &self, - tcx: TyCtxt<'a, 'tcx, 'tcx>, + tcx: TyCtxt<'b, 'tcx, 'tcx>, rx: mpsc::Receiver> ) -> Box { - box base::codegen_crate(tcx, rx) + box rustc_codegen_ssa::base::codegen_crate(LlvmCodegenBackend(()), tcx, rx) } fn join_codegen_and_link( @@ -223,11 +337,12 @@ impl CodegenBackend for LlvmCodegenBackend { ) -> Result<(), CompileIncomplete>{ use rustc::util::common::time; let (ongoing_codegen, work_products) = - ongoing_codegen.downcast::<::back::write::OngoingCodegen>() + ongoing_codegen.downcast:: + >() .expect("Expected 
LlvmCodegenBackend's OngoingCodegen, found Box") .join(sess); if sess.opts.debugging_opts.incremental_info { - back::write::dump_incremental_data(&ongoing_codegen); + rustc_codegen_ssa::back::write::dump_incremental_data(&ongoing_codegen); } time(sess, @@ -264,73 +379,7 @@ pub fn __rustc_codegen_backend() -> Box { LlvmCodegenBackend::new() } -struct ModuleCodegen { - /// The name of the module. When the crate may be saved between - /// compilations, incremental compilation requires that name be - /// unique amongst **all** crates. Therefore, it should contain - /// something unique to this crate (e.g., a module path) as well - /// as the crate name and disambiguator. - /// We currently generate these names via CodegenUnit::build_cgu_name(). - name: String, - module_llvm: ModuleLlvm, - kind: ModuleKind, -} - -struct CachedModuleCodegen { - name: String, - source: WorkProduct, -} - -#[derive(Copy, Clone, Debug, PartialEq)] -enum ModuleKind { - Regular, - Metadata, - Allocator, -} - -impl ModuleCodegen { - fn into_compiled_module(self, - emit_obj: bool, - emit_bc: bool, - emit_bc_compressed: bool, - outputs: &OutputFilenames) -> CompiledModule { - let object = if emit_obj { - Some(outputs.temp_path(OutputType::Object, Some(&self.name))) - } else { - None - }; - let bytecode = if emit_bc { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) - } else { - None - }; - let bytecode_compressed = if emit_bc_compressed { - Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) - .with_extension(RLIB_BYTECODE_EXTENSION)) - } else { - None - }; - - CompiledModule { - name: self.name.clone(), - kind: self.kind, - object, - bytecode, - bytecode_compressed, - } - } -} - -#[derive(Debug)] -struct CompiledModule { - name: String, - kind: ModuleKind, - object: Option, - bytecode: Option, - bytecode_compressed: Option, -} - -struct ModuleLlvm { +pub struct ModuleLlvm { llcx: &'static mut llvm::Context, llmod_raw: *const llvm::Module, tm: &'static mut llvm::TargetMachine, @@ -369,35 +418,4 @@ impl Drop for ModuleLlvm { } } -struct CodegenResults { - crate_name: Symbol, - modules: Vec, - allocator_module: Option, - metadata_module: CompiledModule, - crate_hash: Svh, - metadata: rustc::middle::cstore::EncodedMetadata, - windows_subsystem: Option, - linker_info: back::linker::LinkerInfo, - crate_info: CrateInfo, -} - -/// Misc info we load from metadata to persist beyond the tcx -struct CrateInfo { - panic_runtime: Option, - compiler_builtins: Option, - profiler_runtime: Option, - sanitizer_runtime: Option, - is_no_builtins: FxHashSet, - native_libraries: FxHashMap>>, - crate_name: FxHashMap, - used_libraries: Lrc>, - link_args: Lrc>, - used_crate_source: FxHashMap>, - used_crates_static: Vec<(CrateNum, LibSource)>, - used_crates_dynamic: Vec<(CrateNum, LibSource)>, - wasm_imports: FxHashMap, - lang_item_to_crate: FxHashMap, - missing_lang_items: FxHashMap>, -} - __build_diagnostic_array! 
{ librustc_codegen_llvm, DIAGNOSTICS } diff --git a/src/librustc_codegen_llvm/llvm/ffi.rs b/src/librustc_codegen_llvm/llvm/ffi.rs index 0b98fa4eaf551..fe1197c2fe8cc 100644 --- a/src/librustc_codegen_llvm/llvm/ffi.rs +++ b/src/librustc_codegen_llvm/llvm/ffi.rs @@ -19,6 +19,8 @@ use libc::{c_uint, c_int, size_t, c_char}; use libc::{c_ulonglong, c_void}; use std::marker::PhantomData; +use syntax; +use rustc_codegen_ssa; use super::RustString; @@ -141,6 +143,23 @@ pub enum IntPredicate { IntSLE = 41, } +impl IntPredicate { + pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self { + match intpre { + rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ, + rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE, + rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT, + rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE, + rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT, + rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE, + rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT, + rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE, + rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT, + rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE, + } + } +} + /// LLVMRealPredicate #[derive(Copy, Clone)] #[repr(C)] @@ -163,6 +182,31 @@ pub enum RealPredicate { RealPredicateTrue = 15, } +impl RealPredicate { + pub fn from_generic(realpred: rustc_codegen_ssa::common::RealPredicate) -> Self { + match realpred { + rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => + RealPredicate::RealPredicateFalse, + rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ, + rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT, + rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE, + rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT, + rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE, + rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE, + rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD, + rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO, + rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ, + rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT, + rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE, + rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT, + rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE, + rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE, + rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => + RealPredicate::RealPredicateTrue + } + } +} + /// LLVMTypeKind #[derive(Copy, Clone, PartialEq, Debug)] #[repr(C)] @@ -186,6 +230,30 @@ pub enum TypeKind { Token = 16, } +impl TypeKind { + pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind { + match self { + TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void, + TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half, + TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float, + TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double, + TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80, + TypeKind::FP128 => 
rustc_codegen_ssa::common::TypeKind::FP128, + TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128, + TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label, + TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer, + TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function, + TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct, + TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array, + TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer, + TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector, + TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata, + TypeKind::X86_MMX => rustc_codegen_ssa::common::TypeKind::X86_MMX, + TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token, + } + } +} + /// LLVMAtomicRmwBinOp #[derive(Copy, Clone)] #[repr(C)] @@ -203,6 +271,24 @@ pub enum AtomicRmwBinOp { AtomicUMin = 10, } +impl AtomicRmwBinOp { + pub fn from_generic(op : rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self { + match op { + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax, + rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin + } + } +} + /// LLVMAtomicOrdering #[derive(Copy, Clone)] #[repr(C)] @@ -218,6 +304,23 @@ pub enum AtomicOrdering { SequentiallyConsistent = 7, } +impl AtomicOrdering { + pub fn from_generic(ao : rustc_codegen_ssa::common::AtomicOrdering) -> Self { + match ao { + rustc_codegen_ssa::common::AtomicOrdering::NotAtomic => AtomicOrdering::NotAtomic, + rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered, + rustc_codegen_ssa::common::AtomicOrdering::Monotonic => AtomicOrdering::Monotonic, + rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire, + rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release, + rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => + AtomicOrdering::AcquireRelease, + rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => + AtomicOrdering::SequentiallyConsistent + } + } +} + + /// LLVMRustSynchronizationScope #[derive(Copy, Clone)] #[repr(C)] @@ -229,6 +332,19 @@ pub enum SynchronizationScope { CrossThread, } +impl SynchronizationScope { + pub fn from_generic(sc : rustc_codegen_ssa::common::SynchronizationScope) -> Self { + match sc { + rustc_codegen_ssa::common::SynchronizationScope::Other => + SynchronizationScope::Other, + rustc_codegen_ssa::common::SynchronizationScope::SingleThread => + SynchronizationScope::SingleThread, + rustc_codegen_ssa::common::SynchronizationScope::CrossThread => + SynchronizationScope::CrossThread, + } + } +} + /// LLVMRustFileType #[derive(Copy, Clone)] #[repr(C)] @@ -269,6 +385,15 @@ pub enum 
AsmDialect { Intel, } +impl AsmDialect { + pub fn from_generic(asm : syntax::ast::AsmDialect) -> Self { + match asm { + syntax::ast::AsmDialect::Att => AsmDialect::Att, + syntax::ast::AsmDialect::Intel => AsmDialect::Intel + } + } +} + /// LLVMRustCodeGenOptLevel #[derive(Copy, Clone, PartialEq)] #[repr(C)] diff --git a/src/librustc_codegen_llvm/llvm/mod.rs b/src/librustc_codegen_llvm/llvm/mod.rs index 4343c8c184ecf..a4b75340203af 100644 --- a/src/librustc_codegen_llvm/llvm/mod.rs +++ b/src/librustc_codegen_llvm/llvm/mod.rs @@ -28,6 +28,7 @@ use std::ffi::CStr; use std::cell::RefCell; use libc::{self, c_uint, c_char, size_t}; use rustc_data_structures::small_c_str::SmallCStr; +use rustc_codegen_ssa; pub mod archive_ro; pub mod diagnostic; @@ -271,6 +272,12 @@ impl OperandBundleDef<'a> { }; OperandBundleDef { raw: def } } + + pub fn from_generic( + bundle : &rustc_codegen_ssa::common::OperandBundleDef<'a, &'a Value> + ) -> Self { + Self::new(bundle.name, &[bundle.val]) + } } impl Drop for OperandBundleDef<'a> { diff --git a/src/librustc_codegen_llvm/mir/constant.rs b/src/librustc_codegen_llvm/mir/constant.rs deleted file mode 100644 index 9f0f744389089..0000000000000 --- a/src/librustc_codegen_llvm/mir/constant.rs +++ /dev/null @@ -1,225 +0,0 @@ -// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT -// file at the top-level directory of this distribution and at -// http://rust-lang.org/COPYRIGHT. -// -// Licensed under the Apache License, Version 2.0 or the MIT license -// , at your -// option. This file may not be copied, modified, or distributed -// except according to those terms. - -use llvm; -use rustc::mir::interpret::{ConstEvalErr, read_target_uint}; -use rustc_mir::const_eval::const_field; -use rustc::hir::def_id::DefId; -use rustc::mir; -use rustc_data_structures::indexed_vec::Idx; -use rustc_data_structures::sync::Lrc; -use rustc::mir::interpret::{GlobalId, Pointer, Scalar, Allocation, ConstValue, AllocType}; -use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size}; -use builder::Builder; -use common::{CodegenCx}; -use common::{C_bytes, C_struct, C_uint_big, C_undef, C_usize}; -use consts; -use type_of::LayoutLlvmExt; -use type_::Type; -use syntax::ast::Mutability; -use syntax::source_map::Span; -use value::Value; - -use super::super::callee; -use super::FunctionCx; - -pub fn scalar_to_llvm( - cx: &CodegenCx<'ll, '_>, - cv: Scalar, - layout: &layout::Scalar, - llty: &'ll Type, -) -> &'ll Value { - let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() }; - match cv { - Scalar::Bits { size: 0, .. 
} => { - assert_eq!(0, layout.value.size(cx).bytes()); - C_undef(Type::ix(cx, 0)) - }, - Scalar::Bits { bits, size } => { - assert_eq!(size as u64, layout.value.size(cx).bytes()); - let llval = C_uint_big(Type::ix(cx, bitsize), bits); - if layout.value == layout::Pointer { - unsafe { llvm::LLVMConstIntToPtr(llval, llty) } - } else { - consts::bitcast(llval, llty) - } - }, - Scalar::Ptr(ptr) => { - let alloc_type = cx.tcx.alloc_map.lock().get(ptr.alloc_id); - let base_addr = match alloc_type { - Some(AllocType::Memory(alloc)) => { - let init = const_alloc_to_llvm(cx, alloc); - if alloc.mutability == Mutability::Mutable { - consts::addr_of_mut(cx, init, alloc.align, None) - } else { - consts::addr_of(cx, init, alloc.align, None) - } - } - Some(AllocType::Function(fn_instance)) => { - callee::get_fn(cx, fn_instance) - } - Some(AllocType::Static(def_id)) => { - assert!(cx.tcx.is_static(def_id).is_some()); - consts::get_static(cx, def_id) - } - None => bug!("missing allocation {:?}", ptr.alloc_id), - }; - let llval = unsafe { llvm::LLVMConstInBoundsGEP( - consts::bitcast(base_addr, Type::i8p(cx)), - &C_usize(cx, ptr.offset.bytes()), - 1, - ) }; - if layout.value != layout::Pointer { - unsafe { llvm::LLVMConstPtrToInt(llval, llty) } - } else { - consts::bitcast(llval, llty) - } - } - } -} - -pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value { - let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1); - let layout = cx.data_layout(); - let pointer_size = layout.pointer_size.bytes() as usize; - - let mut next_offset = 0; - for &(offset, ((), alloc_id)) in alloc.relocations.iter() { - let offset = offset.bytes(); - assert_eq!(offset as usize as u64, offset); - let offset = offset as usize; - if offset > next_offset { - llvals.push(C_bytes(cx, &alloc.bytes[next_offset..offset])); - } - let ptr_offset = read_target_uint( - layout.endian, - &alloc.bytes[offset..(offset + pointer_size)], - ).expect("const_alloc_to_llvm: could not read relocation pointer") as u64; - llvals.push(scalar_to_llvm( - cx, - Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(), - &layout::Scalar { - value: layout::Primitive::Pointer, - valid_range: 0..=!0 - }, - Type::i8p(cx) - )); - next_offset = offset + pointer_size; - } - if alloc.bytes.len() >= next_offset { - llvals.push(C_bytes(cx, &alloc.bytes[next_offset ..])); - } - - C_struct(cx, &llvals, true) -} - -pub fn codegen_static_initializer( - cx: &CodegenCx<'ll, 'tcx>, - def_id: DefId, -) -> Result<(&'ll Value, &'tcx Allocation), Lrc>> { - let instance = ty::Instance::mono(cx.tcx, def_id); - let cid = GlobalId { - instance, - promoted: None, - }; - let param_env = ty::ParamEnv::reveal_all(); - let static_ = cx.tcx.const_eval(param_env.and(cid))?; - - let alloc = match static_.val { - ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc, - _ => bug!("static const eval returned {:#?}", static_), - }; - Ok((const_alloc_to_llvm(cx, alloc), alloc)) -} - -impl FunctionCx<'a, 'll, 'tcx> { - fn fully_evaluate( - &mut self, - bx: &Builder<'a, 'll, 'tcx>, - constant: &'tcx ty::Const<'tcx>, - ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { - match constant.val { - ConstValue::Unevaluated(def_id, ref substs) => { - let tcx = bx.tcx(); - let param_env = ty::ParamEnv::reveal_all(); - let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); - let cid = GlobalId { - instance, - promoted: None, - }; - tcx.const_eval(param_env.and(cid)) - }, - _ => Ok(constant), - } - } - - pub fn eval_mir_constant( - &mut self, - 
bx: &Builder<'a, 'll, 'tcx>, - constant: &mir::Constant<'tcx>, - ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { - let c = self.monomorphize(&constant.literal); - self.fully_evaluate(bx, c) - } - - /// process constant containing SIMD shuffle indices - pub fn simd_shuffle_indices( - &mut self, - bx: &Builder<'a, 'll, 'tcx>, - span: Span, - ty: Ty<'tcx>, - constant: Result<&'tcx ty::Const<'tcx>, Lrc>>, - ) -> (&'ll Value, Ty<'tcx>) { - constant - .and_then(|c| { - let field_ty = c.ty.builtin_index().unwrap(); - let fields = match c.ty.sty { - ty::Array(_, n) => n.unwrap_usize(bx.tcx()), - ref other => bug!("invalid simd shuffle type: {}", other), - }; - let values: Result, Lrc<_>> = (0..fields).map(|field| { - let field = const_field( - bx.tcx(), - ty::ParamEnv::reveal_all(), - self.instance, - None, - mir::Field::new(field as usize), - c, - )?; - if let Some(prim) = field.val.try_to_scalar() { - let layout = bx.cx.layout_of(field_ty); - let scalar = match layout.abi { - layout::Abi::Scalar(ref x) => x, - _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) - }; - Ok(scalar_to_llvm( - bx.cx, prim, scalar, - layout.immediate_llvm_type(bx.cx), - )) - } else { - bug!("simd shuffle field {:?}", field) - } - }).collect(); - let llval = C_struct(bx.cx, &values?, false); - Ok((llval, c.ty)) - }) - .unwrap_or_else(|e| { - e.report_as_error( - bx.tcx().at(span), - "could not evaluate shuffle_indices at compile time", - ); - // We've errored, so we don't have to produce working code. - let ty = self.monomorphize(&ty); - let llty = bx.cx.layout_of(ty).llvm_type(bx.cx); - (C_undef(llty), ty) - }) - } -} diff --git a/src/librustc_codegen_llvm/mono_item.rs b/src/librustc_codegen_llvm/mono_item.rs index dab9b147cc070..be5fa7f73ee29 100644 --- a/src/librustc_codegen_llvm/mono_item.rs +++ b/src/librustc_codegen_llvm/mono_item.rs @@ -8,181 +8,85 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -//! Walks the crate looking for items/impl-items/trait-items that have -//! either a `rustc_symbol_name` or `rustc_item_path` attribute and -//! generates an error giving, respectively, the symbol name or -//! item-path. This is used for unit testing the code that generates -//! paths etc in all kinds of annoying scenarios. 
- -use asm; use attributes; use base; -use consts; use context::CodegenCx; -use declare; use llvm; use monomorphize::Instance; use type_of::LayoutLlvmExt; -use rustc::hir; -use rustc::hir::def::Def; use rustc::hir::def_id::{DefId, LOCAL_CRATE}; use rustc::mir::mono::{Linkage, Visibility}; use rustc::ty::TypeFoldable; use rustc::ty::layout::LayoutOf; -use std::fmt; +use value::Value; +use rustc_codegen_ssa::interfaces::*; pub use rustc::mir::mono::MonoItem; pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt; -pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> { - fn define(&self, cx: &CodegenCx<'a, 'tcx>) { - debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); +impl<'ll, 'tcx: 'll> PreDefineMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { + fn predefine_static(&self, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + let instance = Instance::mono(self.tcx, def_id); + let ty = instance.ty(self.tcx); + let llty = self.layout_of(ty).llvm_type(self); + + let g = self.define_global(symbol_name, llty).unwrap_or_else(|| { + self.sess().span_fatal(self.tcx.def_span(def_id), + &format!("symbol `{}` is already defined", symbol_name)) + }); - match *self.as_mono_item() { - MonoItem::Static(def_id) => { - let tcx = cx.tcx; - let is_mutable = match tcx.describe_def(def_id) { - Some(Def::Static(_, is_mutable)) => is_mutable, - Some(other) => { - bug!("Expected Def::Static, found {:?}", other) - } - None => { - bug!("Expected Def::Static for {:?}, found nothing", def_id) - } - }; - consts::codegen_static(&cx, def_id, is_mutable); - } - MonoItem::GlobalAsm(node_id) => { - let item = cx.tcx.hir.expect_item(node_id); - if let hir::ItemKind::GlobalAsm(ref ga) = item.node { - asm::codegen_global_asm(cx, ga); - } else { - span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type") - } - } - MonoItem::Fn(instance) => { - base::codegen_instance(&cx, instance); - } + unsafe { + llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); + llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); } - debug!("END IMPLEMENTING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); + self.instances.borrow_mut().insert(instance, g); } - fn predefine(&self, - cx: &CodegenCx<'a, 'tcx>, - linkage: Linkage, - visibility: Visibility) { - debug!("BEGIN PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); - - let symbol_name = self.symbol_name(cx.tcx).as_str(); - - debug!("symbol {}", &symbol_name); - - match *self.as_mono_item() { - MonoItem::Static(def_id) => { - predefine_static(cx, def_id, linkage, visibility, &symbol_name); - } - MonoItem::Fn(instance) => { - predefine_fn(cx, instance, linkage, visibility, &symbol_name); - } - MonoItem::GlobalAsm(..) 
=> {} + fn predefine_fn(&self, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str) { + assert!(!instance.substs.needs_infer() && + !instance.substs.has_param_types()); + + let mono_ty = instance.ty(self.tcx); + let attrs = self.tcx.codegen_fn_attrs(instance.def_id()); + let lldecl = self.declare_fn(symbol_name, mono_ty); + unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; + base::set_link_section(lldecl, &attrs); + if linkage == Linkage::LinkOnceODR || + linkage == Linkage::WeakODR { + llvm::SetUniqueComdat(self.llmod, lldecl); } - debug!("END PREDEFINING '{} ({})' in cgu {}", - self.to_string(cx.tcx), - self.to_raw_string(), - cx.codegen_unit.name()); - } - - fn to_raw_string(&self) -> String { - match *self.as_mono_item() { - MonoItem::Fn(instance) => { - format!("Fn({:?}, {})", - instance.def, - instance.substs.as_ptr() as usize) - } - MonoItem::Static(id) => { - format!("Static({:?})", id) + // If we're compiling the compiler-builtins crate, e.g. the equivalent of + // compiler-rt, then we want to implicitly compile everything with hidden + // visibility as we're going to link this object all over the place but + // don't want the symbols to get exported. + if linkage != Linkage::Internal && linkage != Linkage::Private && + self.tcx.is_compiler_builtins(LOCAL_CRATE) { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); } - MonoItem::GlobalAsm(id) => { - format!("GlobalAsm({:?})", id) + } else { + unsafe { + llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); } } - } -} -impl<'a, 'tcx> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {} - -fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - def_id: DefId, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { - let instance = Instance::mono(cx.tcx, def_id); - let ty = instance.ty(cx.tcx); - let llty = cx.layout_of(ty).llvm_type(cx); - - let g = declare::define_global(cx, symbol_name, llty).unwrap_or_else(|| { - cx.sess().span_fatal(cx.tcx.def_span(def_id), - &format!("symbol `{}` is already defined", symbol_name)) - }); - - unsafe { - llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage)); - llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility)); - } - - cx.instances.borrow_mut().insert(instance, g); -} - -fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, - instance: Instance<'tcx>, - linkage: Linkage, - visibility: Visibility, - symbol_name: &str) { - assert!(!instance.substs.needs_infer() && - !instance.substs.has_param_types()); - - let mono_ty = instance.ty(cx.tcx); - let attrs = cx.tcx.codegen_fn_attrs(instance.def_id()); - let lldecl = declare::declare_fn(cx, symbol_name, mono_ty); - unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) }; - base::set_link_section(lldecl, &attrs); - if linkage == Linkage::LinkOnceODR || - linkage == Linkage::WeakODR { - llvm::SetUniqueComdat(cx.llmod, lldecl); - } - - // If we're compiling the compiler-builtins crate, e.g. the equivalent of - // compiler-rt, then we want to implicitly compile everything with hidden - // visibility as we're going to link this object all over the place but - // don't want the symbols to get exported. 
- if linkage != Linkage::Internal && linkage != Linkage::Private && - cx.tcx.is_compiler_builtins(LOCAL_CRATE) { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden); + debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); + if instance.def.is_inline(self.tcx) { + attributes::inline(self, lldecl, attributes::InlineAttr::Hint); } - } else { - unsafe { - llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility)); - } - } + attributes::from_fn_attrs(self, lldecl, Some(instance.def.def_id())); - debug!("predefine_fn: mono_ty = {:?} instance = {:?}", mono_ty, instance); - if instance.def.is_inline(cx.tcx) { - attributes::inline(cx, lldecl, attributes::InlineAttr::Hint); + self.instances.borrow_mut().insert(instance, lldecl); } - attributes::from_fn_attrs(cx, lldecl, Some(instance.def.def_id())); - - cx.instances.borrow_mut().insert(instance, lldecl); } diff --git a/src/librustc_codegen_llvm/type_.rs b/src/librustc_codegen_llvm/type_.rs index 51a233d791625..4178b398e13ee 100644 --- a/src/librustc_codegen_llvm/type_.rs +++ b/src/librustc_codegen_llvm/type_.rs @@ -13,15 +13,23 @@ pub use llvm::Type; use llvm; -use llvm::{Bool, False, True, TypeKind}; - +use llvm::{Bool, False, True}; use context::CodegenCx; +use value::Value; +use rustc_codegen_ssa::interfaces::*; -use syntax::ast; -use rustc::ty::layout::{self, Align, Size}; +use rustc::util::nodemap::FxHashMap; +use rustc::ty::{Ty, TyCtxt}; +use rustc::ty::layout::TyLayout; +use rustc_target::abi::call::{CastTarget, FnType, Reg}; use rustc_data_structures::small_c_str::SmallCStr; +use common; +use rustc_codegen_ssa::common::TypeKind; +use type_of::LayoutLlvmExt; +use abi::{LlvmType, FnTypeExt}; use std::fmt; +use std::cell::RefCell; use libc::c_uint; @@ -39,231 +47,183 @@ impl fmt::Debug for Type { } } -impl Type { - pub fn void(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - unsafe { - llvm::LLVMVoidTypeInContext(cx.llcx) - } - } +impl<'ll> CodegenObject for &'ll Type {} - pub fn metadata(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - unsafe { - llvm::LLVMRustMetadataTypeInContext(cx.llcx) - } - } +impl BaseTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { - pub fn i1(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_void(&self) -> &'ll Type { unsafe { - llvm::LLVMInt1TypeInContext(cx.llcx) + llvm::LLVMVoidTypeInContext(&self.llcx) } } - pub fn i8(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_metadata(&self) -> &'ll Type { unsafe { - llvm::LLVMInt8TypeInContext(cx.llcx) + llvm::LLVMRustMetadataTypeInContext(self.llcx) } } - pub fn i8_llcx(llcx: &llvm::Context) -> &Type { + fn type_i1(&self) -> &'ll Type { unsafe { - llvm::LLVMInt8TypeInContext(llcx) + llvm::LLVMInt1TypeInContext(&self.llcx) } } - pub fn i16(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i8(&self) -> &'ll Type { unsafe { - llvm::LLVMInt16TypeInContext(cx.llcx) + llvm::LLVMInt8TypeInContext(&self.llcx) } } - pub fn i32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - unsafe { - llvm::LLVMInt32TypeInContext(cx.llcx) - } - } - pub fn i64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i16(&self) -> &'ll Type { unsafe { - llvm::LLVMInt64TypeInContext(cx.llcx) - } - } - pub fn i128(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - unsafe { - llvm::LLVMIntTypeInContext(cx.llcx, 128) + llvm::LLVMInt16TypeInContext(&self.llcx) } } - // Creates an integer type with the given number of bits, e.g. 
i24 - pub fn ix(cx: &CodegenCx<'ll, '_>, num_bits: u64) -> &'ll Type { + fn type_i32(&self) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(cx.llcx, num_bits as c_uint) + llvm::LLVMInt32TypeInContext(&self.llcx) } } - // Creates an integer type with the given number of bits, e.g. i24 - pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type { + fn type_i64(&self) -> &'ll Type { unsafe { - llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) + llvm::LLVMInt64TypeInContext(&self.llcx) } } - pub fn f32(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_i128(&self) -> &'ll Type { unsafe { - llvm::LLVMFloatTypeInContext(cx.llcx) + llvm::LLVMIntTypeInContext(&self.llcx, 128) } } - pub fn f64(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + fn type_ix(&self, num_bits: u64) -> &'ll Type { unsafe { - llvm::LLVMDoubleTypeInContext(cx.llcx) + llvm::LLVMIntTypeInContext(&self.llcx, num_bits as c_uint) } } - pub fn bool(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - Type::i8(cx) - } - - pub fn char(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - Type::i32(cx) - } - - pub fn i8p(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - Type::i8(cx).ptr_to() - } - - pub fn i8p_llcx(llcx: &llvm::Context) -> &Type { - Type::i8_llcx(llcx).ptr_to() + fn type_isize(&self) -> &'ll Type { + self.isize_ty } - pub fn isize(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - cx.isize_ty - } - - pub fn c_int(cx: &CodegenCx<'ll, '_>) -> &'ll Type { - match &cx.tcx.sess.target.target.target_c_int_width[..] { - "16" => Type::i16(cx), - "32" => Type::i32(cx), - "64" => Type::i64(cx), - width => bug!("Unsupported target_c_int_width: {}", width), - } - } - - pub fn int_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::IntTy) -> &'ll Type { - match t { - ast::IntTy::Isize => cx.isize_ty, - ast::IntTy::I8 => Type::i8(cx), - ast::IntTy::I16 => Type::i16(cx), - ast::IntTy::I32 => Type::i32(cx), - ast::IntTy::I64 => Type::i64(cx), - ast::IntTy::I128 => Type::i128(cx), + fn type_f32(&self) -> &'ll Type { + unsafe { + llvm::LLVMFloatTypeInContext(&self.llcx) } } - pub fn uint_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::UintTy) -> &'ll Type { - match t { - ast::UintTy::Usize => cx.isize_ty, - ast::UintTy::U8 => Type::i8(cx), - ast::UintTy::U16 => Type::i16(cx), - ast::UintTy::U32 => Type::i32(cx), - ast::UintTy::U64 => Type::i64(cx), - ast::UintTy::U128 => Type::i128(cx), + fn type_f64(&self) -> &'ll Type { + unsafe { + llvm::LLVMDoubleTypeInContext(&self.llcx) } } - pub fn float_from_ty(cx: &CodegenCx<'ll, '_>, t: ast::FloatTy) -> &'ll Type { - match t { - ast::FloatTy::F32 => Type::f32(cx), - ast::FloatTy::F64 => Type::f64(cx), + fn type_x86_mmx(&self) -> &'ll Type { + unsafe { + llvm::LLVMX86MMXTypeInContext(&self.llcx) } } - pub fn func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + fn type_func( + &self, + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) } } - pub fn variadic_func(args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { + fn type_variadic_func( + &self, + args: &[&'ll Type], + ret: &'ll Type + ) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) } } - pub fn struct_(cx: &CodegenCx<'ll, '_>, els: &[&'ll Type], packed: bool) -> &'ll Type { + fn type_struct( + &self, + els: &[&'ll Type], + packed: bool + ) -> &'ll Type { unsafe { - llvm::LLVMStructTypeInContext(cx.llcx, els.as_ptr(), + llvm::LLVMStructTypeInContext(&self.llcx, els.as_ptr(), els.len() as c_uint, packed as Bool) } } - pub fn named_struct(cx: &CodegenCx<'ll, '_>, 
name: &str) -> &'ll Type { + fn type_named_struct(&self, name: &str) -> &'ll Type { let name = SmallCStr::new(name); unsafe { - llvm::LLVMStructCreateNamed(cx.llcx, name.as_ptr()) + llvm::LLVMStructCreateNamed(&self.llcx, name.as_ptr()) } } - pub fn array(ty: &Type, len: u64) -> &Type { + fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type { unsafe { llvm::LLVMRustArrayType(ty, len) } } - pub fn vector(ty: &Type, len: u64) -> &Type { + fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { unsafe { llvm::LLVMVectorType(ty, len as c_uint) } } - pub fn kind(&self) -> TypeKind { + fn type_kind(&self, ty: &'ll Type) -> TypeKind { unsafe { - llvm::LLVMRustGetTypeKind(self) + llvm::LLVMRustGetTypeKind(ty).to_generic() } } - pub fn set_struct_body(&'ll self, els: &[&'ll Type], packed: bool) { + fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) { unsafe { - llvm::LLVMStructSetBody(self, els.as_ptr(), + llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) } } - pub fn ptr_to(&self) -> &Type { - unsafe { - llvm::LLVMPointerType(self, 0) - } + fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type { + ty.ptr_to() } - pub fn element_type(&self) -> &Type { + fn element_type(&self, ty: &'ll Type) -> &'ll Type { unsafe { - llvm::LLVMGetElementType(self) + llvm::LLVMGetElementType(ty) } } - /// Return the number of elements in `self` if it is a LLVM vector type. - pub fn vector_length(&self) -> usize { + fn vector_length(&self, ty: &'ll Type) -> usize { unsafe { - llvm::LLVMGetVectorSize(self) as usize + llvm::LLVMGetVectorSize(ty) as usize } } - pub fn func_params(&self) -> Vec<&Type> { + fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> { unsafe { - let n_args = llvm::LLVMCountParamTypes(self) as usize; + let n_args = llvm::LLVMCountParamTypes(ty) as usize; let mut args = Vec::with_capacity(n_args); - llvm::LLVMGetParamTypes(self, args.as_mut_ptr()); + llvm::LLVMGetParamTypes(ty, args.as_mut_ptr()); args.set_len(n_args); args } } - pub fn float_width(&self) -> usize { - match self.kind() { + fn float_width(&self, ty : &'ll Type) -> usize { + match &self.type_kind(ty) { TypeKind::Float => 32, TypeKind::Double => 64, TypeKind::X86_FP80 => 80, @@ -272,45 +232,89 @@ impl Type { } } - /// Retrieve the bit width of the integer type `self`. - pub fn int_width(&self) -> u64 { + fn int_width(&self, ty: &'ll Type) -> u64 { unsafe { - llvm::LLVMGetIntTypeWidth(self) as u64 + llvm::LLVMGetIntTypeWidth(ty) as u64 } } - pub fn from_integer(cx: &CodegenCx<'ll, '_>, i: layout::Integer) -> &'ll Type { - use rustc::ty::layout::Integer::*; - match i { - I8 => Type::i8(cx), - I16 => Type::i16(cx), - I32 => Type::i32(cx), - I64 => Type::i64(cx), - I128 => Type::i128(cx), + fn val_ty(&self, v: &'ll Value) -> &'ll Type { + common::val_ty(v) + } + + fn scalar_lltypes(&self) -> &RefCell, Self::Type>> { + &self.scalar_lltypes + } + + fn tcx(&self) -> &TyCtxt<'ll, 'tcx, 'tcx> { + &self.tcx + } +} + +impl Type { + pub fn i8_llcx(llcx: &llvm::Context) -> &Type { + unsafe { + llvm::LLVMInt8TypeInContext(llcx) } } - /// Return a LLVM type that has at most the required alignment, - /// as a conservative approximation for unknown pointee types. - pub fn pointee_for_abi_align(cx: &CodegenCx<'ll, '_>, align: Align) -> &'ll Type { - // FIXME(eddyb) We could find a better approximation if ity.align < align. - let ity = layout::Integer::approximate_abi_align(cx, align); - Type::from_integer(cx, ity) + // Creates an integer type with the given number of bits, e.g. 
i24 + pub fn ix_llcx( + llcx: &llvm::Context, + num_bits: u64 + ) -> &Type { + unsafe { + llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) + } } - /// Return a LLVM type that has at most the required alignment, - /// and exactly the required size, as a best-effort padding array. - pub fn padding_filler(cx: &CodegenCx<'ll, '_>, size: Size, align: Align) -> &'ll Type { - let unit = layout::Integer::approximate_abi_align(cx, align); - let size = size.bytes(); - let unit_size = unit.size().bytes(); - assert_eq!(size % unit_size, 0); - Type::array(Type::from_integer(cx, unit), size / unit_size) + pub fn i8p_llcx(llcx: &'ll llvm::Context) -> &'ll Type { + Type::i8_llcx(llcx).ptr_to() } - pub fn x86_mmx(cx: &CodegenCx<'ll, '_>) -> &'ll Type { + pub fn ptr_to(&self) -> &Type { unsafe { - llvm::LLVMX86MMXTypeInContext(cx.llcx) + llvm::LLVMPointerType(&self, 0) } } } + +impl<'a, 'll: 'a, 'tcx: 'll> DerivedTypeMethods<'a, 'll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> + {} + +impl LayoutTypeMethods<'ll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> { + fn backend_type(&self, ty: &TyLayout<'tcx>) -> &'ll Type { + ty.llvm_type(&self) + } + fn immediate_backend_type(&self, ty: &TyLayout<'tcx>) -> &'ll Type { + ty.immediate_llvm_type(&self) + } + fn is_backend_immediate(&self, ty: &TyLayout<'tcx>) -> bool { + ty.is_llvm_immediate() + } + fn is_backend_scalar_pair(&self, ty: &TyLayout<'tcx>) -> bool { + ty.is_llvm_scalar_pair() + } + fn backend_field_index(&self, ty: &TyLayout<'tcx>, index: usize) -> u64 { + ty.llvm_field_index(index) + } + fn scalar_pair_element_backend_type<'a>( + &self, + ty: &TyLayout<'tcx>, + index: usize, + immediate: bool + ) -> &'ll Type { + ty.scalar_pair_element_llvm_type(&self, index, immediate) + } + fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type { + ty.llvm_type(&self) + } + fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type { + ty.llvm_type(&self) + } + fn reg_backend_type(&self, ty: &Reg) -> &'ll Type { + ty.llvm_type(&self) + } +} + +impl<'a, 'll: 'a, 'tcx: 'll> TypeMethods<'a, 'll, 'tcx> for CodegenCx<'ll, 'tcx, &'ll Value> {} diff --git a/src/librustc_codegen_llvm/type_of.rs b/src/librustc_codegen_llvm/type_of.rs index 03ded64e64235..cdefd543ea67a 100644 --- a/src/librustc_codegen_llvm/type_of.rs +++ b/src/librustc_codegen_llvm/type_of.rs @@ -16,10 +16,12 @@ use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout}; use rustc_target::abi::FloatTy; use rustc_mir::monomorphize::item::DefPathBasedNames; use type_::Type; +use value::Value; +use rustc_codegen_ssa::interfaces::*; use std::fmt::Write; -fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, layout: TyLayout<'tcx>, defer: &mut Option<(&'a Type, TyLayout<'tcx>)>) -> &'a Type { @@ -37,14 +39,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, (cx.sess().target.target.arch == "x86" || cx.sess().target.target.arch == "x86_64"); if use_x86_mmx { - return Type::x86_mmx(cx) + return cx.type_x86_mmx() } else { let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO); - return Type::vector(element, count); + return cx.type_vector(element, count); } } layout::Abi::ScalarPair(..) 
=> { - return Type::struct_(cx, &[ + return cx.type_struct( &[ layout.scalar_pair_element_llvm_type(cx, 0, false), layout.scalar_pair_element_llvm_type(cx, 1, false), ], false); @@ -79,30 +81,30 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, match layout.fields { layout::FieldPlacement::Union(_) => { - let fill = Type::padding_filler(cx, layout.size, layout.align); + let fill = cx.type_padding_filler( layout.size, layout.align); let packed = false; match name { None => { - Type::struct_(cx, &[fill], packed) + cx.type_struct( &[fill], packed) } Some(ref name) => { - let llty = Type::named_struct(cx, name); - llty.set_struct_body(&[fill], packed); + let llty = cx.type_named_struct( name); + cx.set_struct_body(llty, &[fill], packed); llty } } } layout::FieldPlacement::Array { count, .. } => { - Type::array(layout.field(cx, 0).llvm_type(cx), count) + cx.type_array(layout.field(cx, 0).llvm_type(cx), count) } layout::FieldPlacement::Arbitrary { .. } => { match name { None => { let (llfields, packed) = struct_llfields(cx, layout); - Type::struct_(cx, &llfields, packed) + cx.type_struct( &llfields, packed) } Some(ref name) => { - let llty = Type::named_struct(cx, name); + let llty = cx.type_named_struct( name); *defer = Some((llty, layout)); llty } @@ -111,7 +113,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, } } -fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, +fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx, &'a Value>, layout: TyLayout<'tcx>) -> (Vec<&'a Type>, bool) { debug!("struct_llfields: {:#?}", layout); @@ -136,7 +138,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, let padding = target_offset - offset; let padding_align = prev_effective_align.min(effective_field_align); assert_eq!(offset.abi_align(padding_align) + padding, target_offset); - result.push(Type::padding_filler(cx, padding, padding_align)); + result.push(cx.type_padding_filler( padding, padding_align)); debug!(" padding before: {:?}", padding); result.push(field.llvm_type(cx)); @@ -153,7 +155,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, assert_eq!(offset.abi_align(padding_align) + padding, layout.size); debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}", padding, offset, layout.size); - result.push(Type::padding_filler(cx, padding, padding_align)); + result.push(cx.type_padding_filler(padding, padding_align)); assert_eq!(result.len(), 1 + field_count * 2); } else { debug!("struct_llfields: offset: {:?} stride: {:?}", @@ -163,7 +165,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, (result, packed) } -impl<'a, 'tcx> CodegenCx<'a, 'tcx> { +impl<'a, 'tcx> CodegenCx<'a, 'tcx, &'a Value> { pub fn align_of(&self, ty: Ty<'tcx>) -> Align { self.layout_of(ty).align } @@ -202,14 +204,14 @@ pub struct PointeeInfo { pub trait LayoutLlvmExt<'tcx> { fn is_llvm_immediate(&self) -> bool; fn is_llvm_scalar_pair<'a>(&self) -> bool; - fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; - fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type; - fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, + fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type; + fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type; + fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, scalar: &layout::Scalar, offset: Size) -> &'a Type; - fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>, + fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 
'tcx, &'a Value>, index: usize, immediate: bool) -> &'a Type; fn llvm_field_index(&self, index: usize) -> u64; - fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) + fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, offset: Size) -> Option; } @@ -245,7 +247,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { /// with the inner-most trailing unsized field using the "minimal unit" /// of that field's type - this is useful for taking the address of /// that field and ensuring the struct has the right alignment. - fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { + fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { // Use a different cache for scalars because pointers to DSTs // can be either fat or thin (data pointers of fat pointers). @@ -255,17 +257,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { let llty = match self.ty.sty { ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => { - cx.layout_of(ty).llvm_type(cx).ptr_to() + cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx)) } ty::Adt(def, _) if def.is_box() => { - cx.layout_of(self.ty.boxed_ty()).llvm_type(cx).ptr_to() + cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx)) } ty::FnPtr(sig) => { let sig = cx.tcx.normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - FnType::new(cx, sig, &[]).llvm_type(cx).ptr_to() + cx.type_ptr_to(FnType::new(cx, sig, &[]).llvm_type(cx)) } _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO) }; @@ -307,40 +309,40 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { if let Some((llty, layout)) = defer { let (llfields, packed) = struct_llfields(cx, layout); - llty.set_struct_body(&llfields, packed) + cx.set_struct_body(llty, &llfields, packed) } llty } - fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type { + fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>) -> &'a Type { if let layout::Abi::Scalar(ref scalar) = self.abi { if scalar.is_bool() { - return Type::i1(cx); + return cx.type_i1(); } } self.llvm_type(cx) } - fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, + fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, scalar: &layout::Scalar, offset: Size) -> &'a Type { match scalar.value { - layout::Int(i, _) => Type::from_integer(cx, i), - layout::Float(FloatTy::F32) => Type::f32(cx), - layout::Float(FloatTy::F64) => Type::f64(cx), + layout::Int(i, _) => cx.type_from_integer( i), + layout::Float(FloatTy::F32) => cx.type_f32(), + layout::Float(FloatTy::F64) => cx.type_f64(), layout::Pointer => { // If we know the alignment, pick something better than i8. let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) { - Type::pointee_for_abi_align(cx, pointee.align) + cx.type_pointee_for_abi_align( pointee.align) } else { - Type::i8(cx) + cx.type_i8() }; - pointee.ptr_to() + cx.type_ptr_to(pointee) } } } - fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>, + fn scalar_pair_element_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, index: usize, immediate: bool) -> &'a Type { // HACK(eddyb) special-case fat pointers until LLVM removes // pointee types, to avoid bitcasting every `OperandRef::deref`. @@ -369,7 +371,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { // when immediate. We need to load/store `bool` as `i8` to avoid // crippling LLVM optimizations or triggering other LLVM bugs with `i1`. 
if immediate && scalar.is_bool() { - return Type::i1(cx); + return cx.type_i1(); } let offset = if index == 0 { @@ -403,7 +405,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> { } } - fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) + fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx, &'a Value>, offset: Size) -> Option { if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) { return pointee; diff --git a/src/librustc_codegen_llvm/value.rs b/src/librustc_codegen_llvm/value.rs index 4bf5b09baa629..6b48a6269488c 100644 --- a/src/librustc_codegen_llvm/value.rs +++ b/src/librustc_codegen_llvm/value.rs @@ -12,6 +12,7 @@ pub use llvm::Value; use llvm; +use rustc_codegen_ssa::interfaces::CodegenObject; use std::fmt; use std::hash::{Hash, Hasher}; @@ -21,6 +22,8 @@ impl PartialEq for Value { } } +impl<'ll> CodegenObject for &'ll Value {} + impl Eq for Value {} impl Hash for Value { diff --git a/src/librustc_codegen_ssa/Cargo.toml b/src/librustc_codegen_ssa/Cargo.toml new file mode 100644 index 0000000000000..6e2e0cb32f23a --- /dev/null +++ b/src/librustc_codegen_ssa/Cargo.toml @@ -0,0 +1,33 @@ +[package] +authors = ["The Rust Project Developers"] +name = "rustc_codegen_ssa" +version = "0.0.0" + +[lib] +name = "rustc_codegen_ssa" +path = "lib.rs" +crate-type = ["dylib"] +test = false + +[dependencies] +bitflags = "1.0" +log = "0.4" +num_cpus = "1.0" +rustc-demangle = "0.1.4" +memmap = "0.6" +jobserver = "0.1" +cc = "1.0.1" + +syntax = { path = "../libsyntax" } +syntax_pos = { path = "../libsyntax_pos" } +rustc = { path = "../librustc" } +rustc_target = { path = "../librustc_target" } +rustc_data_structures = { path = "../librustc_data_structures" } +rustc_apfloat = { path = "../librustc_apfloat" } +rustc_allocator = { path = "../librustc_allocator" } +rustc_fs_util = { path = "../librustc_fs_util" } +rustc_errors = { path = "../librustc_errors" } +rustc_mir = { path = "../librustc_mir" } +rustc_codegen_utils = { path = "../librustc_codegen_utils" } +rustc_incremental = { path = "../librustc_incremental" } +serialize = { path = "../libserialize" } diff --git a/src/librustc_codegen_ssa/README.md b/src/librustc_codegen_ssa/README.md new file mode 100644 index 0000000000000..32e9c11f34814 --- /dev/null +++ b/src/librustc_codegen_ssa/README.md @@ -0,0 +1,121 @@ +# Refactoring of `rustc_codegen_llvm` +by Denis Merigoux, October 23rd 2018 + +## State of the code before the refactoring + +All the code related to the compilation of MIR into LLVM IR was contained inside the `rustc_codegen_llvm` crate. Here is the breakdown of the most important elements: +* the `back` folder (7,800 LOC) implements the mechanisms for creating the different object files and archive through LLVM, but also the communication mechanisms for parallel code generation; +* the `debuginfo` (3,200 LOC) folder contains all code that passes debug information down to LLVM; +* the `llvm` (2,200 LOC) folder defines the FFI necessary to communicate with LLVM using the C++ API; +* the `mir` (4,300 LOC) folder implements the actual lowering from MIR to LLVM IR; +* the `base.rs` (1,300 LOC) file contains some helper functions but also the high-level code that launches the code generation and distributes the work. 
+* the `builder.rs` (1,200 LOC) file contains all the functions generating individual LLVM IR instructions inside a basic block;
+* the `common.rs` (450 LOC) file contains various helper functions and all the functions generating LLVM static values;
+* the `type_.rs` (300 LOC) file defines most of the type translations to LLVM IR.
+
+The goal of this refactoring is to separate inside this crate the code that is specific to LLVM from the code that can be reused for other rustc backends. For instance, the `mir` folder is almost entirely backend-agnostic, but it relies heavily on other parts of the crate. The separation of the code must not affect the logic of the code nor its performance.
+
+For these reasons, the separation process involves two transformations that have to be done at the same time for the resulting code to compile:
+
+1. replace all the LLVM-specific types by generics inside function signatures and structure definitions;
+2. encapsulate all functions calling the LLVM FFI inside a set of traits that will define the interface between backend-agnostic code and the backend.
+
+While the LLVM-specific code will be left in `rustc_codegen_llvm`, all the new interfaces and backend-agnostic code will be moved into `rustc_codegen_ssa` (name suggestion by @eddyb).
+
+## Generic types and structures
+
+@irinagpopa started to parametrize the types of `rustc_codegen_llvm` by a generic `Value` type, implemented in LLVM by a reference `&'ll Value`. This work has been extended to all structures inside the `mir` folder and elsewhere, as well as to LLVM's `BasicBlock` and `Type` types.
+
+The two most important structures for the LLVM codegen are `CodegenCx` and `Builder`. They are parametrized by multiple lifetime parameters and the type for `Value`.
+
+```rust
+struct CodegenCx<'ll, 'tcx: 'll, V : 'll> {
+    /* ... */
+}
+
+struct Builder<'a, 'll: 'a, 'tcx: 'll, V: 'll> {
+    cx: &'a CodegenCx<'ll, 'tcx, V>,
+    /* ... */
+}
+```
+
+`CodegenCx` is used to compile one codegen unit that can contain multiple functions, whereas `Builder` is created to compile one basic block.
+
+The code in `rustc_codegen_llvm` has to deal with multiple explicit lifetime parameters that correspond to the following:
+* `'tcx` is the longest lifetime; it corresponds to the original `TyCtxt` containing the program's information;
+* `'a` is a short-lived reference to a `CodegenCx` or another object inside a struct;
+* `'ll` is the lifetime of references to LLVM objects such as `Value` or `Type`.
+
+Although there are already many lifetime parameters in the code, making it generic uncovered situations where the borrow checker passed only due to the special nature of the LLVM objects being manipulated (they are extern pointers). For instance, an additional lifetime parameter had to be added to `LocalAnalyzer` in `analyze.rs`, leading to the definition:
+
+```rust
+struct LocalAnalyzer<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll> {
+    /* ... */
+}
+```
+
+However, the two most important structures, `CodegenCx` and `Builder`, are not defined in the backend-agnostic code. Indeed, their content is highly specific to the backend, and it makes more sense to leave their definition to the backend implementor than to expose them to the backend-agnostic code only through a narrow generic field for the backend's context.
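+
+As a summary of this section, here is a small, self-contained sketch of the parametrization pattern described above; the types and fields below are simplified stand-ins invented for the example, not the actual rustc structures (which carry more lifetimes, fields and interior mutability):
+
+```rust
+// A codegen context generic over the backend's value type `V`,
+// mirroring the way `CodegenCx` is parametrized by `&'ll Value` for LLVM.
+struct GenericCx<V> {
+    consts: Vec<V>,
+}
+
+// A builder that borrows the context, like `Builder` borrows `CodegenCx`.
+struct GenericBuilder<'a, V: 'a> {
+    cx: &'a mut GenericCx<V>,
+}
+
+impl<'a, V: Copy> GenericBuilder<'a, V> {
+    // Backend-agnostic code only manipulates opaque `V` values.
+    fn emit_const(&mut self, v: V) -> V {
+        self.cx.consts.push(v);
+        v
+    }
+}
+
+fn main() {
+    // A toy "backend" where values are plain integers instead of LLVM refs.
+    let mut cx: GenericCx<u64> = GenericCx { consts: Vec::new() };
+    let mut bx = GenericBuilder { cx: &mut cx };
+    let v = bx.emit_const(42);
+    println!("emitted {} ({} consts so far)", v, bx.cx.consts.len());
+}
+```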
+
+## Traits and interface
+
+Because they have to be defined by the backend, `CodegenCx` and `Builder` will be the structures implementing all the traits defining the backend's interface. These traits are defined in the folder `rustc_codegen_ssa/interfaces` and all the backend-agnostic code is parametrized by them. For instance, let us explain how a function in `base.rs` is parametrized:
+
+```rust
+pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>(
+    cx: &'a Bx::CodegenCx,
+    instance: Instance<'tcx>
+) where &'a Bx::CodegenCx :
+    LayoutOf<Ty=Ty<'tcx>, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> {
+    /* ... */
+}
+```
+
+In this signature, we have the three lifetime parameters explained earlier and the main type parameter `Bx`, which satisfies the `BuilderMethods` trait corresponding to the interface satisfied by the `Builder` struct. `BuilderMethods` defines an associated type `Bx::CodegenCx` that itself satisfies the `CodegenMethods` traits implemented by the `CodegenCx` struct. This signature contains a `where` clause because the `LayoutOf` trait is satisfied by a reference (`&'a Bx::CodegenCx`) to the associated type, which cannot be expressed in the trait definition of `BuilderMethods`. Finally, we have to specify that the associated types inside `LayoutOf` are the actual Rust types, using the `Ty = Ty<'tcx>` syntax.
+
+On the trait side, here is an example with part of the definition of `BuilderMethods` in `interfaces/builder.rs`:
+
+```rust
+pub trait BuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> +
+    DebugInfoBuilderMethods<'a, 'll, 'tcx> + ArgTypeMethods<'a, 'll, 'tcx> +
+    AbiBuilderMethods<'a, 'll, 'tcx> + IntrinsicCallMethods<'a, 'll, 'tcx> +
+    AsmBuilderMethods<'a, 'll, 'tcx>
+    where &'a Self::CodegenCx :
+        LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx>
+{
+    fn new_block<'b>(
+        cx: &'a Self::CodegenCx,
+        llfn: <Self::CodegenCx as Backend<'ll>>::Value,
+        name: &'b str
+    ) -> Self;
+    /* ... */
+    fn cond_br(
+        &mut self,
+        cond: <Self::CodegenCx as Backend<'ll>>::Value,
+        then_llbb: <Self::CodegenCx as Backend<'ll>>::BasicBlock,
+        else_llbb: <Self::CodegenCx as Backend<'ll>>::BasicBlock,
+    );
+    /* ... */
+}
+```
+
+Finally, a master structure implementing the `ExtraBackendMethods` trait is used for high-level codegen-driving functions like `codegen_crate` in `base.rs`. For LLVM, it is the empty `LlvmCodegenBackend`. `ExtraBackendMethods` should be implemented by the same structure that implements the `CodegenBackend` trait defined in `rustc_codegen_utils/codegen_backend.rs`.
+
+During the traitification process, certain functions have been converted from methods of a local structure to methods of `CodegenCx` or `Builder`, and a corresponding `self` parameter has been added. Indeed, LLVM stores information internally that it can access when called through its API. This information does not show up in a Rust data structure carried around when these methods are called. However, when implementing a Rust backend for `rustc`, these methods will need information from `CodegenCx`, hence the additional parameter (unused in the LLVM implementation of the trait).
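+
+To illustrate this last point, here is a minimal, self-contained sketch of such a conversion; the trait, types and the commented-out "before" signature are invented for the example and are not the actual rustc interfaces:
+
+```rust
+use std::cell::Cell;
+
+// Before traitification (LLVM only), a free function could do the job,
+// because LLVM keeps the relevant state behind its context pointer:
+//
+//     fn type_i8(cx: &llvm::Context) -> &llvm::Type { ... }
+//
+// After traitification, the operation becomes a trait method taking `&self`,
+// so a pure-Rust backend can store whatever state it needs in its context.
+trait TypeMethods {
+    type Type: Copy;
+    fn type_i8(&self) -> Self::Type;
+}
+
+// A toy backend whose context really does use the `self` parameter.
+struct ToyCx {
+    interned_types: Cell<u32>,
+}
+
+impl TypeMethods for ToyCx {
+    type Type = u32;
+    fn type_i8(&self) -> Self::Type {
+        // Unlike the LLVM implementation, this backend needs its own state.
+        let id = self.interned_types.get();
+        self.interned_types.set(id + 1);
+        id
+    }
+}
+
+fn main() {
+    let cx = ToyCx { interned_types: Cell::new(0) };
+    println!("{} {}", cx.type_i8(), cx.type_i8());
+}
+```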
+
+## State of the code after the refactoring
+
+The traits offer an API which is very similar to the API of LLVM. This is not the best solution, since LLVM has a very special way of doing things: when adding another backend, the trait definitions might be changed in order to offer more flexibility.
+
+However, the current separation between backend-agnostic and LLVM-specific code has allowed the reuse of a significant part of the old `rustc_codegen_llvm`. Here is the new LOC breakdown between backend-agnostic (BA) and LLVM for the most important elements:
+
+* `back` folder: 3,800 (BA) vs 4,100 (LLVM);
+* `mir` folder: 4,400 (BA) vs 0 (LLVM);
+* `base.rs`: 1,100 (BA) vs 250 (LLVM);
+* `builder.rs`: 1,400 (BA) vs 0 (LLVM);
+* `common.rs`: 350 (BA) vs 350 (LLVM).
+
+The `debuginfo` folder has been left almost untouched by the splitting and is specific to LLVM. Only its high-level features have been traitified.
+
+The new `interfaces` folder contains 1,500 LOC consisting only of trait definitions. Overall, the old 27,000-LOC `rustc_codegen_llvm` has been split into the new 18,500-LOC `rustc_codegen_llvm` and the new 12,000-LOC `rustc_codegen_ssa`. We can say that this refactoring allowed the reuse of approximately 10,000 LOC that would otherwise have had to be duplicated among the multiple backends of `rustc`.
+
+The refactored version of `rustc`'s backend introduced no regressions in the test suite or in performance benchmarks, which is consistent with the nature of the refactoring: it relies only on compile-time parametricity (no trait objects).
diff --git a/src/librustc_codegen_ssa/back/archive.rs b/src/librustc_codegen_ssa/back/archive.rs
new file mode 100644
index 0000000000000..b5e1deb0d5df3
--- /dev/null
+++ b/src/librustc_codegen_ssa/back/archive.rs
@@ -0,0 +1,36 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use rustc::session::Session;
+
+use std::path::PathBuf;
+
+pub fn find_library(name: &str, search_paths: &[PathBuf], sess: &Session)
+                    -> PathBuf {
+    // On Windows, static libraries sometimes show up as libfoo.a and other
+    // times show up as foo.lib
+    let oslibname = format!("{}{}{}",
+                            sess.target.target.options.staticlib_prefix,
+                            name,
+                            sess.target.target.options.staticlib_suffix);
+    let unixlibname = format!("lib{}.a", name);
+
+    for path in search_paths {
+        debug!("looking for {} inside {:?}", name, path);
+        let test = path.join(&oslibname);
+        if test.exists() { return test }
+        if oslibname != unixlibname {
+            let test = path.join(&unixlibname);
+            if test.exists() { return test }
+        }
+    }
+    sess.fatal(&format!("could not find native static library `{}`, \
+                         perhaps an -L flag is missing?", name));
+}
diff --git a/src/librustc_codegen_llvm/back/command.rs b/src/librustc_codegen_ssa/back/command.rs
similarity index 100%
rename from src/librustc_codegen_llvm/back/command.rs
rename to src/librustc_codegen_ssa/back/command.rs
diff --git a/src/librustc_codegen_ssa/back/link.rs b/src/librustc_codegen_ssa/back/link.rs
new file mode 100644
index 0000000000000..4b9415fb0069a
--- /dev/null
+++ b/src/librustc_codegen_ssa/back/link.rs
@@ -0,0 +1,213 @@
+// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+/// For all the linkers we support, and information they might
+/// need out of the shared crate context before we get rid of it.
+ +use rustc::session::{Session, config}; +use rustc::session::search_paths::PathKind; +use rustc::middle::dependency_format::Linkage; +use rustc::middle::cstore::LibSource; +use rustc_target::spec::LinkerFlavor; +use rustc::hir::def_id::CrateNum; + +use super::command::Command; +use CrateInfo; + +use cc::windows_registry; +use std::fs; +use std::path::{Path, PathBuf}; +use std::env; + + +// The third parameter is for env vars, used on windows to set up the +// path for MSVC to find its DLLs, and gcc to find its bundled +// toolchain +pub fn get_linker(sess: &Session, linker: &Path, flavor: LinkerFlavor) -> (PathBuf, Command) { + let msvc_tool = windows_registry::find_tool(&sess.opts.target_triple.triple(), "link.exe"); + + // If our linker looks like a batch script on Windows then to execute this + // we'll need to spawn `cmd` explicitly. This is primarily done to handle + // emscripten where the linker is `emcc.bat` and needs to be spawned as + // `cmd /c emcc.bat ...`. + // + // This worked historically but is needed manually since #42436 (regression + // was tagged as #42791) and some more info can be found on #44443 for + // emscripten itself. + let mut cmd = match linker.to_str() { + Some(linker) if cfg!(windows) && linker.ends_with(".bat") => Command::bat_script(linker), + _ => match flavor { + LinkerFlavor::Lld(f) => Command::lld(linker, f), + LinkerFlavor::Msvc + if sess.opts.cg.linker.is_none() && sess.target.target.options.linker.is_none() => + { + Command::new(msvc_tool.as_ref().map(|t| t.path()).unwrap_or(linker)) + }, + _ => Command::new(linker), + } + }; + + // The compiler's sysroot often has some bundled tools, so add it to the + // PATH for the child. + let mut new_path = sess.host_filesearch(PathKind::All) + .get_tools_search_paths(); + let mut msvc_changed_path = false; + if sess.target.target.options.is_like_msvc { + if let Some(ref tool) = msvc_tool { + cmd.args(tool.args()); + for &(ref k, ref v) in tool.env() { + if k == "PATH" { + new_path.extend(env::split_paths(v)); + msvc_changed_path = true; + } else { + cmd.env(k, v); + } + } + } + } + + if !msvc_changed_path { + if let Some(path) = env::var_os("PATH") { + new_path.extend(env::split_paths(&path)); + } + } + cmd.env("PATH", env::join_paths(new_path).unwrap()); + + (linker.to_path_buf(), cmd) +} + +pub fn each_linked_rlib(sess: &Session, + info: &CrateInfo, + f: &mut dyn FnMut(CrateNum, &Path)) -> Result<(), String> { + let crates = info.used_crates_static.iter(); + let fmts = sess.dependency_formats.borrow(); + let fmts = fmts.get(&config::CrateType::Executable) + .or_else(|| fmts.get(&config::CrateType::Staticlib)) + .or_else(|| fmts.get(&config::CrateType::Cdylib)) + .or_else(|| fmts.get(&config::CrateType::ProcMacro)); + let fmts = match fmts { + Some(f) => f, + None => return Err("could not find formats for rlibs".to_string()) + }; + for &(cnum, ref path) in crates { + match fmts.get(cnum.as_usize() - 1) { + Some(&Linkage::NotLinked) | + Some(&Linkage::IncludedFromDylib) => continue, + Some(_) => {} + None => return Err("could not find formats for rlibs".to_string()) + } + let name = &info.crate_name[&cnum]; + let path = match *path { + LibSource::Some(ref p) => p, + LibSource::MetadataOnly => { + return Err(format!("could not find rlib for: `{}`, found rmeta (metadata) file", + name)) + } + LibSource::None => { + return Err(format!("could not find rlib for: `{}`", name)) + } + }; + f(cnum, &path); + } + Ok(()) +} + + +/// Returns a boolean indicating whether the specified crate should be ignored +/// 
during LTO. +/// +/// Crates ignored during LTO are not lumped together in the "massive object +/// file" that we create and are linked in their normal rlib states. See +/// comments below for what crates do not participate in LTO. +/// +/// It's unusual for a crate to not participate in LTO. Typically only +/// compiler-specific and unstable crates have a reason to not participate in +/// LTO. +pub fn ignored_for_lto(sess: &Session, info: &CrateInfo, cnum: CrateNum) -> bool { + // If our target enables builtin function lowering in LLVM then the + // crates providing these functions don't participate in LTO (e.g. + // no_builtins or compiler builtins crates). + !sess.target.target.options.no_builtins && + (info.is_no_builtins.contains(&cnum) || info.compiler_builtins == Some(cnum)) +} + +pub fn remove(sess: &Session, path: &Path) { + match fs::remove_file(path) { + Ok(..) => {} + Err(e) => { + sess.err(&format!("failed to remove {}: {}", + path.display(), + e)); + } + } +} + +pub fn linker_and_flavor(sess: &Session) -> (PathBuf, LinkerFlavor) { + fn infer_from( + sess: &Session, + linker: Option, + flavor: Option, + ) -> Option<(PathBuf, LinkerFlavor)> { + match (linker, flavor) { + (Some(linker), Some(flavor)) => Some((linker, flavor)), + // only the linker flavor is known; use the default linker for the selected flavor + (None, Some(flavor)) => Some((PathBuf::from(match flavor { + LinkerFlavor::Em => if cfg!(windows) { "emcc.bat" } else { "emcc" }, + LinkerFlavor::Gcc => "cc", + LinkerFlavor::Ld => "ld", + LinkerFlavor::Msvc => "link.exe", + LinkerFlavor::Lld(_) => "lld", + }), flavor)), + (Some(linker), None) => { + let stem = linker.file_stem().and_then(|stem| stem.to_str()).unwrap_or_else(|| { + sess.fatal("couldn't extract file stem from specified linker"); + }).to_owned(); + + let flavor = if stem == "emcc" { + LinkerFlavor::Em + } else if stem == "gcc" || stem.ends_with("-gcc") { + LinkerFlavor::Gcc + } else if stem == "ld" || stem == "ld.lld" || stem.ends_with("-ld") { + LinkerFlavor::Ld + } else if stem == "link" || stem == "lld-link" { + LinkerFlavor::Msvc + } else if stem == "lld" || stem == "rust-lld" { + LinkerFlavor::Lld(sess.target.target.options.lld_flavor) + } else { + // fall back to the value in the target spec + sess.target.target.linker_flavor + }; + + Some((linker, flavor)) + }, + (None, None) => None, + } + } + + // linker and linker flavor specified via command line have precedence over what the target + // specification specifies + if let Some(ret) = infer_from( + sess, + sess.opts.cg.linker.clone(), + sess.opts.debugging_opts.linker_flavor, + ) { + return ret; + } + + if let Some(ret) = infer_from( + sess, + sess.target.target.options.linker.clone().map(PathBuf::from), + Some(sess.target.target.linker_flavor), + ) { + return ret; + } + + bug!("Not enough information provided to determine how to invoke the linker"); +} diff --git a/src/librustc_codegen_llvm/back/linker.rs b/src/librustc_codegen_ssa/back/linker.rs similarity index 96% rename from src/librustc_codegen_llvm/back/linker.rs rename to src/librustc_codegen_ssa/back/linker.rs index e18c8b9dec463..34d0e5349600e 100644 --- a/src/librustc_codegen_llvm/back/linker.rs +++ b/src/librustc_codegen_ssa/back/linker.rs @@ -1,4 +1,4 @@ -// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // @@ -8,38 +8,41 @@ // option. 
This file may not be copied, modified, or distributed // except according to those terms. +/// For all the linkers we support, and information they might +/// need out of the shared crate context before we get rid of it. + +use super::symbol_export; +use super::command::Command; +use super::archive; +use interfaces::*; + +use rustc_target::spec::{LinkerFlavor, LldFlavor}; use rustc_data_structures::fx::FxHashMap; -use std::ffi::{OsStr, OsString}; +use rustc::session::config::{self, CrateType, OptLevel, DebugInfo, CrossLangLto}; +use rustc::session::Session; +use rustc::ty::TyCtxt; +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::middle::dependency_format::Linkage; + use std::fs::{self, File}; +use std::ffi::{OsString, OsStr}; +use std::path::{Path, PathBuf}; use std::io::prelude::*; use std::io::{self, BufWriter}; -use std::path::{Path, PathBuf}; - -use back::archive; -use back::command::Command; -use back::symbol_export; -use rustc::hir::def_id::{LOCAL_CRATE, CrateNum}; -use rustc::middle::dependency_format::Linkage; -use rustc::session::Session; -use rustc::session::config::{self, CrateType, OptLevel, DebugInfo, - CrossLangLto}; -use rustc::ty::TyCtxt; -use rustc_target::spec::{LinkerFlavor, LldFlavor}; use serialize::{json, Encoder}; -use llvm_util; -/// For all the linkers we support, and information they might -/// need out of the shared crate context before we get rid of it. -pub struct LinkerInfo { +pub struct LinkerInfo { exports: FxHashMap>, + backend: B } -impl LinkerInfo { - pub fn new(tcx: TyCtxt) -> LinkerInfo { +impl LinkerInfo { + pub fn new(tcx: TyCtxt, backend: B) -> LinkerInfo { LinkerInfo { exports: tcx.sess.crate_types.borrow().iter().map(|&c| { (c, exported_symbols(tcx, c)) }).collect(), + backend } } @@ -96,6 +99,7 @@ impl LinkerInfo { } } + /// Linker abstraction used by back::link to build up the command to invoke a /// linker. /// @@ -137,16 +141,249 @@ pub trait Linker { fn finalize(&mut self) -> Command; } -pub struct GccLinker<'a> { + +impl<'a, B : ExtraBackendMethods> Linker for MsvcLinker<'a, B> { + fn link_rlib(&mut self, lib: &Path) { self.cmd.arg(lib); } + fn add_object(&mut self, path: &Path) { self.cmd.arg(path); } + fn args(&mut self, args: &[String]) { self.cmd.args(args); } + + fn build_dylib(&mut self, out_filename: &Path) { + self.cmd.arg("/DLL"); + let mut arg: OsString = "/IMPLIB:".into(); + arg.push(out_filename.with_extension("dll.lib")); + self.cmd.arg(arg); + } + + fn build_static_executable(&mut self) { + // noop + } + + fn gc_sections(&mut self, _keep_metadata: bool) { + // MSVC's ICF (Identical COMDAT Folding) link optimization is + // slow for Rust and thus we disable it by default when not in + // optimization build. + if self.sess.opts.optimize != config::OptLevel::No { + self.cmd.arg("/OPT:REF,ICF"); + } else { + // It is necessary to specify NOICF here, because /OPT:REF + // implies ICF by default. + self.cmd.arg("/OPT:REF,NOICF"); + } + } + + fn link_dylib(&mut self, lib: &str) { + self.cmd.arg(&format!("{}.lib", lib)); + } + + fn link_rust_dylib(&mut self, lib: &str, path: &Path) { + // When producing a dll, the MSVC linker may not actually emit a + // `foo.lib` file if the dll doesn't actually export any symbols, so we + // check to see if the file is there and just omit linking to it if it's + // not present. 
+ let name = format!("{}.dll.lib", lib); + if fs::metadata(&path.join(&name)).is_ok() { + self.cmd.arg(name); + } + } + + fn link_staticlib(&mut self, lib: &str) { + self.cmd.arg(&format!("{}.lib", lib)); + } + + fn position_independent_executable(&mut self) { + // noop + } + + fn no_position_independent_executable(&mut self) { + // noop + } + + fn full_relro(&mut self) { + // noop + } + + fn partial_relro(&mut self) { + // noop + } + + fn no_relro(&mut self) { + // noop + } + + fn no_default_libraries(&mut self) { + // Currently we don't pass the /NODEFAULTLIB flag to the linker on MSVC + // as there's been trouble in the past of linking the C++ standard + // library required by LLVM. This likely needs to happen one day, but + // in general Windows is also a more controlled environment than + // Unix, so it's not necessarily as critical that this be implemented. + // + // Note that there are also some licensing worries about statically + // linking some libraries which require a specific agreement, so it may + // not ever be possible for us to pass this flag. + } + + fn include_path(&mut self, path: &Path) { + let mut arg = OsString::from("/LIBPATH:"); + arg.push(path); + self.cmd.arg(&arg); + } + + fn output_filename(&mut self, path: &Path) { + let mut arg = OsString::from("/OUT:"); + arg.push(path); + self.cmd.arg(&arg); + } + + fn framework_path(&mut self, _path: &Path) { + bug!("frameworks are not supported on windows") + } + fn link_framework(&mut self, _framework: &str) { + bug!("frameworks are not supported on windows") + } + + fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) { + // not supported? + self.link_staticlib(lib); + } + fn link_whole_rlib(&mut self, path: &Path) { + // not supported? + self.link_rlib(path); + } + fn optimize(&mut self) { + // Needs more investigation of `/OPT` arguments + } + + fn pgo_gen(&mut self) { + // Nothing needed here. + } + + fn debuginfo(&mut self) { + // This will cause the Microsoft linker to generate a PDB file + // from the CodeView line tables in the object files. + self.cmd.arg("/DEBUG"); + + // This will cause the Microsoft linker to embed .natvis info into the the PDB file + let sysroot = self.sess.sysroot(); + let natvis_dir_path = sysroot.join("lib\\rustlib\\etc"); + if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) { + // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes + // on, the /NATVIS:... flags. LLVM 6 (or earlier) should at worst ignore + // them, eventually mooting this workaround, per this landed patch: + // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100 + if let Some(ref linker_path) = self.sess.opts.cg.linker { + if let Some(linker_name) = Path::new(&linker_path).file_stem() { + if linker_name.to_str().unwrap().to_lowercase() == "lld-link" { + self.sess.warn("not embedding natvis: lld-link may not support the flag"); + return; + } + } + } + for entry in natvis_dir { + match entry { + Ok(entry) => { + let path = entry.path(); + if path.extension() == Some("natvis".as_ref()) { + let mut arg = OsString::from("/NATVIS:"); + arg.push(path); + self.cmd.arg(arg); + } + }, + Err(err) => { + self.sess.warn(&format!("error enumerating natvis directory: {}", err)); + }, + } + } + } + } + + // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to + // export symbols from a dynamic library. When building a dynamic library, + // however, we're going to want some symbols exported, so this function + // generates a DEF file which lists all the symbols. 
+ // + // The linker will read this `*.def` file and export all the symbols from + // the dynamic library. Note that this is not as simple as just exporting + // all the symbols in the current crate (as specified by `codegen.reachable`) + // but rather we also need to possibly export the symbols of upstream + // crates. Upstream rlibs may be linked statically to this dynamic library, + // in which case they may continue to transitively be used and hence need + // their symbols exported. + fn export_symbols(&mut self, + tmpdir: &Path, + crate_type: CrateType) { + let path = tmpdir.join("lib.def"); + let res = (|| -> io::Result<()> { + let mut f = BufWriter::new(File::create(&path)?); + + // Start off with the standard module name header and then go + // straight to exports. + writeln!(f, "LIBRARY")?; + writeln!(f, "EXPORTS")?; + for symbol in self.info.exports[&crate_type].iter() { + debug!(" _{}", symbol); + writeln!(f, " {}", symbol)?; + } + Ok(()) + })(); + if let Err(e) = res { + self.sess.fatal(&format!("failed to write lib.def file: {}", e)); + } + let mut arg = OsString::from("/DEF:"); + arg.push(path); + self.cmd.arg(&arg); + } + + fn subsystem(&mut self, subsystem: &str) { + // Note that previous passes of the compiler validated this subsystem, + // so we just blindly pass it to the linker. + self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem)); + + // Windows has two subsystems we're interested in right now, the console + // and windows subsystems. These both implicitly have different entry + // points (starting symbols). The console entry point starts with + // `mainCRTStartup` and the windows entry point starts with + // `WinMainCRTStartup`. These entry points, defined in system libraries, + // will then later probe for either `main` or `WinMain`, respectively to + // start the application. + // + // In Rust we just always generate a `main` function so we want control + // to always start there, so we force the entry point on the windows + // subsystem to be `mainCRTStartup` to get everything booted up + // correctly. + // + // For more information see RFC #1665 + if subsystem == "windows" { + self.cmd.arg("/ENTRY:mainCRTStartup"); + } + } + + fn finalize(&mut self) -> Command { + let mut cmd = Command::new(""); + ::std::mem::swap(&mut cmd, &mut self.cmd); + cmd + } + + // MSVC doesn't need group indicators + fn group_start(&mut self) {} + fn group_end(&mut self) {} + + fn cross_lang_lto(&mut self) { + // Do nothing + } +} + + + +pub struct GccLinker<'a, B : 'a + ExtraBackendMethods> { cmd: Command, sess: &'a Session, - info: &'a LinkerInfo, + info: &'a LinkerInfo, hinted_static: bool, // Keeps track of the current hinting mode. 
// Link as ld is_ld: bool, } -impl<'a> GccLinker<'a> { +impl<'a, B : ExtraBackendMethods> GccLinker<'a, B> { /// Argument that must be passed *directly* to the linker /// /// These arguments need to be prepended with '-Wl,' when a gcc-style linker is used @@ -204,7 +441,7 @@ impl<'a> GccLinker<'a> { }; self.linker_arg(&format!("-plugin-opt={}", opt_level)); - self.linker_arg(&format!("-plugin-opt=mcpu={}", llvm_util::target_cpu(self.sess))); + self.linker_arg(&format!("-plugin-opt=mcpu={}", self.info.backend.target_cpu(self.sess))); match self.sess.lto() { config::Lto::Thin | @@ -219,7 +456,7 @@ impl<'a> GccLinker<'a> { } } -impl<'a> Linker for GccLinker<'a> { +impl<'a, B : ExtraBackendMethods> Linker for GccLinker<'a, B> { fn link_dylib(&mut self, lib: &str) { self.hint_dynamic(); self.cmd.arg(format!("-l{}",lib)); } fn link_staticlib(&mut self, lib: &str) { self.hint_static(); self.cmd.arg(format!("-l{}",lib)); @@ -489,249 +726,19 @@ impl<'a> Linker for GccLinker<'a> { } } -pub struct MsvcLinker<'a> { +pub struct MsvcLinker<'a, B : 'a + ExtraBackendMethods> { cmd: Command, sess: &'a Session, - info: &'a LinkerInfo + info: &'a LinkerInfo } -impl<'a> Linker for MsvcLinker<'a> { - fn link_rlib(&mut self, lib: &Path) { self.cmd.arg(lib); } - fn add_object(&mut self, path: &Path) { self.cmd.arg(path); } - fn args(&mut self, args: &[String]) { self.cmd.args(args); } - - fn build_dylib(&mut self, out_filename: &Path) { - self.cmd.arg("/DLL"); - let mut arg: OsString = "/IMPLIB:".into(); - arg.push(out_filename.with_extension("dll.lib")); - self.cmd.arg(arg); - } - - fn build_static_executable(&mut self) { - // noop - } - - fn gc_sections(&mut self, _keep_metadata: bool) { - // MSVC's ICF (Identical COMDAT Folding) link optimization is - // slow for Rust and thus we disable it by default when not in - // optimization build. - if self.sess.opts.optimize != config::OptLevel::No { - self.cmd.arg("/OPT:REF,ICF"); - } else { - // It is necessary to specify NOICF here, because /OPT:REF - // implies ICF by default. - self.cmd.arg("/OPT:REF,NOICF"); - } - } - - fn link_dylib(&mut self, lib: &str) { - self.cmd.arg(&format!("{}.lib", lib)); - } - - fn link_rust_dylib(&mut self, lib: &str, path: &Path) { - // When producing a dll, the MSVC linker may not actually emit a - // `foo.lib` file if the dll doesn't actually export any symbols, so we - // check to see if the file is there and just omit linking to it if it's - // not present. - let name = format!("{}.dll.lib", lib); - if fs::metadata(&path.join(&name)).is_ok() { - self.cmd.arg(name); - } - } - - fn link_staticlib(&mut self, lib: &str) { - self.cmd.arg(&format!("{}.lib", lib)); - } - - fn position_independent_executable(&mut self) { - // noop - } - - fn no_position_independent_executable(&mut self) { - // noop - } - - fn full_relro(&mut self) { - // noop - } - - fn partial_relro(&mut self) { - // noop - } - - fn no_relro(&mut self) { - // noop - } - - fn no_default_libraries(&mut self) { - // Currently we don't pass the /NODEFAULTLIB flag to the linker on MSVC - // as there's been trouble in the past of linking the C++ standard - // library required by LLVM. This likely needs to happen one day, but - // in general Windows is also a more controlled environment than - // Unix, so it's not necessarily as critical that this be implemented. - // - // Note that there are also some licensing worries about statically - // linking some libraries which require a specific agreement, so it may - // not ever be possible for us to pass this flag. 
- } - - fn include_path(&mut self, path: &Path) { - let mut arg = OsString::from("/LIBPATH:"); - arg.push(path); - self.cmd.arg(&arg); - } - - fn output_filename(&mut self, path: &Path) { - let mut arg = OsString::from("/OUT:"); - arg.push(path); - self.cmd.arg(&arg); - } - - fn framework_path(&mut self, _path: &Path) { - bug!("frameworks are not supported on windows") - } - fn link_framework(&mut self, _framework: &str) { - bug!("frameworks are not supported on windows") - } - - fn link_whole_staticlib(&mut self, lib: &str, _search_path: &[PathBuf]) { - // not supported? - self.link_staticlib(lib); - } - fn link_whole_rlib(&mut self, path: &Path) { - // not supported? - self.link_rlib(path); - } - fn optimize(&mut self) { - // Needs more investigation of `/OPT` arguments - } - - fn pgo_gen(&mut self) { - // Nothing needed here. - } - - fn debuginfo(&mut self) { - // This will cause the Microsoft linker to generate a PDB file - // from the CodeView line tables in the object files. - self.cmd.arg("/DEBUG"); - - // This will cause the Microsoft linker to embed .natvis info into the the PDB file - let sysroot = self.sess.sysroot(); - let natvis_dir_path = sysroot.join("lib\\rustlib\\etc"); - if let Ok(natvis_dir) = fs::read_dir(&natvis_dir_path) { - // LLVM 5.0.0's lld-link frontend doesn't yet recognize, and chokes - // on, the /NATVIS:... flags. LLVM 6 (or earlier) should at worst ignore - // them, eventually mooting this workaround, per this landed patch: - // https://github.com/llvm-mirror/lld/commit/27b9c4285364d8d76bb43839daa100 - if let Some(ref linker_path) = self.sess.opts.cg.linker { - if let Some(linker_name) = Path::new(&linker_path).file_stem() { - if linker_name.to_str().unwrap().to_lowercase() == "lld-link" { - self.sess.warn("not embedding natvis: lld-link may not support the flag"); - return; - } - } - } - for entry in natvis_dir { - match entry { - Ok(entry) => { - let path = entry.path(); - if path.extension() == Some("natvis".as_ref()) { - let mut arg = OsString::from("/NATVIS:"); - arg.push(path); - self.cmd.arg(arg); - } - }, - Err(err) => { - self.sess.warn(&format!("error enumerating natvis directory: {}", err)); - }, - } - } - } - } - - // Currently the compiler doesn't use `dllexport` (an LLVM attribute) to - // export symbols from a dynamic library. When building a dynamic library, - // however, we're going to want some symbols exported, so this function - // generates a DEF file which lists all the symbols. - // - // The linker will read this `*.def` file and export all the symbols from - // the dynamic library. Note that this is not as simple as just exporting - // all the symbols in the current crate (as specified by `codegen.reachable`) - // but rather we also need to possibly export the symbols of upstream - // crates. Upstream rlibs may be linked statically to this dynamic library, - // in which case they may continue to transitively be used and hence need - // their symbols exported. - fn export_symbols(&mut self, - tmpdir: &Path, - crate_type: CrateType) { - let path = tmpdir.join("lib.def"); - let res = (|| -> io::Result<()> { - let mut f = BufWriter::new(File::create(&path)?); - - // Start off with the standard module name header and then go - // straight to exports. 
- writeln!(f, "LIBRARY")?; - writeln!(f, "EXPORTS")?; - for symbol in self.info.exports[&crate_type].iter() { - debug!(" _{}", symbol); - writeln!(f, " {}", symbol)?; - } - Ok(()) - })(); - if let Err(e) = res { - self.sess.fatal(&format!("failed to write lib.def file: {}", e)); - } - let mut arg = OsString::from("/DEF:"); - arg.push(path); - self.cmd.arg(&arg); - } - - fn subsystem(&mut self, subsystem: &str) { - // Note that previous passes of the compiler validated this subsystem, - // so we just blindly pass it to the linker. - self.cmd.arg(&format!("/SUBSYSTEM:{}", subsystem)); - - // Windows has two subsystems we're interested in right now, the console - // and windows subsystems. These both implicitly have different entry - // points (starting symbols). The console entry point starts with - // `mainCRTStartup` and the windows entry point starts with - // `WinMainCRTStartup`. These entry points, defined in system libraries, - // will then later probe for either `main` or `WinMain`, respectively to - // start the application. - // - // In Rust we just always generate a `main` function so we want control - // to always start there, so we force the entry point on the windows - // subsystem to be `mainCRTStartup` to get everything booted up - // correctly. - // - // For more information see RFC #1665 - if subsystem == "windows" { - self.cmd.arg("/ENTRY:mainCRTStartup"); - } - } - - fn finalize(&mut self) -> Command { - let mut cmd = Command::new(""); - ::std::mem::swap(&mut cmd, &mut self.cmd); - cmd - } - - // MSVC doesn't need group indicators - fn group_start(&mut self) {} - fn group_end(&mut self) {} - - fn cross_lang_lto(&mut self) { - // Do nothing - } -} - -pub struct EmLinker<'a> { +pub struct EmLinker<'a, B : 'a + ExtraBackendMethods> { cmd: Command, sess: &'a Session, - info: &'a LinkerInfo + info: &'a LinkerInfo } -impl<'a> Linker for EmLinker<'a> { +impl<'a, B : ExtraBackendMethods> Linker for EmLinker<'a, B> { fn include_path(&mut self, path: &Path) { self.cmd.arg("-L").arg(path); } @@ -895,42 +902,13 @@ impl<'a> Linker for EmLinker<'a> { } } -fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { - let mut symbols = Vec::new(); - - let export_threshold = symbol_export::crates_export_threshold(&[crate_type]); - for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { - if level.is_below_threshold(export_threshold) { - symbols.push(symbol.symbol_name(tcx).to_string()); - } - } - - let formats = tcx.sess.dependency_formats.borrow(); - let deps = formats[&crate_type].iter(); - - for (index, dep_format) in deps.enumerate() { - let cnum = CrateNum::new(index + 1); - // For each dependency that we are linking to statically ... - if *dep_format == Linkage::Static { - // ... we add its symbol list to our export list. 
- for &(symbol, level) in tcx.exported_symbols(cnum).iter() { - if level.is_below_threshold(export_threshold) { - symbols.push(symbol.symbol_name(tcx).to_string()); - } - } - } - } - - symbols -} - -pub struct WasmLd<'a> { +pub struct WasmLd<'a, B : 'a + ExtraBackendMethods> { cmd: Command, sess: &'a Session, - info: &'a LinkerInfo, + info: &'a LinkerInfo, } -impl<'a> Linker for WasmLd<'a> { +impl<'a, B : ExtraBackendMethods> Linker for WasmLd<'a, B> { fn link_dylib(&mut self, lib: &str) { self.cmd.arg("-l").arg(lib); } @@ -1093,3 +1071,33 @@ impl<'a> Linker for WasmLd<'a> { // Do nothing for now } } + + +fn exported_symbols(tcx: TyCtxt, crate_type: CrateType) -> Vec { + let mut symbols = Vec::new(); + + let export_threshold = symbol_export::crates_export_threshold(&[crate_type]); + for &(symbol, level) in tcx.exported_symbols(LOCAL_CRATE).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + + let formats = tcx.sess.dependency_formats.borrow(); + let deps = formats[&crate_type].iter(); + + for (index, dep_format) in deps.enumerate() { + let cnum = CrateNum::new(index + 1); + // For each dependency that we are linking to statically ... + if *dep_format == Linkage::Static { + // ... we add its symbol list to our export list. + for &(symbol, level) in tcx.exported_symbols(cnum).iter() { + if level.is_below_threshold(export_threshold) { + symbols.push(symbol.symbol_name(tcx).to_string()); + } + } + } + } + + symbols +} diff --git a/src/librustc_codegen_ssa/back/lto.rs b/src/librustc_codegen_ssa/back/lto.rs new file mode 100644 index 0000000000000..944b98e84499e --- /dev/null +++ b/src/librustc_codegen_ssa/back/lto.rs @@ -0,0 +1,124 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::write::CodegenContext; +use interfaces::*; +use ModuleCodegen; + +use rustc::util::time_graph::Timeline; +use rustc_errors::FatalError; + +use std::sync::Arc; +use std::ffi::CString; + +pub struct ThinModule { + pub shared: Arc>, + pub idx: usize, +} + +impl ThinModule { + pub fn name(&self) -> &str { + self.shared.module_names[self.idx].to_str().unwrap() + } + + pub fn cost(&self) -> u64 { + // Yes, that's correct, we're using the size of the bytecode as an + // indicator for how costly this codegen unit is. + self.data().len() as u64 + } + + pub fn data(&self) -> &[u8] { + let a = self.shared.thin_buffers.get(self.idx).map(|b| b.data()); + a.unwrap_or_else(|| { + let len = self.shared.thin_buffers.len(); + self.shared.serialized_modules[self.idx - len].data() + }) + } + +} + +pub struct ThinShared { + pub data: B::ThinData, + pub thin_buffers: Vec, + pub serialized_modules: Vec>, + pub module_names: Vec, +} + + +pub enum LtoModuleCodegen { + Fat { + module: Option>, + _serialized_bitcode: Vec>, + }, + + Thin(ThinModule), +} + +impl LtoModuleCodegen +{ + pub fn name(&self) -> &str { + match *self { + LtoModuleCodegen::Fat { .. } => "everything", + LtoModuleCodegen::Thin(ref m) => m.name(), + } + } + + /// Optimize this module within the given codegen context. + /// + /// This function is unsafe as it'll return a `ModuleCodegen` still + /// points to LLVM data structures owned by this `LtoModuleCodegen`. 
+ /// It's intended that the module returned is immediately code generated and + /// dropped, and then this LTO module is dropped. + pub unsafe fn optimize( + &mut self, + cgcx: &CodegenContext, + timeline: &mut Timeline + ) -> Result, FatalError> { + match *self { + LtoModuleCodegen::Fat { ref mut module, .. } => { + let module = module.take().unwrap(); + { + let config = cgcx.config(module.kind); + B::run_lto_pass_manager(cgcx, &module, config, false); + timeline.record("fat-done"); + } + Ok(module) + } + LtoModuleCodegen::Thin(ref mut thin) => B::optimize_thin(cgcx, thin, timeline), + } + } + + /// A "gauge" of how costly it is to optimize this module, used to sort + /// biggest modules first. + pub fn cost(&self) -> u64 { + match *self { + // Only one module with fat LTO, so the cost doesn't matter. + LtoModuleCodegen::Fat { .. } => 0, + LtoModuleCodegen::Thin(ref m) => m.cost(), + } + } +} + + +pub enum SerializedModule { + Local(M), + FromRlib(Vec), + FromUncompressedFile(memmap::Mmap), +} + +impl SerializedModule { + pub fn data(&self) -> &[u8] { + match *self { + SerializedModule::Local(ref m) => m.data(), + SerializedModule::FromRlib(ref m) => m, + SerializedModule::FromUncompressedFile(ref m) => m, + } + } +} diff --git a/src/librustc_codegen_ssa/back/mod.rs b/src/librustc_codegen_ssa/back/mod.rs new file mode 100644 index 0000000000000..3d7ead74d1c5d --- /dev/null +++ b/src/librustc_codegen_ssa/back/mod.rs @@ -0,0 +1,17 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +pub mod write; +pub mod linker; +pub mod lto; +pub mod link; +pub mod command; +pub mod symbol_export; +pub mod archive; diff --git a/src/librustc_codegen_llvm/back/symbol_export.rs b/src/librustc_codegen_ssa/back/symbol_export.rs similarity index 99% rename from src/librustc_codegen_llvm/back/symbol_export.rs rename to src/librustc_codegen_ssa/back/symbol_export.rs index 6b1b0b94fd9d7..a18844f636ee9 100644 --- a/src/librustc_codegen_llvm/back/symbol_export.rs +++ b/src/librustc_codegen_ssa/back/symbol_export.rs @@ -11,7 +11,7 @@ use rustc_data_structures::sync::Lrc; use std::sync::Arc; -use monomorphize::Instance; +use rustc_mir::monomorphize::Instance; use rustc::hir; use rustc::hir::Node; use rustc::hir::CodegenFnAttrFlags; diff --git a/src/librustc_codegen_ssa/back/write.rs b/src/librustc_codegen_ssa/back/write.rs new file mode 100644 index 0000000000000..af6f88dd00c6a --- /dev/null +++ b/src/librustc_codegen_ssa/back/write.rs @@ -0,0 +1,1826 @@ +// Copyright 2013-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use {ModuleCodegen, ModuleKind, CachedModuleCodegen, CompiledModule, CrateInfo, CodegenResults, + RLIB_BYTECODE_EXTENSION}; +use super::linker::LinkerInfo; +use super::lto::{self, SerializedModule}; +use super::link::{self, remove, get_linker}; +use super::command::Command; +use super::symbol_export::ExportedSymbols; + +use memmap; +use rustc_incremental::{copy_cgu_workproducts_to_incr_comp_cache_dir, + in_incr_comp_dir, in_incr_comp_dir_sess}; +use rustc::dep_graph::{WorkProduct, WorkProductId, WorkProductFileKind}; +use rustc::dep_graph::cgu_reuse_tracker::CguReuseTracker; +use rustc::middle::cstore::EncodedMetadata; +use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitizer, Lto}; +use rustc::session::Session; +use rustc::util::nodemap::FxHashMap; +use rustc::util::time_graph::{self, TimeGraph, Timeline}; +use interfaces::*; +use rustc::hir::def_id::{CrateNum, LOCAL_CRATE}; +use rustc::ty::TyCtxt; +use rustc::util::common::{time_depth, set_time_depth, print_time_passes_entry}; +use rustc_fs_util::link_or_copy; +use rustc_data_structures::svh::Svh; +use rustc_errors::{Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId}; +use rustc_errors::emitter::{Emitter}; +use syntax::attr; +use syntax::ext::hygiene::Mark; +use syntax_pos::MultiSpan; +use syntax_pos::symbol::Symbol; +use jobserver::{Client, Acquired}; + +use std::any::Any; +use std::fs; +use std::io; +use std::mem; +use std::path::{Path, PathBuf}; +use std::str; +use std::sync::Arc; +use std::sync::mpsc::{channel, Sender, Receiver}; +use std::time::Instant; +use std::thread; + +const PRE_THIN_LTO_BC_EXT: &str = "pre-thin-lto.bc"; + +/// Module-specific configuration for `optimize_and_codegen`. +pub struct ModuleConfig { + /// Names of additional optimization passes to run. + pub passes: Vec, + /// Some(level) to optimize at a certain level, or None to run + /// absolutely no optimizations (used for the metadata module). + pub opt_level: Option, + + /// Some(level) to optimize binary size, or None to not affect program size. + pub opt_size: Option, + + pub pgo_gen: Option, + pub pgo_use: String, + + // Flags indicating which outputs to produce. + pub emit_pre_thin_lto_bc: bool, + pub emit_no_opt_bc: bool, + pub emit_bc: bool, + pub emit_bc_compressed: bool, + pub emit_lto_bc: bool, + pub emit_ir: bool, + pub emit_asm: bool, + pub emit_obj: bool, + // Miscellaneous flags. These are mostly copied from command-line + // options. + pub verify_llvm_ir: bool, + pub no_prepopulate_passes: bool, + pub no_builtins: bool, + pub time_passes: bool, + pub vectorize_loop: bool, + pub vectorize_slp: bool, + pub merge_functions: bool, + pub inline_threshold: Option, + // Instead of creating an object file by doing LLVM codegen, just + // make the object file bitcode. Provides easy compatibility with + // emscripten's ecc compiler, when used as the linker. 
+ pub obj_is_bitcode: bool, + pub no_integrated_as: bool, + pub embed_bitcode: bool, + pub embed_bitcode_marker: bool, +} + +impl ModuleConfig { + fn new(passes: Vec) -> ModuleConfig { + ModuleConfig { + passes, + opt_level: None, + opt_size: None, + + pgo_gen: None, + pgo_use: String::new(), + + emit_no_opt_bc: false, + emit_pre_thin_lto_bc: false, + emit_bc: false, + emit_bc_compressed: false, + emit_lto_bc: false, + emit_ir: false, + emit_asm: false, + emit_obj: false, + obj_is_bitcode: false, + embed_bitcode: false, + embed_bitcode_marker: false, + no_integrated_as: false, + + verify_llvm_ir: false, + no_prepopulate_passes: false, + no_builtins: false, + time_passes: false, + vectorize_loop: false, + vectorize_slp: false, + merge_functions: false, + inline_threshold: None + } + } + + fn set_flags(&mut self, sess: &Session, no_builtins: bool) { + self.verify_llvm_ir = sess.verify_llvm_ir(); + self.no_prepopulate_passes = sess.opts.cg.no_prepopulate_passes; + self.no_builtins = no_builtins || sess.target.target.options.no_builtins; + self.time_passes = sess.time_passes(); + self.inline_threshold = sess.opts.cg.inline_threshold; + self.obj_is_bitcode = sess.target.target.options.obj_is_bitcode || + sess.opts.debugging_opts.cross_lang_lto.enabled(); + let embed_bitcode = sess.target.target.options.embed_bitcode || + sess.opts.debugging_opts.embed_bitcode; + if embed_bitcode { + match sess.opts.optimize { + config::OptLevel::No | + config::OptLevel::Less => { + self.embed_bitcode_marker = embed_bitcode; + } + _ => self.embed_bitcode = embed_bitcode, + } + } + + // Copy what clang does by turning on loop vectorization at O2 and + // slp vectorization at O3. Otherwise configure other optimization aspects + // of this pass manager builder. + // Turn off vectorization for emscripten, as it's not very well supported. + self.vectorize_loop = !sess.opts.cg.no_vectorize_loops && + (sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive) && + !sess.target.target.options.is_like_emscripten; + + self.vectorize_slp = !sess.opts.cg.no_vectorize_slp && + sess.opts.optimize == config::OptLevel::Aggressive && + !sess.target.target.options.is_like_emscripten; + + self.merge_functions = sess.opts.optimize == config::OptLevel::Default || + sess.opts.optimize == config::OptLevel::Aggressive; + } +} + +/// Assembler name and command used by codegen when no_integrated_as is enabled +pub struct AssemblerCommand { + name: PathBuf, + cmd: Command, +} + +/// Additional resources used by optimize_and_codegen (not module specific) +#[derive(Clone)] +pub struct CodegenContext { + // Resources needed when running LTO + pub backend: B, + pub time_passes: bool, + pub lto: Lto, + pub no_landing_pads: bool, + pub save_temps: bool, + pub fewer_names: bool, + pub exported_symbols: Option>, + pub opts: Arc, + pub crate_types: Vec, + pub each_linked_rlib_for_lto: Vec<(CrateNum, PathBuf)>, + pub output_filenames: Arc, + pub regular_module_config: Arc, + pub metadata_module_config: Arc, + pub allocator_module_config: Arc, + pub tm_factory: Arc Result + Send + Sync>, + pub msvc_imps_needed: bool, + pub target_pointer_width: String, + pub debuginfo: config::DebugInfo, + + // Number of cgus excluding the allocator/metadata modules + pub total_cgus: usize, + // Handler to use for diagnostics produced during codegen. + pub diag_emitter: SharedEmitter, + // LLVM passes added by plugins. + pub plugin_passes: Vec, + // LLVM optimizations for which we want to print remarks. 
+ pub remark: Passes, + // Worker thread number + pub worker: usize, + // The incremental compilation session directory, or None if we are not + // compiling incrementally + pub incr_comp_session_dir: Option, + // Used to update CGU re-use information during the thinlto phase. + pub cgu_reuse_tracker: CguReuseTracker, + // Channel back to the main control thread to send messages to + pub coordinator_send: Sender>, + // A reference to the TimeGraph so we can register timings. None means that + // measuring is disabled. + pub time_graph: Option, + // The assembler command if no_integrated_as option is enabled, None otherwise + pub assembler_cmd: Option> +} + +impl CodegenContext { + pub fn create_diag_handler(&self) -> Handler { + Handler::with_emitter(true, false, Box::new(self.diag_emitter.clone())) + } + + pub fn config(&self, kind: ModuleKind) -> &ModuleConfig { + match kind { + ModuleKind::Regular => &self.regular_module_config, + ModuleKind::Metadata => &self.metadata_module_config, + ModuleKind::Allocator => &self.allocator_module_config, + } + } + +} + +pub struct DiagnosticHandlers<'a, B : WriteBackendMethods> { + pub data: *mut (&'a CodegenContext, &'a Handler), + pub llcx: &'a B::Context, +} + +impl<'a, B : WriteBackendMethods> Drop for DiagnosticHandlers<'a, B> { + fn drop(&mut self) { + B::drop_diagnostic_handlers(self); + } +} + +fn generate_lto_work( + cgcx: &CodegenContext, + modules: Vec>, + import_only_modules: Vec<(SerializedModule, WorkProduct)> +) -> Vec<(WorkItem, u64)> { + let mut timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(CODEGEN_WORKER_TIMELINE, + CODEGEN_WORK_PACKAGE_KIND, + "generate lto") + }).unwrap_or(Timeline::noop()); + let (lto_modules, copy_jobs) = B::run_lto(cgcx, modules, import_only_modules, &mut timeline) + .unwrap_or_else(|e| e.raise()); + + let lto_modules = lto_modules.into_iter().map(|module| { + let cost = module.cost(); + (WorkItem::LTO(module), cost) + }); + + let copy_jobs = copy_jobs.into_iter().map(|wp| { + (WorkItem::CopyPostLtoArtifacts(CachedModuleCodegen { + name: wp.cgu_name.clone(), + source: wp, + }), 0) + }); + + lto_modules.chain(copy_jobs).collect() +} + +pub struct CompiledModules { + pub modules: Vec, + pub metadata_module: CompiledModule, + pub allocator_module: Option, +} + +fn need_crate_bitcode_for_rlib(sess: &Session) -> bool { + sess.crate_types.borrow().contains(&config::CrateType::Rlib) && + sess.opts.output_types.contains_key(&OutputType::Exe) +} + +fn need_pre_thin_lto_bitcode_for_incr_comp(sess: &Session) -> bool { + if sess.opts.incremental.is_none() { + return false + } + + match sess.lto() { + Lto::Fat | + Lto::No => false, + Lto::Thin | + Lto::ThinLocal => true, + } +} + +pub fn start_async_codegen( + backend: B, + tcx: TyCtxt, + time_graph: Option, + metadata: EncodedMetadata, + coordinator_receive: Receiver>, + total_cgus: usize +) -> OngoingCodegen { + let sess = tcx.sess; + let crate_name = tcx.crate_name(LOCAL_CRATE); + let crate_hash = tcx.crate_hash(LOCAL_CRATE); + let no_builtins = attr::contains_name(&tcx.hir.krate().attrs, "no_builtins"); + let subsystem = attr::first_attr_value_str_by_name(&tcx.hir.krate().attrs, + "windows_subsystem"); + let windows_subsystem = subsystem.map(|subsystem| { + if subsystem != "windows" && subsystem != "console" { + tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \ + `windows` and `console` are allowed", + subsystem)); + } + subsystem.to_string() + }); + + let linker_info = LinkerInfo::new(tcx, backend.clone()); + let crate_info = 
CrateInfo::new(tcx); + + // Figure out what we actually need to build. + let mut modules_config = ModuleConfig::new(sess.opts.cg.passes.clone()); + let mut metadata_config = ModuleConfig::new(vec![]); + let mut allocator_config = ModuleConfig::new(vec![]); + + if let Some(ref sanitizer) = sess.opts.debugging_opts.sanitizer { + match *sanitizer { + Sanitizer::Address => { + modules_config.passes.push("asan".to_owned()); + modules_config.passes.push("asan-module".to_owned()); + } + Sanitizer::Memory => { + modules_config.passes.push("msan".to_owned()) + } + Sanitizer::Thread => { + modules_config.passes.push("tsan".to_owned()) + } + _ => {} + } + } + + if sess.opts.debugging_opts.profile { + modules_config.passes.push("insert-gcov-profiling".to_owned()) + } + + modules_config.pgo_gen = sess.opts.debugging_opts.pgo_gen.clone(); + modules_config.pgo_use = sess.opts.debugging_opts.pgo_use.clone(); + + modules_config.opt_level = Some(sess.opts.optimize); + modules_config.opt_size = Some(sess.opts.optimize); + + // Save all versions of the bytecode if we're saving our temporaries. + if sess.opts.cg.save_temps { + modules_config.emit_no_opt_bc = true; + modules_config.emit_pre_thin_lto_bc = true; + modules_config.emit_bc = true; + modules_config.emit_lto_bc = true; + metadata_config.emit_bc = true; + allocator_config.emit_bc = true; + } + + // Emit compressed bitcode files for the crate if we're emitting an rlib. + // Whenever an rlib is created, the bitcode is inserted into the archive in + // order to allow LTO against it. + if need_crate_bitcode_for_rlib(sess) { + modules_config.emit_bc_compressed = true; + allocator_config.emit_bc_compressed = true; + } + + modules_config.emit_pre_thin_lto_bc = + need_pre_thin_lto_bitcode_for_incr_comp(sess); + + modules_config.no_integrated_as = tcx.sess.opts.cg.no_integrated_as || + tcx.sess.target.target.options.no_integrated_as; + + for output_type in sess.opts.output_types.keys() { + match *output_type { + OutputType::Bitcode => { modules_config.emit_bc = true; } + OutputType::LlvmAssembly => { modules_config.emit_ir = true; } + OutputType::Assembly => { + modules_config.emit_asm = true; + // If we're not using the LLVM assembler, this function + // could be invoked specially with output_type_assembly, so + // in this case we still want the metadata object file. + if !sess.opts.output_types.contains_key(&OutputType::Assembly) { + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + } + } + OutputType::Object => { modules_config.emit_obj = true; } + OutputType::Metadata => { metadata_config.emit_obj = true; } + OutputType::Exe => { + modules_config.emit_obj = true; + metadata_config.emit_obj = true; + allocator_config.emit_obj = true; + }, + OutputType::Mir => {} + OutputType::DepInfo => {} + } + } + + modules_config.set_flags(sess, no_builtins); + metadata_config.set_flags(sess, no_builtins); + allocator_config.set_flags(sess, no_builtins); + + // Exclude metadata and allocator modules from time_passes output, since + // they throw off the "LLVM passes" measurement. 
+ metadata_config.time_passes = false; + allocator_config.time_passes = false; + + let (shared_emitter, shared_emitter_main) = SharedEmitter::new(); + let (codegen_worker_send, codegen_worker_receive) = channel(); + + let coordinator_thread = start_executing_work(backend.clone(), + tcx, + &crate_info, + shared_emitter, + codegen_worker_send, + coordinator_receive, + total_cgus, + sess.jobserver.clone(), + time_graph.clone(), + Arc::new(modules_config), + Arc::new(metadata_config), + Arc::new(allocator_config)); + + OngoingCodegen { + backend, + crate_name, + crate_hash, + metadata, + windows_subsystem, + linker_info, + crate_info, + + time_graph, + coordinator_send: tcx.tx_to_llvm_workers.lock().clone(), + codegen_worker_receive, + shared_emitter_main, + future: coordinator_thread, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + } +} + +fn copy_all_cgu_workproducts_to_incr_comp_cache_dir( + sess: &Session, + compiled_modules: &CompiledModules, +) -> FxHashMap { + let mut work_products = FxHashMap::default(); + + if sess.opts.incremental.is_none() { + return work_products; + } + + for module in compiled_modules.modules.iter().filter(|m| m.kind == ModuleKind::Regular) { + let mut files = vec![]; + + if let Some(ref path) = module.object { + files.push((WorkProductFileKind::Object, path.clone())); + } + if let Some(ref path) = module.bytecode { + files.push((WorkProductFileKind::Bytecode, path.clone())); + } + if let Some(ref path) = module.bytecode_compressed { + files.push((WorkProductFileKind::BytecodeCompressed, path.clone())); + } + + if let Some((id, product)) = + copy_cgu_workproducts_to_incr_comp_cache_dir(sess, &module.name, &files) { + work_products.insert(id, product); + } + } + + work_products +} + +fn produce_final_output_artifacts(sess: &Session, + compiled_modules: &CompiledModules, + crate_output: &OutputFilenames) { + let mut user_wants_bitcode = false; + let mut user_wants_objects = false; + + // Produce final compile outputs. + let copy_gracefully = |from: &Path, to: &Path| { + if let Err(e) = fs::copy(from, to) { + sess.err(&format!("could not copy {:?} to {:?}: {}", from, to, e)); + } + }; + + let copy_if_one_unit = |output_type: OutputType, + keep_numbered: bool| { + if compiled_modules.modules.len() == 1 { + // 1) Only one codegen unit. In this case it's no difficulty + // to copy `foo.0.x` to `foo.x`. + let module_name = Some(&compiled_modules.modules[0].name[..]); + let path = crate_output.temp_path(output_type, module_name); + copy_gracefully(&path, + &crate_output.path(output_type)); + if !sess.opts.cg.save_temps && !keep_numbered { + // The user just wants `foo.x`, not `foo.#module-name#.x`. + remove(sess, &path); + } + } else { + let ext = crate_output.temp_path(output_type, None) + .extension() + .unwrap() + .to_str() + .unwrap() + .to_owned(); + + if crate_output.outputs.contains_key(&output_type) { + // 2) Multiple codegen units, with `--emit foo=some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring emit path because multiple .{} files \ + were produced", ext)); + } else if crate_output.single_output_file.is_some() { + // 3) Multiple codegen units, with `-o some_name`. We have + // no good solution for this case, so warn the user. + sess.warn(&format!("ignoring -o because multiple .{} files \ + were produced", ext)); + } else { + // 4) Multiple codegen units, but no explicit name. We + // just leave the `foo.0.x` files in place. + // (We don't have to do any work in this case.) 
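The four-way decision inside `copy_if_one_unit` above is easy to lose track of inside the closure. Below is a minimal standalone sketch of just that decision; `FinalCopy` and `plan_final_copy` are illustrative names, not compiler types, and the real code works on `OutputFilenames` and emits the warnings through the `Session` rather than returning them.

```rust
use std::path::PathBuf;

/// What to do with a per-CGU temp output such as `foo.<cgu>.o` (illustrative only).
#[derive(Debug, PartialEq)]
enum FinalCopy {
    /// One codegen unit: copy `foo.<cgu>.x` to the user-visible `foo.x`.
    CopyToFinal { from: PathBuf, to: PathBuf },
    /// Multiple units plus `--emit foo=path` or `-o`: nothing sensible to do, warn.
    WarnAndKeepNumbered(&'static str),
    /// Multiple units, no explicit name: leave the numbered files in place.
    KeepNumbered,
}

fn plan_final_copy(
    n_units: usize,
    numbered: &[PathBuf],
    final_path: PathBuf,
    explicit_emit_path: bool,
    single_output_file: bool,
) -> FinalCopy {
    if n_units == 1 {
        FinalCopy::CopyToFinal { from: numbered[0].clone(), to: final_path }
    } else if explicit_emit_path {
        FinalCopy::WarnAndKeepNumbered("ignoring emit path because multiple files were produced")
    } else if single_output_file {
        FinalCopy::WarnAndKeepNumbered("ignoring -o because multiple files were produced")
    } else {
        FinalCopy::KeepNumbered
    }
}

fn main() {
    // Single CGU: the numbered object is promoted to the final name.
    let plan = plan_final_copy(
        1,
        &[PathBuf::from("foo.0.o")],
        PathBuf::from("foo.o"),
        false,
        false,
    );
    assert_eq!(
        plan,
        FinalCopy::CopyToFinal { from: "foo.0.o".into(), to: "foo.o".into() }
    );

    // Multiple CGUs with `-o`: we can only warn and keep the numbered files.
    let plan = plan_final_copy(16, &[], PathBuf::from("foo.o"), false, true);
    assert!(matches!(plan, FinalCopy::WarnAndKeepNumbered(_)));
}
```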
+ } + } + }; + + // Flag to indicate whether the user explicitly requested bitcode. + // Otherwise, we produced it only as a temporary output, and will need + // to get rid of it. + for output_type in crate_output.outputs.keys() { + match *output_type { + OutputType::Bitcode => { + user_wants_bitcode = true; + // Copy to .bc, but always keep the .0.bc. There is a later + // check to figure out if we should delete .0.bc files, or keep + // them for making an rlib. + copy_if_one_unit(OutputType::Bitcode, true); + } + OutputType::LlvmAssembly => { + copy_if_one_unit(OutputType::LlvmAssembly, false); + } + OutputType::Assembly => { + copy_if_one_unit(OutputType::Assembly, false); + } + OutputType::Object => { + user_wants_objects = true; + copy_if_one_unit(OutputType::Object, true); + } + OutputType::Mir | + OutputType::Metadata | + OutputType::Exe | + OutputType::DepInfo => {} + } + } + + // Clean up unwanted temporary files. + + // We create the following files by default: + // - #crate#.#module-name#.bc + // - #crate#.#module-name#.o + // - #crate#.crate.metadata.bc + // - #crate#.crate.metadata.o + // - #crate#.o (linked from crate.##.o) + // - #crate#.bc (copied from crate.##.bc) + // We may create additional files if requested by the user (through + // `-C save-temps` or `--emit=` flags). + + if !sess.opts.cg.save_temps { + // Remove the temporary .#module-name#.o objects. If the user didn't + // explicitly request bitcode (with --emit=bc), and the bitcode is not + // needed for building an rlib, then we must remove .#module-name#.bc as + // well. + + // Specific rules for keeping .#module-name#.bc: + // - If the user requested bitcode (`user_wants_bitcode`), and + // codegen_units > 1, then keep it. + // - If the user requested bitcode but codegen_units == 1, then we + // can toss .#module-name#.bc because we copied it to .bc earlier. + // - If we're not building an rlib and the user didn't request + // bitcode, then delete .#module-name#.bc. + // If you change how this works, also update back::link::link_rlib, + // where .#module-name#.bc files are (maybe) deleted after making an + // rlib. + let needs_crate_object = crate_output.outputs.contains_key(&OutputType::Exe); + + let keep_numbered_bitcode = user_wants_bitcode && sess.codegen_units() > 1; + + let keep_numbered_objects = needs_crate_object || + (user_wants_objects && sess.codegen_units() > 1); + + for module in compiled_modules.modules.iter() { + if let Some(ref path) = module.object { + if !keep_numbered_objects { + remove(sess, path); + } + } + + if let Some(ref path) = module.bytecode { + if !keep_numbered_bitcode { + remove(sess, path); + } + } + } + + if !user_wants_bitcode { + if let Some(ref path) = compiled_modules.metadata_module.bytecode { + remove(sess, &path); + } + + if let Some(ref allocator_module) = compiled_modules.allocator_module { + if let Some(ref path) = allocator_module.bytecode { + remove(sess, path); + } + } + } + } + + // We leave the following files around by default: + // - #crate#.o + // - #crate#.crate.metadata.o + // - #crate#.bc + // These are used in linking steps and will be cleaned up afterward. +} + +pub fn dump_incremental_data( + _codegen_results: &CodegenResults +) { + // FIXME(mw): This does not work at the moment because the situation has + // become more complicated due to incremental LTO. Now a CGU + // can have more than two caching states. 
+ // println!("[incremental] Re-using {} out of {} modules", + // codegen_results.modules.iter().filter(|m| m.pre_existing).count(), + // codegen_results.modules.len()); +} + +pub enum WorkItem { + /// Optimize a newly codegened, totally unoptimized module. + Optimize(ModuleCodegen), + /// Copy the post-LTO artifacts from the incremental cache to the output + /// directory. + CopyPostLtoArtifacts(CachedModuleCodegen), + /// Perform (Thin)LTO on the given module. + LTO(lto::LtoModuleCodegen), +} + +impl WorkItem { + pub fn module_kind(&self) -> ModuleKind { + match *self { + WorkItem::Optimize(ref m) => m.kind, + WorkItem::CopyPostLtoArtifacts(_) | + WorkItem::LTO(_) => ModuleKind::Regular, + } + } + + pub fn name(&self) -> String { + match *self { + WorkItem::Optimize(ref m) => format!("optimize: {}", m.name), + WorkItem::CopyPostLtoArtifacts(ref m) => format!("copy post LTO artifacts: {}", m.name), + WorkItem::LTO(ref m) => format!("lto: {}", m.name()), + } + } +} + +enum WorkItemResult { + Compiled(CompiledModule), + NeedsLTO(ModuleCodegen), +} + +fn execute_work_item( + cgcx: &CodegenContext, + work_item: WorkItem, + timeline: &mut Timeline +) -> Result, FatalError> { + let module_config = cgcx.config(work_item.module_kind()); + + match work_item { + WorkItem::Optimize(module) => { + execute_optimize_work_item(cgcx, module, module_config, timeline) + } + WorkItem::CopyPostLtoArtifacts(module) => { + execute_copy_from_cache_work_item(cgcx, module, module_config, timeline) + } + WorkItem::LTO(module) => { + execute_lto_work_item(cgcx, module, module_config, timeline) + } + } +} + +fn execute_optimize_work_item( + cgcx: &CodegenContext, + module: ModuleCodegen, + module_config: &ModuleConfig, + timeline: &mut Timeline +) -> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + + unsafe { + B::optimize(cgcx, &diag_handler, &module, module_config, timeline)?; + } + + let linker_does_lto = cgcx.opts.debugging_opts.cross_lang_lto.enabled(); + + // After we've done the initial round of optimizations we need to + // decide whether to synchronously codegen this module or ship it + // back to the coordinator thread for further LTO processing (which + // has to wait for all the initial modules to be optimized). + // + // Here we dispatch based on the `cgcx.lto` and kind of module we're + // codegenning... + let needs_lto = match cgcx.lto { + Lto::No => false, + + // If the linker does LTO, we don't have to do it. Note that we + // keep doing full LTO, if it is requested, as not to break the + // assumption that the output will be a single module. + Lto::Thin | Lto::ThinLocal if linker_does_lto => false, + + // Here we've got a full crate graph LTO requested. We ignore + // this, however, if the crate type is only an rlib as there's + // no full crate graph to process, that'll happen later. + // + // This use case currently comes up primarily for targets that + // require LTO so the request for LTO is always unconditionally + // passed down to the backend, but we don't actually want to do + // anything about it yet until we've got a final product. + Lto::Fat | Lto::Thin => { + cgcx.crate_types.len() != 1 || + cgcx.crate_types[0] != config::CrateType::Rlib + } + + // When we're automatically doing ThinLTO for multi-codegen-unit + // builds we don't actually want to LTO the allocator modules if + // it shows up. This is due to various linker shenanigans that + // we'll encounter later. + // + // Additionally here's where we also factor in the current LLVM + // version. 
If it doesn't support ThinLTO we skip this. + Lto::ThinLocal => { + module.kind != ModuleKind::Allocator && + cgcx.backend.thin_lto_available() + } + }; + + // Metadata modules never participate in LTO regardless of the lto + // settings. + let needs_lto = needs_lto && module.kind != ModuleKind::Metadata; + + if needs_lto { + Ok(WorkItemResult::NeedsLTO(module)) + } else { + let module = unsafe { B::codegen(cgcx, &diag_handler, module, module_config, timeline)? }; + Ok(WorkItemResult::Compiled(module)) + } +} + +fn execute_copy_from_cache_work_item( + cgcx: &CodegenContext, + module: CachedModuleCodegen, + module_config: &ModuleConfig, + _: &mut Timeline +) -> Result, FatalError> { + let incr_comp_session_dir = cgcx.incr_comp_session_dir + .as_ref() + .unwrap(); + let mut object = None; + let mut bytecode = None; + let mut bytecode_compressed = None; + for (kind, saved_file) in &module.source.saved_files { + let obj_out = match kind { + WorkProductFileKind::Object => { + let path = cgcx.output_filenames.temp_path(OutputType::Object, + Some(&module.name)); + object = Some(path.clone()); + path + } + WorkProductFileKind::Bytecode => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, + Some(&module.name)); + bytecode = Some(path.clone()); + path + } + WorkProductFileKind::BytecodeCompressed => { + let path = cgcx.output_filenames.temp_path(OutputType::Bitcode, + Some(&module.name)) + .with_extension(RLIB_BYTECODE_EXTENSION); + bytecode_compressed = Some(path.clone()); + path + } + }; + let source_file = in_incr_comp_dir(&incr_comp_session_dir, + &saved_file); + debug!("copying pre-existing module `{}` from {:?} to {}", + module.name, + source_file, + obj_out.display()); + match link_or_copy(&source_file, &obj_out) { + Ok(_) => { } + Err(err) => { + let diag_handler = cgcx.create_diag_handler(); + diag_handler.err(&format!("unable to copy {} to {}: {}", + source_file.display(), + obj_out.display(), + err)); + } + } + } + + assert_eq!(object.is_some(), module_config.emit_obj); + assert_eq!(bytecode.is_some(), module_config.emit_bc); + assert_eq!(bytecode_compressed.is_some(), module_config.emit_bc_compressed); + + Ok(WorkItemResult::Compiled(CompiledModule { + name: module.name, + kind: ModuleKind::Regular, + object, + bytecode, + bytecode_compressed, + })) +} + +fn execute_lto_work_item( + cgcx: &CodegenContext, + mut module: lto::LtoModuleCodegen, + module_config: &ModuleConfig, + timeline: &mut Timeline +) -> Result, FatalError> { + let diag_handler = cgcx.create_diag_handler(); + + unsafe { + let module = module.optimize(cgcx, timeline)?; + let module = B::codegen(cgcx, &diag_handler, module, module_config, timeline)?; + Ok(WorkItemResult::Compiled(module)) + } +} + +pub enum Message { + Token(io::Result), + NeedsLTO { + result: ModuleCodegen, + worker_id: usize, + }, + Done { + result: Result, + worker_id: usize, + }, + CodegenDone { + llvm_work_item: WorkItem, + cost: u64, + }, + AddImportOnlyModule { + module_data: SerializedModule, + work_product: WorkProduct, + }, + CodegenComplete, + CodegenItem, +} + +struct Diagnostic { + msg: String, + code: Option, + lvl: Level, +} + +#[derive(PartialEq, Clone, Copy, Debug)] +enum MainThreadWorkerState { + Idle, + Codegenning, + LLVMing, +} + +fn start_executing_work( + backend: B, + tcx: TyCtxt, + crate_info: &CrateInfo, + shared_emitter: SharedEmitter, + codegen_worker_send: Sender>, + coordinator_receive: Receiver>, + total_cgus: usize, + jobserver: Client, + time_graph: Option, + modules_config: Arc, + metadata_config: Arc, + 
allocator_config: Arc +) -> thread::JoinHandle> { + let coordinator_send = tcx.tx_to_llvm_workers.lock().clone(); + let sess = tcx.sess; + + // Compute the set of symbols we need to retain when doing LTO (if we need to) + let exported_symbols = { + let mut exported_symbols = FxHashMap::default(); + + let copy_symbols = |cnum| { + let symbols = tcx.exported_symbols(cnum) + .iter() + .map(|&(s, lvl)| (s.symbol_name(tcx).to_string(), lvl)) + .collect(); + Arc::new(symbols) + }; + + match sess.lto() { + Lto::No => None, + Lto::ThinLocal => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + Some(Arc::new(exported_symbols)) + } + Lto::Fat | Lto::Thin => { + exported_symbols.insert(LOCAL_CRATE, copy_symbols(LOCAL_CRATE)); + for &cnum in tcx.crates().iter() { + exported_symbols.insert(cnum, copy_symbols(cnum)); + } + Some(Arc::new(exported_symbols)) + } + } + }; + + // First up, convert our jobserver into a helper thread so we can use normal + // mpsc channels to manage our messages and such. + // After we've requested tokens then we'll, when we can, + // get tokens on `coordinator_receive` which will + // get managed in the main loop below. + let coordinator_send2 = coordinator_send.clone(); + let helper = jobserver.into_helper_thread(move |token| { + let msg : Message = Message::Token(token); + drop(coordinator_send2.send(Box::new(msg))); + }).expect("failed to spawn helper thread"); + + let mut each_linked_rlib_for_lto = Vec::new(); + drop(link::each_linked_rlib(sess, crate_info, &mut |cnum, path| { + if link::ignored_for_lto(sess, crate_info, cnum) { + return + } + each_linked_rlib_for_lto.push((cnum, path.to_path_buf())); + })); + + let assembler_cmd = if modules_config.no_integrated_as { + // HACK: currently we use linker (gcc) as our assembler + let (linker, flavor) = link::linker_and_flavor(sess); + + let (name, mut cmd) = get_linker(sess, &linker, flavor); + cmd.args(&sess.target.target.options.asm_args); + Some(Arc::new(AssemblerCommand { + name, + cmd, + })) + } else { + None + }; + + let cgcx : CodegenContext = CodegenContext { + backend : backend.clone(), + crate_types: sess.crate_types.borrow().clone(), + each_linked_rlib_for_lto, + lto: sess.lto(), + no_landing_pads: sess.no_landing_pads(), + fewer_names: sess.fewer_names(), + save_temps: sess.opts.cg.save_temps, + opts: Arc::new(sess.opts.clone()), + time_passes: sess.time_passes(), + exported_symbols, + plugin_passes: sess.plugin_llvm_passes.borrow().clone(), + remark: sess.opts.cg.remark.clone(), + worker: 0, + incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), + cgu_reuse_tracker: sess.cgu_reuse_tracker.clone(), + coordinator_send, + diag_emitter: shared_emitter.clone(), + time_graph, + output_filenames: tcx.output_filenames(LOCAL_CRATE), + regular_module_config: modules_config, + metadata_module_config: metadata_config, + allocator_module_config: allocator_config, + tm_factory: backend.target_machine_factory(tcx.sess, false), + total_cgus, + msvc_imps_needed: msvc_imps_needed(tcx), + target_pointer_width: tcx.sess.target.target.target_pointer_width.clone(), + debuginfo: tcx.sess.opts.debuginfo, + assembler_cmd, + }; + + // This is the "main loop" of parallel work happening for parallel codegen. + // It's here that we manage parallelism, schedule work, and work with + // messages coming from clients. 
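The jobserver-to-channel conversion above is what lets the coordinator treat tokens like any other message in its loop. Here is a small self-contained sketch of the same pattern using the `jobserver` crate directly (assuming `jobserver = "0.1"` in Cargo.toml); in rustc the client comes from `sess.jobserver` rather than being created ad hoc, and the tokens are boxed into the shared `Message` type instead of being sent raw.

```rust
use std::sync::mpsc::channel;

fn main() -> std::io::Result<()> {
    // Stand-alone jobserver with two tokens; rustc inherits its client from
    // the session (and ultimately from the environment) instead.
    let client = jobserver::Client::new(2)?;

    // The coordinator listens on an ordinary mpsc channel; acquired tokens
    // are forwarded to it as messages, much like `Message::Token` above.
    let (tx, rx) = channel();
    let helper = client.into_helper_thread(move |token| {
        // `token` is an io::Result<Acquired>; sending can fail once the
        // coordinator has shut down, which we simply ignore.
        let _ = tx.send(token);
    })?;

    // Ask for one extra token per work item we would like to run in parallel.
    helper.request_token();
    helper.request_token();

    // The coordinator's message loop would interleave these with other
    // messages; here we just count how many tokens were granted.
    let granted = rx.iter().take(2).filter(|t| t.is_ok()).count();
    println!("granted {} extra tokens", granted);
    Ok(())
}
```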
+ // + // There are a few environmental pre-conditions that shape how the system + // is set up: + // + // - Error reporting only can happen on the main thread because that's the + // only place where we have access to the compiler `Session`. + // - LLVM work can be done on any thread. + // - Codegen can only happen on the main thread. + // - Each thread doing substantial work most be in possession of a `Token` + // from the `Jobserver`. + // - The compiler process always holds one `Token`. Any additional `Tokens` + // have to be requested from the `Jobserver`. + // + // Error Reporting + // =============== + // The error reporting restriction is handled separately from the rest: We + // set up a `SharedEmitter` the holds an open channel to the main thread. + // When an error occurs on any thread, the shared emitter will send the + // error message to the receiver main thread (`SharedEmitterMain`). The + // main thread will periodically query this error message queue and emit + // any error messages it has received. It might even abort compilation if + // has received a fatal error. In this case we rely on all other threads + // being torn down automatically with the main thread. + // Since the main thread will often be busy doing codegen work, error + // reporting will be somewhat delayed, since the message queue can only be + // checked in between to work packages. + // + // Work Processing Infrastructure + // ============================== + // The work processing infrastructure knows three major actors: + // + // - the coordinator thread, + // - the main thread, and + // - LLVM worker threads + // + // The coordinator thread is running a message loop. It instructs the main + // thread about what work to do when, and it will spawn off LLVM worker + // threads as open LLVM WorkItems become available. + // + // The job of the main thread is to codegen CGUs into LLVM work package + // (since the main thread is the only thread that can do this). The main + // thread will block until it receives a message from the coordinator, upon + // which it will codegen one CGU, send it to the coordinator and block + // again. This way the coordinator can control what the main thread is + // doing. + // + // The coordinator keeps a queue of LLVM WorkItems, and when a `Token` is + // available, it will spawn off a new LLVM worker thread and let it process + // that a WorkItem. When a LLVM worker thread is done with its WorkItem, + // it will just shut down, which also frees all resources associated with + // the given LLVM module, and sends a message to the coordinator that the + // has been completed. + // + // Work Scheduling + // =============== + // The scheduler's goal is to minimize the time it takes to complete all + // work there is, however, we also want to keep memory consumption low + // if possible. These two goals are at odds with each other: If memory + // consumption were not an issue, we could just let the main thread produce + // LLVM WorkItems at full speed, assuring maximal utilization of + // Tokens/LLVM worker threads. However, since codegen usual is faster + // than LLVM processing, the queue of LLVM WorkItems would fill up and each + // WorkItem potentially holds on to a substantial amount of memory. + // + // So the actual goal is to always produce just enough LLVM WorkItems as + // not to starve our LLVM worker threads. That means, once we have enough + // WorkItems in our queue, we can block the main thread, so it does not + // produce more until we need them. 
+ // + // Doing LLVM Work on the Main Thread + // ---------------------------------- + // Since the main thread owns the compiler processes implicit `Token`, it is + // wasteful to keep it blocked without doing any work. Therefore, what we do + // in this case is: We spawn off an additional LLVM worker thread that helps + // reduce the queue. The work it is doing corresponds to the implicit + // `Token`. The coordinator will mark the main thread as being busy with + // LLVM work. (The actual work happens on another OS thread but we just care + // about `Tokens`, not actual threads). + // + // When any LLVM worker thread finishes while the main thread is marked as + // "busy with LLVM work", we can do a little switcheroo: We give the Token + // of the just finished thread to the LLVM worker thread that is working on + // behalf of the main thread's implicit Token, thus freeing up the main + // thread again. The coordinator can then again decide what the main thread + // should do. This allows the coordinator to make decisions at more points + // in time. + // + // Striking a Balance between Throughput and Memory Consumption + // ------------------------------------------------------------ + // Since our two goals, (1) use as many Tokens as possible and (2) keep + // memory consumption as low as possible, are in conflict with each other, + // we have to find a trade off between them. Right now, the goal is to keep + // all workers busy, which means that no worker should find the queue empty + // when it is ready to start. + // How do we do achieve this? Good question :) We actually never know how + // many `Tokens` are potentially available so it's hard to say how much to + // fill up the queue before switching the main thread to LLVM work. Also we + // currently don't have a means to estimate how long a running LLVM worker + // will still be busy with it's current WorkItem. However, we know the + // maximal count of available Tokens that makes sense (=the number of CPU + // cores), so we can take a conservative guess. The heuristic we use here + // is implemented in the `queue_full_enough()` function. + // + // Some Background on Jobservers + // ----------------------------- + // It's worth also touching on the management of parallelism here. We don't + // want to just spawn a thread per work item because while that's optimal + // parallelism it may overload a system with too many threads or violate our + // configuration for the maximum amount of cpu to use for this process. To + // manage this we use the `jobserver` crate. + // + // Job servers are an artifact of GNU make and are used to manage + // parallelism between processes. A jobserver is a glorified IPC semaphore + // basically. Whenever we want to run some work we acquire the semaphore, + // and whenever we're done with that work we release the semaphore. In this + // manner we can ensure that the maximum number of parallel workers is + // capped at any one point in time. + // + // LTO and the coordinator thread + // ------------------------------ + // + // The final job the coordinator thread is responsible for is managing LTO + // and how that works. When LTO is requested what we'll to is collect all + // optimized LLVM modules into a local vector on the coordinator. Once all + // modules have been codegened and optimized we hand this to the `lto` + // module for further optimization. The `lto` module will return back a list + // of more modules to work on, which the coordinator will continue to spawn + // work for. 
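The heuristic referenced in the scheduling notes above is `queue_full_enough()`, defined near the bottom of this file. Pulled out on its own, with a couple of worked values, it looks like the sketch below (the sample numbers are illustrative, not tuned):

```rust
/// Same shape as the `queue_full_enough` helper defined later in this file:
/// the queue counts as "full enough" once it holds at least as many items as
/// the number of workers we could still put to work.
fn queue_full_enough(items_in_queue: usize, workers_running: usize, max_workers: usize) -> bool {
    items_in_queue > 0 &&
        items_in_queue >= max_workers.saturating_sub(workers_running / 2)
}

fn main() {
    let max_workers = 8;

    // Nothing queued yet: keep the main thread codegenning more CGUs.
    assert!(!queue_full_enough(0, 0, max_workers));

    // Plenty of queued work and most workers busy: let the main thread spend
    // its implicit token on LLVM work instead.
    assert!(queue_full_enough(8, 6, max_workers));

    // A short queue with idle capacity: keep producing work items.
    assert!(!queue_full_enough(3, 2, max_workers));
}
```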
+ // + // Each LLVM module is automatically sent back to the coordinator for LTO if + // necessary. There's already optimizations in place to avoid sending work + // back to the coordinator if LTO isn't requested. + return thread::spawn(move || { + // We pretend to be within the top-level LLVM time-passes task here: + set_time_depth(1); + + let max_workers = ::num_cpus::get(); + let mut worker_id_counter = 0; + let mut free_worker_ids = Vec::new(); + let mut get_worker_id = |free_worker_ids: &mut Vec| { + if let Some(id) = free_worker_ids.pop() { + id + } else { + let id = worker_id_counter; + worker_id_counter += 1; + id + } + }; + + // This is where we collect codegen units that have gone all the way + // through codegen and LLVM. + let mut compiled_modules = vec![]; + let mut compiled_metadata_module = None; + let mut compiled_allocator_module = None; + let mut needs_lto = Vec::new(); + let mut lto_import_only_modules = Vec::new(); + let mut started_lto = false; + + // This flag tracks whether all items have gone through codegens + let mut codegen_done = false; + + // This is the queue of LLVM work items that still need processing. + let mut work_items = Vec::<(WorkItem, u64)>::new(); + + // This are the Jobserver Tokens we currently hold. Does not include + // the implicit Token the compiler process owns no matter what. + let mut tokens = Vec::new(); + + let mut main_thread_worker_state = MainThreadWorkerState::Idle; + let mut running = 0; + + let mut llvm_start_time = None; + + // Run the message loop while there's still anything that needs message + // processing: + while !codegen_done || + work_items.len() > 0 || + running > 0 || + needs_lto.len() > 0 || + lto_import_only_modules.len() > 0 || + main_thread_worker_state != MainThreadWorkerState::Idle { + + // While there are still CGUs to be codegened, the coordinator has + // to decide how to utilize the compiler processes implicit Token: + // For codegenning more CGU or for running them through LLVM. + if !codegen_done { + if main_thread_worker_state == MainThreadWorkerState::Idle { + if !queue_full_enough(work_items.len(), running, max_workers) { + // The queue is not full enough, codegen more items: + if let Err(_) = codegen_worker_send.send(Message::CodegenItem) { + panic!("Could not send Message::CodegenItem to main thread") + } + main_thread_worker_state = MainThreadWorkerState::Codegenning; + } else { + // The queue is full enough to not let the worker + // threads starve. Use the implicit Token to do some + // LLVM work too. + let (item, _) = work_items.pop() + .expect("queue empty - queue_full_enough() broken?"); + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } + } + } else { + // If we've finished everything related to normal codegen + // then it must be the case that we've got some LTO work to do. 
+ // Perform the serial work here of figuring out what we're + // going to LTO and then push a bunch of work items onto our + // queue to do LTO + if work_items.len() == 0 && + running == 0 && + main_thread_worker_state == MainThreadWorkerState::Idle { + assert!(!started_lto); + assert!(needs_lto.len() + lto_import_only_modules.len() > 0); + started_lto = true; + let modules = mem::replace(&mut needs_lto, Vec::new()); + let import_only_modules = + mem::replace(&mut lto_import_only_modules, Vec::new()); + for (work, cost) in generate_lto_work(&cgcx, modules, import_only_modules) { + let insertion_index = work_items + .binary_search_by_key(&cost, |&(_, cost)| cost) + .unwrap_or_else(|e| e); + work_items.insert(insertion_index, (work, cost)); + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + } + } + + // In this branch, we know that everything has been codegened, + // so it's just a matter of determining whether the implicit + // Token is free to use for LLVM work. + match main_thread_worker_state { + MainThreadWorkerState::Idle => { + if let Some((item, _)) = work_items.pop() { + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + main_thread_worker_state = MainThreadWorkerState::LLVMing; + spawn_work(cgcx, item); + } else { + // There is no unstarted work, so let the main thread + // take over for a running worker. Otherwise the + // implicit token would just go to waste. + // We reduce the `running` counter by one. The + // `tokens.truncate()` below will take care of + // giving the Token back. + debug_assert!(running > 0); + running -= 1; + main_thread_worker_state = MainThreadWorkerState::LLVMing; + } + } + MainThreadWorkerState::Codegenning => { + bug!("codegen worker should not be codegenning after \ + codegen was already completed") + } + MainThreadWorkerState::LLVMing => { + // Already making good use of that token + } + } + } + + // Spin up what work we can, only doing this while we've got available + // parallelism slots and work left to spawn. + while work_items.len() > 0 && running < tokens.len() { + let (item, _) = work_items.pop().unwrap(); + + maybe_start_llvm_timer(cgcx.config(item.module_kind()), + &mut llvm_start_time); + + let cgcx = CodegenContext { + worker: get_worker_id(&mut free_worker_ids), + .. cgcx.clone() + }; + + spawn_work(cgcx, item); + running += 1; + } + + // Relinquish accidentally acquired extra tokens + tokens.truncate(running); + + let msg = coordinator_receive.recv().unwrap(); + match *msg.downcast::>().ok().unwrap() { + // Save the token locally and the next turn of the loop will use + // this to spawn a new unit of work, or it may get dropped + // immediately if we have no more work to spawn. + Message::Token(token) => { + match token { + Ok(token) => { + tokens.push(token); + + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + // If the main thread token is used for LLVM work + // at the moment, we turn that thread into a regular + // LLVM worker thread, so the main thread is free + // to react to codegen demand. 
+ main_thread_worker_state = MainThreadWorkerState::Idle; + running += 1; + } + } + Err(e) => { + let msg = &format!("failed to acquire jobserver token: {}", e); + shared_emitter.fatal(msg); + // Exit the coordinator thread + panic!("{}", msg) + } + } + } + + Message::CodegenDone { llvm_work_item, cost } => { + // We keep the queue sorted by estimated processing cost, + // so that more expensive items are processed earlier. This + // is good for throughput as it gives the main thread more + // time to fill up the queue and it avoids scheduling + // expensive items to the end. + // Note, however, that this is not ideal for memory + // consumption, as LLVM module sizes are not evenly + // distributed. + let insertion_index = + work_items.binary_search_by_key(&cost, |&(_, cost)| cost); + let insertion_index = match insertion_index { + Ok(idx) | Err(idx) => idx + }; + work_items.insert(insertion_index, (llvm_work_item, cost)); + + if !cgcx.opts.debugging_opts.no_parallel_llvm { + helper.request_token(); + } + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + Message::CodegenComplete => { + codegen_done = true; + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + + // If a thread exits successfully then we drop a token associated + // with that worker and update our `running` count. We may later + // re-acquire a token to continue running more work. We may also not + // actually drop a token here if the worker was running with an + // "ephemeral token" + // + // Note that if the thread failed that means it panicked, so we + // abort immediately. + Message::Done { result: Ok(compiled_module), worker_id } => { + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + + free_worker_ids.push(worker_id); + + match compiled_module.kind { + ModuleKind::Regular => { + compiled_modules.push(compiled_module); + } + ModuleKind::Metadata => { + assert!(compiled_metadata_module.is_none()); + compiled_metadata_module = Some(compiled_module); + } + ModuleKind::Allocator => { + assert!(compiled_allocator_module.is_none()); + compiled_allocator_module = Some(compiled_module); + } + } + } + Message::NeedsLTO { result, worker_id } => { + assert!(!started_lto); + if main_thread_worker_state == MainThreadWorkerState::LLVMing { + main_thread_worker_state = MainThreadWorkerState::Idle; + } else { + running -= 1; + } + free_worker_ids.push(worker_id); + needs_lto.push(result); + } + Message::AddImportOnlyModule { module_data, work_product } => { + assert!(!started_lto); + assert!(!codegen_done); + assert_eq!(main_thread_worker_state, + MainThreadWorkerState::Codegenning); + lto_import_only_modules.push((module_data, work_product)); + main_thread_worker_state = MainThreadWorkerState::Idle; + } + Message::Done { result: Err(()), worker_id: _ } => { + shared_emitter.fatal("aborting due to worker thread failure"); + // Exit the coordinator thread + return Err(()) + } + Message::CodegenItem => { + bug!("the coordinator should not receive codegen requests") + } + } + } + + if let Some(llvm_start_time) = llvm_start_time { + let total_llvm_time = Instant::now().duration_since(llvm_start_time); + // This is the top-level timing for all of LLVM, set the time-depth + // to zero. 
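Both the LTO branch and the `CodegenDone` handler keep `work_items` ordered by inserting at the index returned by `binary_search_by_key`, where the `Ok` and `Err` cases are equally valid insertion points, and `pop()` then always takes the most expensive remaining item. A standalone sketch of that queue discipline; the `WorkQueue` wrapper is illustrative, the real code uses a bare `Vec<(WorkItem, u64)>` inline:

```rust
/// Work queue kept sorted by estimated cost, cheapest first, so that `pop()`
/// (from the back) always hands out the most expensive remaining item.
struct WorkQueue<T> {
    items: Vec<(T, u64)>,
}

impl<T> WorkQueue<T> {
    fn new() -> Self {
        WorkQueue { items: Vec::new() }
    }

    /// Insert while keeping the vector sorted by cost; both `Ok` and `Err`
    /// from the binary search give a valid insertion point.
    fn push(&mut self, item: T, cost: u64) {
        let idx = self
            .items
            .binary_search_by_key(&cost, |&(_, cost)| cost)
            .unwrap_or_else(|e| e);
        self.items.insert(idx, (item, cost));
    }

    /// Take the most expensive item, mirroring `work_items.pop()` above.
    fn pop(&mut self) -> Option<(T, u64)> {
        self.items.pop()
    }
}

fn main() {
    let mut queue = WorkQueue::new();
    queue.push("small cgu", 10);
    queue.push("huge cgu", 500);
    queue.push("medium cgu", 80);

    // The most expensive module is scheduled first.
    assert_eq!(queue.pop(), Some(("huge cgu", 500)));
    assert_eq!(queue.pop(), Some(("medium cgu", 80)));
}
```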
+ set_time_depth(0); + print_time_passes_entry(cgcx.time_passes, + "LLVM passes", + total_llvm_time); + } + + // Regardless of what order these modules completed in, report them to + // the backend in the same order every time to ensure that we're handing + // out deterministic results. + compiled_modules.sort_by(|a, b| a.name.cmp(&b.name)); + + let compiled_metadata_module = compiled_metadata_module + .expect("Metadata module not compiled?"); + + Ok(CompiledModules { + modules: compiled_modules, + metadata_module: compiled_metadata_module, + allocator_module: compiled_allocator_module, + }) + }); + + // A heuristic that determines if we have enough LLVM WorkItems in the + // queue so that the main thread can do LLVM work instead of codegen + fn queue_full_enough(items_in_queue: usize, + workers_running: usize, + max_workers: usize) -> bool { + // Tune me, plz. + items_in_queue > 0 && + items_in_queue >= max_workers.saturating_sub(workers_running / 2) + } + + fn maybe_start_llvm_timer(config: &ModuleConfig, + llvm_start_time: &mut Option) { + // We keep track of the -Ztime-passes output manually, + // since the closure-based interface does not fit well here. + if config.time_passes { + if llvm_start_time.is_none() { + *llvm_start_time = Some(Instant::now()); + } + } + } +} + +pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; +pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(CODEGEN_WORKER_ID); +pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); +const LLVM_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#7DB67A", "#C6EEC4", "#ACDAAA", "#579354", "#3E6F3C"]); + +// Set up a destructor which will fire off a message that we're done as +// we exit. +struct Bomb { + coordinator_send: Sender>, + result: Option>, + worker_id: usize, +} +impl Drop for Bomb { + fn drop(&mut self) { + let worker_id = self.worker_id; + let msg : Message = match self.result.take() { + Some(WorkItemResult::Compiled(m)) => { + Message::Done { result: Ok(m), worker_id } + } + Some(WorkItemResult::NeedsLTO(m)) => { + Message::NeedsLTO { result: m, worker_id } + } + None => Message::Done { result: Err(()), worker_id } + }; + drop(self.coordinator_send.send(Box::new(msg))); + } +} + +fn spawn_work( + cgcx: CodegenContext, + work: WorkItem +) { + let depth = time_depth(); + + thread::spawn(move || { + set_time_depth(depth); + + let mut bomb : Bomb = Bomb { + coordinator_send: cgcx.coordinator_send.clone(), + result: None, + worker_id: cgcx.worker, + }; + + // Execute the work itself, and if it finishes successfully then flag + // ourselves as a success as well. + // + // Note that we ignore any `FatalError` coming out of `execute_work_item`, + // as a diagnostic was already sent off to the main thread - just + // surface that there was an error in this worker. 
+ bomb.result = { + let timeline = cgcx.time_graph.as_ref().map(|tg| { + tg.start(time_graph::TimelineId(cgcx.worker), + LLVM_WORK_PACKAGE_KIND, + &work.name()) + }); + let mut timeline = timeline.unwrap_or(Timeline::noop()); + execute_work_item(&cgcx, work, &mut timeline).ok() + }; + }); +} + +pub fn run_assembler( + cgcx: &CodegenContext, + handler: &Handler, + assembly: &Path, + object: &Path +) { + let assembler = cgcx.assembler_cmd + .as_ref() + .expect("cgcx.assembler_cmd is missing?"); + + let pname = &assembler.name; + let mut cmd = assembler.cmd.clone(); + cmd.arg("-c").arg("-o").arg(object).arg(assembly); + debug!("{:?}", cmd); + + match cmd.output() { + Ok(prog) => { + if !prog.status.success() { + let mut note = prog.stderr.clone(); + note.extend_from_slice(&prog.stdout); + + handler.struct_err(&format!("linking with `{}` failed: {}", + pname.display(), + prog.status)) + .note(&format!("{:?}", &cmd)) + .note(str::from_utf8(¬e[..]).unwrap()) + .emit(); + handler.abort_if_errors(); + } + }, + Err(e) => { + handler.err(&format!("could not exec the linker `{}`: {}", pname.display(), e)); + handler.abort_if_errors(); + } + } +} + + +enum SharedEmitterMessage { + Diagnostic(Diagnostic), + InlineAsmError(u32, String), + AbortIfErrors, + Fatal(String), +} + +#[derive(Clone)] +pub struct SharedEmitter { + sender: Sender, +} + +pub struct SharedEmitterMain { + receiver: Receiver, +} + +impl SharedEmitter { + pub fn new() -> (SharedEmitter, SharedEmitterMain) { + let (sender, receiver) = channel(); + + (SharedEmitter { sender }, SharedEmitterMain { receiver }) + } + + pub fn inline_asm_error(&self, cookie: u32, msg: String) { + drop(self.sender.send(SharedEmitterMessage::InlineAsmError(cookie, msg))); + } + + pub fn fatal(&self, msg: &str) { + drop(self.sender.send(SharedEmitterMessage::Fatal(msg.to_string()))); + } +} + +impl Emitter for SharedEmitter { + fn emit(&mut self, db: &DiagnosticBuilder) { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: db.message(), + code: db.code.clone(), + lvl: db.level, + }))); + for child in &db.children { + drop(self.sender.send(SharedEmitterMessage::Diagnostic(Diagnostic { + msg: child.message(), + code: None, + lvl: child.level, + }))); + } + drop(self.sender.send(SharedEmitterMessage::AbortIfErrors)); + } +} + +impl SharedEmitterMain { + pub fn check(&self, sess: &Session, blocking: bool) { + loop { + let message = if blocking { + match self.receiver.recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + } else { + match self.receiver.try_recv() { + Ok(message) => Ok(message), + Err(_) => Err(()), + } + }; + + match message { + Ok(SharedEmitterMessage::Diagnostic(diag)) => { + let handler = sess.diagnostic(); + match diag.code { + Some(ref code) => { + handler.emit_with_code(&MultiSpan::new(), + &diag.msg, + code.clone(), + diag.lvl); + } + None => { + handler.emit(&MultiSpan::new(), + &diag.msg, + diag.lvl); + } + } + } + Ok(SharedEmitterMessage::InlineAsmError(cookie, msg)) => { + match Mark::from_u32(cookie).expn_info() { + Some(ei) => sess.span_err(ei.call_site, &msg), + None => sess.err(&msg), + } + } + Ok(SharedEmitterMessage::AbortIfErrors) => { + sess.abort_if_errors(); + } + Ok(SharedEmitterMessage::Fatal(msg)) => { + sess.fatal(&msg); + } + Err(_) => { + break; + } + } + + } + } +} + +pub struct OngoingCodegen { + pub backend : B, + pub crate_name: Symbol, + pub crate_hash: Svh, + pub metadata: EncodedMetadata, + pub windows_subsystem: Option, + pub linker_info: LinkerInfo, + pub crate_info: CrateInfo, 
+ pub time_graph: Option, + pub coordinator_send: Sender>, + pub codegen_worker_receive: Receiver>, + pub shared_emitter_main: SharedEmitterMain, + pub future: thread::JoinHandle>, + pub output_filenames: Arc, +} + +impl OngoingCodegen { + pub fn join( + self, + sess: &Session + ) -> (CodegenResults, FxHashMap) { + self.shared_emitter_main.check(sess, true); + let compiled_modules = match self.future.join() { + Ok(Ok(compiled_modules)) => compiled_modules, + Ok(Err(())) => { + sess.abort_if_errors(); + panic!("expected abort due to worker thread errors") + }, + Err(_) => { + sess.fatal("Error during codegen/LLVM phase."); + } + }; + + sess.cgu_reuse_tracker.check_expected_reuse(sess); + + sess.abort_if_errors(); + + if let Some(time_graph) = self.time_graph { + time_graph.dump(&format!("{}-timings", self.crate_name)); + } + + let work_products = + copy_all_cgu_workproducts_to_incr_comp_cache_dir(sess, + &compiled_modules); + produce_final_output_artifacts(sess, + &compiled_modules, + &self.output_filenames); + + // FIXME: time_llvm_passes support - does this use a global context or + // something? + if sess.codegen_units() == 1 && sess.time_llvm_passes() { + self.backend.print_pass_timings() + } + + (CodegenResults { + crate_name: self.crate_name, + crate_hash: self.crate_hash, + metadata: self.metadata, + windows_subsystem: self.windows_subsystem, + linker_info: self.linker_info, + crate_info: self.crate_info, + + modules: compiled_modules.modules, + allocator_module: compiled_modules.allocator_module, + metadata_module: compiled_modules.metadata_module, + }, work_products) + } + + pub fn submit_pre_codegened_module_to_llvm(&self, + tcx: TyCtxt, + module: ModuleCodegen) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + + // These are generally cheap and won't through off scheduling. + let cost = 0; + submit_codegened_module_to_llvm(&self.backend, tcx, module, cost); + } + + pub fn codegen_finished(&self, tcx: TyCtxt) { + self.wait_for_signal_to_codegen_item(); + self.check_for_errors(tcx.sess); + let msg : Message = Message::CodegenComplete; + drop(self.coordinator_send.send(Box::new(msg))); + } + + pub fn check_for_errors(&self, sess: &Session) { + self.shared_emitter_main.check(sess, false); + } + + pub fn wait_for_signal_to_codegen_item(&self) { + match self.codegen_worker_receive.recv() { + Ok(Message::CodegenItem) => { + // Nothing to do + } + Ok(_) => panic!("unexpected message"), + Err(_) => { + // One of the LLVM threads must have panicked, fall through so + // error handling can be reached. 
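The `SharedEmitter` / `SharedEmitterMain` pair shown earlier is the mechanism that keeps all diagnostic emission on the main thread: workers only push messages into a channel, and the main thread drains it either non-blocking (between work packages) or blocking (while waiting in `join`). A simplified sketch of that pattern with plain strings standing in for real diagnostics:

```rust
use std::sync::mpsc::{channel, Receiver, Sender};
use std::thread;

enum EmitterMessage {
    Diagnostic(String),
    Fatal(String),
}

#[derive(Clone)]
struct SharedEmitter {
    sender: Sender<EmitterMessage>,
}

struct SharedEmitterMain {
    receiver: Receiver<EmitterMessage>,
}

impl SharedEmitterMain {
    /// Drain pending messages. `blocking` mirrors the two call sites above:
    /// the busy main thread polls (`false`) between work packages and blocks
    /// (`true`) when it has nothing left to do but wait for the workers.
    fn check(&self, blocking: bool) {
        loop {
            let message = if blocking { self.receiver.recv().map_err(|_| ()) }
                          else { self.receiver.try_recv().map_err(|_| ()) };
            match message {
                Ok(EmitterMessage::Diagnostic(msg)) => println!("warning: {}", msg),
                Ok(EmitterMessage::Fatal(msg)) => panic!("fatal: {}", msg),
                // Empty queue (or all senders gone): nothing more to report.
                Err(()) => break,
            }
        }
    }
}

fn main() {
    let (sender, receiver) = channel();
    let emitter = SharedEmitter { sender };
    let main_side = SharedEmitterMain { receiver };

    // A "worker" reports a diagnostic from another thread.
    let worker_emitter = emitter.clone();
    thread::spawn(move || {
        let _ = worker_emitter.sender.send(EmitterMessage::Diagnostic(
            "unused variable in cgu `foo.0`".to_string(),
        ));
    })
    .join()
    .unwrap();

    // Non-blocking poll, as the busy main thread would do between work packages.
    main_side.check(false);
}
```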
+ } + } + } +} + +pub fn submit_codegened_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: ModuleCodegen, + cost: u64 +) { + let llvm_work_item = WorkItem::Optimize(module); + let msg : Message = Message::CodegenDone { + llvm_work_item, + cost, + }; + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(msg))); +} + +pub fn submit_post_lto_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: CachedModuleCodegen +) { + let llvm_work_item = WorkItem::CopyPostLtoArtifacts(module); + let msg : Message = Message::CodegenDone { + llvm_work_item, + cost: 0, + }; + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(msg))); +} + +pub fn submit_pre_lto_module_to_llvm( + _backend: &B, + tcx: TyCtxt, + module: CachedModuleCodegen +) { + let filename = pre_lto_bitcode_filename(&module.name); + let bc_path = in_incr_comp_dir_sess(tcx.sess, &filename); + let file = fs::File::open(&bc_path).unwrap_or_else(|e| { + panic!("failed to open bitcode file `{}`: {}", bc_path.display(), e) + }); + + let mmap = unsafe { + memmap::Mmap::map(&file).unwrap_or_else(|e| { + panic!("failed to mmap bitcode file `{}`: {}", bc_path.display(), e) + }) + }; + let msg : Message = Message::AddImportOnlyModule { + module_data: SerializedModule::FromUncompressedFile(mmap), + work_product: module.source, + }; + // Schedule the module to be loaded + drop(tcx.tx_to_llvm_workers.lock().send(Box::new(msg))); +} + +pub fn pre_lto_bitcode_filename(module_name: &str) -> String { + format!("{}.{}", module_name, PRE_THIN_LTO_BC_EXT) +} + +fn msvc_imps_needed(tcx: TyCtxt) -> bool { + // This should never be true (because it's not supported). If it is true, + // something is wrong with commandline arg validation. + assert!(!(tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() && + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.opts.cg.prefer_dynamic)); + + tcx.sess.target.target.options.is_like_msvc && + tcx.sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateType::Rlib) && + // ThinLTO can't handle this workaround in all cases, so we don't + // emit the `__imp_` symbols. Instead we make them unnecessary by disallowing + // dynamic linking when cross-language LTO is enabled. + !tcx.sess.opts.debugging_opts.cross_lang_lto.enabled() +} diff --git a/src/librustc_codegen_ssa/base.rs b/src/librustc_codegen_ssa/base.rs new file mode 100644 index 0000000000000..2ffdf94f6c662 --- /dev/null +++ b/src/librustc_codegen_ssa/base.rs @@ -0,0 +1,1126 @@ +// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Codegen the completed AST to the LLVM IR. +//! +//! Some functions here, such as codegen_block and codegen_expr, return a value -- +//! the result of the codegen to LLVM -- while others, such as codegen_fn +//! and mono_item, are called only for the side effect of adding a +//! particular definition to the LLVM IR output we're producing. +//! +//! Hopefully useful general knowledge about codegen: +//! +//! * There's no way to find out the Ty type of a Value. Doing so +//! would be "trying to get the eggs out of an omelette" (credit: +//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty, +//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int, +//! 
int) and rec(x=int, y=int, z=int) will have the same llvm::Type. + +use {ModuleCodegen, ModuleKind, CachedModuleCodegen}; + +use rustc::dep_graph::cgu_reuse_tracker::CguReuse; +use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE}; +use rustc::middle::lang_items::StartFnLangItem; +use rustc::middle::weak_lang_items; +use rustc::mir::mono::{Linkage, Stats, CodegenUnitNameBuilder}; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt}; +use rustc::ty::query::Providers; +use rustc::middle::cstore::{self, LinkagePreference}; +use rustc::util::common::{time, print_time_passes_entry}; +use rustc::util::profiling::ProfileCategory; +use rustc::session::config::{self, EntryFnType, Lto}; +use rustc::session::Session; +use mir::place::PlaceRef; +use back::write::{OngoingCodegen, start_async_codegen, submit_pre_lto_module_to_llvm, + submit_post_lto_module_to_llvm}; +use {MemFlags, CrateInfo}; +use callee; +use rustc_mir::monomorphize::collector::{self, MonoItemCollectionMode}; +use rustc_mir::monomorphize::item::DefPathBasedNames; +use common::{self, RealPredicate, TypeKind, IntPredicate}; +use meth; +use mir; +use rustc::util::time_graph; +use rustc_mir::monomorphize::Instance; +use rustc_mir::monomorphize::partitioning::{self, PartitioningStrategy, + CodegenUnit, CodegenUnitExt}; +use mono_item::{MonoItem, BaseMonoItemExt}; +use rustc::util::nodemap::{FxHashMap, DefIdSet}; +use rustc_data_structures::sync::Lrc; +use rustc_codegen_utils::{symbol_names_test, check_for_rustc_errors_attr}; +use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA}; + +use interfaces::*; + +use std::any::Any; +use std::sync::Arc; +use std::time::{Instant, Duration}; +use std::cmp; +use std::sync::mpsc; +use syntax_pos::Span; +use syntax::attr; +use rustc::hir; + +use mir::operand::OperandValue; + +use std::marker::PhantomData; + + +pub struct StatRecorder<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + cx: &'a Cx, + name: Option, + istart: usize, + phantom: PhantomData<(&'ll (), &'tcx ())> +} + +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> StatRecorder<'a, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn new(cx: &'a Cx, name: String) -> Self { + let istart = cx.stats().borrow().n_llvm_insns; + StatRecorder { + cx, + name: Some(name), + istart, + phantom: PhantomData + } + } +} + +impl<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> Drop for + StatRecorder<'a, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn drop(&mut self) { + if self.cx.sess().codegen_stats() { + let mut stats = self.cx.stats().borrow_mut(); + let iend = stats.n_llvm_insns; + stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart)); + stats.n_fns += 1; + // Reset LLVM insn count to avoid compound costs. 
+ stats.n_llvm_insns = self.istart; + } + } +} + +pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind, + signed: bool) + -> IntPredicate { + match op { + hir::BinOpKind::Eq => IntPredicate::IntEQ, + hir::BinOpKind::Ne => IntPredicate::IntNE, + hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT }, + hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE }, + hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT }, + hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE }, + op => { + bug!("comparison_op_to_icmp_predicate: expected comparison operator, \ + found {:?}", + op) + } + } +} + +pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate { + match op { + hir::BinOpKind::Eq => RealPredicate::RealOEQ, + hir::BinOpKind::Ne => RealPredicate::RealUNE, + hir::BinOpKind::Lt => RealPredicate::RealOLT, + hir::BinOpKind::Le => RealPredicate::RealOLE, + hir::BinOpKind::Gt => RealPredicate::RealOGT, + hir::BinOpKind::Ge => RealPredicate::RealOGE, + op => { + bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \ + found {:?}", + op); + } + } +} + +pub fn compare_simd_types<'a, 'll:'a, 'tcx:'ll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + lhs: >::Value, + rhs: >::Value, + t: Ty<'tcx>, + ret_ty: >::Type, + op: hir::BinOpKind +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let signed = match t.sty { + ty::Float(_) => { + let cmp = bin_op_to_fcmp_predicate(op); + let cmp = bx.fcmp(cmp, lhs, rhs); + return bx.sext(cmp, ret_ty); + }, + ty::Uint(_) => false, + ty::Int(_) => true, + _ => bug!("compare_simd_types: invalid SIMD type"), + }; + + let cmp = bin_op_to_icmp_predicate(op, signed); + let cmp = bx.icmp(cmp, lhs, rhs); + // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension + // to get the correctly sized type. This will compile to a single instruction + // once the IR is converted to assembly if the SIMD instruction is supported + // by the target architecture. + bx.sext(cmp, ret_ty) +} + +/// Retrieve the information we are losing (making dynamic) in an unsizing +/// adjustment. +/// +/// The `old_info` argument is a bit funny. It is intended for use +/// in an upcast, where the new vtable for an object will be derived +/// from the old one. +pub fn unsized_info<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>>( + cx: &'a Cx, + source: Ty<'tcx>, + target: Ty<'tcx>, + old_info: Option, +) -> Cx::Value where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { + let (source, target) = cx.tcx().struct_lockstep_tails(source, target); + match (&source.sty, &target.sty) { + (&ty::Array(_, len), &ty::Slice(_)) => { + cx.const_usize(len.unwrap_usize(*cx.tcx())) + } + (&ty::Dynamic(..), &ty::Dynamic(..)) => { + // For now, upcasts are limited to changes in marker + // traits, and hence never actually require an actual + // change to the vtable. + old_info.expect("unsized_info: missing old info for trait upcast") + } + (_, &ty::Dynamic(ref data, ..)) => { + let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target)) + .field(cx, FAT_PTR_EXTRA); + cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()), + cx.backend_type(&vtable_ptr)) + } + _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", + source, + target), + } +} + +/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer. 
+pub fn unsize_thin_ptr<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + src: >::Value, + src_ty: Ty<'tcx>, + dst_ty: Ty<'tcx> +) -> (>::Value, >::Value) where + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty); + match (&src_ty.sty, &dst_ty.sty) { + (&ty::Ref(_, a, _), + &ty::Ref(_, b, _)) | + (&ty::Ref(_, a, _), + &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) | + (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), + &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => { + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty()); + assert!(bx.cx().type_is_sized(a)); + let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(&bx.cx().layout_of(b))); + (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None)) + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { + assert_eq!(def_a, def_b); + + let src_layout = bx.cx().layout_of(src_ty); + let dst_layout = bx.cx().layout_of(dst_ty); + let mut result = None; + for i in 0..src_layout.fields.count() { + let src_f = src_layout.field(bx.cx(), i); + assert_eq!(src_layout.fields.offset(i).bytes(), 0); + assert_eq!(dst_layout.fields.offset(i).bytes(), 0); + if src_f.is_zst() { + continue; + } + assert_eq!(src_layout.size, src_f.size); + + let dst_f = dst_layout.field(bx.cx(), i); + assert_ne!(src_f.ty, dst_f.ty); + assert_eq!(result, None); + result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty)); + } + let (lldata, llextra) = result.unwrap(); + // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. + (bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(&dst_layout, 0, true)), + bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(&dst_layout, 1, true))) + } + _ => bug!("unsize_thin_ptr: called on bad types"), + } +} + +/// Coerce `src`, which is a reference to a value of type `src_ty`, +/// to a value of type `dst_ty` and store the result in `dst` +pub fn coerce_unsized_into<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + src: PlaceRef<'tcx, >::Value>, + dst: PlaceRef<'tcx, >::Value> +) where &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let src_ty = src.layout.ty; + let dst_ty = dst.layout.ty; + let mut coerce_ptr = || { + let (base, info) = match bx.load_ref(&src).val { + OperandValue::Pair(base, info) => { + // fat-ptr to fat-ptr unsize preserves the vtable + // i.e. &'a fmt::Debug+Send => &'a fmt::Debug + // So we need to pointercast the base to ensure + // the types match up. + let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR); + (bx.pointercast(base, bx.cx().backend_type(&thin_ptr)), info) + } + OperandValue::Immediate(base) => { + unsize_thin_ptr(bx, base, src_ty, dst_ty) + } + OperandValue::Ref(..) 
=> bug!() + }; + OperandValue::Pair(base, info).store(bx, dst); + }; + match (&src_ty.sty, &dst_ty.sty) { + (&ty::Ref(..), &ty::Ref(..)) | + (&ty::Ref(..), &ty::RawPtr(..)) | + (&ty::RawPtr(..), &ty::RawPtr(..)) => { + coerce_ptr() + } + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => { + coerce_ptr() + } + + (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => { + assert_eq!(def_a, def_b); + + for i in 0..def_a.variants[0].fields.len() { + let src_f = src.project_field(bx, i); + let dst_f = dst.project_field(bx, i); + + if dst_f.layout.is_zst() { + continue; + } + + if src_f.layout.ty == dst_f.layout.ty { + memcpy_ty(bx, dst_f.llval, src_f.llval, src_f.layout, + src_f.align.min(dst_f.align), MemFlags::empty()); + } else { + coerce_unsized_into(bx, src_f, dst_f); + } + } + } + _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}", + src_ty, + dst_ty), + } +} + +pub fn cast_shift_expr_rhs<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + op: hir::BinOpKind, + lhs: >::Value, + rhs: >::Value +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + cast_shift_rhs(bx, op, lhs, rhs) +} + +fn cast_shift_rhs<'a, 'll :'a, 'tcx : 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + op: hir::BinOpKind, + lhs: >::Value, + rhs: >::Value, +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + // Shifts may have any size int on the rhs + if op.is_shift() { + let mut rhs_llty = bx.cx().val_ty(rhs); + let mut lhs_llty = bx.cx().val_ty(lhs); + if bx.cx().type_kind(rhs_llty) == TypeKind::Vector { + rhs_llty = bx.cx().element_type(rhs_llty) + } + if bx.cx().type_kind(lhs_llty) == TypeKind::Vector { + lhs_llty = bx.cx().element_type(lhs_llty) + } + let rhs_sz = bx.cx().int_width(rhs_llty); + let lhs_sz = bx.cx().int_width(lhs_llty); + if lhs_sz < rhs_sz { + bx.trunc(rhs, lhs_llty) + } else if lhs_sz > rhs_sz { + // FIXME (#1877: If shifting by negative + // values becomes not undefined then this is wrong. + bx.zext(rhs, lhs_llty) + } else { + rhs + } + } else { + rhs + } +} + +/// Returns whether this session's target will use SEH-based unwinding. +/// +/// This is only true for MSVC targets, and even then the 64-bit MSVC target +/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as +/// 64-bit MinGW) instead of "full SEH". 
+pub fn wants_msvc_seh(sess: &Session) -> bool { + sess.target.target.options.is_like_msvc +} + +pub fn call_assume<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( + bx: &mut Bx, + val: >::Value +) + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume"); + bx.call(assume_intrinsic, &[val], None); +} + +pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll ,'tcx>>( + bx: &mut Bx, + val: >::Value +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + if bx.cx().val_ty(val) == bx.cx().type_i1() { + bx.zext(val, bx.cx().type_i8()) + } else { + val + } +} + +pub fn to_immediate<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + val: >::Value, + layout: layout::TyLayout, +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + if let layout::Abi::Scalar(ref scalar) = layout.abi { + return to_immediate_scalar(bx, val, scalar); + } + val +} + +pub fn to_immediate_scalar<'a, 'll :'a, 'tcx :'ll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + val: >::Value, + scalar: &layout::Scalar, +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + if scalar.is_bool() { + return bx.trunc(val, bx.cx().type_i1()); + } + val +} + +pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll, Bx : BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + dst: >::Value, + src: >::Value, + layout: TyLayout<'tcx>, + align: Align, + flags: MemFlags, +) + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let size = layout.size.bytes(); + if size == 0 { + return; + } + + bx.call_memcpy(dst, src, bx.cx().const_usize(size), align, flags); +} + +pub fn codegen_instance<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx, + instance: Instance<'tcx> +) where &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { + let _s = if cx.sess().codegen_stats() { + let mut instance_name = String::new(); + DefPathBasedNames::new(*cx.tcx(), true, true) + .push_def_path(instance.def_id(), &mut instance_name); + Some(StatRecorder::new(cx, instance_name)) + } else { + None + }; + + // this is an info! to allow collecting monomorphization statistics + // and to allow finding the last function before LLVM aborts from + // release builds. + info!("codegen_instance({})", instance); + + let fn_ty = instance.ty(*cx.tcx()); + let sig = common::ty_fn_sig(cx, fn_ty); + let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig); + + let lldecl = match cx.instances().borrow().get(&instance) { + Some(&val) => val, + None => bug!("Instance `{:?}` not already declared", instance) + }; + + cx.stats().borrow_mut().n_closures += 1; + + let mir = cx.tcx().instance_mir(instance.def); + mir::codegen_mir::<'a, 'll, 'tcx, Bx>( + cx, lldecl, &mir, instance, sig + ); +} + +/// Create the `main` function which will initialize the rust runtime and call +/// users main function. 
+pub fn maybe_create_entry_wrapper<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx +) + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let (main_def_id, span) = match *cx.sess().entry_fn.borrow() { + Some((id, span, _)) => { + (cx.tcx().hir.local_def_id(id), span) + } + None => return, + }; + + let instance = Instance::mono(*cx.tcx(), main_def_id); + + if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) { + // We want to create the wrapper in the same codegen unit as Rust's main + // function. + return; + } + + let main_llfn = cx.get_fn(instance); + + let et = cx.sess().entry_fn.get().map(|e| e.2); + match et { + Some(EntryFnType::Main) => create_entry_fn::(cx, span, main_llfn, main_def_id, true), + Some(EntryFnType::Start) => create_entry_fn::(cx, span, main_llfn, main_def_id, false), + None => {} // Do nothing. + } + + fn create_entry_fn<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx, + sp: Span, + rust_main: >::Value, + rust_main_def_id: DefId, + use_start_lang_item: bool, + ) + where &'a Bx::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { + let llfty = + cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int()); + + let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output(); + // Given that `main()` has no arguments, + // then its return type cannot have + // late-bound regions, since late-bound + // regions must appear in the argument + // listing. + let main_ret_ty = cx.tcx().erase_regions( + &main_ret_ty.no_late_bound_regions().unwrap(), + ); + + if cx.get_defined_value("main").is_some() { + // FIXME: We should be smart and show a better diagnostic here. + cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times") + .help("did you use #[no_mangle] on `fn main`? 
Use #[start] instead") + .emit(); + cx.sess().abort_if_errors(); + bug!(); + } + let llfn = cx.declare_cfn("main", llfty); + + // `main` should respect same config for frame pointer elimination as rest of code + cx.set_frame_pointer_elimination(llfn); + cx.apply_target_cpu_attr(llfn); + + let mut bx = Bx::new_block(&cx, llfn, "top"); + + bx.insert_reference_to_gdb_debug_scripts_section_global(); + + // Params from native main() used as args for rust start function + let param_argc = cx.get_param(llfn, 0); + let param_argv = cx.get_param(llfn, 1); + let arg_argc = bx.intcast(param_argc, cx.type_isize(), true); + let arg_argv = param_argv; + + let (start_fn, args) = if use_start_lang_item { + let start_def_id = cx.tcx().require_lang_item(StartFnLangItem); + let start_fn = callee::resolve_and_get_fn( + cx, + start_def_id, + cx.tcx().intern_substs(&[main_ret_ty.into()]), + ); + (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())), + arg_argc, arg_argv]) + } else { + debug!("using user-defined start fn"); + (rust_main, vec![arg_argc, arg_argv]) + }; + + let result = bx.call(start_fn, &args, None); + let cast = bx.intcast(result, cx.type_int(), true); + bx.ret(cast); + } +} + +pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX; +pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId = + time_graph::TimelineId(CODEGEN_WORKER_ID); +pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind = + time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]); + + +pub fn codegen_crate( + backend: B, + tcx: TyCtxt<'ll, 'tcx, 'tcx>, + rx: mpsc::Receiver> +) -> OngoingCodegen { + + check_for_rustc_errors_attr(tcx); + + if let Some(true) = tcx.sess.opts.debugging_opts.thinlto { + if backend.thin_lto_available() { + tcx.sess.fatal("this compiler's LLVM does not support ThinLTO"); + } + } + + if (tcx.sess.opts.debugging_opts.pgo_gen.is_some() || + !tcx.sess.opts.debugging_opts.pgo_use.is_empty()) && + backend.pgo_available() + { + tcx.sess.fatal("this compiler's LLVM does not support PGO"); + } + + let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx); + + // Codegen the metadata. + tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen)); + + let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("metadata")).as_str() + .to_string(); + let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name); + let metadata = time(tcx.sess, "write metadata", || { + backend.write_metadata(tcx, &metadata_llvm_module) + }); + tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen)); + + let metadata_module = ModuleCodegen { + name: metadata_cgu_name, + module_llvm: metadata_llvm_module, + kind: ModuleKind::Metadata, + }; + + let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph { + Some(time_graph::TimeGraph::new()) + } else { + None + }; + + // Skip crate items and just output metadata in -Z no-codegen mode. + if tcx.sess.opts.debugging_opts.no_codegen || + !tcx.sess.opts.output_types.should_codegen() { + let ongoing_codegen = start_async_codegen( + backend, + tcx, + time_graph.clone(), + metadata, + rx, + 1); + + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + ongoing_codegen.codegen_finished(tcx); + + assert_and_save_dep_graph(tcx); + + ongoing_codegen.check_for_errors(tcx.sess); + + return ongoing_codegen; + } + + // Run the monomorphization collector and partition the collected items into + // codegen units. 
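+    // (`collect_and_partition_mono_items` returns a pair of the codegen'd item
+    // `DefId`s and the list of codegen units; only the second component is
+    // needed here, hence the `.1` below. See the query provider defined
+    // further down in this file.)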
+ let codegen_units = + tcx.collect_and_partition_mono_items(LOCAL_CRATE).1; + let codegen_units = (*codegen_units).clone(); + + // Force all codegen_unit queries so they are already either red or green + // when compile_codegen_unit accesses them. We are not able to re-execute + // the codegen_unit query from just the DepNode, so an unknown color would + // lead to having to re-execute compile_codegen_unit, possibly + // unnecessarily. + if tcx.dep_graph.is_fully_enabled() { + for cgu in &codegen_units { + tcx.codegen_unit(cgu.name().clone()); + } + } + + let ongoing_codegen = start_async_codegen( + backend.clone(), + tcx, + time_graph.clone(), + metadata, + rx, + codegen_units.len()); + + // Codegen an allocator shim, if necessary. + // + // If the crate doesn't have an `allocator_kind` set then there's definitely + // no shim to generate. Otherwise we also check our dependency graph for all + // our output crate types. If anything there looks like its a `Dynamic` + // linkage, then it's already got an allocator shim and we'll be using that + // one instead. If nothing exists then it's our job to generate the + // allocator! + let any_dynamic_crate = tcx.sess.dependency_formats.borrow() + .iter() + .any(|(_, list)| { + use rustc::middle::dependency_format::Linkage; + list.iter().any(|linkage| { + match linkage { + Linkage::Dynamic => true, + _ => false, + } + }) + }); + let allocator_module = if any_dynamic_crate { + None + } else if let Some(kind) = *tcx.sess.allocator_kind.get() { + let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE, + &["crate"], + Some("allocator")).as_str() + .to_string(); + let modules = backend.new_metadata(tcx.sess, &llmod_id); + time(tcx.sess, "write allocator module", || { + backend.codegen_allocator(tcx, &modules, kind) + }); + + Some(ModuleCodegen { + name: llmod_id, + module_llvm: modules, + kind: ModuleKind::Allocator, + }) + } else { + None + }; + + if let Some(allocator_module) = allocator_module { + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, allocator_module); + } + + ongoing_codegen.submit_pre_codegened_module_to_llvm(tcx, metadata_module); + + // We sort the codegen units by size. This way we can schedule work for LLVM + // a bit more efficiently. 
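+    // (Sorting in decreasing size order means the largest, and therefore
+    // longest-running, LLVM jobs are started first, so they can overlap with
+    // codegen of the remaining, smaller units.)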
+ let codegen_units = { + let mut codegen_units = codegen_units; + codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate())); + codegen_units + }; + + let mut total_codegen_time = Duration::new(0, 0); + let mut all_stats = Stats::default(); + + for cgu in codegen_units.into_iter() { + ongoing_codegen.wait_for_signal_to_codegen_item(); + ongoing_codegen.check_for_errors(tcx.sess); + + let cgu_reuse = determine_cgu_reuse(tcx, &cgu); + tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse); + + match cgu_reuse { + CguReuse::No => { + let _timing_guard = time_graph.as_ref().map(|time_graph| { + time_graph.start(CODEGEN_WORKER_TIMELINE, + CODEGEN_WORK_PACKAGE_KIND, + &format!("codegen {}", cgu.name())) + }); + let start_time = Instant::now(); + let stats = backend.compile_codegen_unit(tcx, *cgu.name()); + all_stats.extend(stats); + total_codegen_time += start_time.elapsed(); + false + } + CguReuse::PreLto => { + submit_pre_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen { + name: cgu.name().to_string(), + source: cgu.work_product(tcx), + }); + true + } + CguReuse::PostLto => { + submit_post_lto_module_to_llvm(&backend, tcx, CachedModuleCodegen { + name: cgu.name().to_string(), + source: cgu.work_product(tcx), + }); + true + } + }; + } + + ongoing_codegen.codegen_finished(tcx); + + // Since the main thread is sometimes blocked during codegen, we keep track + // -Ztime-passes output manually. + print_time_passes_entry(tcx.sess.time_passes(), + "codegen to LLVM IR", + total_codegen_time); + + rustc_incremental::assert_module_sources::assert_module_sources(tcx); + + symbol_names_test::report_symbol_names(tcx); + + if tcx.sess.codegen_stats() { + println!("--- codegen stats ---"); + println!("n_glues_created: {}", all_stats.n_glues_created); + println!("n_null_glues: {}", all_stats.n_null_glues); + println!("n_real_glues: {}", all_stats.n_real_glues); + + println!("n_fns: {}", all_stats.n_fns); + println!("n_inlines: {}", all_stats.n_inlines); + println!("n_closures: {}", all_stats.n_closures); + println!("fn stats:"); + all_stats.fn_stats.sort_by_key(|&(_, insns)| insns); + for &(ref name, insns) in all_stats.fn_stats.iter() { + println!("{} insns, {}", insns, *name); + } + } + + if tcx.sess.count_llvm_insns() { + for (k, v) in all_stats.llvm_insns.iter() { + println!("{:7} {}", *v, *k); + } + } + + ongoing_codegen.check_for_errors(tcx.sess); + + assert_and_save_dep_graph(tcx); + ongoing_codegen +} + +fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) { + time(tcx.sess, + "assert dep graph", + || rustc_incremental::assert_dep_graph(tcx)); + + time(tcx.sess, + "serialize dep graph", + || rustc_incremental::save_dep_graph(tcx)); +} + +fn collect_and_partition_mono_items<'ll, 'tcx>( + tcx: TyCtxt<'ll, 'tcx, 'tcx>, + cnum: CrateNum, +) -> (Arc, Arc>>>) +{ + assert_eq!(cnum, LOCAL_CRATE); + + let collection_mode = match tcx.sess.opts.debugging_opts.print_mono_items { + Some(ref s) => { + let mode_string = s.to_lowercase(); + let mode_string = mode_string.trim(); + if mode_string == "eager" { + MonoItemCollectionMode::Eager + } else { + if mode_string != "lazy" { + let message = format!("Unknown codegen-item collection mode '{}'. 
\ + Falling back to 'lazy' mode.", + mode_string); + tcx.sess.warn(&message); + } + + MonoItemCollectionMode::Lazy + } + } + None => { + if tcx.sess.opts.cg.link_dead_code { + MonoItemCollectionMode::Eager + } else { + MonoItemCollectionMode::Lazy + } + } + }; + + let (items, inlining_map) = + time(tcx.sess, "monomorphization collection", || { + collector::collect_crate_mono_items(tcx, collection_mode) + }); + + tcx.sess.abort_if_errors(); + + ::rustc_mir::monomorphize::assert_symbols_are_distinct(tcx, items.iter()); + + let strategy = if tcx.sess.opts.incremental.is_some() { + PartitioningStrategy::PerModule + } else { + PartitioningStrategy::FixedUnitCount(tcx.sess.codegen_units()) + }; + + let codegen_units = time(tcx.sess, "codegen unit partitioning", || { + partitioning::partition(tcx, + items.iter().cloned(), + strategy, + &inlining_map) + .into_iter() + .map(Arc::new) + .collect::>() + }); + + let mono_items: DefIdSet = items.iter().filter_map(|mono_item| { + match *mono_item { + MonoItem::Fn(ref instance) => Some(instance.def_id()), + MonoItem::Static(def_id) => Some(def_id), + _ => None, + } + }).collect(); + + if tcx.sess.opts.debugging_opts.print_mono_items.is_some() { + let mut item_to_cgus: FxHashMap<_, Vec<_>> = Default::default(); + + for cgu in &codegen_units { + for (&mono_item, &linkage) in cgu.items() { + item_to_cgus.entry(mono_item) + .or_default() + .push((cgu.name().clone(), linkage)); + } + } + + let mut item_keys: Vec<_> = items + .iter() + .map(|i| { + let mut output = i.to_string(tcx); + output.push_str(" @@"); + let mut empty = Vec::new(); + let cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty); + cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone()); + cgus.dedup(); + for &(ref cgu_name, (linkage, _)) in cgus.iter() { + output.push_str(" "); + output.push_str(&cgu_name.as_str()); + + let linkage_abbrev = match linkage { + Linkage::External => "External", + Linkage::AvailableExternally => "Available", + Linkage::LinkOnceAny => "OnceAny", + Linkage::LinkOnceODR => "OnceODR", + Linkage::WeakAny => "WeakAny", + Linkage::WeakODR => "WeakODR", + Linkage::Appending => "Appending", + Linkage::Internal => "Internal", + Linkage::Private => "Private", + Linkage::ExternalWeak => "ExternalWeak", + Linkage::Common => "Common", + }; + + output.push_str("["); + output.push_str(linkage_abbrev); + output.push_str("]"); + } + output + }) + .collect(); + + item_keys.sort(); + + for item in item_keys { + println!("MONO_ITEM {}", item); + } + } + + (Arc::new(mono_items), Arc::new(codegen_units)) +} + + +impl CrateInfo { + pub fn new(tcx: TyCtxt) -> CrateInfo { + let mut info = CrateInfo { + panic_runtime: None, + compiler_builtins: None, + profiler_runtime: None, + sanitizer_runtime: None, + is_no_builtins: Default::default(), + native_libraries: Default::default(), + used_libraries: tcx.native_libraries(LOCAL_CRATE), + link_args: tcx.link_args(LOCAL_CRATE), + crate_name: Default::default(), + used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic), + used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic), + used_crate_source: Default::default(), + wasm_imports: Default::default(), + lang_item_to_crate: Default::default(), + missing_lang_items: Default::default(), + }; + let lang_items = tcx.lang_items(); + + let load_wasm_items = tcx.sess.crate_types.borrow() + .iter() + .any(|c| *c != config::CrateType::Rlib) && + tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown"; + + if load_wasm_items { + 
info.load_wasm_imports(tcx, LOCAL_CRATE); + } + + for &cnum in tcx.crates().iter() { + info.native_libraries.insert(cnum, tcx.native_libraries(cnum)); + info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string()); + info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum)); + if tcx.is_panic_runtime(cnum) { + info.panic_runtime = Some(cnum); + } + if tcx.is_compiler_builtins(cnum) { + info.compiler_builtins = Some(cnum); + } + if tcx.is_profiler_runtime(cnum) { + info.profiler_runtime = Some(cnum); + } + if tcx.is_sanitizer_runtime(cnum) { + info.sanitizer_runtime = Some(cnum); + } + if tcx.is_no_builtins(cnum) { + info.is_no_builtins.insert(cnum); + } + if load_wasm_items { + info.load_wasm_imports(tcx, cnum); + } + let missing = tcx.missing_lang_items(cnum); + for &item in missing.iter() { + if let Ok(id) = lang_items.require(item) { + info.lang_item_to_crate.insert(item, id.krate); + } + } + + // No need to look for lang items that are whitelisted and don't + // actually need to exist. + let missing = missing.iter() + .cloned() + .filter(|&l| !weak_lang_items::whitelisted(tcx, l)) + .collect(); + info.missing_lang_items.insert(cnum, missing); + } + + return info + } + + fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) { + for (&id, module) in tcx.wasm_import_module_map(cnum).iter() { + let instance = Instance::mono(tcx, id); + let import_name = tcx.symbol_name(instance); + self.wasm_imports.insert(import_name.to_string(), module.clone()); + } + } +} + +fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool { + let (all_mono_items, _) = + tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all_mono_items.contains(&id) +} + + +pub fn provide(providers: &mut Providers) { + providers.collect_and_partition_mono_items = + collect_and_partition_mono_items; + + providers.is_codegened_item = is_codegened_item; + + providers.codegen_unit = |tcx, name| { + let (_, all) = tcx.collect_and_partition_mono_items(LOCAL_CRATE); + all.iter() + .find(|cgu| *cgu.name() == name) + .cloned() + .unwrap_or_else(|| panic!("failed to find cgu with name {:?}", name)) + }; + + provide_extern(providers); +} + +pub fn provide_extern(providers: &mut Providers) { + providers.dllimport_foreign_items = |tcx, krate| { + let module_map = tcx.foreign_modules(krate); + let module_map = module_map.iter() + .map(|lib| (lib.def_id, lib)) + .collect::>(); + + let dllimports = tcx.native_libraries(krate) + .iter() + .filter(|lib| { + if lib.kind != cstore::NativeLibraryKind::NativeUnknown { + return false + } + let cfg = match lib.cfg { + Some(ref cfg) => cfg, + None => return true, + }; + attr::cfg_matches(cfg, &tcx.sess.parse_sess, None) + }) + .filter_map(|lib| lib.foreign_module) + .map(|id| &module_map[&id]) + .flat_map(|module| module.foreign_items.iter().cloned()) + .collect(); + Lrc::new(dllimports) + }; + + providers.is_dllimport_foreign_item = |tcx, def_id| { + tcx.dllimport_foreign_items(def_id.krate).contains(&def_id) + }; +} + + +fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, + cgu: &CodegenUnit<'tcx>) + -> CguReuse { + if !tcx.dep_graph.is_fully_enabled() { + return CguReuse::No + } + + let work_product_id = &cgu.work_product_id(); + if tcx.dep_graph.previous_work_product(work_product_id).is_none() { + // We don't have anything cached for this CGU. This can happen + // if the CGU did not exist in the previous session. + return CguReuse::No + } + + // Try to mark the CGU as green. 
If it we can do so, it means that nothing + // affecting the LLVM module has changed and we can re-use a cached version. + // If we compile with any kind of LTO, this means we can re-use the bitcode + // of the Pre-LTO stage (possibly also the Post-LTO version but we'll only + // know that later). If we are not doing LTO, there is only one optimized + // version of each module, so we re-use that. + let dep_node = cgu.codegen_dep_node(tcx); + assert!(!tcx.dep_graph.dep_node_exists(&dep_node), + "CompileCodegenUnit dep-node for CGU `{}` already exists before marking.", + cgu.name()); + + if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() { + // We can re-use either the pre- or the post-thinlto state + if tcx.sess.lto() != Lto::No { + CguReuse::PreLto + } else { + CguReuse::PostLto + } + } else { + CguReuse::No + } +} diff --git a/src/librustc_codegen_ssa/callee.rs b/src/librustc_codegen_ssa/callee.rs new file mode 100644 index 0000000000000..52359b334dc58 --- /dev/null +++ b/src/librustc_codegen_ssa/callee.rs @@ -0,0 +1,32 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use interfaces::*; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; +use rustc::ty::subst::Substs; +use rustc::hir::def_id::DefId; + +pub fn resolve_and_get_fn<'a, 'll: 'a, 'tcx: 'll, Cx : 'a + CodegenMethods<'a, 'll, 'tcx>>( + cx: &Cx, + def_id: DefId, + substs: &'tcx Substs<'tcx>, +) -> Cx::Value + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + cx.get_fn( + ty::Instance::resolve( + *cx.tcx(), + ty::ParamEnv::reveal_all(), + def_id, + substs + ).unwrap() + ) +} diff --git a/src/librustc_codegen_ssa/common.rs b/src/librustc_codegen_ssa/common.rs new file mode 100644 index 0000000000000..8493f57c51d71 --- /dev/null +++ b/src/librustc_codegen_ssa/common.rs @@ -0,0 +1,352 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
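For context, `resolve_and_get_fn` above is what the generic entry-point wrapper uses to turn a known `DefId` plus substitutions into a backend function value. A minimal sketch of such a call site, mirroring `maybe_create_entry_wrapper` earlier in this patch (the `cx` and `main_ret_ty` bindings are assumed from that surrounding context, this is not new API):

```rust
// Sketch only: resolve the `start` lang item generically, as the entry wrapper does.
// `cx` is any codegen context implementing CodegenMethods; `main_ret_ty` is the
// return type of the user's `main`.
let start_def_id = cx.tcx().require_lang_item(StartFnLangItem);
let start_fn = callee::resolve_and_get_fn(
    cx,
    start_def_id,
    cx.tcx().intern_substs(&[main_ret_ty.into()]),
);
```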
+#![allow(non_camel_case_types, non_snake_case)] + +use rustc::ty::{self, Ty, TyCtxt}; +use syntax_pos::{DUMMY_SP, Span}; + +use rustc::hir::def_id::DefId; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; +use rustc::middle::lang_items::LangItem; +use base; +use interfaces::*; + +use rustc::hir; +use interfaces::BuilderMethods; + +use std::iter; + +use rustc_target::spec::abi::Abi; + + +pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.needs_drop(tcx, ty::ParamEnv::reveal_all()) +} + +pub fn type_is_sized<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_sized(tcx.at(DUMMY_SP), ty::ParamEnv::reveal_all()) +} + +pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool { + ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP) +} + +pub struct OperandBundleDef<'a, V> { + pub name: &'a str, + pub val: V +} + +impl<'a, V> OperandBundleDef<'a, V> { + pub fn new(name: &'a str, val: V) -> Self { + OperandBundleDef { + name, + val + } + } +} + + + +/// A structure representing an active landing pad for the duration of a basic +/// block. +/// +/// Each `Block` may contain an instance of this, indicating whether the block +/// is part of a landing pad or not. This is used to make decision about whether +/// to emit `invoke` instructions (e.g. in a landing pad we don't continue to +/// use `invoke`) and also about various function call metadata. +/// +/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is +/// just a bunch of `None` instances (not too interesting), but for MSVC +/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data. +/// When inside of a landing pad, each function call in LLVM IR needs to be +/// annotated with which landing pad it's a part of. This is accomplished via +/// the `OperandBundleDef` value created for MSVC landing pads. +pub struct Funclet<'ll, V> { + cleanuppad: V, + operand: OperandBundleDef<'ll, V>, +} + +impl<'ll, V : CodegenObject> Funclet<'ll, V> { + pub fn new(cleanuppad: V) -> Self { + Funclet { + cleanuppad, + operand: OperandBundleDef::new("funclet", cleanuppad), + } + } + + pub fn cleanuppad(&self) -> V { + self.cleanuppad + } + + pub fn bundle(&self) -> &OperandBundleDef<'ll, V> { + &self.operand + } +} + +pub enum IntPredicate { + IntEQ, + IntNE, + IntUGT, + IntUGE, + IntULT, + IntULE, + IntSGT, + IntSGE, + IntSLT, + IntSLE +} + + +#[allow(dead_code)] +pub enum RealPredicate { + RealPredicateFalse, + RealOEQ, + RealOGT, + RealOGE, + RealOLT, + RealOLE, + RealONE, + RealORD, + RealUNO, + RealUEQ, + RealUGT, + RealUGE, + RealULT, + RealULE, + RealUNE, + RealPredicateTrue +} + +pub enum AtomicRmwBinOp { + AtomicXchg, + AtomicAdd, + AtomicSub, + AtomicAnd, + AtomicNand, + AtomicOr, + AtomicXor, + AtomicMax, + AtomicMin, + AtomicUMax, + AtomicUMin +} + +pub enum AtomicOrdering { + #[allow(dead_code)] + NotAtomic, + Unordered, + Monotonic, + // Consume, // Not specified yet. + Acquire, + Release, + AcquireRelease, + SequentiallyConsistent, +} + +pub enum SynchronizationScope { + // FIXME: figure out if this variant is needed at all. 
+ #[allow(dead_code)] + Other, + SingleThread, + CrossThread, +} + +#[derive(Copy, Clone, PartialEq, Debug)] +pub enum TypeKind { + Void, + Half, + Float, + Double, + X86_FP80, + FP128, + PPC_FP128, + Label, + Integer, + Function, + Struct, + Array, + Pointer, + Vector, + Metadata, + X86_MMX, + Token, +} + +// FIXME(mw): Anything that is produced via DepGraph::with_task() must implement +// the HashStable trait. Normally DepGraph::with_task() calls are +// hidden behind queries, but CGU creation is a special case in two +// ways: (1) it's not a query and (2) CGU are output nodes, so their +// Fingerprints are not actually needed. It remains to be clarified +// how exactly this case will be handled in the red/green system but +// for now we content ourselves with providing a no-op HashStable +// implementation for CGUs. +mod temp_stable_hash_impls { + use rustc_data_structures::stable_hasher::{StableHasherResult, StableHasher, + HashStable}; + use ModuleCodegen; + + impl HashStable for ModuleCodegen { + fn hash_stable(&self, + _: &mut HCX, + _: &mut StableHasher) { + // do nothing + } + } +} + + + +// To avoid UB from LLVM, these two functions mask RHS with an +// appropriate mask unconditionally (i.e. the fallback behavior for +// all shifts). For 32- and 64-bit types, this matches the semantics +// of Java. (See related discussion on #1877 and #10183.) + +pub fn build_unchecked_lshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + lhs: >::Value, + rhs: >::Value +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + bx.shl(lhs, rhs) +} + +pub fn build_unchecked_rshift<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + lhs_t: Ty<'tcx>, + lhs: >::Value, + rhs: >::Value +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs); + // #1877, #10183: Ensure that input is always valid + let rhs = shift_mask_rhs(bx, rhs); + let is_signed = lhs_t.is_signed(); + if is_signed { + bx.ashr(lhs, rhs) + } else { + bx.lshr(lhs, rhs) + } +} + +fn shift_mask_rhs<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + rhs: >::Value +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let rhs_llty = bx.cx().val_ty(rhs); + let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false); + bx.and(rhs, shift_val) +} + +pub fn shift_mask_val<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + llty: >::Type, + mask_llty: >::Type, + invert: bool +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + let kind = bx.cx().type_kind(llty); + match kind { + TypeKind::Integer => { + // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc. 
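+            // e.g. for a 32-bit integer the mask is 31 (0b1_1111), so a shift
+            // amount of 37 is masked down to 5 instead of producing LLVM UB.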
+ let val = bx.cx().int_width(llty) - 1; + if invert { + bx.cx().const_int(mask_llty, !val as i64) + } else { + bx.cx().const_uint(mask_llty, val) + } + }, + TypeKind::Vector => { + let mask = shift_mask_val( + bx, + bx.cx().element_type(llty), + bx.cx().element_type(mask_llty), + invert + ); + bx.vector_splat(bx.cx().vector_length(mask_llty), mask) + }, + _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind), + } +} + +pub fn ty_fn_sig<'a, 'll: 'a, 'tcx:'ll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>>( + cx: &'a Cx, + ty: Ty<'tcx> +) -> ty::PolyFnSig<'tcx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + match ty.sty { + ty::FnDef(..) | + // Shims currently have type FnPtr. Not sure this should remain. + ty::FnPtr(_) => ty.fn_sig(*cx.tcx()), + ty::Closure(def_id, substs) => { + let tcx = *cx.tcx(); + let sig = substs.closure_sig(def_id, tcx); + + let env_ty = tcx.closure_env_ty(def_id, substs).unwrap(); + sig.map_bound(|sig| tcx.mk_fn_sig( + iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()), + sig.output(), + sig.variadic, + sig.unsafety, + sig.abi + )) + } + ty::Generator(def_id, substs, _) => { + let tcx = *cx.tcx(); + let sig = substs.poly_sig(def_id, tcx); + + let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv); + let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); + + sig.map_bound(|sig| { + let state_did = tcx.lang_items().gen_state().unwrap(); + let state_adt_ref = tcx.adt_def(state_did); + let state_substs = tcx.intern_substs(&[ + sig.yield_ty.into(), + sig.return_ty.into(), + ]); + let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); + + tcx.mk_fn_sig(iter::once(env_ty), + ret_ty, + false, + hir::Unsafety::Normal, + Abi::Rust + ) + }) + } + _ => bug!("unexpected type {:?} to ty_fn_sig", ty) + } +} + +pub fn langcall(tcx: TyCtxt, + span: Option, + msg: &str, + li: LangItem) + -> DefId { + match tcx.lang_items().require(li) { + Ok(id) => id, + Err(s) => { + let msg = format!("{} {}", msg, s); + match span { + Some(span) => tcx.sess.span_fatal(span, &msg[..]), + None => tcx.sess.fatal(&msg[..]), + } + } + } +} diff --git a/src/librustc_codegen_ssa/debuginfo.rs b/src/librustc_codegen_ssa/debuginfo.rs new file mode 100644 index 0000000000000..0fc61422bb3a2 --- /dev/null +++ b/src/librustc_codegen_ssa/debuginfo.rs @@ -0,0 +1,92 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use syntax_pos::{BytePos, Span}; +use rustc::hir::def_id::CrateNum; +use std::cell::Cell; + +pub enum FunctionDebugContext { + RegularContext(FunctionDebugContextData), + DebugInfoDisabled, + FunctionWithoutDebugInfo, +} + +impl FunctionDebugContext { + pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData { + match *self { + FunctionDebugContext::RegularContext(ref data) => data, + FunctionDebugContext::DebugInfoDisabled => { + span_bug!(span, "{}", FunctionDebugContext::::debuginfo_disabled_message()); + } + FunctionDebugContext::FunctionWithoutDebugInfo => { + span_bug!(span, "{}", FunctionDebugContext::::should_be_ignored_message()); + } + } + } + + fn debuginfo_disabled_message() -> &'static str { + "debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!" 
+ } + + fn should_be_ignored_message() -> &'static str { + "debuginfo: Error trying to access FunctionDebugContext for function that should be \ + ignored by debug info!" + } +} + +/// Enables emitting source locations for the given functions. +/// +/// Since we don't want source locations to be emitted for the function prelude, +/// they are disabled when beginning to codegen a new function. This functions +/// switches source location emitting on and must therefore be called before the +/// first real statement/expression of the function is codegened. +pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext) { + match *dbg_context { + FunctionDebugContext::RegularContext(ref data) => { + data.source_locations_enabled.set(true) + }, + _ => { /* safe to ignore */ } + } +} + +pub struct FunctionDebugContextData { + pub fn_metadata: D, + pub source_locations_enabled: Cell, + pub defining_crate: CrateNum, +} + +pub enum VariableAccess<'a, V> { + // The llptr given is an alloca containing the variable's value + DirectVariable { alloca: V }, + // The llptr given is an alloca containing the start of some pointer chain + // leading to the variable's content. + IndirectVariable { alloca: V, address_operations: &'a [i64] } +} + +pub enum VariableKind { + ArgumentVariable(usize /*index*/), + LocalVariable, +} + + +#[derive(Clone, Copy, Debug)] +pub struct MirDebugScope { + pub scope_metadata: Option, + // Start and end offsets of the file to which this DIScope belongs. + // These are used to quickly determine whether some span refers to the same file. + pub file_start_pos: BytePos, + pub file_end_pos: BytePos, +} + +impl MirDebugScope { + pub fn is_valid(&self) -> bool { + !self.scope_metadata.is_none() + } +} diff --git a/src/librustc_codegen_ssa/diagnostics.rs b/src/librustc_codegen_ssa/diagnostics.rs new file mode 100644 index 0000000000000..fd343a45561ce --- /dev/null +++ b/src/librustc_codegen_ssa/diagnostics.rs @@ -0,0 +1,48 @@ +// Copyright 2015 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![allow(non_snake_case)] + +register_long_diagnostics! { + +E0668: r##" +Malformed inline assembly rejected by LLVM. + +LLVM checks the validity of the constraints and the assembly string passed to +it. This error implies that LLVM seems something wrong with the inline +assembly call. + +In particular, it can happen if you forgot the closing bracket of a register +constraint (see issue #51430): +```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail) +#![feature(asm)] + +fn main() { + let rax: u64; + unsafe { + asm!("" :"={rax"(rax)); + println!("Accumulator is: {}", rax); + } +} +``` +"##, + +E0669: r##" +Cannot convert inline assembly operand to a single LLVM value. + +This error usually happens when trying to pass in a value to an input inline +assembly operand that is actually a pair of values. In particular, this can +happen when trying to pass in a slice, for instance a `&str`. In Rust, these +values are represented internally as a pair of values, the pointer and its +length. When passed as an input operand, this pair of values can not be +coerced into a register and thus we must fail with an error. 
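+For instance, something along these lines can run into this error (illustrative
+sketch only):
+```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail)
+#![feature(asm)]
+
+fn main() {
+    let s = "hello";
+    unsafe {
+        // `s` is a (pointer, length) pair and cannot be squeezed into a
+        // single register operand.
+        asm!("" : : "r"(s));
+    }
+}
+```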
+"## + +} diff --git a/src/librustc_codegen_llvm/glue.rs b/src/librustc_codegen_ssa/glue.rs similarity index 69% rename from src/librustc_codegen_llvm/glue.rs rename to src/librustc_codegen_ssa/glue.rs index 842bdf3cb493f..dca14602ef4a6 100644 --- a/src/librustc_codegen_llvm/glue.rs +++ b/src/librustc_codegen_ssa/glue.rs @@ -14,24 +14,29 @@ use std; -use builder::Builder; -use common::*; -use llvm; +use common::IntPredicate; use meth; -use rustc::ty::layout::LayoutOf; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::ty::{self, Ty}; -use value::Value; +use interfaces::*; -pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Option<&'ll Value>) - -> (&'ll Value, &'ll Value) { +pub fn size_and_align_of_dst<'a, 'll: 'a, 'tcx: 'll, + Bx: BuilderMethods<'a, 'll, 'tcx> + >( + bx: &mut Bx, + t: Ty<'tcx>, + info: Option<>::Value> +) -> (>::Value, >::Value) where + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ debug!("calculate size of DST: {}; with lost info: {:?}", t, info); - if bx.cx.type_is_sized(t) { - let (size, align) = bx.cx.size_and_align_of(t); + if bx.cx().type_is_sized(t) { + let (size, align) = bx.cx().layout_of(t).size_and_align(); debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}", t, info, size, align); - let size = C_usize(bx.cx, size.bytes()); - let align = C_usize(bx.cx, align.abi()); + let size = bx.cx().const_usize(size.bytes()); + let align = bx.cx().const_usize(align.abi()); return (size, align); } match t.sty { @@ -44,12 +49,12 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt let unit = t.sequence_element_type(bx.tcx()); // The info in this case is the length of the str, so the size is that // times the unit size. - let (size, align) = bx.cx.size_and_align_of(unit); - (bx.mul(info.unwrap(), C_usize(bx.cx, size.bytes())), - C_usize(bx.cx, align.abi())) + let (size, align) = bx.cx().layout_of(unit).size_and_align(); + (bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())), + bx.cx().const_usize(align.abi())) } _ => { - let cx = bx.cx; + let cx = bx.cx(); // First get the size of all statically known fields. // Don't use size_of because it also rounds up to alignment, which we // want to avoid, as the unsized field's alignment could be smaller. @@ -62,8 +67,8 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt let sized_align = layout.align.abi(); debug!("DST {} statically sized prefix size: {} align: {}", t, sized_size, sized_align); - let sized_size = C_usize(cx, sized_size); - let sized_align = C_usize(cx, sized_align); + let sized_size = cx.const_usize(sized_size); + let sized_align = cx.const_usize(sized_align); // Recurse to get the size of the dynamically sized field (must be // the last field). @@ -89,16 +94,17 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt // Choose max of two known alignments (combined value must // be aligned according to more restrictive of the two). - let align = match (const_to_opt_u128(sized_align, false), - const_to_opt_u128(unsized_align, false)) { + let align = match (bx.cx().const_to_opt_u128(sized_align, false), + bx.cx().const_to_opt_u128(unsized_align, false)) { (Some(sized_align), Some(unsized_align)) => { // If both alignments are constant, (the sized_align should always be), then // pick the correct alignment statically. 
- C_usize(cx, std::cmp::max(sized_align, unsized_align) as u64) + cx.const_usize(std::cmp::max(sized_align, unsized_align) as u64) + } + _ => { + let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align); + bx.select(cmp, sized_align, unsized_align) } - _ => bx.select(bx.icmp(llvm::IntUGT, sized_align, unsized_align), - sized_align, - unsized_align) }; // Issue #27023: must add any necessary padding to `size` @@ -111,9 +117,11 @@ pub fn size_and_align_of_dst(bx: &Builder<'_, 'll, 'tcx>, t: Ty<'tcx>, info: Opt // emulated via the semi-standard fast bit trick: // // `(size + (align-1)) & -align` - - let addend = bx.sub(align, C_usize(bx.cx, 1)); - let size = bx.and(bx.add(size, addend), bx.neg(align)); + let one = bx.cx().const_usize(1); + let addend = bx.sub(align, one); + let add = bx.add(size, addend); + let neg = bx.neg(align); + let size = bx.and(add, neg); (size, align) } diff --git a/src/librustc_codegen_ssa/interfaces/abi.rs b/src/librustc_codegen_ssa/interfaces/abi.rs new file mode 100644 index 0000000000000..3acd0e10c308c --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/abi.rs @@ -0,0 +1,36 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc_target::abi::call::FnType; +use rustc::ty::{FnSig, Ty, Instance}; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; +use super::Backend; +use super::builder::HasCodegen; + +pub trait AbiMethods<'tcx> { + fn new_fn_type(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>; + fn new_vtable( + &self, + sig: FnSig<'tcx>, + extra_args: &[Ty<'tcx>] + ) -> FnType<'tcx, Ty<'tcx>>; + fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>>; +} + +pub trait AbiBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn apply_attrs_callsite( + &mut self, + ty: &FnType<'tcx, Ty<'tcx>>, + callsite: >::Value + ); +} diff --git a/src/librustc_codegen_ssa/interfaces/asm.rs b/src/librustc_codegen_ssa/interfaces/asm.rs new file mode 100644 index 0000000000000..f7dd5a7747d13 --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/asm.rs @@ -0,0 +1,33 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
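The `(size + (align - 1)) & -align` rounding emitted by `size_and_align_of_dst` above is the usual power-of-two round-up trick. A quick standalone check of the arithmetic (plain Rust, not compiler code):

```rust
// Round `size` up to the next multiple of `align` (align must be a power of two),
// mirroring the bit trick used for the DST padding computation above.
fn round_up(size: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    (size + (align - 1)) & align.wrapping_neg()
}

fn main() {
    assert_eq!(round_up(13, 8), 16); // 3 bytes of padding added
    assert_eq!(round_up(16, 8), 16); // already aligned, unchanged
    assert_eq!(round_up(0, 8), 0);
}
```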
+ +use rustc::hir::{InlineAsm, GlobalAsm}; +use rustc::ty::Ty; +use rustc::ty::layout::{TyLayout, LayoutOf, HasTyCtxt}; +use mir::place::PlaceRef; +use super::Backend; +use super::builder::HasCodegen; + +pub trait AsmBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + // Take an inline assembly expression and splat it out via LLVM + fn codegen_inline_asm( + &mut self, + ia: &InlineAsm, + outputs: Vec>::Value>>, + inputs: Vec<>::Value> + ) -> bool; +} + +pub trait AsmMethods { + fn codegen_global_asm(&self, ga: &GlobalAsm); +} diff --git a/src/librustc_codegen_ssa/interfaces/backend.rs b/src/librustc_codegen_ssa/interfaces/backend.rs new file mode 100644 index 0000000000000..f11666d237844 --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/backend.rs @@ -0,0 +1,54 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::CodegenObject; +use super::write::WriteBackendMethods; +use rustc::session::Session; +use rustc_codegen_utils::codegen_backend::CodegenBackend; +use rustc::middle::cstore::EncodedMetadata; +use rustc::middle::allocator::AllocatorKind; +use rustc::ty::TyCtxt; +use rustc::mir::mono::Stats; +use syntax_pos::symbol::InternedString; +use std::sync::Arc; + +pub trait Backend<'ll> { + type Value : 'll + CodegenObject; + type BasicBlock : Copy; + type Type : CodegenObject; + type Context; +} + +pub trait ExtraBackendMethods : CodegenBackend + WriteBackendMethods + Sized + Send + Sync { + fn thin_lto_available(&self) -> bool; + fn pgo_available(&self) -> bool; + fn new_metadata(&self, sess: &Session, mod_name: &str) -> Self::Module; + fn write_metadata<'b, 'gcx>( + &self, + tcx: TyCtxt<'b, 'gcx, 'gcx>, + metadata: &Self::Module + ) -> EncodedMetadata; + fn codegen_allocator(&self, tcx: TyCtxt, mods: &Self::Module, kind: AllocatorKind); + fn compile_codegen_unit<'ll, 'tcx: 'll>( + &self, + tcx: TyCtxt<'ll, 'tcx, 'tcx>, + cgu_name: InternedString + ) -> Stats ; + // If find_features is true this won't access `sess.crate_types` by assuming + // that `is_pie_binary` is false. When we discover LLVM target features + // `sess.crate_types` is uninitialized so we cannot access it. + fn target_machine_factory( + &self, + sess: &Session, + find_features: bool + ) -> Arc + Result + Send + Sync>; + fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str; +} diff --git a/src/librustc_codegen_ssa/interfaces/builder.rs b/src/librustc_codegen_ssa/interfaces/builder.rs new file mode 100644 index 0000000000000..6061a3729b473 --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/builder.rs @@ -0,0 +1,658 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
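To make the associated-type design of the `Backend` trait above concrete: a backend other than LLVM would mostly just pick handle types for the associated items. Everything below (`MyBackend`, `MyValue`, and friends) is made up for illustration, and the extra `CodegenObject`/`Copy` bounds of the real trait are glossed over:

```rust
// Sketch: the shape of a Backend impl for an imaginary backend.
// Assumes the Backend trait from interfaces/backend.rs above is in scope.
struct MyBackend;

#[derive(Copy, Clone, PartialEq, Debug)]
struct MyValue(u32);        // index into the backend's value table
#[derive(Copy, Clone)]
struct MyBasicBlock(u32);   // index of a basic block
#[derive(Copy, Clone, PartialEq, Debug)]
struct MyType(u32);         // index of a type
struct MyContext;           // per-module state, whatever the backend needs

impl<'ll> Backend<'ll> for MyBackend {
    type Value = MyValue;
    type BasicBlock = MyBasicBlock;
    type Type = MyType;
    type Context = MyContext;
}
```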
+ +use common::{IntPredicate, RealPredicate, AtomicOrdering, + SynchronizationScope, AtomicRmwBinOp, OperandBundleDef}; +use libc::c_char; +use rustc::ty::{Ty, TyCtxt}; +use rustc::ty::layout::{Align, Size, TyLayout, HasTyCtxt, LayoutOf}; +use MemFlags; +use super::Backend; +use super::CodegenMethods; +use super::debuginfo::DebugInfoBuilderMethods; +use super::intrinsic::IntrinsicCallMethods; +use super::type_::ArgTypeMethods; +use super::abi::AbiBuilderMethods; +use super::asm::AsmBuilderMethods; +use mir::place::PlaceRef; +use mir::operand::OperandRef; + +use std::borrow::Cow; +use std::ops::Range; +use syntax::ast::AsmDialect; + +pub trait HasCodegen<'a, 'll: 'a, 'tcx :'ll> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + type CodegenCx : 'a + CodegenMethods<'a, 'll, 'tcx>; +} + +pub trait BuilderMethods<'a, 'll :'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + + DebugInfoBuilderMethods<'a, 'll, 'tcx> + ArgTypeMethods<'a, 'll, 'tcx> + + AbiBuilderMethods<'a, 'll, 'tcx> + IntrinsicCallMethods<'a, 'll, 'tcx> + + AsmBuilderMethods<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn new_block<'b>( + cx: &'a Self::CodegenCx, + llfn: >::Value, + name: &'b str + ) -> Self; + fn with_cx(cx: &'a Self::CodegenCx) -> Self; + fn build_sibling_block<'b>(&self, name: &'b str) -> Self; + fn cx(&self) -> &'a Self::CodegenCx; + fn tcx(&self) -> TyCtxt<'a, 'tcx, 'tcx>; + fn llfn(&self) -> >::Value; + fn llbb(&self) -> >::BasicBlock; + fn count_insn(&self, category: &str); + + fn set_value_name(&mut self, value: >::Value, name: &str); + fn position_at_end(&mut self, llbb: >::BasicBlock); + fn position_at_start(&mut self, llbb: >::BasicBlock); + fn ret_void(&mut self); + fn ret(&mut self, v: >::Value); + fn br(&mut self, dest: >::BasicBlock); + fn cond_br( + &mut self, + cond: >::Value, + then_llbb: >::BasicBlock, + else_llbb: >::BasicBlock, + ); + fn switch( + &mut self, + v: >::Value, + else_llbb: >::BasicBlock, + num_cases: usize, + ) -> >::Value; + fn invoke( + &mut self, + llfn: >::Value, + args: &[>::Value], + then: >::BasicBlock, + catch: >::BasicBlock, + bundle: Option<&OperandBundleDef<'ll, >::Value>> + ) -> >::Value; + fn unreachable(&mut self); + fn add( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn fadd( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn fadd_fast( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn sub( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn fsub( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn fsub_fast( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn mul( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn fmul( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn fmul_fast( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn udiv( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn exactudiv( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn sdiv( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn exactsdiv( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn fdiv( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn fdiv_fast( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn urem( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn srem( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> 
>::Value; + fn frem( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn frem_fast( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn shl( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn lshr( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn ashr( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn and( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn or( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn xor( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn neg( + &mut self, + v: >::Value + ) -> >::Value; + fn fneg( + &mut self, + v: >::Value + ) -> >::Value; + fn not( + &mut self, + v: >::Value + ) -> >::Value; + + fn alloca( + &mut self, + ty: >::Type, + name: &str, align: Align + ) -> >::Value; + fn dynamic_alloca( + &mut self, + ty: >::Type, + name: &str, align: Align + ) -> >::Value; + fn array_alloca( + &mut self, + ty: >::Type, + len: >::Value, + name: &str, + align: Align + ) -> >::Value; + + fn load( + &mut self, + ptr: >::Value, + align: Align + ) -> >::Value; + fn volatile_load( + &mut self, + ptr: >::Value + ) -> >::Value; + fn atomic_load( + &mut self, + ptr: >::Value, + order: AtomicOrdering, align: Align + ) -> >::Value; + fn load_ref( + &mut self, + ptr: &PlaceRef<'tcx,>::Value> + ) -> OperandRef<'tcx, >::Value>; + + fn range_metadata( + &mut self, + load: >::Value, + range: Range + ); + fn nonnull_metadata(&mut self, load: >::Value); + + fn store( + &mut self, + val: >::Value, + ptr: >::Value, + align: Align + ) -> >::Value; + fn atomic_store( + &mut self, + val: >::Value, + ptr: >::Value, + order: AtomicOrdering, + align: Align + ); + fn store_with_flags( + &mut self, + val: >::Value, + ptr: >::Value, + align: Align, + flags: MemFlags, + ) -> >::Value; + + fn gep( + &mut self, + ptr: >::Value, + indices: &[>::Value] + ) -> >::Value; + fn inbounds_gep( + &mut self, + ptr: >::Value, + indices: &[>::Value] + ) -> >::Value; + fn struct_gep( + &mut self, + ptr: >::Value, + idx: u64 + ) -> >::Value; + + fn trunc( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn sext( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn fptoui( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn fptosi( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn uitofp( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn sitofp( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn fptrunc( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn fpext( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn ptrtoint( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn inttoptr( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn bitcast( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + fn intcast( + &mut self, + val: >::Value, + dest_ty: >::Type, is_signed: bool + ) -> >::Value; + fn pointercast( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + + fn icmp( + &mut self, + op: IntPredicate, + lhs: >::Value, rhs: >::Value + ) -> >::Value; + fn fcmp( + &mut self, + op: RealPredicate, + lhs: >::Value, rhs: >::Value + ) -> >::Value; + + fn empty_phi( + &mut self, + ty: >::Type) -> >::Value; + fn phi( + &mut self, + ty: >::Type, + vals: &[>::Value], + bbs: &[>::BasicBlock] + ) -> >::Value; + fn inline_asm_call( + &mut self, + asm: *const c_char, + cons: 
*const c_char, + inputs: &[>::Value], + output: >::Type, + volatile: bool, + alignstack: bool, + dia: AsmDialect + ) -> Option<>::Value>; + + fn minnum( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn maxnum( + &mut self, + lhs: >::Value, + rhs: >::Value + ) -> >::Value; + fn select( + &mut self, cond: >::Value, + then_val: >::Value, + else_val: >::Value, + ) -> >::Value; + + fn va_arg( + &mut self, + list: >::Value, + ty: >::Type + ) -> >::Value; + fn extract_element(&mut self, + vec: >::Value, + idx: >::Value + ) -> >::Value; + fn insert_element( + &mut self, vec: >::Value, + elt: >::Value, + idx: >::Value, + ) -> >::Value; + fn shuffle_vector( + &mut self, + v1: >::Value, + v2: >::Value, + mask: >::Value + ) -> >::Value; + fn vector_splat( + &mut self, + num_elts: usize, + elt: >::Value + ) -> >::Value; + fn vector_reduce_fadd_fast( + &mut self, + acc: >::Value, + src: >::Value + ) -> >::Value; + fn vector_reduce_fmul_fast( + &mut self, + acc: >::Value, + src: >::Value + ) -> >::Value; + fn vector_reduce_add( + &mut self, + src: >::Value + ) -> >::Value; + fn vector_reduce_mul( + &mut self, + src: >::Value + ) -> >::Value; + fn vector_reduce_and( + &mut self, + src: >::Value + ) -> >::Value; + fn vector_reduce_or( + &mut self, + src: >::Value + ) -> >::Value; + fn vector_reduce_xor( + &mut self, + src: >::Value + ) -> >::Value; + fn vector_reduce_fmin( + &mut self, + src: >::Value + ) -> >::Value; + fn vector_reduce_fmax( + &mut self, + src: >::Value + ) -> >::Value; + fn vector_reduce_fmin_fast( + &mut self, + src: >::Value + ) -> >::Value; + fn vector_reduce_fmax_fast( + &mut self, + src: >::Value + ) -> >::Value; + fn vector_reduce_min( + &mut self, + src: >::Value, + is_signed: bool + ) -> >::Value; + fn vector_reduce_max( + &mut self, + src: >::Value, + is_signed: bool + ) -> >::Value; + fn extract_value( + &mut self, + agg_val: >::Value, + idx: u64 + ) -> >::Value; + fn insert_value( + &mut self, + agg_val: >::Value, + elt: >::Value, + idx: u64 + ) -> >::Value; + + fn landing_pad( + &mut self, + ty: >::Type, + pers_fn: >::Value, + num_clauses: usize + ) -> >::Value; + fn add_clause( + &mut self, + landing_pad: >::Value, + clause: >::Value + ); + fn set_cleanup( + &mut self, + landing_pad: >::Value + ); + fn resume( + &mut self, + exn: >::Value + ) -> >::Value; + fn cleanup_pad( + &mut self, + parent: Option<>::Value>, + args: &[>::Value] + ) -> >::Value; + fn cleanup_ret( + &mut self, cleanup: >::Value, + unwind: Option<>::BasicBlock>, + ) -> >::Value; + fn catch_pad( + &mut self, + parent: >::Value, + args: &[>::Value] + ) -> >::Value; + fn catch_ret( + &mut self, + pad: >::Value, + unwind: >::BasicBlock + ) -> >::Value; + fn catch_switch( + &mut self, + parent: Option<>::Value>, + unwind: Option<>::BasicBlock>, + num_handlers: usize, + ) -> >::Value; + fn add_handler( + &mut self, + catch_switch: >::Value, + handler: >::BasicBlock + ); + fn set_personality_fn(&mut self, personality: >::Value); + + fn atomic_cmpxchg( + &mut self, + dst: >::Value, + cmp: >::Value, + src: >::Value, + order: AtomicOrdering, + failure_order: AtomicOrdering, + weak: bool, + ) -> >::Value; + fn atomic_rmw( + &mut self, + op: AtomicRmwBinOp, + dst: >::Value, + src: >::Value, + order: AtomicOrdering, + ) -> >::Value; + fn atomic_fence(&mut self, order: AtomicOrdering, scope: SynchronizationScope); + fn add_case( + &mut self, + s: >::Value, + on_val: >::Value, + dest: >::BasicBlock + ); + fn add_incoming_to_phi( + &mut self, + phi: >::Value, + val: >::Value, + bb: >::BasicBlock + ); + 
fn set_invariant_load(&mut self, load: >::Value); + + /// Returns the ptr value that should be used for storing `val`. + fn check_store( + &mut self, + val: >::Value, + ptr: >::Value + ) -> >::Value; + + /// Returns the args that should be used for a call to `llfn`. + fn check_call<'b>( + &mut self, + typ: &str, + llfn: >::Value, + args: &'b [>::Value] + ) -> Cow<'b, [>::Value]> + where [>::Value] : ToOwned; + + fn lifetime_start(&mut self, ptr: >::Value, size: Size); + fn lifetime_end(&mut self, ptr: >::Value, size: Size); + + /// If LLVM lifetime intrinsic support is enabled (i.e. optimizations + /// on), and `ptr` is nonzero-sized, then extracts the size of `ptr` + /// and the intrinsic for `lt` and passes them to `emit`, which is in + /// charge of generating code to call the passed intrinsic on whatever + /// block of generated code is targeted for the intrinsic. + /// + /// If LLVM lifetime intrinsic support is disabled (i.e. optimizations + /// off) or `ptr` is zero-sized, then no-op (does not call `emit`). + fn call_lifetime_intrinsic( + &mut self, + intrinsic: &str, + ptr: >::Value, size: Size + ); + + fn call( + &mut self, + llfn: >::Value, + args: &[>::Value], + bundle: Option<&OperandBundleDef<'ll, >::Value>> + ) -> >::Value; + + fn call_memcpy( + &mut self, + dst: >::Value, + src: >::Value, + n_bytes: >::Value, + align: Align, + flags: MemFlags, + ); + + fn call_memset( + &mut self, + ptr: >::Value, + fill_byte: >::Value, + size: >::Value, + align: >::Value, + volatile: bool, + ) -> >::Value; + + fn zext( + &mut self, + val: >::Value, + dest_ty: >::Type + ) -> >::Value; + + fn delete_basic_block(&mut self, bb: >::BasicBlock); + fn do_not_inline(&mut self, llret: >::Value); +} diff --git a/src/librustc_codegen_ssa/interfaces/consts.rs b/src/librustc_codegen_ssa/interfaces/consts.rs new file mode 100644 index 0000000000000..837aa0a349d93 --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/consts.rs @@ -0,0 +1,79 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::Backend; +use syntax::symbol::LocalInternedString; +use rustc::ty::layout; +use rustc::mir::interpret::Scalar; +use rustc::mir::interpret::Allocation; +use mir::place::PlaceRef; + +pub trait ConstMethods<'ll, 'tcx: 'll> : Backend<'ll> { + // Constant constructors + + fn const_null(&self, t: Self::Type) -> Self::Value; + fn const_undef(&self, t: Self::Type) -> Self::Value; + fn const_int(&self, t: Self::Type, i: i64) -> Self::Value; + fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value; + fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value; + fn const_bool(&self, val: bool) -> Self::Value; + fn const_i32(&self, i: i32) -> Self::Value; + fn const_u32(&self, i: u32) -> Self::Value; + fn const_u64(&self, i: u64) -> Self::Value; + fn const_usize(&self, i: u64) -> Self::Value; + fn const_u8(&self, i: u8) -> Self::Value; + + // This is a 'c-like' raw string, which differs from + // our boxed-and-length-annotated strings. + fn const_cstr( + &self, + s: LocalInternedString, + null_terminated: bool, + ) -> Self::Value; + + // NB: Do not use `do_spill_noroot` to make this into a constant string, or + // you will be kicked off fast isel. See issue #4352 for an example of this. 
+ fn const_str_slice(&self, s: LocalInternedString) -> Self::Value; + + fn const_fat_ptr( + &self, + ptr: Self::Value, + meta: Self::Value + ) -> Self::Value; + fn const_struct( + &self, + elts: &[Self::Value], + packed: bool + ) -> Self::Value; + fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value; + fn const_vector(&self, elts: &[Self::Value]) -> Self::Value; + fn const_bytes(&self, bytes: &[u8]) -> Self::Value; + + fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value; + fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>; + fn const_to_uint(&self, v: Self::Value) -> u64; + fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option; + + fn is_const_integral(&self, v: Self::Value) -> bool; + fn is_const_real(&self, v: Self::Value) -> bool; + + fn scalar_to_backend( + &self, + cv: Scalar, + layout: &layout::Scalar, + llty: Self::Type, + ) -> Self::Value; + fn from_const_alloc( + &self, + layout: layout::TyLayout<'tcx>, + alloc: &Allocation, + offset: layout::Size, + ) -> PlaceRef<'tcx, Self::Value>; +} diff --git a/src/librustc_codegen_ssa/interfaces/debuginfo.rs b/src/librustc_codegen_ssa/interfaces/debuginfo.rs new file mode 100644 index 0000000000000..1cd24560f689f --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/debuginfo.rs @@ -0,0 +1,86 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::{Ty, FnSig}; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; +use super::Backend; +use super::builder::HasCodegen; +use rustc::mir; +use rustc_mir::monomorphize::Instance; +use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind}; +use rustc_data_structures::indexed_vec::IndexVec; +use syntax_pos; +use rustc::hir::def_id::CrateNum; +use syntax::ast::Name; + +pub trait DebugInfoMethods<'ll, 'tcx: 'll> : Backend<'ll> { + type DIScope : 'll + Copy; + + fn create_vtable_metadata( + &self, + ty: Ty<'tcx>, + vtable: Self::Value, + ); + + /// Creates the function-specific debug context. + /// + /// Returns the FunctionDebugContext for the function which holds state needed + /// for debug info creation. The function may also return another variant of the + /// FunctionDebugContext enum which indicates why no debuginfo should be created + /// for the function. 
+ fn create_function_debug_context( + &self, + instance: Instance<'tcx>, + sig: FnSig<'tcx>, + llfn: Self::Value, + mir: &mir::Mir, + ) -> FunctionDebugContext; + + fn create_mir_scopes( + &self, + mir: &mir::Mir, + debug_context: &FunctionDebugContext, + ) -> IndexVec>; + fn extend_scope_to_file( + &self, + scope_metadata: Self::DIScope, + file: &syntax_pos::SourceFile, + defining_crate: CrateNum, + ) -> Self::DIScope; + fn debuginfo_finalize(&self); + fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4]; +} + +pub trait DebugInfoBuilderMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn declare_local( + &mut self, + dbg_context: &FunctionDebugContext< + >::DIScope + >, + variable_name: Name, + variable_type: Ty<'tcx>, + scope_metadata: >::DIScope, + variable_access: VariableAccess<'_, >::Value>, + variable_kind: VariableKind, + span: syntax_pos::Span, + ); + fn set_source_location( + &mut self, + debug_context: &FunctionDebugContext< + >::DIScope + >, + scope: Option<>::DIScope>, + span: syntax_pos::Span, + ); + fn insert_reference_to_gdb_debug_scripts_section_global(&mut self); +} diff --git a/src/librustc_codegen_ssa/interfaces/declare.rs b/src/librustc_codegen_ssa/interfaces/declare.rs new file mode 100644 index 0000000000000..64b1aff648464 --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/declare.rs @@ -0,0 +1,113 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::Ty; +use super::Backend; +use rustc::hir::def_id::DefId; +use rustc::mir::mono::{Linkage, Visibility}; +use rustc_mir::monomorphize::Instance; + +pub trait DeclareMethods<'ll, 'tcx: 'll> : Backend<'ll> { + + /// Declare a global value. + /// + /// If there’s a value with the same name already declared, the function will + /// return its Value instead. + fn declare_global( + &self, + name: &str, ty: Self::Type + ) -> Self::Value; + + /// Declare a C ABI function. + /// + /// Only use this for foreign function ABIs and glue. For Rust functions use + /// `declare_fn` instead. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. + fn declare_cfn( + &self, + name: &str, + fn_type: Self::Type + ) -> Self::Value; + + /// Declare a Rust function. + /// + /// If there’s a value with the same name already declared, the function will + /// update the declaration and return existing Value instead. + fn declare_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> Self::Value; + + /// Declare a global with an intention to define it. + /// + /// Use this function when you intend to define a global. This function will + /// return None if the name already has a definition associated with it. In that + /// case an error should be reported to the user, because it usually happens due + /// to user’s fault (e.g. misuse of #[no_mangle] or #[export_name] attributes). + fn define_global( + &self, + name: &str, + ty: Self::Type + ) -> Option; + + /// Declare a private global + /// + /// Use this function when you intend to define a global without a name. 
+ fn define_private_global(&self, ty: Self::Type) -> Self::Value; + + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. This function will + /// panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. + fn define_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> Self::Value; + + /// Declare a Rust function with an intention to define it. + /// + /// Use this function when you intend to define a function. This function will + /// panic if the name already has a definition associated with it. This + /// can happen with #[no_mangle] or #[export_name], for example. + fn define_internal_fn( + &self, + name: &str, + fn_type: Ty<'tcx>, + ) -> Self::Value; + + /// Get declared value by name. + fn get_declared_value(&self, name: &str) -> Option<Self::Value>; + + /// Get defined or externally defined (AvailableExternally linkage) value by + /// name. + fn get_defined_value(&self, name: &str) -> Option<Self::Value>; +} + +pub trait PreDefineMethods<'ll, 'tcx: 'll> : Backend<'ll> { + fn predefine_static( + &self, + def_id: DefId, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str + ); + fn predefine_fn( + &self, + instance: Instance<'tcx>, + linkage: Linkage, + visibility: Visibility, + symbol_name: &str + ); +} diff --git a/src/librustc_codegen_ssa/interfaces/intrinsic.rs b/src/librustc_codegen_ssa/interfaces/intrinsic.rs new file mode 100644 index 0000000000000..3be14cf34b8f4 --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/intrinsic.rs @@ -0,0 +1,44 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use super::Backend; +use super::builder::HasCodegen; +use mir::operand::OperandRef; +use rustc::ty::Ty; +use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; +use rustc_target::abi::call::FnType; +use syntax_pos::Span; + +pub trait IntrinsicCallMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + /// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs, + /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, + /// add them to librustc_codegen_llvm/context.rs + fn codegen_intrinsic_call( + &mut self, + callee_ty: Ty<'tcx>, + fn_ty: &FnType<'tcx, Ty<'tcx>>, + args: &[OperandRef<'tcx, <Self::CodegenCx as Backend<'ll>>::Value>], + llresult: <Self::CodegenCx as Backend<'ll>>::Value, + span: Span, + ); +} + +pub trait IntrinsicDeclarationMethods<'ll> : Backend<'ll> { + fn get_intrinsic(&self, key: &str) -> Self::Value; + + /// Declare any llvm intrinsics that you might need + fn declare_intrinsic( + &self, + key: &str + ) -> Option<Self::Value>; +} diff --git a/src/librustc_codegen_ssa/interfaces/misc.rs b/src/librustc_codegen_ssa/interfaces/misc.rs new file mode 100644 index 0000000000000..73ae29888a72f --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/misc.rs @@ -0,0 +1,41 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option.
This file may not be copied, modified, or distributed +// except according to those terms. + +use std::cell::RefCell; +use rustc::util::nodemap::FxHashMap; +use rustc::ty::{Ty, self, Instance}; +use super::Backend; +use rustc::session::Session; +use libc::c_uint; +use rustc::mir::mono::Stats; +use std::sync::Arc; +use rustc_mir::monomorphize::partitioning::CodegenUnit; + +pub trait MiscMethods<'ll, 'tcx: 'll> : Backend<'ll> { + fn vtables(&self) -> &RefCell< + FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), Self::Value> + >; + fn check_overflow(&self) -> bool; + fn instances(&self) -> &RefCell<FxHashMap<Instance<'tcx>, Self::Value>>; + fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value; + fn get_param(&self, llfn: Self::Value, index: c_uint) -> Self::Value; + fn eh_personality(&self) -> Self::Value; + fn eh_unwind_resume(&self) -> Self::Value; + fn sess(&self) -> &Session; + fn stats(&self) -> &RefCell<Stats>; + fn consume_stats(self) -> RefCell<Stats>; + fn codegen_unit(&self) -> &Arc<CodegenUnit<'tcx>>; + fn statics_to_rauw(&self) -> &RefCell<Vec<(Self::Value, Self::Value)>>; + fn env_alloca_allowed(&self) -> bool; + fn used_statics(&self) -> &RefCell<Vec<Self::Value>>; + fn set_frame_pointer_elimination(&self, llfn: Self::Value); + fn apply_target_cpu_attr(&self, llfn: Self::Value); + fn create_used_variable(&self); +} diff --git a/src/librustc_codegen_ssa/interfaces/mod.rs b/src/librustc_codegen_ssa/interfaces/mod.rs new file mode 100644 index 0000000000000..62088d998bf27 --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/mod.rs @@ -0,0 +1,67 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Interface of a Rust codegen backend +//! +//! This crate defines all the traits that have to be implemented by a codegen backend in order to +//! use the backend-agnostic codegen code in `rustc_codegen_ssa`. +//! +//! The interface is designed around two backend-specific data structures, the codegen context and +//! the builder. The codegen context is supposed to be read-only after its creation and during the +//! actual codegen, while the builder stores the information about the function during codegen and +//! is used to produce the instructions of the backend IR. +//! +//! Finally, a third `Backend` structure has to implement methods related to how codegen information +//! is passed to the backend, especially for asynchronous compilation. +//! +//! The traits contain associated types that are backend-specific, such as the backend's value or +//! basic blocks.
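// Illustrative sketch, not part of the patch: backend-agnostic helpers are written
// against these traits rather than against LLVM types directly. The helper name and
// its exact bounds are hypothetical; only the `ConstMethods` surface shown in
// interfaces/consts.rs above is assumed.
fn build_line_col_pair<'ll, 'tcx: 'll, Cx: ConstMethods<'ll, 'tcx>>(
    cx: &Cx,
    line: u32,
    col: u32,
) -> Cx::Value {
    // Build two backend constants and wrap them in an unpacked constant struct,
    // touching only the trait surface so any backend can supply the implementation.
    let line = cx.const_u32(line);
    let col = cx.const_u32(col);
    cx.const_struct(&[line, col], false)
}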
+ +use std::fmt; +mod backend; +mod misc; +mod statics; +mod declare; +mod builder; +mod consts; +mod type_; +mod intrinsic; +mod debuginfo; +mod abi; +mod asm; +mod write; + +use rustc::ty::Ty; +use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; + +pub use self::backend::{Backend, ExtraBackendMethods}; +pub use self::misc::MiscMethods; +pub use self::statics::StaticMethods; +pub use self::declare::{DeclareMethods, PreDefineMethods}; +pub use self::builder::{BuilderMethods, HasCodegen}; +pub use self::consts::ConstMethods; +pub use self::type_::{TypeMethods, BaseTypeMethods, DerivedTypeMethods, + LayoutTypeMethods, ArgTypeMethods}; +pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods}; +pub use self::debuginfo::{DebugInfoMethods, DebugInfoBuilderMethods}; +pub use self::abi::{AbiMethods, AbiBuilderMethods}; +pub use self::asm::{AsmMethods, AsmBuilderMethods}; +pub use self::write::{WriteBackendMethods, ThinBufferMethods, ModuleBufferMethods}; + + +pub trait CodegenObject : Copy + PartialEq + fmt::Debug {} + +pub trait CodegenMethods<'a, 'll: 'a, 'tcx: 'll> : + Backend<'ll> + TypeMethods<'a, 'll, 'tcx> + MiscMethods<'ll, 'tcx> + ConstMethods<'ll, 'tcx> + + StaticMethods<'ll> + DebugInfoMethods<'ll, 'tcx> + AbiMethods<'tcx> + + IntrinsicDeclarationMethods<'ll> + DeclareMethods<'ll, 'tcx> + AsmMethods + + PreDefineMethods<'ll, 'tcx> + where &'a Self : 'a + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{} diff --git a/src/librustc_codegen_ssa/interfaces/statics.rs b/src/librustc_codegen_ssa/interfaces/statics.rs new file mode 100644 index 0000000000000..1c77b891cc96e --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/statics.rs @@ -0,0 +1,37 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use rustc::ty::layout::Align; +use rustc::hir::def_id::DefId; +use super::Backend; + +pub trait StaticMethods<'ll> : Backend<'ll> { + fn static_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; + fn static_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value; + fn static_addr_of_mut( + &self, + cv: Self::Value, + align: Align, + kind: Option<&str>, + ) -> Self::Value; + fn static_addr_of( + &self, + cv: Self::Value, + align: Align, + kind: Option<&str>, + ) -> Self::Value; + fn get_static(&self, def_id: DefId) -> Self::Value; + fn codegen_static( + &self, + def_id: DefId, + is_mutable: bool, + ); + fn static_replace_all_uses(&self, old_g: Self::Value, new_g: Self::Value); +} diff --git a/src/librustc_codegen_ssa/interfaces/type_.rs b/src/librustc_codegen_ssa/interfaces/type_.rs new file mode 100644 index 0000000000000..9f29c74469f4e --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/type_.rs @@ -0,0 +1,226 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
+ +use super::Backend; +use super::builder::HasCodegen; +use super::misc::MiscMethods; +use common::{self, TypeKind}; +use syntax::ast; +use rustc::ty::layout::{self, Align, Size}; +use std::cell::RefCell; +use rustc::util::nodemap::FxHashMap; +use rustc::ty::{self, Ty, TyCtxt}; +use rustc::ty::layout::{TyLayout, LayoutOf, HasTyCtxt}; +use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg}; +use mir::place::PlaceRef; + + +pub trait BaseTypeMethods<'ll, 'tcx: 'll> : Backend<'ll> { + fn type_void(&self) -> Self::Type; + fn type_metadata(&self) -> Self::Type; + fn type_i1(&self) -> Self::Type; + fn type_i8(&self) -> Self::Type; + fn type_i16(&self) -> Self::Type; + fn type_i32(&self) -> Self::Type; + fn type_i64(&self) -> Self::Type; + fn type_i128(&self) -> Self::Type; + + // Creates an integer type with the given number of bits, e.g. i24 + fn type_ix(&self, num_bits: u64) -> Self::Type; + fn type_isize(&self) -> Self::Type; + + fn type_f32(&self) -> Self::Type; + fn type_f64(&self) -> Self::Type; + fn type_x86_mmx(&self) -> Self::Type; + + fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; + fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type; + fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type; + fn type_named_struct(&self, name: &str) -> Self::Type; + fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type; + fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type; + fn type_kind(&self, ty: Self::Type) -> TypeKind; + fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool); + fn type_ptr_to(&self, ty: Self::Type) -> Self::Type; + fn element_type(&self, ty: Self::Type) -> Self::Type; + + /// Return the number of elements in `self` if it is a LLVM vector type. + fn vector_length(&self, ty: Self::Type) -> usize; + + fn func_params_types(&self, ty: Self::Type) -> Vec; + fn float_width(&self, ty: Self::Type) -> usize; + + /// Retrieve the bit width of the integer type `self`. + fn int_width(&self, ty: Self::Type) -> u64; + + fn val_ty(&self, v: Self::Value) -> Self::Type; + fn scalar_lltypes(&self) -> &RefCell, Self::Type>>; + fn tcx(&self) -> &TyCtxt<'ll, 'tcx, 'tcx>; +} + +pub trait DerivedTypeMethods<'a, 'll: 'a, 'tcx: 'll> : + Backend<'ll> + BaseTypeMethods<'ll, 'tcx> + MiscMethods<'ll, 'tcx> + where &'a Self : 'a + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + + fn type_bool(&self) -> Self::Type { + self.type_i8() + } + + fn type_char(&self) -> Self::Type { + self.type_i32() + } + + fn type_i8p(&self) -> Self::Type { + self.type_ptr_to(self.type_i8()) + } + + fn type_int(&self) -> Self::Type { + match &self.sess().target.target.target_c_int_width[..] 
{ + "16" => self.type_i16(), + "32" => self.type_i32(), + "64" => self.type_i64(), + width => bug!("Unsupported target_c_int_width: {}", width), + } + } + + fn type_int_from_ty( + &self, + t: ast::IntTy + ) -> Self::Type { + match t { + ast::IntTy::Isize => self.type_isize(), + ast::IntTy::I8 => self.type_i8(), + ast::IntTy::I16 => self.type_i16(), + ast::IntTy::I32 => self.type_i32(), + ast::IntTy::I64 => self.type_i64(), + ast::IntTy::I128 => self.type_i128(), + } + } + + fn type_uint_from_ty( + &self, + t: ast::UintTy + ) -> Self::Type { + match t { + ast::UintTy::Usize => self.type_isize(), + ast::UintTy::U8 => self.type_i8(), + ast::UintTy::U16 => self.type_i16(), + ast::UintTy::U32 => self.type_i32(), + ast::UintTy::U64 => self.type_i64(), + ast::UintTy::U128 => self.type_i128(), + } + } + + fn type_float_from_ty( + &self, + t: ast::FloatTy + ) -> Self::Type { + match t { + ast::FloatTy::F32 => self.type_f32(), + ast::FloatTy::F64 => self.type_f64(), + } + } + + fn type_from_integer(&self, i: layout::Integer) -> Self::Type { + use rustc::ty::layout::Integer::*; + match i { + I8 => self.type_i8(), + I16 => self.type_i16(), + I32 => self.type_i32(), + I64 => self.type_i64(), + I128 => self.type_i128(), + } + } + + fn type_pointee_for_abi_align(&'a self, align: Align) -> Self::Type { + // FIXME(eddyb) We could find a better approximation if ity.align < align. + let ity = layout::Integer::approximate_abi_align(self, align); + self.type_from_integer(ity) + } + + fn type_padding_filler( + &'a self, + size: Size, + align: Align + ) -> Self::Type { + let unit = layout::Integer::approximate_abi_align(self, align); + let size = size.bytes(); + let unit_size = unit.size().bytes(); + assert_eq!(size % unit_size, 0); + self.type_array(self.type_from_integer(unit), size / unit_size) + } + + fn type_needs_drop(&self, ty: Ty<'tcx>) -> bool { + common::type_needs_drop(*self.tcx(), ty) + } + + fn type_is_sized(&self, ty: Ty<'tcx>) -> bool { + common::type_is_sized(*self.tcx(), ty) + } + + fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { + common::type_is_freeze(*self.tcx(), ty) + } + + fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { + use syntax_pos::DUMMY_SP; + if ty.is_sized(self.tcx().at(DUMMY_SP), ty::ParamEnv::reveal_all()) { + return false; + } + + let tail = self.tcx().struct_tail(ty); + match tail.sty { + ty::Foreign(..) => false, + ty::Str | ty::Slice(..) | ty::Dynamic(..) 
=> true, + _ => bug!("unexpected unsized tail: {:?}", tail.sty), + } + } +} + +pub trait LayoutTypeMethods<'ll, 'tcx> : Backend<'ll> { + fn backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type; + fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type; + fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type; + fn reg_backend_type(&self, ty: &Reg) -> Self::Type; + fn immediate_backend_type(&self, ty: &TyLayout<'tcx>) -> Self::Type; + fn is_backend_immediate(&self, ty: &TyLayout<'tcx>) -> bool; + fn is_backend_scalar_pair(&self, ty: &TyLayout<'tcx>) -> bool; + fn backend_field_index(&self, ty: &TyLayout<'tcx>, index: usize) -> u64; + fn scalar_pair_element_backend_type<'a>( + &self, + ty: &TyLayout<'tcx>, + index: usize, + immediate: bool + ) -> Self::Type; +} + +pub trait ArgTypeMethods<'a, 'll: 'a, 'tcx: 'll> : HasCodegen<'a, 'll, 'tcx> + where &'a Self::CodegenCx : + LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn store_fn_arg( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + idx: &mut usize, dst: PlaceRef<'tcx, <Self::CodegenCx as Backend<'ll>>::Value> + ); + fn store_arg_ty( + &mut self, + ty: &ArgType<'tcx, Ty<'tcx>>, + val: <Self::CodegenCx as Backend<'ll>>::Value, + dst: PlaceRef<'tcx, <Self::CodegenCx as Backend<'ll>>::Value> + ); + fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> <Self::CodegenCx as Backend<'ll>>::Type; +} + +pub trait TypeMethods<'a, 'll: 'a, 'tcx: 'll> : + BaseTypeMethods<'ll, 'tcx> + DerivedTypeMethods<'a, 'll, 'tcx> + LayoutTypeMethods<'ll, 'tcx> + where &'a Self : 'a + LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{} diff --git a/src/librustc_codegen_ssa/interfaces/write.rs b/src/librustc_codegen_ssa/interfaces/write.rs new file mode 100644 index 0000000000000..a0c08670a20df --- /dev/null +++ b/src/librustc_codegen_ssa/interfaces/write.rs @@ -0,0 +1,78 @@ +// Copyright 2018 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +use back::write::{ModuleConfig, CodegenContext, DiagnosticHandlers}; +use back::lto::{SerializedModule, LtoModuleCodegen, ThinModule}; +use {ModuleCodegen, CompiledModule}; + +use rustc::dep_graph::WorkProduct; +use rustc::util::time_graph::Timeline; +use rustc_errors::{FatalError, Handler}; + +pub trait WriteBackendMethods : 'static + Sized + Clone { + type Module : Send + Sync; + type TargetMachine : Clone; + type ModuleBuffer : ModuleBufferMethods; + type Context : ?Sized; + type ThinData : Send + Sync; + type ThinBuffer : ThinBufferMethods; + + /// Performs LTO, which in the case of full LTO means merging all modules into + /// a single one and returning it for further optimizing. For ThinLTO, it will + /// do the global analysis necessary and return two lists, one of the modules + /// that need optimization and another for modules that can simply be copied over + /// from the incr. comp. cache.
+ fn run_lto( + cgcx: &CodegenContext<Self>, + modules: Vec<ModuleCodegen<Self::Module>>, + cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>, + timeline: &mut Timeline + ) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError>; + fn new_diagnostic_handlers<'a>( + cgcx: &'a CodegenContext<Self>, + handler: &'a Handler, + llcx: &'a Self::Context + ) -> DiagnosticHandlers<'a, Self>; + fn drop_diagnostic_handlers<'a>(diag: &mut DiagnosticHandlers<'a, Self>); + fn print_pass_timings(&self); + unsafe fn optimize( + cgcx: &CodegenContext<Self>, + diag_handler: &Handler, + module: &ModuleCodegen<Self::Module>, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result<(), FatalError>; + unsafe fn optimize_thin( + cgcx: &CodegenContext<Self>, + thin: &mut ThinModule<Self>, + timeline: &mut Timeline + ) -> Result<ModuleCodegen<Self::Module>, FatalError>; + unsafe fn codegen( + cgcx: &CodegenContext<Self>, + diag_handler: &Handler, + module: ModuleCodegen<Self::Module>, + config: &ModuleConfig, + timeline: &mut Timeline + ) -> Result<CompiledModule, FatalError>; + fn run_lto_pass_manager( + cgcx: &CodegenContext<Self>, + llmod: &ModuleCodegen<Self::Module>, + config: &ModuleConfig, + thin: bool + ); +} + +pub trait ThinBufferMethods : Send + Sync { + fn data(&self) -> &[u8]; +} + +pub trait ModuleBufferMethods : Send + Sync { + fn data(&self) -> &[u8]; +} diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs new file mode 100644 index 0000000000000..7e8f76fd97630 --- /dev/null +++ b/src/librustc_codegen_ssa/lib.rs @@ -0,0 +1,187 @@ +// Copyright 2017 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or +// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license +// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png", + html_favicon_url = "https://doc.rust-lang.org/favicon.ico", + html_root_url = "https://doc.rust-lang.org/nightly/")] + +#![feature(box_patterns)] +#![feature(box_syntax)] +#![feature(custom_attribute)] +#![feature(libc)] +#![feature(rustc_diagnostic_macros)] +#![feature(in_band_lifetimes)] +#![feature(slice_sort_by_cached_key)] +#![feature(nll)] +#![allow(unused_attributes)] +#![allow(dead_code)] +#![feature(quote)] + +//! This crate contains codegen code that is used by all codegen backends (LLVM and others). +//! The backend-agnostic functions of this crate use functions defined in various traits that +//! have to be implemented by each backend.
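// Illustrative sketch, not part of the patch: how a backend is expected to use the
// ModuleCodegen type declared further down in this file, assuming it is generic over
// the backend's module payload `M` (as its `module_llvm` field suggests). The helper
// name, `my_backend_module`, and the output settings are hypothetical.
fn wrap_and_finish<M>(my_backend_module: M, outputs: &OutputFilenames) -> CompiledModule {
    let module = ModuleCodegen {
        // Incremental compilation requires this name to be unique across all crates.
        name: "example_cgu".to_string(),
        module_llvm: my_backend_module,
        kind: ModuleKind::Regular,
    };
    // Emit only an object file in this sketch; bitcode emission is switched off.
    module.into_compiled_module(true, false, false, outputs)
}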
+ +#[macro_use] extern crate bitflags; +#[macro_use] extern crate log; +extern crate rustc_apfloat; +#[macro_use] extern crate rustc; +extern crate rustc_target; +extern crate rustc_mir; +#[macro_use] extern crate syntax; +extern crate syntax_pos; +extern crate rustc_incremental; +extern crate rustc_codegen_utils; +extern crate rustc_data_structures; +extern crate rustc_allocator; +extern crate rustc_fs_util; +extern crate serialize; +extern crate rustc_errors; +extern crate rustc_demangle; +extern crate cc; +extern crate libc; +extern crate jobserver; +extern crate memmap; +extern crate num_cpus; + +use std::path::PathBuf; +use rustc::dep_graph::WorkProduct; +use rustc::session::config::{OutputFilenames, OutputType}; +use rustc::middle::lang_items::LangItem; +use rustc::hir::def_id::CrateNum; +use rustc_data_structures::fx::{FxHashMap, FxHashSet}; +use rustc_data_structures::sync::Lrc; +use rustc_data_structures::svh::Svh; +use rustc::middle::cstore::{LibSource, CrateSource, NativeLibrary}; +use syntax_pos::symbol::Symbol; + +// NB: This module needs to be declared first so diagnostics are +// registered before they are used. +mod diagnostics; + +pub mod common; +pub mod interfaces; +pub mod mir; +pub mod debuginfo; +pub mod base; +pub mod callee; +pub mod glue; +pub mod meth; +pub mod mono_item; +pub mod back; + +pub struct ModuleCodegen { + /// The name of the module. When the crate may be saved between + /// compilations, incremental compilation requires that name be + /// unique amongst **all** crates. Therefore, it should contain + /// something unique to this crate (e.g., a module path) as well + /// as the crate name and disambiguator. + /// We currently generate these names via CodegenUnit::build_cgu_name(). + pub name: String, + pub module_llvm: M, + pub kind: ModuleKind, +} + +pub const RLIB_BYTECODE_EXTENSION: &str = "bc.z"; + +impl ModuleCodegen { + pub fn into_compiled_module(self, + emit_obj: bool, + emit_bc: bool, + emit_bc_compressed: bool, + outputs: &OutputFilenames) -> CompiledModule { + let object = if emit_obj { + Some(outputs.temp_path(OutputType::Object, Some(&self.name))) + } else { + None + }; + let bytecode = if emit_bc { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name))) + } else { + None + }; + let bytecode_compressed = if emit_bc_compressed { + Some(outputs.temp_path(OutputType::Bitcode, Some(&self.name)) + .with_extension(RLIB_BYTECODE_EXTENSION)) + } else { + None + }; + + CompiledModule { + name: self.name.clone(), + kind: self.kind, + object, + bytecode, + bytecode_compressed, + } + } +} + +#[derive(Debug)] +pub struct CompiledModule { + pub name: String, + pub kind: ModuleKind, + pub object: Option, + pub bytecode: Option, + pub bytecode_compressed: Option, +} + +pub struct CachedModuleCodegen { + pub name: String, + pub source: WorkProduct, +} + +#[derive(Copy, Clone, Debug, PartialEq)] +pub enum ModuleKind { + Regular, + Metadata, + Allocator, +} + +bitflags! 
{ + pub struct MemFlags: u8 { + const VOLATILE = 1 << 0; + const NONTEMPORAL = 1 << 1; + const UNALIGNED = 1 << 2; + } +} + +/// Misc info we load from metadata to persist beyond the tcx +pub struct CrateInfo { + pub panic_runtime: Option, + pub compiler_builtins: Option, + pub profiler_runtime: Option, + pub sanitizer_runtime: Option, + pub is_no_builtins: FxHashSet, + pub native_libraries: FxHashMap>>, + pub crate_name: FxHashMap, + pub used_libraries: Lrc>, + pub link_args: Lrc>, + pub used_crate_source: FxHashMap>, + pub used_crates_static: Vec<(CrateNum, LibSource)>, + pub used_crates_dynamic: Vec<(CrateNum, LibSource)>, + pub wasm_imports: FxHashMap, + pub lang_item_to_crate: FxHashMap, + pub missing_lang_items: FxHashMap>, +} + + +pub struct CodegenResults { + pub crate_name: Symbol, + pub modules: Vec, + pub allocator_module: Option, + pub metadata_module: CompiledModule, + pub crate_hash: Svh, + pub metadata: rustc::middle::cstore::EncodedMetadata, + pub windows_subsystem: Option, + pub linker_info: back::linker::LinkerInfo, + pub crate_info: CrateInfo, +} + +__build_diagnostic_array! { librustc_codegen_ssa, DIAGNOSTICS } diff --git a/src/librustc_codegen_llvm/meth.rs b/src/librustc_codegen_ssa/meth.rs similarity index 54% rename from src/librustc_codegen_llvm/meth.rs rename to src/librustc_codegen_ssa/meth.rs index 29c2e71960c2c..dc69baa606d66 100644 --- a/src/librustc_codegen_llvm/meth.rs +++ b/src/librustc_codegen_ssa/meth.rs @@ -8,18 +8,14 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use abi::{FnType, FnTypeExt}; +use rustc_target::abi::call::FnType; use callee; -use common::*; -use builder::Builder; -use consts; -use monomorphize; -use type_::Type; -use value::Value; +use rustc_mir::monomorphize; + +use interfaces::*; use rustc::ty::{self, Ty}; -use rustc::ty::layout::HasDataLayout; -use debuginfo; +use rustc::ty::layout::{LayoutOf, HasDataLayout, HasTyCtxt, TyLayout}; #[derive(Copy, Clone, Debug)] pub struct VirtualIndex(u64); @@ -33,28 +29,46 @@ impl<'a, 'tcx> VirtualIndex { VirtualIndex(index as u64 + 3) } - pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>, - llvtable: &'ll Value, - fn_ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Value { + pub fn get_fn>( + self, + bx: &mut Bx, + llvtable: >::Value, + fn_ty: &FnType<'tcx, Ty<'tcx>> + ) -> >::Value + where &'a Bx::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { // Load the data pointer from the object. debug!("get_fn({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, fn_ty.llvm_type(bx.cx).ptr_to().ptr_to()); + let llvtable = bx.pointercast( + llvtable, + bx.cx().type_ptr_to(bx.cx().type_ptr_to(bx.cx().fn_backend_type(fn_ty))) + ); let ptr_align = bx.tcx().data_layout.pointer_align; - let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), ptr_align); + let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]); + let ptr = bx.load(gep, ptr_align); bx.nonnull_metadata(ptr); // Vtable loads are invariant bx.set_invariant_load(ptr); ptr } - pub fn get_usize(self, bx: &Builder<'a, 'll, 'tcx>, llvtable: &'ll Value) -> &'ll Value { + pub fn get_usize>( + self, + bx: &mut Bx, + llvtable: >::Value + ) -> >::Value + where &'a Bx::CodegenCx : + LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { // Load the data pointer from the object. 
debug!("get_int({:?}, {:?})", llvtable, self); - let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to()); + let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize())); let usize_align = bx.tcx().data_layout.pointer_align; - let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), usize_align); + let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]); + let ptr = bx.load(gep, usize_align); // Vtable loads are invariant bx.set_invariant_load(ptr); ptr @@ -69,47 +83,47 @@ impl<'a, 'tcx> VirtualIndex { /// The `trait_ref` encodes the erased self type. Hence if we are /// making an object `Foo` from a value of type `Foo`, then /// `trait_ref` would map `T:Trait`. -pub fn get_vtable( - cx: &CodegenCx<'ll, 'tcx>, +pub fn get_vtable<'a, 'll: 'a, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>>( + cx: &'a Cx, ty: Ty<'tcx>, trait_ref: ty::PolyExistentialTraitRef<'tcx>, -) -> &'ll Value { - let tcx = cx.tcx; +) -> Cx::Value where &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { + let tcx = cx.tcx(); debug!("get_vtable(ty={:?}, trait_ref={:?})", ty, trait_ref); // Check the cache. - if let Some(&val) = cx.vtables.borrow().get(&(ty, trait_ref)) { + if let Some(&val) = cx.vtables().borrow().get(&(ty, trait_ref)) { return val; } // Not in the cache. Build it. - let nullptr = C_null(Type::i8p(cx)); + let nullptr = cx.const_null(cx.type_i8p()); - let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty)); + let methods = tcx.vtable_methods(trait_ref.with_self_ty(*tcx, ty)); let methods = methods.iter().cloned().map(|opt_mth| { opt_mth.map_or(nullptr, |(def_id, substs)| { callee::resolve_and_get_fn(cx, def_id, substs) }) }); - let (size, align) = cx.size_and_align_of(ty); + let (size, align) = cx.layout_of(ty).size_and_align(); // ///////////////////////////////////////////////////////////////////////////////////////////// // If you touch this code, be sure to also make the corresponding changes to // `get_vtable` in rust_mir/interpret/traits.rs // ///////////////////////////////////////////////////////////////////////////////////////////// let components: Vec<_> = [ - callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)), - C_usize(cx, size.bytes()), - C_usize(cx, align.abi()) + cx.get_fn(monomorphize::resolve_drop_in_place(*cx.tcx(), ty)), + cx.const_usize(size.bytes()), + cx.const_usize(align.abi()) ].iter().cloned().chain(methods).collect(); - let vtable_const = C_struct(cx, &components, false); + let vtable_const = cx.const_struct(&components, false); let align = cx.data_layout().pointer_align; - let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable")); + let vtable = cx.static_addr_of(vtable_const, align, Some("vtable")); - debuginfo::create_vtable_metadata(cx, ty, vtable); + cx.create_vtable_metadata(ty, vtable); - cx.vtables.borrow_mut().insert((ty, trait_ref), vtable); + cx.vtables().borrow_mut().insert((ty, trait_ref), vtable); vtable } diff --git a/src/librustc_codegen_llvm/mir/analyze.rs b/src/librustc_codegen_ssa/mir/analyze.rs similarity index 88% rename from src/librustc_codegen_llvm/mir/analyze.rs rename to src/librustc_codegen_ssa/mir/analyze.rs index a0d6cc4629589..2330cbd54928f 100644 --- a/src/librustc_codegen_llvm/mir/analyze.rs +++ b/src/librustc_codegen_ssa/mir/analyze.rs @@ -17,12 +17,16 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc::mir::{self, Location, TerminatorKind}; use rustc::mir::visit::{Visitor, PlaceContext}; use rustc::mir::traversal; 
-use rustc::ty; -use rustc::ty::layout::LayoutOf; -use type_of::LayoutLlvmExt; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout}; use super::FunctionCx; +use interfaces::*; -pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { +pub fn non_ssa_locals<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>>( + fx: &FunctionCx<'a, 'f, 'll, 'tcx, Cx> +) -> BitSet + where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let mir = fx.mir; let mut analyzer = LocalAnalyzer::new(fx); @@ -32,10 +36,10 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { let ty = fx.monomorphize(&ty); debug!("local {} has type {:?}", index, ty); let layout = fx.cx.layout_of(ty); - if layout.is_llvm_immediate() { + if fx.cx.is_backend_immediate(&layout) { // These sorts of types are immediates that we can store // in an Value without an alloca. - } else if layout.is_llvm_scalar_pair() { + } else if fx.cx.is_backend_scalar_pair(&layout) { // We allow pairs and uses of any of their 2 fields. } else { // These sorts of types require an alloca. Note that @@ -51,8 +55,13 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx>) -> BitSet { analyzer.non_ssa_locals } -struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll> { - fx: &'mir FunctionCx<'a, 'll, 'tcx>, +struct LocalAnalyzer< + 'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, + Cx: 'a + CodegenMethods<'a, 'll, 'tcx> + > + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fx: &'mir FunctionCx<'a, 'f, 'll, 'tcx, Cx>, dominators: Dominators, non_ssa_locals: BitSet, // The location of the first visited direct assignment to each @@ -60,8 +69,10 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll> { first_assignment: IndexVec } -impl LocalAnalyzer<'mir, 'a, 'll, 'tcx> { - fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx>) -> Self { +impl> LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn new(fx: &'mir FunctionCx<'a, 'f, 'll, 'tcx, Cx>) -> Self { let invalid_location = mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location(); let mut analyzer = LocalAnalyzer { @@ -102,7 +113,10 @@ impl LocalAnalyzer<'mir, 'a, 'll, 'tcx> { } } -impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { +impl<'mir, 'a: 'mir, 'f: 'mir, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'f, 'll, 'tcx, Cx> where + &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ fn visit_assign(&mut self, block: mir::BasicBlock, place: &mir::Place<'tcx>, @@ -137,7 +151,7 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { _ => None, }; if let Some((def_id, args)) = check { - if Some(def_id) == self.fx.cx.tcx.lang_items().box_free_fn() { + if Some(def_id) == self.fx.cx.tcx().lang_items().box_free_fn() { // box_free(x) shares with `drop x` the property that it // is not guaranteed to be statically dominated by the // definition of x, so x must always be in an alloca. @@ -164,19 +178,19 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { _ => false }; if is_consume { - let base_ty = proj.base.ty(self.fx.mir, cx.tcx); + let base_ty = proj.base.ty(self.fx.mir, *cx.tcx()); let base_ty = self.fx.monomorphize(&base_ty); // ZSTs don't require any actual memory access. 
- let elem_ty = base_ty.projection_ty(cx.tcx, &proj.elem).to_ty(cx.tcx); + let elem_ty = base_ty.projection_ty(*cx.tcx(), &proj.elem).to_ty(*cx.tcx()); let elem_ty = self.fx.monomorphize(&elem_ty); if cx.layout_of(elem_ty).is_zst() { return; } if let mir::ProjectionElem::Field(..) = proj.elem { - let layout = cx.layout_of(base_ty.to_ty(cx.tcx)); - if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() { + let layout = cx.layout_of(base_ty.to_ty(*cx.tcx())); + if cx.is_backend_immediate(&layout) || cx.is_backend_scalar_pair(&layout) { // Recurse with the same context, instead of `Projection`, // potentially stopping at non-operand projections, // which would trigger `not_ssa` on locals. @@ -234,8 +248,8 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx> { } PlaceContext::Drop => { - let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx); - let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx)); + let ty = mir::Place::Local(local).ty(self.fx.mir, *self.fx.cx.tcx()); + let ty = self.fx.monomorphize(&ty.to_ty(*self.fx.cx.tcx())); // Only need the place if we're actually dropping it. if self.fx.cx.type_needs_drop(ty) { diff --git a/src/librustc_codegen_llvm/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs similarity index 71% rename from src/librustc_codegen_llvm/mir/block.rs rename to src/librustc_codegen_ssa/mir/block.rs index 68e30227185c0..5c4f0ed111ac8 100644 --- a/src/librustc_codegen_llvm/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -8,23 +8,20 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{self, BasicBlock}; use rustc::middle::lang_items; use rustc::ty::{self, Ty, TypeFoldable}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, TyLayout}; use rustc::mir; use rustc::mir::interpret::EvalErrorKind; -use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode}; +use rustc_target::abi::call::{ArgType, FnType, PassMode}; +use rustc_target::spec::abi::Abi; use base; -use callee; -use builder::{Builder, MemFlags}; -use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_uint_big, C_undef}; -use consts; +use MemFlags; +use common::{self, IntPredicate}; use meth; -use monomorphize; -use type_of::LayoutLlvmExt; -use type_::Type; -use value::Value; +use rustc_mir::monomorphize; + +use interfaces::*; use syntax::symbol::Symbol; use syntax_pos::Pos; @@ -34,9 +31,14 @@ use super::place::PlaceRef; use super::operand::OperandRef; use super::operand::OperandValue::{Pair, Ref, Immediate}; -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_block(&mut self, bb: mir::BasicBlock) { - let mut bx = self.build_block(bb); +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> + where &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn codegen_block>(&mut self, bb: mir::BasicBlock) + where Bx: BuilderMethods<'a, 'll, 'tcx, CodegenCx=Cx> + { + let mut bx : Bx = self.build_block(bb); let data = &self.mir[bb]; debug!("codegen_block({:?}={:?})", bb, data); @@ -48,11 +50,12 @@ impl FunctionCx<'a, 'll, 'tcx> { self.codegen_terminator(bx, bb, data.terminator()); } - fn codegen_terminator(&mut self, - mut bx: Builder<'a, 'll, 'tcx>, - bb: mir::BasicBlock, - terminator: &mir::Terminator<'tcx>) - { + fn codegen_terminator>( + &mut self, + mut bx: Bx, + bb: mir::BasicBlock, + terminator: &mir::Terminator<'tcx> + ) where Bx: BuilderMethods<'a, 'll, 'tcx, CodegenCx = Cx> 
{ debug!("codegen_terminator: {:?}", terminator); // Create the cleanup bundle, if needed. @@ -74,11 +77,11 @@ impl FunctionCx<'a, 'll, 'tcx> { => (lltarget, false), (None, Some(_)) => { // jump *into* cleanup - need a landing pad if GNU - (this.landing_pad_to(target), false) + (this.landing_pad_to::(target), false) } (Some(_), None) => span_bug!(span, "{:?} - jump out of cleanup?", terminator), (Some(_), Some(_)) => { - (this.landing_pad_to(target), true) + (this.landing_pad_to::(target), true) } } }; @@ -90,7 +93,7 @@ impl FunctionCx<'a, 'll, 'tcx> { debug!("llblock: creating cleanup trampoline for {:?}", target); let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target); - let trampoline = this.new_block(name); + let mut trampoline : Bx = this.new_block(name); trampoline.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); trampoline.llbb() } else { @@ -98,57 +101,58 @@ impl FunctionCx<'a, 'll, 'tcx> { } }; - let funclet_br = |this: &mut Self, bx: Builder<'_, 'll, '_>, target: mir::BasicBlock| { - let (lltarget, is_cleanupret) = lltarget(this, target); - if is_cleanupret { - // micro-optimization: generate a `ret` rather than a jump - // to a trampoline. - bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); - } else { - bx.br(lltarget); - } - }; + let funclet_br = + |this: &mut Self, bx: &mut Bx, target: mir::BasicBlock| { + let (lltarget, is_cleanupret) = lltarget(this, target); + if is_cleanupret { + // micro-optimization: generate a `ret` rather than a jump + // to a trampoline. + bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget)); + } else { + bx.br(lltarget); + } + }; let do_call = | this: &mut Self, - bx: Builder<'a, 'll, 'tcx>, + bx: &mut Bx, fn_ty: FnType<'tcx, Ty<'tcx>>, - fn_ptr: &'ll Value, - llargs: &[&'ll Value], - destination: Option<(ReturnDest<'ll, 'tcx>, mir::BasicBlock)>, + fn_ptr: Cx::Value, + llargs: &[Cx::Value], + destination: Option<(ReturnDest<'tcx, Cx::Value>, mir::BasicBlock)>, cleanup: Option | { if let Some(cleanup) = cleanup { let ret_bx = if let Some((_, target)) = destination { this.blocks[target] } else { - this.unreachable_block() + this.unreachable_block::() }; let invokeret = bx.invoke(fn_ptr, &llargs, ret_bx, llblock(this, cleanup), cleanup_bundle); - fn_ty.apply_attrs_callsite(&bx, invokeret); + bx.apply_attrs_callsite(&fn_ty, invokeret); if let Some((ret_dest, target)) = destination { - let ret_bx = this.build_block(target); - this.set_debug_loc(&ret_bx, terminator.source_info); - this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret); + let mut ret_bx = this.build_block::(target); + this.set_debug_loc(&mut ret_bx, terminator.source_info); + this.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret); } } else { let llret = bx.call(fn_ptr, &llargs, cleanup_bundle); - fn_ty.apply_attrs_callsite(&bx, llret); + bx.apply_attrs_callsite(&fn_ty, llret); if this.mir[bb].is_cleanup { // Cleanup is always the cold path. Don't inline // drop glue. Also, when there is a deeply-nested // struct, there are "symmetry" issues that cause // exponential inlining - see issue #41696. 
- llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret); + bx.do_not_inline(llret); } if let Some((ret_dest, target)) = destination { - this.store_return(&bx, ret_dest, &fn_ty.ret, llret); + this.store_return(bx, ret_dest, &fn_ty.ret, llret); funclet_br(this, bx, target); } else { bx.unreachable(); @@ -156,24 +160,26 @@ impl FunctionCx<'a, 'll, 'tcx> { } }; - self.set_debug_loc(&bx, terminator.source_info); + self.set_debug_loc(&mut bx, terminator.source_info); match terminator.kind { mir::TerminatorKind::Resume => { if let Some(cleanup_pad) = cleanup_pad { bx.cleanup_ret(cleanup_pad, None); } else { - let slot = self.get_personality_slot(&bx); - let lp0 = slot.project_field(&bx, 0).load(&bx).immediate(); - let lp1 = slot.project_field(&bx, 1).load(&bx).immediate(); - slot.storage_dead(&bx); - - if !bx.sess().target.target.options.custom_unwind_resume { - let mut lp = C_undef(self.landing_pad_type()); + let slot = self.get_personality_slot(&mut bx); + let pr0 = slot.project_field(&mut bx, 0); + let lp0 = bx.load_ref(&pr0).immediate(); + let pr1 = slot.project_field(&mut bx, 1); + let lp1 = bx.load_ref(&pr1).immediate(); + slot.storage_dead(&mut bx); + + if !bx.cx().sess().target.target.options.custom_unwind_resume { + let mut lp = bx.cx().const_undef(self.landing_pad_type()); lp = bx.insert_value(lp, lp0, 0); lp = bx.insert_value(lp, lp1, 1); bx.resume(lp); } else { - bx.call(bx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle); + bx.call(bx.cx().eh_unwind_resume(), &[lp0], cleanup_bundle); bx.unreachable(); } } @@ -181,17 +187,17 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::TerminatorKind::Abort => { // Call core::intrinsics::abort() - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); bx.unreachable(); } mir::TerminatorKind::Goto { target } => { - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); } mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { - let discr = self.codegen_operand(&bx, discr); + let discr = self.codegen_operand(&mut bx, discr); if targets.len() == 2 { // If there are two targets, emit br instead of switch let lltrue = llblock(self, targets[0]); @@ -205,9 +211,11 @@ impl FunctionCx<'a, 'll, 'tcx> { bx.cond_br(discr.immediate(), lltrue, llfalse); } } else { - let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); - let llval = C_uint_big(switch_llty, values[0]); - let cmp = bx.icmp(llvm::IntEQ, discr.immediate(), llval); + let switch_llty = bx.cx().immediate_backend_type( + &bx.cx().layout_of(switch_ty) + ); + let llval = bx.cx().const_uint_big(switch_llty, values[0]); + let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval); bx.cond_br(cmp, lltrue, llfalse); } } else { @@ -215,9 +223,11 @@ impl FunctionCx<'a, 'll, 'tcx> { let switch = bx.switch(discr.immediate(), llblock(self, *otherwise), values.len()); - let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx); + let switch_llty = bx.cx().immediate_backend_type( + &bx.cx().layout_of(switch_ty) + ); for (&value, target) in values.iter().zip(targets) { - let llval = C_uint_big(switch_llty, value); + let llval = bx.cx().const_uint_big(switch_llty, value); let llbb = llblock(self, *target); bx.add_case(switch, llval, llbb) } @@ -232,11 +242,14 @@ impl FunctionCx<'a, 'll, 'tcx> { } PassMode::Direct(_) | PassMode::Pair(..) 
=> { - let op = self.codegen_consume(&bx, &mir::Place::Local(mir::RETURN_PLACE)); + let op = self.codegen_consume( + &mut bx, + &mir::Place::Local(mir::RETURN_PLACE) + ); if let Ref(llval, _, align) = op.val { bx.load(llval, align) } else { - op.immediate_or_packed_pair(&bx) + op.immediate_or_packed_pair(&mut bx) } } @@ -254,8 +267,12 @@ impl FunctionCx<'a, 'll, 'tcx> { }; let llslot = match op.val { Immediate(_) | Pair(..) => { - let scratch = PlaceRef::alloca(&bx, self.fn_ty.ret.layout, "ret"); - op.val.store(&bx, scratch); + let scratch = PlaceRef::alloca( + &mut bx, + self.fn_ty.ret.layout, + "ret" + ); + op.val.store(&mut bx, scratch); scratch.llval } Ref(llval, _, align) => { @@ -264,9 +281,10 @@ impl FunctionCx<'a, 'll, 'tcx> { llval } }; - bx.load( - bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()), - self.fn_ty.ret.layout.align) + let addr = bx.pointercast(llslot, bx.cx().type_ptr_to( + bx.cx().cast_backend_type(&cast_ty) + )); + bx.load(addr, self.fn_ty.ret.layout.align) } }; bx.ret(llval); @@ -279,15 +297,15 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::TerminatorKind::Drop { ref location, target, unwind } => { let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx()); let ty = self.monomorphize(&ty); - let drop_fn = monomorphize::resolve_drop_in_place(bx.cx.tcx, ty); + let drop_fn = monomorphize::resolve_drop_in_place(*bx.cx().tcx(), ty); if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def { // we don't actually need to drop anything. - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); return } - let place = self.codegen_place(&bx, location); + let place = self.codegen_place(&mut bx, location); let (args1, args2); let mut args = if let Some(llextra) = place.llextra { args2 = [place.llval, llextra]; @@ -298,30 +316,30 @@ impl FunctionCx<'a, 'll, 'tcx> { }; let (drop_fn, fn_ty) = match ty.sty { ty::Dynamic(..) => { - let fn_ty = drop_fn.ty(bx.cx.tcx); - let sig = common::ty_fn_sig(bx.cx, fn_ty); + let fn_ty = drop_fn.ty(*bx.cx().tcx()); + let sig = common::ty_fn_sig(bx.cx(), fn_ty); let sig = bx.tcx().normalize_erasing_late_bound_regions( ty::ParamEnv::reveal_all(), &sig, ); - let fn_ty = FnType::new_vtable(bx.cx, sig, &[]); + let fn_ty = bx.cx().new_vtable(sig, &[]); let vtable = args[1]; args = &args[..1]; - (meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty) + (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty) } _ => { - (callee::get_fn(bx.cx, drop_fn), - FnType::of_instance(bx.cx, &drop_fn)) + (bx.cx().get_fn(drop_fn), + bx.cx().fn_type_of_instance(&drop_fn)) } }; - do_call(self, bx, fn_ty, drop_fn, args, + do_call(self, &mut bx, fn_ty, drop_fn, args, Some((ReturnDest::Nothing, target)), unwind); } mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => { - let cond = self.codegen_operand(&bx, cond).immediate(); - let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1); + let cond = self.codegen_operand(&mut bx, cond).immediate(); + let mut const_cond = bx.cx().const_to_opt_u128(cond, false).map(|c| c == 1); // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from @@ -330,7 +348,7 @@ impl FunctionCx<'a, 'll, 'tcx> { // NOTE: Unlike binops, negation doesn't have its own // checked operation, just a comparison with the minimum // value, so we have to check for the assert message. 
- if !bx.cx.check_overflow { + if !bx.cx().check_overflow() { if let mir::interpret::EvalErrorKind::OverflowNeg = *msg { const_cond = Some(expected); } @@ -338,17 +356,17 @@ impl FunctionCx<'a, 'll, 'tcx> { // Don't codegen the panic block if success if known. if const_cond == Some(expected) { - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); return; } // Pass the condition through llvm.expect for branch hinting. - let expect = bx.cx.get_intrinsic(&"llvm.expect.i1"); - let cond = bx.call(expect, &[cond, C_bool(bx.cx, expected)], None); + let expect = bx.cx().get_intrinsic(&"llvm.expect.i1"); + let cond = bx.call(expect, &[cond, bx.cx().const_bool(expected)], None); // Create the failure block and the conditional branch to it. let lltarget = llblock(self, target); - let panic_block = self.new_block("panic"); + let panic_block : Bx = self.new_block("panic"); if expected { bx.cond_br(cond, lltarget, panic_block.llbb()); } else { @@ -357,14 +375,14 @@ impl FunctionCx<'a, 'll, 'tcx> { // After this point, bx is the block for the call to panic. bx = panic_block; - self.set_debug_loc(&bx, terminator.source_info); + self.set_debug_loc(&mut bx, terminator.source_info); // Get the location information. - let loc = bx.sess().source_map().lookup_char_pos(span.lo()); + let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = C_str_slice(bx.cx, filename); - let line = C_u32(bx.cx, loc.line as u32); - let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1); + let filename = bx.cx().const_str_slice(filename); + let line = bx.cx().const_u32(loc.line as u32); + let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1); let align = tcx.data_layout.aggregate_align .max(tcx.data_layout.i32_align) .max(tcx.data_layout.pointer_align); @@ -375,25 +393,28 @@ impl FunctionCx<'a, 'll, 'tcx> { let len = self.codegen_operand(&mut bx, len).immediate(); let index = self.codegen_operand(&mut bx, index).immediate(); - let file_line_col = C_struct(bx.cx, &[filename, line, col], false); - let file_line_col = consts::addr_of(bx.cx, - file_line_col, - align, - Some("panic_bounds_check_loc")); + let file_line_col = bx.cx().const_struct(&[filename, line, col], false); + let file_line_col = bx.cx().static_addr_of( + file_line_col, + align, + Some("panic_bounds_check_loc") + ); (lang_items::PanicBoundsCheckFnLangItem, vec![file_line_col, index, len]) } _ => { let str = msg.description(); let msg_str = Symbol::intern(str).as_str(); - let msg_str = C_str_slice(bx.cx, msg_str); - let msg_file_line_col = C_struct(bx.cx, - &[msg_str, filename, line, col], - false); - let msg_file_line_col = consts::addr_of(bx.cx, - msg_file_line_col, - align, - Some("panic_loc")); + let msg_str = bx.cx().const_str_slice(msg_str); + let msg_file_line_col = bx.cx().const_struct( + &[msg_str, filename, line, col], + false + ); + let msg_file_line_col = bx.cx().static_addr_of( + msg_file_line_col, + align, + Some("panic_loc") + ); (lang_items::PanicFnLangItem, vec![msg_file_line_col]) } @@ -402,11 +423,11 @@ impl FunctionCx<'a, 'll, 'tcx> { // Obtain the panic entry point. let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = FnType::of_instance(bx.cx, &instance); - let llfn = callee::get_fn(bx.cx, instance); + let fn_ty = bx.cx().fn_type_of_instance(&instance); + let llfn = bx.cx().get_fn(instance); // Codegen the actual panic invoke/call. 
- do_call(self, bx, fn_ty, llfn, &args, None, cleanup); + do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup); } mir::TerminatorKind::DropAndReplace { .. } => { @@ -421,11 +442,11 @@ impl FunctionCx<'a, 'll, 'tcx> { from_hir_call: _ } => { // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. - let callee = self.codegen_operand(&bx, func); + let callee = self.codegen_operand(&mut bx, func); let (instance, mut llfn) = match callee.layout.ty.sty { ty::FnDef(def_id, substs) => { - (Some(ty::Instance::resolve(bx.cx.tcx, + (Some(ty::Instance::resolve(*bx.cx().tcx(), ty::ParamEnv::reveal_all(), def_id, substs).unwrap()), @@ -455,8 +476,8 @@ impl FunctionCx<'a, 'll, 'tcx> { if intrinsic == Some("transmute") { if let Some(destination_ref) = destination.as_ref() { let &(ref dest, target) = destination_ref; - self.codegen_transmute(&bx, &args[0], dest); - funclet_br(self, bx, target); + self.codegen_transmute(&mut bx, &args[0], dest); + funclet_br(self, &mut bx, target); } else { // If we are trying to transmute to an uninhabited type, // it is likely there is no allotted destination. In fact, @@ -464,7 +485,7 @@ impl FunctionCx<'a, 'll, 'tcx> { // we can do what we like. Here, we declare that transmuting // into an uninhabited type is impossible, so anything following // it must be unreachable. - assert_eq!(bx.cx.layout_of(sig.output()).abi, layout::Abi::Uninhabited); + assert_eq!(bx.cx().layout_of(sig.output()).abi, layout::Abi::Uninhabited); bx.unreachable(); } return; @@ -478,26 +499,26 @@ impl FunctionCx<'a, 'll, 'tcx> { let fn_ty = match def { Some(ty::InstanceDef::Virtual(..)) => { - FnType::new_vtable(bx.cx, sig, &extra_args) + bx.cx().new_vtable(sig, &extra_args) } Some(ty::InstanceDef::DropGlue(_, None)) => { // empty drop glue - a nop. let &(_, target) = destination.as_ref().unwrap(); - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); return; } - _ => FnType::new(bx.cx, sig, &extra_args) + _ => bx.cx().new_fn_type(sig, &extra_args) }; // emit a panic instead of instantiating an uninhabited type if (intrinsic == Some("init") || intrinsic == Some("uninit")) && fn_ty.ret.layout.abi.is_uninhabited() { - let loc = bx.sess().source_map().lookup_char_pos(span.lo()); + let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo()); let filename = Symbol::intern(&loc.file.name.to_string()).as_str(); - let filename = C_str_slice(bx.cx, filename); - let line = C_u32(bx.cx, loc.line as u32); - let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1); + let filename = bx.cx().const_str_slice(filename); + let line = bx.cx().const_u32(loc.line as u32); + let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1); let align = tcx.data_layout.aggregate_align .max(tcx.data_layout.i32_align) .max(tcx.data_layout.pointer_align); @@ -508,26 +529,28 @@ impl FunctionCx<'a, 'll, 'tcx> { if intrinsic == Some("init") { "zeroed" } else { "uninitialized" } ); let msg_str = Symbol::intern(&str).as_str(); - let msg_str = C_str_slice(bx.cx, msg_str); - let msg_file_line_col = C_struct(bx.cx, - &[msg_str, filename, line, col], - false); - let msg_file_line_col = consts::addr_of(bx.cx, - msg_file_line_col, - align, - Some("panic_loc")); + let msg_str = bx.cx().const_str_slice(msg_str); + let msg_file_line_col = bx.cx().const_struct( + &[msg_str, filename, line, col], + false + ); + let msg_file_line_col = bx.cx().static_addr_of( + msg_file_line_col, + align, + Some("panic_loc") + ); // Obtain the panic entry point. 
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem); let instance = ty::Instance::mono(bx.tcx(), def_id); - let fn_ty = FnType::of_instance(bx.cx, &instance); - let llfn = callee::get_fn(bx.cx, instance); + let fn_ty = bx.cx().fn_type_of_instance(&instance); + let llfn = bx.cx().get_fn(instance); // Codegen the actual panic invoke/call. do_call( self, - bx, + &mut bx, fn_ty, llfn, &[msg_file_line_col], @@ -544,19 +567,17 @@ impl FunctionCx<'a, 'll, 'tcx> { // Prepare the return value destination let ret_dest = if let Some((ref dest, _)) = *destination { let is_intrinsic = intrinsic.is_some(); - self.make_return_dest(&bx, dest, &fn_ty.ret, &mut llargs, + self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs, is_intrinsic) } else { ReturnDest::Nothing }; if intrinsic.is_some() && intrinsic != Some("drop_in_place") { - use intrinsic::codegen_intrinsic_call; - let dest = match ret_dest { _ if fn_ty.ret.is_indirect() => llargs[0], ReturnDest::Nothing => { - C_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to()) + bx.cx().const_undef(bx.cx().type_ptr_to(bx.memory_ty(&fn_ty.ret))) } ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval, @@ -590,7 +611,7 @@ impl FunctionCx<'a, 'll, 'tcx> { ); return OperandRef { val: Immediate(llval), - layout: bx.cx.layout_of(ty), + layout: bx.cx().layout_of(ty), }; }, @@ -608,26 +629,26 @@ impl FunctionCx<'a, 'll, 'tcx> { ); return OperandRef { val: Immediate(llval), - layout: bx.cx.layout_of(ty) + layout: bx.cx().layout_of(ty) }; } } } - self.codegen_operand(&bx, arg) + self.codegen_operand(&mut bx, arg) }).collect(); - let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx); - codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest, - terminator.source_info.span); + let callee_ty = instance.as_ref().unwrap().ty(*bx.cx().tcx()); + bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest, + terminator.source_info.span); if let ReturnDest::IndirectOperand(dst, _) = ret_dest { - self.store_return(&bx, ret_dest, &fn_ty.ret, dst.llval); + self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval); } if let Some((_, target)) = *destination { - funclet_br(self, bx, target); + funclet_br(self, &mut bx, target); } else { bx.unreachable(); } @@ -644,11 +665,11 @@ impl FunctionCx<'a, 'll, 'tcx> { }; for (i, arg) in first_args.iter().enumerate() { - let mut op = self.codegen_operand(&bx, arg); + let mut op = self.codegen_operand(&mut bx, arg); if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) { if let Pair(data_ptr, meta) = op.val { llfn = Some(meth::VirtualIndex::from_index(idx) - .get_fn(&bx, meta, &fn_ty)); + .get_fn(&mut bx, meta, &fn_ty)); llargs.push(data_ptr); continue; } @@ -659,27 +680,27 @@ impl FunctionCx<'a, 'll, 'tcx> { match (arg, op.val) { (&mir::Operand::Copy(_), Ref(_, None, _)) | (&mir::Operand::Constant(_), Ref(_, None, _)) => { - let tmp = PlaceRef::alloca(&bx, op.layout, "const"); - op.val.store(&bx, tmp); + let tmp = PlaceRef::alloca(&mut bx, op.layout, "const"); + op.val.store(&mut bx, tmp); op.val = Ref(tmp.llval, None, tmp.align); } _ => {} } - self.codegen_argument(&bx, op, &mut llargs, &fn_ty.args[i]); + self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]); } if let Some(tup) = untuple { - self.codegen_arguments_untupled(&bx, tup, &mut llargs, + self.codegen_arguments_untupled(&mut bx, tup, &mut llargs, &fn_ty.args[first_args.len()..]) } let fn_ptr = match (llfn, instance) { (Some(llfn), _) => llfn, - (None, Some(instance)) => callee::get_fn(bx.cx, instance), + (None, 
Some(instance)) => bx.cx().get_fn(instance), _ => span_bug!(span, "no llfn for call"), }; - do_call(self, bx, fn_ty, fn_ptr, &llargs, + do_call(self, &mut bx, fn_ty, fn_ptr, &llargs, destination.as_ref().map(|&(_, target)| (ret_dest, target)), cleanup); } @@ -690,14 +711,16 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn codegen_argument(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - op: OperandRef<'ll, 'tcx>, - llargs: &mut Vec<&'ll Value>, - arg: &ArgType<'tcx, Ty<'tcx>>) { + fn codegen_argument>( + &mut self, + bx: &mut Bx, + op: OperandRef<'tcx, Cx::Value>, + llargs: &mut Vec, + arg: &ArgType<'tcx, Ty<'tcx>> + ) { // Fill padding with undef value, where applicable. if let Some(ty) = arg.pad { - llargs.push(C_undef(ty.llvm_type(bx.cx))); + llargs.push(bx.cx().const_undef(bx.cx().reg_backend_type(&ty))) } if arg.is_ignore() { @@ -756,8 +779,10 @@ impl FunctionCx<'a, 'll, 'tcx> { if by_ref && !arg.is_indirect() { // Have to load the argument, maybe while casting it. if let PassMode::Cast(ty) = arg.mode { - llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx).ptr_to()), - align.min(arg.layout.align)); + let addr = bx.pointercast(llval, bx.cx().type_ptr_to( + bx.cx().cast_backend_type(&ty)) + ); + llval = bx.load(addr, align.min(arg.layout.align)); } else { // We can't use `PlaceRef::load` here because the argument // may have a type we don't treat as immediate, but the ABI @@ -778,11 +803,13 @@ impl FunctionCx<'a, 'll, 'tcx> { llargs.push(llval); } - fn codegen_arguments_untupled(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - operand: &mir::Operand<'tcx>, - llargs: &mut Vec<&'ll Value>, - args: &[ArgType<'tcx, Ty<'tcx>>]) { + fn codegen_arguments_untupled>( + &mut self, + bx: &mut Bx, + operand: &mir::Operand<'tcx>, + llargs: &mut Vec, + args: &[ArgType<'tcx, Ty<'tcx>>] + ) { let tuple = self.codegen_operand(bx, operand); // Handle both by-ref and immediate tuples. @@ -790,7 +817,8 @@ impl FunctionCx<'a, 'll, 'tcx> { let tuple_ptr = PlaceRef::new_sized(llval, tuple.layout, align); for i in 0..tuple.layout.fields.count() { let field_ptr = tuple_ptr.project_field(bx, i); - self.codegen_argument(bx, field_ptr.load(bx), llargs, &args[i]); + let load_ref = bx.load_ref(&field_ptr); + self.codegen_argument(bx, load_ref , llargs, &args[i]); } } else if let Ref(_, Some(_), _) = tuple.val { bug!("closure arguments must be sized") @@ -803,14 +831,17 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn get_personality_slot(&mut self, bx: &Builder<'a, 'll, 'tcx>) -> PlaceRef<'ll, 'tcx> { - let cx = bx.cx; + fn get_personality_slot>( + &mut self, + bx: &mut Bx + ) -> PlaceRef<'tcx, Cx::Value> where Bx : BuilderMethods<'a, 'll, 'tcx, CodegenCx=Cx> { + let cx = bx.cx(); if let Some(slot) = self.personality_slot { slot } else { - let layout = cx.layout_of(cx.tcx.intern_tup(&[ - cx.tcx.mk_mut_ptr(cx.tcx.types.u8), - cx.tcx.types.i32 + let layout = cx.layout_of(cx.tcx().intern_tup(&[ + cx.tcx().mk_mut_ptr(cx.tcx().types.u8), + cx.tcx().types.i32 ])); let slot = PlaceRef::alloca(bx, layout, "personalityslot"); self.personality_slot = Some(slot); @@ -821,65 +852,79 @@ impl FunctionCx<'a, 'll, 'tcx> { /// Return the landingpad wrapper around the given basic block /// /// No-op in MSVC SEH scheme. 
- fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> &'ll BasicBlock { + fn landing_pad_to>( + &mut self, + target_bb: mir::BasicBlock + ) -> Cx::BasicBlock { if let Some(block) = self.landing_pads[target_bb] { return block; } let block = self.blocks[target_bb]; - let landing_pad = self.landing_pad_uncached(block); + let landing_pad = self.landing_pad_uncached::(block); self.landing_pads[target_bb] = Some(landing_pad); landing_pad } - fn landing_pad_uncached(&mut self, target_bb: &'ll BasicBlock) -> &'ll BasicBlock { + fn landing_pad_uncached>( + &mut self, + target_bb: Cx::BasicBlock + ) -> Cx::BasicBlock { if base::wants_msvc_seh(self.cx.sess()) { span_bug!(self.mir.span, "landing pad was not inserted?") } - let bx = self.new_block("cleanup"); + let mut bx : Bx = self.new_block("cleanup"); let llpersonality = self.cx.eh_personality(); let llretty = self.landing_pad_type(); let lp = bx.landing_pad(llretty, llpersonality, 1); bx.set_cleanup(lp); - let slot = self.get_personality_slot(&bx); - slot.storage_live(&bx); - Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&bx, slot); + let slot = self.get_personality_slot(&mut bx); + slot.storage_live(&mut bx); + Pair(bx.extract_value(lp, 0), bx.extract_value(lp, 1)).store(&mut bx, slot); bx.br(target_bb); bx.llbb() } - fn landing_pad_type(&self) -> &'ll Type { + fn landing_pad_type(&self) -> Cx::Type { let cx = self.cx; - Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false) + cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false) } - fn unreachable_block(&mut self) -> &'ll BasicBlock { + fn unreachable_block>( + &mut self + ) -> Cx::BasicBlock { self.unreachable_block.unwrap_or_else(|| { - let bl = self.new_block("unreachable"); + let mut bl : Bx = self.new_block("unreachable"); bl.unreachable(); self.unreachable_block = Some(bl.llbb()); bl.llbb() }) } - pub fn new_block(&self, name: &str) -> Builder<'a, 'll, 'tcx> { - Builder::new_block(self.cx, self.llfn, name) + pub fn new_block>(&self, name: &str) -> Bx { + Bx::new_block(self.cx, self.llfn, name) } - pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'll, 'tcx> { - let bx = Builder::with_cx(self.cx); + pub fn build_block>( + &self, + bb: mir::BasicBlock + ) -> Bx { + let mut bx = Bx::with_cx(self.cx); bx.position_at_end(self.blocks[bb]); bx } - fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx>, - dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>, - llargs: &mut Vec<&'ll Value>, is_intrinsic: bool) - -> ReturnDest<'ll, 'tcx> { + fn make_return_dest>( + &mut self, + bx: &mut Bx, + dest: &mir::Place<'tcx>, + fn_ret: &ArgType<'tcx, Ty<'tcx>>, + llargs: &mut Vec, is_intrinsic: bool + ) -> ReturnDest<'tcx, Cx::Value> { // If the return is ignored, we can just return a do-nothing ReturnDest if fn_ret.is_ignore() { return ReturnDest::Nothing; @@ -933,20 +978,23 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx>, - src: &mir::Operand<'tcx>, - dst: &mir::Place<'tcx>) { + fn codegen_transmute>( + &mut self, + bx: &mut Bx, + src: &mir::Operand<'tcx>, + dst: &mir::Place<'tcx> + ) { if let mir::Place::Local(index) = *dst { match self.locals[index] { LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place), LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"), LocalRef::Operand(None) => { - let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst)); + let dst_layout = bx.cx().layout_of(self.monomorphized_place_ty(dst)); 
assert!(!dst_layout.ty.has_erasable_regions()); let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp"); place.storage_live(bx); self.codegen_transmute_into(bx, src, place); - let op = place.load(bx); + let op = bx.load_ref(&place); place.storage_dead(bx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -961,30 +1009,35 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx>, - src: &mir::Operand<'tcx>, - dst: PlaceRef<'ll, 'tcx>) { + fn codegen_transmute_into>( + &mut self, + bx: &mut Bx, + src: &mir::Operand<'tcx>, + dst: PlaceRef<'tcx, Cx::Value> + ) { let src = self.codegen_operand(bx, src); - let llty = src.layout.llvm_type(bx.cx); - let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to()); + let llty = bx.cx().backend_type(&src.layout); + let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty)); let align = src.layout.align.min(dst.layout.align); src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); } // Stores the return value of a function call into it's final location. - fn store_return(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - dest: ReturnDest<'ll, 'tcx>, - ret_ty: &ArgType<'tcx, Ty<'tcx>>, - llval: &'ll Value) { + fn store_return>( + &mut self, + bx: &mut Bx, + dest: ReturnDest<'tcx, Cx::Value>, + ret_ty: &ArgType<'tcx, Ty<'tcx>>, + llval: Cx::Value + ) { use self::ReturnDest::*; match dest { Nothing => (), - Store(dst) => ret_ty.store(bx, llval, dst), + Store(dst) => bx.store_arg_ty(&ret_ty, llval, dst), IndirectOperand(tmp, index) => { - let op = tmp.load(bx); + let op = bx.load_ref(&tmp); tmp.storage_dead(bx); self.locals[index] = LocalRef::Operand(Some(op)); } @@ -993,8 +1046,8 @@ impl FunctionCx<'a, 'll, 'tcx> { let op = if let PassMode::Cast(_) = ret_ty.mode { let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret"); tmp.storage_live(bx); - ret_ty.store(bx, llval, tmp); - let op = tmp.load(bx); + bx.store_arg_ty(&ret_ty, llval, tmp); + let op = bx.load_ref(&tmp); tmp.storage_dead(bx); op } else { @@ -1006,13 +1059,13 @@ impl FunctionCx<'a, 'll, 'tcx> { } } -enum ReturnDest<'ll, 'tcx> { +enum ReturnDest<'tcx, V> { // Do nothing, the return value is indirect or ignored Nothing, // Store the return value to the pointer - Store(PlaceRef<'ll, 'tcx>), + Store(PlaceRef<'tcx, V>), // Stores an indirect return value to an operand local place - IndirectOperand(PlaceRef<'ll, 'tcx>, mir::Local), + IndirectOperand(PlaceRef<'tcx, V>, mir::Local), // Stores a direct return value to an operand local place DirectOperand(mir::Local) } diff --git a/src/librustc_codegen_ssa/mir/constant.rs b/src/librustc_codegen_ssa/mir/constant.rs new file mode 100644 index 0000000000000..98fdc53ef0400 --- /dev/null +++ b/src/librustc_codegen_ssa/mir/constant.rs @@ -0,0 +1,109 @@ +// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. 
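[Editor's note: the following aside is not part of the patch.] The block.rs hunks above apply one pattern throughout: structures that held LLVM-specific `&'ll Value`s become generic over a backend value type (e.g. `ReturnDest<'tcx, V>`), and code that took a concrete `Builder` now threads `bx: &mut Bx` for any type implementing the builder trait. Below is a minimal, self-contained sketch of that shape; `BackendValue`, `Builder`, `Place`, and `store_constant` are illustrative stand-ins, not the PR's actual `interfaces::*` items.

use std::fmt::Debug;

// Stand-in for the bound the PR puts on backend values (`CodegenObject`).
pub trait BackendValue: Copy + Debug {}

// A value-generic place, mirroring the shape of the generic `PlaceRef`.
pub struct Place<V: BackendValue> {
    pub llval: V,           // pointer to the backing memory
    pub llextra: Option<V>, // unsized metadata (vtable or length), if any
}

// Stand-in builder trait with just the operations this sketch needs.
pub trait Builder {
    type Value: BackendValue;
    fn const_u32(&mut self, x: u32) -> Self::Value;
    fn store(&mut self, val: Self::Value, dst: &Place<Self::Value>);
}

// Backend-agnostic helper in the style of the rewritten `codegen_*` methods:
// it works for any builder and never names an LLVM type.
pub fn store_constant<Bx: Builder>(bx: &mut Bx, dst: &Place<Bx::Value>, x: u32) {
    let v = bx.const_u32(x);
    bx.store(v, dst);
}

Taking the builder by `&mut` in the sketch mirrors the patch's switch from `&bx` to `&mut bx`, which suggests the builder operations are now trait methods taking `&mut self`, so callers must hold the builder mutably.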
+ +use rustc::mir::interpret::ConstEvalErr; +use rustc_mir::const_eval::const_field; +use rustc::mir; +use rustc_data_structures::indexed_vec::Idx; +use rustc_data_structures::sync::Lrc; +use rustc::mir::interpret::{GlobalId, ConstValue}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, LayoutOf, TyLayout, HasTyCtxt}; +use syntax::source_map::Span; +use interfaces::*; + +use super::FunctionCx; + +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn fully_evaluate>( + &mut self, + bx: &Bx, + constant: &'tcx ty::Const<'tcx>, + ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { + match constant.val { + ConstValue::Unevaluated(def_id, ref substs) => { + let tcx = bx.tcx(); + let param_env = ty::ParamEnv::reveal_all(); + let instance = ty::Instance::resolve(tcx, param_env, def_id, substs).unwrap(); + let cid = GlobalId { + instance, + promoted: None, + }; + tcx.const_eval(param_env.and(cid)) + }, + _ => Ok(constant), + } + } + + pub fn eval_mir_constant>( + &mut self, + bx: &Bx, + constant: &mir::Constant<'tcx>, + ) -> Result<&'tcx ty::Const<'tcx>, Lrc>> { + let c = self.monomorphize(&constant.literal); + self.fully_evaluate(bx, c) + } + + /// process constant containing SIMD shuffle indices + pub fn simd_shuffle_indices>( + &mut self, + bx: &Bx, + span: Span, + ty: Ty<'tcx>, + constant: Result<&'tcx ty::Const<'tcx>, Lrc>>, + ) -> (Cx::Value, Ty<'tcx>) { + constant + .and_then(|c| { + let field_ty = c.ty.builtin_index().unwrap(); + let fields = match c.ty.sty { + ty::Array(_, n) => n.unwrap_usize(bx.tcx()), + ref other => bug!("invalid simd shuffle type: {}", other), + }; + let values: Result, Lrc<_>> = (0..fields).map(|field| { + let field = const_field( + bx.tcx(), + ty::ParamEnv::reveal_all(), + self.instance, + None, + mir::Field::new(field as usize), + c, + )?; + if let Some(prim) = field.val.try_to_scalar() { + let layout = bx.cx().layout_of(field_ty); + let scalar = match layout.abi { + layout::Abi::Scalar(ref x) => x, + _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) + }; + Ok(bx.cx().scalar_to_backend( + prim, scalar, + bx.cx().immediate_backend_type(&layout), + )) + } else { + bug!("simd shuffle field {:?}", field) + } + }).collect(); + let llval = bx.cx().const_struct(&values?, false); + Ok((llval, c.ty)) + }) + .unwrap_or_else(|e| { + e.report_as_error( + bx.tcx().at(span), + "could not evaluate shuffle_indices at compile time", + ); + // We've errored, so we don't have to produce working code. + let ty = self.monomorphize(&ty); + let llty = bx.cx().backend_type(&bx.cx().layout_of(ty)); + (bx.cx().const_undef(llty), ty) + }) + } +} diff --git a/src/librustc_codegen_llvm/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs similarity index 75% rename from src/librustc_codegen_llvm/mir/mod.rs rename to src/librustc_codegen_ssa/mir/mod.rs index a6e2ccf92e4e3..fcca370e0a31a 100644 --- a/src/librustc_codegen_llvm/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -8,23 +8,18 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. 
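[Editor's note: the following aside is not part of the patch.] The new `librustc_codegen_ssa/mir/constant.rs` above builds constants through methods on the codegen context (`layout_of`, `scalar_to_backend`, `const_struct`, `const_undef`) rather than the old LLVM-side helpers (`C_u32`, `C_struct`, `scalar_to_llvm`). A rough sketch of that interface shape follows, assuming a much-simplified stand-in trait; `ConstBuilder` below is not the PR's real trait and its signatures are illustrative only.

// Simplified stand-in for the constant-building side of the codegen context.
pub trait ConstBuilder {
    type Value;
    fn const_u32(&self, x: u32) -> Self::Value;
    fn const_struct(&self, elems: &[Self::Value], packed: bool) -> Self::Value;
}

// Backend-agnostic analogue of the `simd_shuffle_indices` loop above: turn a
// slice of indices into one constant aggregate without naming LLVM anywhere.
pub fn build_index_vector<Cx: ConstBuilder>(cx: &Cx, indices: &[u32]) -> Cx::Value {
    let elems: Vec<Cx::Value> = indices.iter().map(|&i| cx.const_u32(i)).collect();
    cx.const_struct(&elems, false)
}

An LLVM backend would implement such a trait with its own value type while the SSA-layer code stays unchanged; that is the separation the `rustc_codegen_ssa` crate introduces.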
-use common::{C_i32, C_null}; use libc::c_uint; -use llvm::{self, BasicBlock}; -use llvm::debuginfo::DIScope; use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts}; -use rustc::ty::layout::{LayoutOf, TyLayout}; +use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt}; use rustc::mir::{self, Mir}; use rustc::ty::subst::Substs; use rustc::session::config::DebugInfo; +use common::Funclet; +use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext}; +use rustc_mir::monomorphize::Instance; +use rustc_target::abi::call::{FnType, PassMode}; +use interfaces::*; use base; -use builder::Builder; -use common::{CodegenCx, Funclet}; -use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext}; -use monomorphize::Instance; -use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode}; -use type_::Type; -use value::Value; use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span}; use syntax::symbol::keywords; @@ -34,8 +29,6 @@ use std::iter; use rustc_data_structures::bit_set::BitSet; use rustc_data_structures::indexed_vec::IndexVec; -pub use self::constant::codegen_static_initializer; - use self::analyze::CleanupKind; use self::place::PlaceRef; use rustc::mir::traversal; @@ -43,16 +36,18 @@ use rustc::mir::traversal; use self::operand::{OperandRef, OperandValue}; /// Master context for codegenning from MIR. -pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { +pub struct FunctionCx<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ instance: Instance<'tcx>, mir: &'a mir::Mir<'tcx>, - debug_context: FunctionDebugContext<'ll>, + debug_context: FunctionDebugContext<>::DIScope>, - llfn: &'ll Value, + llfn: Cx::Value, - cx: &'a CodegenCx<'ll, 'tcx>, + cx: &'a Cx, fn_ty: FnType<'tcx, Ty<'tcx>>, @@ -63,24 +58,25 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// don't really care about it very much. Anyway, this value /// contains an alloca into which the personality is stored and /// then later loaded when generating the DIVERGE_BLOCK. - personality_slot: Option>, + personality_slot: Option>, /// A `Block` for each MIR `BasicBlock` - blocks: IndexVec, + blocks: IndexVec, /// The funclet status of each basic block cleanup_kinds: IndexVec, /// When targeting MSVC, this stores the cleanup info for each funclet - /// BB. This is initialized as we compute the funclets' head block in RPO. + /// BB. This is initialized as we compute the funclets' + /// head block in RPO. funclets: &'f IndexVec>>, /// This stores the landing-pad block for a given BB, computed lazily on GNU /// and eagerly on MSVC. - landing_pads: IndexVec>, + landing_pads: IndexVec>, /// Cached unreachable block - unreachable_block: Option<&'ll BasicBlock>, + unreachable_block: Option, /// The location where each MIR arg/var/tmp/ret is stored. This is /// usually an `PlaceRef` representing an alloca, but not always: @@ -97,32 +93,46 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll> { /// /// Avoiding allocs can also be important for certain intrinsics, /// notably `expect`. - locals: IndexVec>, + locals: IndexVec>, /// Debug information for MIR scopes. - scopes: IndexVec>, + scopes: IndexVec>, /// If this function is being monomorphized, this contains the type substitutions used.
param_substs: &'tcx Substs<'tcx>, } -impl FunctionCx<'a, 'll, 'tcx> { +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ pub fn monomorphize(&self, value: &T) -> T where T: TypeFoldable<'tcx> { - self.cx.tcx.subst_and_normalize_erasing_regions( + self.cx.tcx().subst_and_normalize_erasing_regions( self.param_substs, ty::ParamEnv::reveal_all(), value, ) } +} - pub fn set_debug_loc(&mut self, bx: &Builder<'_, 'll, '_>, source_info: mir::SourceInfo) { +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn set_debug_loc>( + &mut self, + bx: &mut Bx, + source_info: mir::SourceInfo + ) where Bx::CodegenCx : DebugInfoMethods<'ll, 'tcx, DIScope = Cx::DIScope>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let (scope, span) = self.debug_loc(source_info); - debuginfo::set_source_location(&self.debug_context, bx, scope, span); + bx.set_source_location(&self.debug_context, scope, span); } - pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScope>, Span) { + pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option, Span) { // Bail out if debug info emission is not enabled. match self.debug_context { FunctionDebugContext::DebugInfoDisabled | @@ -162,34 +172,40 @@ impl FunctionCx<'a, 'll, 'tcx> { // corresponding to span's containing source scope. If so, we need to create a DIScope // "extension" into that file. fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos) - -> Option<&'ll DIScope> { + -> Option { let scope_metadata = self.scopes[scope_id].scope_metadata; if pos < self.scopes[scope_id].file_start_pos || pos >= self.scopes[scope_id].file_end_pos { let cm = self.cx.sess().source_map(); let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate; - Some(debuginfo::extend_scope_to_file(self.cx, - scope_metadata.unwrap(), - &cm.lookup_char_pos(pos).file, - defining_crate)) + Some(self.cx.extend_scope_to_file( + scope_metadata.unwrap(), + &cm.lookup_char_pos(pos).file, + defining_crate + )) } else { scope_metadata } } } -enum LocalRef<'ll, 'tcx> { - Place(PlaceRef<'ll, 'tcx>), +enum LocalRef<'tcx, V> { + Place(PlaceRef<'tcx, V>), /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place). /// `*p` is the fat pointer that references the actual unsized place. /// Every time it is initialized, we have to reallocate the place /// and update the fat pointer. That's the reason why it is indirect. 
- UnsizedPlace(PlaceRef<'ll, 'tcx>), - Operand(Option>), + UnsizedPlace(PlaceRef<'tcx, V>), + Operand(Option>), } -impl LocalRef<'ll, 'tcx> { - fn new_operand(cx: &CodegenCx<'ll, 'tcx>, layout: TyLayout<'tcx>) -> LocalRef<'ll, 'tcx> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> LocalRef<'tcx, V> { + fn new_operand>( + cx: &'a Cx, + layout: TyLayout<'tcx> + ) -> LocalRef<'tcx, V> where Cx: Backend<'ll, Value=V>, + &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { if layout.is_zst() { // Zero-size temporaries aren't always initialized, which // doesn't matter because they don't contain data, but @@ -203,18 +219,18 @@ impl LocalRef<'ll, 'tcx> { /////////////////////////////////////////////////////////////////////////// -pub fn codegen_mir( - cx: &'a CodegenCx<'ll, 'tcx>, - llfn: &'ll Value, +pub fn codegen_mir<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + cx: &'a Bx::CodegenCx, + llfn: >::Value, mir: &'a Mir<'tcx>, instance: Instance<'tcx>, sig: ty::FnSig<'tcx>, -) { - let fn_ty = FnType::new(cx, sig, &[]); +) where &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> { + let fn_ty = cx.new_fn_type(sig, &[]); debug!("fn_ty: {:?}", fn_ty); let debug_context = - debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir); - let bx = Builder::new_block(cx, llfn, "start"); + cx.create_function_debug_context(instance, sig, llfn, mir); + let mut bx = Bx::new_block(cx, llfn, "start"); if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) { bx.set_personality_fn(cx.eh_personality()); @@ -224,7 +240,7 @@ pub fn codegen_mir( // Allocate a `Block` for every basic block, except // the start block, if nothing loops back to it. let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty(); - let block_bxs: IndexVec = + let block_bxs: IndexVec>::BasicBlock> = mir.basic_blocks().indices().map(|bb| { if bb == mir::START_BLOCK && !reentrant_start_block { bx.llbb() @@ -234,8 +250,8 @@ pub fn codegen_mir( }).collect(); // Compute debuginfo scopes from MIR scopes. 
- let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context); - let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs); + let scopes = cx.create_mir_scopes(mir, &debug_context); + let (landing_pads, funclets) = create_funclets(mir, &mut bx, &cleanup_kinds, &block_bxs); let mut fx = FunctionCx { instance, @@ -262,37 +278,38 @@ pub fn codegen_mir( // Allocate variable and temp allocas fx.locals = { - let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals); + let args = arg_local_refs(&mut bx, &fx, &fx.scopes, &memory_locals); let mut allocate_local = |local| { let decl = &mir.local_decls[local]; - let layout = bx.cx.layout_of(fx.monomorphize(&decl.ty)); + let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty)); assert!(!layout.ty.has_erasable_regions()); if let Some(name) = decl.name { // User variable let debug_scope = fx.scopes[decl.visibility_scope]; - let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == DebugInfo::Full; + let dbg = + debug_scope.is_valid() && bx.cx().sess().opts.debuginfo == DebugInfo::Full; if !memory_locals.contains(local) && !dbg { debug!("alloc: {:?} ({}) -> operand", local, name); - return LocalRef::new_operand(bx.cx, layout); + return LocalRef::new_operand(bx.cx(), layout); } debug!("alloc: {:?} ({}) -> place", local, name); if layout.is_unsized() { let indirect_place = - PlaceRef::alloca_unsized_indirect(&bx, layout, &name.as_str()); + PlaceRef::alloca_unsized_indirect(&mut bx, layout, &name.as_str()); // FIXME: add an appropriate debuginfo LocalRef::UnsizedPlace(indirect_place) } else { - let place = PlaceRef::alloca(&bx, layout, &name.as_str()); + let place = PlaceRef::alloca(&mut bx, layout, &name.as_str()); if dbg { let (scope, span) = fx.debug_loc(mir::SourceInfo { span: decl.source_info.span, scope: decl.visibility_scope, }); - declare_local(&bx, &fx.debug_context, name, layout.ty, scope.unwrap(), + bx.declare_local(&fx.debug_context, name, layout.ty, scope.unwrap(), VariableAccess::DirectVariable { alloca: place.llval }, VariableKind::LocalVariable, span); } @@ -302,23 +319,27 @@ pub fn codegen_mir( // Temporary or return place if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { debug!("alloc: {:?} (return place) -> place", local); - let llretptr = llvm::get_param(llfn, 0); + let llretptr = fx.cx.get_param(llfn, 0); LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align)) } else if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { let indirect_place = - PlaceRef::alloca_unsized_indirect(&bx, layout, &format!("{:?}", local)); + PlaceRef::alloca_unsized_indirect( + &mut bx, + layout, + &format!("{:?}", local) + ); LocalRef::UnsizedPlace(indirect_place) } else { - LocalRef::Place(PlaceRef::alloca(&bx, layout, &format!("{:?}", local))) + LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local))) } } else { // If this is an immediate local, we do not create an // alloca in advance. Instead we wait until we see the // definition and update the operand there. 
debug!("alloc: {:?} -> operand", local); - LocalRef::new_operand(bx.cx, layout) + LocalRef::new_operand(bx.cx(), layout) } } }; @@ -346,7 +367,7 @@ pub fn codegen_mir( // Codegen the body of each block using reverse postorder for (bb, _) in rpo { visited.insert(bb.index()); - fx.codegen_block(bb); + fx.codegen_block::(bb); } // Remove blocks that haven't been visited, or have no @@ -355,24 +376,23 @@ pub fn codegen_mir( // Unreachable block if !visited.contains(bb.index()) { debug!("codegen_mir: block {:?} was not visited", bb); - unsafe { - llvm::LLVMDeleteBasicBlock(fx.blocks[bb]); - } + bx.delete_basic_block(fx.blocks[bb]); } } } -fn create_funclets( +fn create_funclets<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( mir: &'a Mir<'tcx>, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, cleanup_kinds: &IndexVec, - block_bxs: &IndexVec) - -> (IndexVec>, - IndexVec>>) + block_bxs: &IndexVec>::BasicBlock>) + -> (IndexVec>::BasicBlock>>, + IndexVec>::Value>>>) + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| { match *cleanup_kind { - CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {} + CleanupKind::Funclet if base::wants_msvc_seh(bx.cx().sess()) => {} _ => return (None, None) } @@ -400,8 +420,8 @@ fn create_funclets( // bar(); // } Some(&mir::TerminatorKind::Abort) => { - let cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); - let cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); + let mut cs_bx = bx.build_sibling_block(&format!("cs_funclet{:?}", bb)); + let mut cp_bx = bx.build_sibling_block(&format!("cp_funclet{:?}", bb)); ret_llbb = cs_bx.llbb(); let cs = cs_bx.catch_switch(None, None, 1); @@ -411,13 +431,13 @@ fn create_funclets( // C++ personality function, but `catch (...)` has no type so // it's null. The 64 here is actually a bitfield which // represents that this is a catch-all block. - let null = C_null(Type::i8p(bx.cx)); - let sixty_four = C_i32(bx.cx, 64); + let null = bx.cx().const_null(bx.cx().type_i8p()); + let sixty_four = bx.cx().const_i32(64); cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]); cp_bx.br(llbb); } _ => { - let cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb)); + let mut cleanup_bx = bx.build_sibling_block(&format!("funclet_{:?}", bb)); ret_llbb = cleanup_bx.llbb(); cleanup = cleanup_bx.cleanup_pad(None, &[]); cleanup_bx.br(llbb); @@ -431,12 +451,17 @@ fn create_funclets( /// Produce, for each argument, a `Value` pointing at the /// argument's value. As arguments are places, these are always /// indirect. -fn arg_local_refs( - bx: &Builder<'a, 'll, 'tcx>, - fx: &FunctionCx<'a, 'll, 'tcx>, - scopes: &IndexVec>, +fn arg_local_refs<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + fx: &FunctionCx<'a, 'f, 'll, 'tcx, Bx::CodegenCx>, + scopes: &IndexVec< + mir::SourceScope, + debuginfo::MirDebugScope<>::DIScope> + >, memory_locals: &BitSet, -) -> Vec> { +) -> Vec>::Value>> + where &'a Bx::CodegenCx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let mir = fx.mir; let tcx = bx.tcx(); let mut idx = 0; @@ -444,7 +469,7 @@ fn arg_local_refs( // Get the argument scope, if it exists and if we need it. 
let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE]; - let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full { + let arg_scope = if bx.cx().sess().opts.debuginfo == DebugInfo::Full { arg_scope.scope_metadata } else { None @@ -471,14 +496,15 @@ fn arg_local_refs( _ => bug!("spread argument isn't a tuple?!") }; - let place = PlaceRef::alloca(bx, bx.cx.layout_of(arg_ty), &name); + let place = PlaceRef::alloca(bx, bx.cx().layout_of(arg_ty), &name); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_ty.args[idx]; idx += 1; if arg.pad.is_some() { llarg_idx += 1; } - arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i)); + let pr_field = place.project_field(bx, i); + bx.store_fn_arg(arg, &mut llarg_idx, pr_field); } // Now that we have one alloca that contains the aggregate value, @@ -487,8 +513,7 @@ fn arg_local_refs( let variable_access = VariableAccess::DirectVariable { alloca: place.llval }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg_ty, scope, @@ -514,21 +539,21 @@ fn arg_local_refs( let local = |op| LocalRef::Operand(Some(op)); match arg.mode { PassMode::Ignore => { - return local(OperandRef::new_zst(bx.cx, arg.layout)); + return local(OperandRef::new_zst(bx.cx(), arg.layout)); } PassMode::Direct(_) => { - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(llarg, &name); llarg_idx += 1; return local( OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout)); } PassMode::Pair(..) => { - let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let a = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(a, &(name.clone() + ".0")); llarg_idx += 1; - let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let b = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(b, &(name + ".1")); llarg_idx += 1; @@ -545,25 +570,25 @@ fn arg_local_refs( // Don't copy an indirect argument to an alloca, the caller // already put it in a temporary alloca and gave it up. // FIXME: lifetimes - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); bx.set_value_name(llarg, &name); llarg_idx += 1; PlaceRef::new_sized(llarg, arg.layout, arg.layout.align) } else if arg.is_unsized_indirect() { // As the storage for the indirect argument lives during // the whole function call, we just copy the fat pointer. - let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); llarg_idx += 1; - let llextra = llvm::get_param(bx.llfn(), llarg_idx as c_uint); + let llextra = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name); - indirect_operand.store(&bx, tmp); + indirect_operand.store(bx, tmp); tmp } else { let tmp = PlaceRef::alloca(bx, arg.layout, &name); - arg.store_fn_arg(bx, &mut llarg_idx, tmp); + bx.store_fn_arg(arg, &mut llarg_idx, tmp); tmp }; arg_scope.map(|scope| { @@ -577,8 +602,7 @@ fn arg_local_refs( alloca: place.llval }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, arg_decl.name.unwrap_or(keywords::Invalid.name()), arg.layout.ty, @@ -593,7 +617,7 @@ fn arg_local_refs( // Or is it the closure environment? 
let (closure_layout, env_ref) = match arg.layout.ty.sty { ty::RawPtr(ty::TypeAndMut { ty, .. }) | - ty::Ref(_, ty, _) => (bx.cx.layout_of(ty), true), + ty::Ref(_, ty, _) => (bx.cx().layout_of(ty), true), _ => (arg.layout, false) }; @@ -612,10 +636,10 @@ fn arg_local_refs( // doesn't actually strip the offset when splitting the closure // environment into its components so it ends up out of bounds. // (cuviper) It seems to be fine without the alloca on LLVM 6 and later. - let env_alloca = !env_ref && unsafe { llvm::LLVMRustVersionMajor() < 6 }; + let env_alloca = !env_ref && bx.cx().env_alloca_allowed(); let env_ptr = if env_alloca { let scratch = PlaceRef::alloca(bx, - bx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)), + bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)), "__debuginfo_env_ptr"); bx.store(place.llval, scratch.llval, scratch.align); scratch.llval @@ -626,12 +650,7 @@ fn arg_local_refs( for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() { let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes(); - let ops = unsafe { - [llvm::LLVMRustDIBuilderCreateOpDeref(), - llvm::LLVMRustDIBuilderCreateOpPlusUconst(), - byte_offset_of_var_in_env as i64, - llvm::LLVMRustDIBuilderCreateOpDeref()] - }; + let ops = bx.cx().debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env); // The environment and the capture can each be indirect. @@ -650,8 +669,7 @@ fn arg_local_refs( alloca: env_ptr, address_operations: &ops }; - declare_local( - bx, + bx.declare_local( &fx.debug_context, decl.debug_name, ty, @@ -672,7 +690,7 @@ fn arg_local_refs( mod analyze; mod block; -mod constant; +pub mod constant; pub mod place; pub mod operand; mod rvalue; diff --git a/src/librustc_codegen_llvm/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs similarity index 59% rename from src/librustc_codegen_llvm/mir/operand.rs rename to src/librustc_codegen_ssa/mir/operand.rs index ab43531240f3f..ab37e149933e2 100644 --- a/src/librustc_codegen_llvm/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -10,38 +10,35 @@ use rustc::mir::interpret::{ConstValue, ConstEvalErr}; use rustc::mir; -use rustc::ty; -use rustc::ty::layout::{self, Align, LayoutOf, TyLayout}; +use rustc::ty::{self, Ty}; +use rustc::ty::layout::{self, Align, LayoutOf, TyLayout, HasTyCtxt}; use rustc_data_structures::sync::Lrc; use base; -use common::{CodegenCx, C_undef, C_usize}; -use builder::{Builder, MemFlags}; -use value::Value; -use type_of::LayoutLlvmExt; -use type_::Type; +use MemFlags; use glue; +use interfaces::*; + use std::fmt; use super::{FunctionCx, LocalRef}; -use super::constant::scalar_to_llvm; use super::place::PlaceRef; /// The representation of a Rust value. The enum variant is in fact /// uniquely determined by the value's type, but is kept as a /// safety check. #[derive(Copy, Clone, Debug)] -pub enum OperandValue<'ll> { +pub enum OperandValue { /// A reference to the actual operand. The data is guaranteed /// to be valid for the operand's lifetime. /// The second value, if any, is the extra data (vtable or length) /// which indicates that it refers to an unsized rvalue. - Ref(&'ll Value, Option<&'ll Value>, Align), + Ref(V, Option, Align), /// A single LLVM value. - Immediate(&'ll Value), + Immediate(V), /// A pair of immediate LLVM values. Used by fat pointers too. 
- Pair(&'ll Value, &'ll Value) + Pair(V, V) } /// An `OperandRef` is an "SSA" reference to a Rust value, along with @@ -53,37 +50,45 @@ pub enum OperandValue<'ll> { /// directly is sure to cause problems -- use `OperandRef::store` /// instead. #[derive(Copy, Clone)] -pub struct OperandRef<'ll, 'tcx> { +pub struct OperandRef<'tcx, V> { // The value. - pub val: OperandValue<'ll>, + pub val: OperandValue, // The layout of value, based on its Rust type. pub layout: TyLayout<'tcx>, } -impl fmt::Debug for OperandRef<'ll, 'tcx> { +impl fmt::Debug for OperandRef<'tcx, V> { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout) } } -impl OperandRef<'ll, 'tcx> { - pub fn new_zst(cx: &CodegenCx<'ll, 'tcx>, - layout: TyLayout<'tcx>) -> OperandRef<'ll, 'tcx> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { + pub fn new_zst>( + cx: &'a Cx, + layout: TyLayout<'tcx> + ) -> OperandRef<'tcx, V> where Cx : Backend<'ll, Value = V>, + &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { assert!(layout.is_zst()); OperandRef { - val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(cx))), + val: OperandValue::Immediate(cx.const_undef(cx.immediate_backend_type(&layout))), layout } } - pub fn from_const(bx: &Builder<'a, 'll, 'tcx>, - val: &'tcx ty::Const<'tcx>) - -> Result, Lrc>> { - let layout = bx.cx.layout_of(val.ty); + pub fn from_const>( + bx: &mut Bx, + val: &'tcx ty::Const<'tcx> + ) -> Result, Lrc>> where + Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { + let layout = bx.cx().layout_of(val.ty); if layout.is_zst() { - return Ok(OperandRef::new_zst(bx.cx, layout)); + return Ok(OperandRef::new_zst(bx.cx(), layout)); } let val = match val.val { @@ -93,11 +98,10 @@ impl OperandRef<'ll, 'tcx> { layout::Abi::Scalar(ref x) => x, _ => bug!("from_const: invalid ByVal layout: {:#?}", layout) }; - let llval = scalar_to_llvm( - bx.cx, + let llval = bx.cx().scalar_to_backend( x, scalar, - layout.immediate_llvm_type(bx.cx), + bx.cx().immediate_backend_type(&layout), ); OperandValue::Immediate(llval) }, @@ -106,15 +110,13 @@ impl OperandRef<'ll, 'tcx> { layout::Abi::ScalarPair(ref a, ref b) => (a, b), _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout) }; - let a_llval = scalar_to_llvm( - bx.cx, + let a_llval = bx.cx().scalar_to_backend( a, a_scalar, - layout.scalar_pair_element_llvm_type(bx.cx, 0, true), + bx.cx().scalar_pair_element_backend_type(&layout, 0, true), ); - let b_layout = layout.scalar_pair_element_llvm_type(bx.cx, 1, true); - let b_llval = scalar_to_llvm( - bx.cx, + let b_layout = bx.cx().scalar_pair_element_backend_type(&layout, 1, true); + let b_llval = bx.cx().scalar_to_backend( b, b_scalar, b_layout, @@ -122,7 +124,7 @@ impl OperandRef<'ll, 'tcx> { OperandValue::Pair(a_llval, b_llval) }, ConstValue::ByRef(_, alloc, offset) => { - return Ok(PlaceRef::from_const_alloc(bx, layout, alloc, offset).load(bx)); + return Ok(bx.load_ref(&bx.cx().from_const_alloc(layout, alloc, offset))); }, }; @@ -131,17 +133,25 @@ impl OperandRef<'ll, 'tcx> { layout }) } +} +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandRef<'tcx, V> { /// Asserts that this operand refers to a scalar and returns /// a reference to its value. 
- pub fn immediate(self) -> &'ll Value { + pub fn immediate(self) -> V { match self.val { OperandValue::Immediate(s) => s, _ => bug!("not immediate: {:?}", self) } } - pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'ll, 'tcx> { + pub fn deref>( + self, + cx: &'a Cx + ) -> PlaceRef<'tcx, V> where + Cx: Backend<'ll, Value=V>, + &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let projected_ty = self.layout.ty.builtin_deref(true) .unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty; let (llptr, llextra) = match self.val { @@ -160,15 +170,22 @@ impl OperandRef<'ll, 'tcx> { /// If this operand is a `Pair`, we return an aggregate with the two values. /// For other cases, see `immediate`. - pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value { + pub fn immediate_or_packed_pair>( + self, + bx: &mut Bx + ) -> V where Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { if let OperandValue::Pair(a, b) = self.val { - let llty = self.layout.llvm_type(bx.cx); + let llty = bx.cx().backend_type(&self.layout); debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}", self, llty); // Reconstruct the immediate aggregate. - let mut llpair = C_undef(llty); - llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0); - llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1); + let mut llpair = bx.cx().const_undef(llty); + let imm_a = base::from_immediate(bx, a); + let imm_b = base::from_immediate(bx, b); + llpair = bx.insert_value(llpair, imm_a, 0); + llpair = bx.insert_value(llpair, imm_b, 1); llpair } else { self.immediate() @@ -176,17 +193,23 @@ impl OperandRef<'ll, 'tcx> { } /// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`. - pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>, - llval: &'ll Value, - layout: TyLayout<'tcx>) - -> OperandRef<'ll, 'tcx> { + pub fn from_immediate_or_packed_pair>( + bx: &mut Bx, + llval: >::Value, + layout: TyLayout<'tcx> + ) -> OperandRef<'tcx, >::Value> + where Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi { debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout); // Deconstruct the immediate aggregate. - let a_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 0), a); - let b_llval = base::to_immediate_scalar(bx, bx.extract_value(llval, 1), b); + let a_llval = bx.extract_value(llval, 0); + let a_llval = base::to_immediate_scalar(bx, a_llval, a); + let b_llval = bx.extract_value(llval, 1); + let b_llval = base::to_immediate_scalar(bx, b_llval, b); OperandValue::Pair(a_llval, b_llval) } else { OperandValue::Immediate(llval) @@ -194,14 +217,20 @@ impl OperandRef<'ll, 'tcx> { OperandRef { val, layout } } - pub fn extract_field(&self, bx: &Builder<'a, 'll, 'tcx>, i: usize) -> OperandRef<'ll, 'tcx> { - let field = self.layout.field(bx.cx, i); + pub fn extract_field>( + &self, bx: &mut Bx, + i: usize + ) -> OperandRef<'tcx, >::Value> where + Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { + let field = self.layout.field(bx.cx(), i); let offset = self.layout.fields.offset(i); let mut val = match (self.val, &self.layout.abi) { // If the field is ZST, it has no data. 
_ if field.is_zst() => { - return OperandRef::new_zst(bx.cx, field); + return OperandRef::new_zst(bx.cx(), field); } // Newtype of a scalar, scalar pair or vector. @@ -214,12 +243,12 @@ impl OperandRef<'ll, 'tcx> { // Extract a scalar component from a pair. (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => { if offset.bytes() == 0 { - assert_eq!(field.size, a.value.size(bx.cx)); + assert_eq!(field.size, a.value.size(bx.cx())); OperandValue::Immediate(a_llval) } else { - assert_eq!(offset, a.value.size(bx.cx) - .abi_align(b.value.align(bx.cx))); - assert_eq!(field.size, b.value.size(bx.cx)); + assert_eq!(offset, a.value.size(bx.cx()) + .abi_align(b.value.align(bx.cx()))); + assert_eq!(field.size, b.value.size(bx.cx())); OperandValue::Immediate(b_llval) } } @@ -227,7 +256,7 @@ impl OperandRef<'ll, 'tcx> { // `#[repr(simd)]` types are also immediate. (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => { OperandValue::Immediate( - bx.extract_element(llval, C_usize(bx.cx, i as u64))) + bx.extract_element(llval, bx.cx().const_usize(i as u64))) } _ => bug!("OperandRef::extract_field({:?}): not applicable", self) @@ -236,11 +265,11 @@ impl OperandRef<'ll, 'tcx> { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. match val { OperandValue::Immediate(ref mut llval) => { - *llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx)); + *llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(&field)); } OperandValue::Pair(ref mut a, ref mut b) => { - *a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0, true)); - *b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1, true)); + *a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(&field, 0, true)); + *b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(&field, 1, true)); } OperandValue::Ref(..) 
=> bug!() } @@ -252,29 +281,55 @@ impl OperandRef<'ll, 'tcx> { } } -impl OperandValue<'ll> { - pub fn store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> OperandValue { + pub fn store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, >::Value> + ) where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { self.store_with_flags(bx, dest, MemFlags::empty()); } - pub fn volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn volatile_store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, >::Value> + ) where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { self.store_with_flags(bx, dest, MemFlags::VOLATILE); } - pub fn unaligned_volatile_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn unaligned_volatile_store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, >::Value> + ) where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED); } - pub fn nontemporal_store(self, bx: &Builder<'a, 'll, 'tcx>, dest: PlaceRef<'ll, 'tcx>) { + pub fn nontemporal_store>( + self, + bx: &mut Bx, + dest: PlaceRef<'tcx, >::Value> + ) where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL); } - fn store_with_flags( + fn store_with_flags>( self, - bx: &Builder<'a, 'll, 'tcx>, - dest: PlaceRef<'ll, 'tcx>, + bx: &mut Bx, + dest: PlaceRef<'tcx, >::Value>, flags: MemFlags, - ) { + ) where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest); // Avoid generating stores of zero-sized values, because the only way to have a zero-sized // value is through `undef`, and store itself is useless. @@ -302,8 +357,14 @@ impl OperandValue<'ll> { } } } - - pub fn store_unsized(self, bx: &Builder<'a, 'll, 'tcx>, indirect_dest: PlaceRef<'ll, 'tcx>) { + pub fn store_unsized>( + self, + bx: &mut Bx, + indirect_dest: PlaceRef<'tcx, V> + ) where + Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> + { debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest); let flags = MemFlags::empty(); @@ -323,21 +384,26 @@ impl OperandValue<'ll> { let min_align = Align::from_bits(8, 8).unwrap(); // Allocate an appropriate region on the stack, and copy the value into it - let (llsize, _) = glue::size_and_align_of_dst(&bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(Type::i8(bx.cx), llsize, "unsized_tmp", max_align); - base::call_memcpy(&bx, lldst, llptr, llsize, min_align, flags); + let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); + let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align); + bx.call_memcpy(lldst, llptr, llsize, min_align, flags); // Store the allocated region and the extra to the indirect place. 
let indirect_operand = OperandValue::Pair(lldst, llextra); - indirect_operand.store(&bx, indirect_dest); + indirect_operand.store(bx, indirect_dest); } } -impl FunctionCx<'a, 'll, 'tcx> { - fn maybe_codegen_consume_direct(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - place: &mir::Place<'tcx>) - -> Option> +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> + where &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + fn maybe_codegen_consume_direct>( + &mut self, + bx: &mut Bx, + place: &mir::Place<'tcx> + ) -> Option> where + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { debug!("maybe_codegen_consume_direct(place={:?})", place); @@ -369,9 +435,9 @@ impl FunctionCx<'a, 'll, 'tcx> { // ZSTs don't require any actual memory access. // FIXME(eddyb) deduplicate this with the identical // checks in `codegen_consume` and `extract_field`. - let elem = o.layout.field(bx.cx, 0); + let elem = o.layout.field(bx.cx(), 0); if elem.is_zst() { - return Some(OperandRef::new_zst(bx.cx, elem)); + return Some(OperandRef::new_zst(bx.cx(), elem)); } } _ => {} @@ -382,19 +448,21 @@ impl FunctionCx<'a, 'll, 'tcx> { None } - pub fn codegen_consume(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - place: &mir::Place<'tcx>) - -> OperandRef<'ll, 'tcx> + pub fn codegen_consume>( + &mut self, + bx: &mut Bx, + place: &mir::Place<'tcx> + ) -> OperandRef<'tcx, Cx::Value> where + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { debug!("codegen_consume(place={:?})", place); let ty = self.monomorphized_place_ty(place); - let layout = bx.cx.layout_of(ty); + let layout = bx.cx().layout_of(ty); // ZSTs don't require any actual memory access. if layout.is_zst() { - return OperandRef::new_zst(bx.cx, layout); + return OperandRef::new_zst(bx.cx(), layout); } if let Some(o) = self.maybe_codegen_consume_direct(bx, place) { @@ -403,13 +471,17 @@ impl FunctionCx<'a, 'll, 'tcx> { // for most places, to consume them we just load them // out from their home - self.codegen_place(bx, place).load(bx) + let addr = self.codegen_place(bx, place); + bx.load_ref(&addr) } - pub fn codegen_operand(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - operand: &mir::Operand<'tcx>) - -> OperandRef<'ll, 'tcx> + pub fn codegen_operand>( + &mut self, + bx: &mut Bx, + operand: &mir::Operand<'tcx> + ) -> OperandRef<'tcx, Cx::Value> where + Bx : BuilderMethods<'a, 'll, 'tcx, CodegenCx=Cx>, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> { debug!("codegen_operand(operand={:?})", operand); @@ -430,15 +502,15 @@ impl FunctionCx<'a, 'll, 'tcx> { ); // Allow RalfJ to sleep soundly knowing that even refactorings that remove // the above error (or silence it under some conditions) will not cause UB - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); // We've errored, so we don't have to produce working code. 
- let layout = bx.cx.layout_of(ty); - PlaceRef::new_sized( - C_undef(layout.llvm_type(bx.cx).ptr_to()), + let layout = bx.cx().layout_of(ty); + bx.load_ref(&PlaceRef::new_sized( + bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(&layout))), layout, layout.align, - ).load(bx) + )) }) } } diff --git a/src/librustc_codegen_llvm/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs similarity index 59% rename from src/librustc_codegen_llvm/mir/place.rs rename to src/librustc_codegen_ssa/mir/place.rs index e7b6f5908a4d1..c03be9c63b544 100644 --- a/src/librustc_codegen_llvm/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -8,31 +8,25 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm::{self, LLVMConstInBoundsGEP}; use rustc::ty::{self, Ty}; -use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size}; +use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt}; use rustc::mir; use rustc::mir::tcx::PlaceTy; -use base; -use builder::Builder; -use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big}; -use consts; -use type_of::LayoutLlvmExt; -use type_::Type; -use value::Value; +use common::IntPredicate; use glue; -use mir::constant::const_alloc_to_llvm; + +use interfaces::*; use super::{FunctionCx, LocalRef}; -use super::operand::{OperandRef, OperandValue}; +use super::operand::OperandValue; #[derive(Copy, Clone, Debug)] -pub struct PlaceRef<'ll, 'tcx> { +pub struct PlaceRef<'tcx, V> { /// Pointer to the contents of the place - pub llval: &'ll Value, + pub llval: V, /// This place's extra data if it is unsized, or null - pub llextra: Option<&'ll Value>, + pub llextra: Option, /// Monomorphized type of this place, including variant information pub layout: TyLayout<'tcx>, @@ -41,12 +35,12 @@ pub struct PlaceRef<'ll, 'tcx> { pub align: Align, } -impl PlaceRef<'ll, 'tcx> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { pub fn new_sized( - llval: &'ll Value, + llval: V, layout: TyLayout<'tcx>, align: Align, - ) -> PlaceRef<'ll, 'tcx> { + ) -> PlaceRef<'tcx, V> { assert!(!layout.is_unsized()); PlaceRef { llval, @@ -56,126 +50,71 @@ impl PlaceRef<'ll, 'tcx> { } } - pub fn from_const_alloc( - bx: &Builder<'a, 'll, 'tcx>, + pub fn alloca>( + bx: &mut Bx, layout: TyLayout<'tcx>, - alloc: &mir::interpret::Allocation, - offset: Size, - ) -> PlaceRef<'ll, 'tcx> { - let init = const_alloc_to_llvm(bx.cx, alloc); - let base_addr = consts::addr_of(bx.cx, init, layout.align, None); - - let llval = unsafe { LLVMConstInBoundsGEP( - consts::bitcast(base_addr, Type::i8p(bx.cx)), - &C_usize(bx.cx, offset.bytes()), - 1, - )}; - let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to()); - PlaceRef::new_sized(llval, layout, alloc.align) - } - - pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) - -> PlaceRef<'ll, 'tcx> { + name: &str + ) -> PlaceRef<'tcx, V> where Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align); + let tmp = bx.alloca(bx.cx().backend_type(&layout), name, layout.align); Self::new_sized(tmp, layout, layout.align) } /// Returns a place for an indirect reference to an unsized place. 
- pub fn alloca_unsized_indirect(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str) - -> PlaceRef<'ll, 'tcx> { + pub fn alloca_unsized_indirect>( + bx: &mut Bx, + layout: TyLayout<'tcx>, + name: &str + ) -> PlaceRef<'tcx, V> + where &'a Bx::CodegenCx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx>, + Bx::CodegenCx : Backend<'ll, Value=V> + { debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); - let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty); - let ptr_layout = bx.cx.layout_of(ptr_ty); + let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty); + let ptr_layout = bx.cx().layout_of(ptr_ty); Self::alloca(bx, ptr_layout, name) } - pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value { + pub fn len>( + &self, + cx: &Cx + ) -> V where Cx : Backend<'ll, Value=V>, + &'a Cx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { if let layout::FieldPlacement::Array { count, .. } = self.layout.fields { if self.layout.is_unsized() { assert_eq!(count, 0); self.llextra.unwrap() } else { - C_usize(cx, count) + cx.const_usize(count) } } else { bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout) } } - pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'ll, 'tcx> { - debug!("PlaceRef::load: {:?}", self); - - assert_eq!(self.llextra.is_some(), self.layout.is_unsized()); - - if self.layout.is_zst() { - return OperandRef::new_zst(bx.cx, self.layout); - } - - let scalar_load_metadata = |load, scalar: &layout::Scalar| { - let vr = scalar.valid_range.clone(); - match scalar.value { - layout::Int(..) => { - let range = scalar.valid_range_exclusive(bx.cx); - if range.start != range.end { - bx.range_metadata(load, range); - } - } - layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => { - bx.nonnull_metadata(load); - } - _ => {} - } - }; - - let val = if let Some(llextra) = self.llextra { - OperandValue::Ref(self.llval, Some(llextra), self.align) - } else if self.layout.is_llvm_immediate() { - let mut const_llval = None; - unsafe { - if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) { - if llvm::LLVMIsGlobalConstant(global) == llvm::True { - const_llval = llvm::LLVMGetInitializer(global); - } - } - } - let llval = const_llval.unwrap_or_else(|| { - let load = bx.load(self.llval, self.align); - if let layout::Abi::Scalar(ref scalar) = self.layout.abi { - scalar_load_metadata(load, scalar); - } - load - }); - OperandValue::Immediate(base::to_immediate(bx, llval, self.layout)) - } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { - let load = |i, scalar: &layout::Scalar| { - let llptr = bx.struct_gep(self.llval, i as u64); - let load = bx.load(llptr, self.align); - scalar_load_metadata(load, scalar); - if scalar.is_bool() { - bx.trunc(load, Type::i1(bx.cx)) - } else { - load - } - }; - OperandValue::Pair(load(0, a), load(1, b)) - } else { - OperandValue::Ref(self.llval, None, self.align) - }; - - OperandRef { val, layout: self.layout } - } +} +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { /// Access a field, at a point when the value's case is known. 
- pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'ll, 'tcx> { - let cx = bx.cx; + pub fn project_field>( + self, bx: &mut Bx, + ix: usize + ) -> PlaceRef<'tcx, >::Value> + where + Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { + let cx = bx.cx(); let field = self.layout.field(cx, ix); let offset = self.layout.fields.offset(ix); let effective_field_align = self.align.restrict_for_offset(offset); - let simple = || { + let mut simple = || { // Unions and newtypes only use an offset of 0. let llval = if offset.bytes() == 0 { self.llval @@ -184,11 +123,11 @@ impl PlaceRef<'ll, 'tcx> { assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx))); bx.struct_gep(self.llval, 1) } else { - bx.struct_gep(self.llval, self.layout.llvm_field_index(ix)) + bx.struct_gep(self.llval, bx.cx().backend_field_index(&self.layout, ix)) }; PlaceRef { // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types. - llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()), + llval: bx.pointercast(llval, cx.type_ptr_to(cx.backend_type(&field))), llextra: if cx.type_has_metadata(field.ty) { self.llextra } else { @@ -239,7 +178,7 @@ impl PlaceRef<'ll, 'tcx> { let meta = self.llextra; - let unaligned_offset = C_usize(cx, offset.bytes()); + let unaligned_offset = cx.const_usize(offset.bytes()); // Get the alignment of the field let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); @@ -250,22 +189,23 @@ impl PlaceRef<'ll, 'tcx> { // (unaligned offset + (align - 1)) & -align // Calculate offset - let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64)); - let offset = bx.and(bx.add(unaligned_offset, align_sub_1), - bx.neg(unsized_align)); + let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64)); + let and_lhs = bx.add(unaligned_offset, align_sub_1); + let and_rhs = bx.neg(unsized_align); + let offset = bx.and(and_lhs, and_rhs); debug!("struct_field_ptr: DST field offset: {:?}", offset); // Cast and adjust pointer - let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx)); + let byte_ptr = bx.pointercast(self.llval, cx.type_i8p()); let byte_ptr = bx.gep(byte_ptr, &[offset]); // Finally, cast back to the type expected - let ll_fty = field.llvm_type(cx); + let ll_fty = cx.backend_type(&field); debug!("struct_field_ptr: Field type is {:?}", ll_fty); PlaceRef { - llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()), + llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)), llextra: self.llextra, layout: field, align: effective_field_align, @@ -273,24 +213,31 @@ impl PlaceRef<'ll, 'tcx> { } /// Obtain the actual discriminant of a value. 
- pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) -> &'ll Value { - let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx); + pub fn codegen_get_discr>( + self, + bx: &mut Bx, + cast_to: Ty<'tcx> + ) -> V where + Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { + let cast_to = bx.cx().immediate_backend_type(&bx.cx().layout_of(cast_to)); if self.layout.abi.is_uninhabited() { - return C_undef(cast_to); + return bx.cx().const_undef(cast_to); } match self.layout.variants { layout::Variants::Single { index } => { let discr_val = self.layout.ty.ty_adt_def().map_or( index as u128, - |def| def.discriminant_for_variant(bx.cx.tcx, index).val); - return C_uint_big(cast_to, discr_val); + |def| def.discriminant_for_variant(*bx.cx().tcx(), index).val); + return bx.cx().const_uint_big(cast_to, discr_val); } layout::Variants::Tagged { .. } | layout::Variants::NicheFilling { .. } => {}, } let discr = self.project_field(bx, 0); - let lldiscr = discr.load(bx).immediate(); + let lldiscr = bx.load_ref(&discr).immediate(); match self.layout.variants { layout::Variants::Single { .. } => bug!(), layout::Variants::Tagged { ref tag, .. } => { @@ -310,26 +257,29 @@ impl PlaceRef<'ll, 'tcx> { niche_start, .. } => { - let niche_llty = discr.layout.immediate_llvm_type(bx.cx); + let niche_llty = bx.cx().immediate_backend_type(&discr.layout); if niche_variants.start() == niche_variants.end() { // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_start == 0 { - // HACK(eddyb) Using `C_null` as it works on all types. - C_null(niche_llty) + // HACK(eddyb) Using `c_null` as it works on all types. + bx.cx().const_null(niche_llty) } else { - C_uint_big(niche_llty, niche_start) + bx.cx().const_uint_big(niche_llty, niche_start) }; - bx.select(bx.icmp(llvm::IntEQ, lldiscr, niche_llval), - C_uint(cast_to, *niche_variants.start() as u64), - C_uint(cast_to, dataful_variant as u64)) + let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval); + bx.select(select_arg, + bx.cx().const_uint(cast_to, *niche_variants.start() as u64), + bx.cx().const_uint(cast_to, dataful_variant as u64)) } else { // Rebase from niche values to discriminant values. let delta = niche_start.wrapping_sub(*niche_variants.start() as u128); - let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta)); - let lldiscr_max = C_uint(niche_llty, *niche_variants.end() as u64); - bx.select(bx.icmp(llvm::IntULE, lldiscr, lldiscr_max), - bx.intcast(lldiscr, cast_to, false), - C_uint(cast_to, dataful_variant as u64)) + let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta)); + let lldiscr_max = bx.cx().const_uint(niche_llty, *niche_variants.end() as u64); + let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max); + let cast = bx.intcast(lldiscr, cast_to, false); + bx.select(select_arg, + cast, + bx.cx().const_uint(cast_to, dataful_variant as u64)) } } } @@ -337,8 +287,15 @@ impl PlaceRef<'ll, 'tcx> { /// Set the discriminant for a new value of the given case of the given /// representation. 
- pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize) { - if self.layout.for_variant(bx.cx, variant_index).abi.is_uninhabited() { + pub fn codegen_set_discr>( + &self, + bx: &mut Bx, + variant_index: usize + ) where + Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { + if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() { return; } match self.layout.variants { @@ -351,7 +308,7 @@ impl PlaceRef<'ll, 'tcx> { .discriminant_for_variant(bx.tcx(), variant_index) .val; bx.store( - C_uint_big(ptr.layout.llvm_type(bx.cx), to), + bx.cx().const_uint_big(bx.cx().backend_type(&ptr.layout), to), ptr.llval, ptr.align); } @@ -362,75 +319,104 @@ impl PlaceRef<'ll, 'tcx> { .. } => { if variant_index != dataful_variant { - if bx.sess().target.target.arch == "arm" || - bx.sess().target.target.arch == "aarch64" { + if bx.cx().sess().target.target.arch == "arm" || + bx.cx().sess().target.target.arch == "aarch64" { // Issue #34427: As workaround for LLVM bug on ARM, // use memset of 0 before assigning niche value. - let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to()); - let fill_byte = C_u8(bx.cx, 0); + let llptr = bx.pointercast( + self.llval, + bx.cx().type_ptr_to(bx.cx().type_i8()) + ); + let fill_byte = bx.cx().const_u8(0); let (size, align) = self.layout.size_and_align(); - let size = C_usize(bx.cx, size.bytes()); - let align = C_u32(bx.cx, align.abi() as u32); - base::call_memset(bx, llptr, fill_byte, size, align, false); + let size = bx.cx().const_usize(size.bytes()); + let align = bx.cx().const_u32(align.abi() as u32); + bx.call_memset(llptr, fill_byte, size, align, false); } let niche = self.project_field(bx, 0); - let niche_llty = niche.layout.immediate_llvm_type(bx.cx); + let niche_llty = bx.cx().immediate_backend_type(&niche.layout); let niche_value = ((variant_index - *niche_variants.start()) as u128) .wrapping_add(niche_start); // FIXME(eddyb) Check the actual primitive type here. let niche_llval = if niche_value == 0 { - // HACK(eddyb) Using `C_null` as it works on all types. - C_null(niche_llty) + // HACK(eddyb) Using `c_null` as it works on all types. 
+ bx.cx().const_null(niche_llty) } else { - C_uint_big(niche_llty, niche_value) + bx.cx().const_uint_big(niche_llty, niche_value) }; OperandValue::Immediate(niche_llval).store(bx, niche); } } } } +} - pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value) - -> PlaceRef<'ll, 'tcx> { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { + pub fn project_index>( + &self, + bx: &mut Bx, + llindex: V + ) -> PlaceRef<'tcx, V> where + Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { PlaceRef { - llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]), + llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]), llextra: None, - layout: self.layout.field(bx.cx, 0), + layout: self.layout.field(bx.cx(), 0), align: self.align } } - pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: usize) - -> PlaceRef<'ll, 'tcx> { + pub fn project_downcast>( + &self, + bx: &mut Bx, + variant_index: usize + ) -> PlaceRef<'tcx, V> where + Bx::CodegenCx : Backend<'ll, Value=V>, + &'a Bx::CodegenCx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { let mut downcast = *self; - downcast.layout = self.layout.for_variant(bx.cx, variant_index); + downcast.layout = self.layout.for_variant(bx.cx(), variant_index); // Cast to the appropriate variant struct type. - let variant_ty = downcast.layout.llvm_type(bx.cx); - downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to()); + let variant_ty = bx.cx().backend_type(&downcast.layout); + downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty)); downcast } +} - pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) { +impl<'a, 'll: 'a, 'tcx: 'll, V : 'll + CodegenObject> PlaceRef<'tcx, V> { + pub fn storage_live>(&self, bx: &mut Bx) + where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { bx.lifetime_start(self.llval, self.layout.size); } - pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) { + pub fn storage_dead>(&self, bx: &mut Bx) + where Bx::CodegenCx : Backend<'ll, Value = V>, + &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> + { bx.lifetime_end(self.llval, self.layout.size); } } -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_place(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - place: &mir::Place<'tcx>) - -> PlaceRef<'ll, 'tcx> { +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx: LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn codegen_place>( + &mut self, + bx: &mut Bx, + place: &mir::Place<'tcx> + ) -> PlaceRef<'tcx, Cx::Value> { debug!("codegen_place(place={:?})", place); - let cx = bx.cx; - let tcx = cx.tcx; + let cx = bx.cx(); + let tcx = cx.tcx(); if let mir::Place::Local(index) = *place { match self.locals[index] { @@ -438,7 +424,7 @@ impl FunctionCx<'a, 'll, 'tcx> { return place; } LocalRef::UnsizedPlace(place) => { - return place.load(bx).deref(&cx); + return bx.load_ref(&place).deref(cx); } LocalRef::Operand(..) 
=> { bug!("using operand local {:?} as place", place); @@ -458,7 +444,7 @@ impl FunctionCx<'a, 'll, 'tcx> { match bx.tcx().const_eval(param_env.and(cid)) { Ok(val) => match val.val { mir::interpret::ConstValue::ByRef(_, alloc, offset) => { - PlaceRef::from_const_alloc(bx, layout, alloc, offset) + bx.cx().from_const_alloc(layout, alloc, offset) } _ => bug!("promoteds should have an allocation: {:?}", val), }, @@ -467,23 +453,25 @@ impl FunctionCx<'a, 'll, 'tcx> { // and compile-time agree on values // With floats that won't always be true // so we generate an abort - let fnname = bx.cx.get_intrinsic(&("llvm.trap")); + let fnname = bx.cx().get_intrinsic(&("llvm.trap")); bx.call(fnname, &[], None); - let llval = C_undef(layout.llvm_type(bx.cx).ptr_to()); + let llval = bx.cx().const_undef( + bx.cx().type_ptr_to(bx.cx().backend_type(&layout)) + ); PlaceRef::new_sized(llval, layout, layout.align) } } } mir::Place::Static(box mir::Static { def_id, ty }) => { let layout = cx.layout_of(self.monomorphize(&ty)); - PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align) + PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align) }, mir::Place::Projection(box mir::Projection { ref base, elem: mir::ProjectionElem::Deref }) => { // Load the pointer from its location. - self.codegen_consume(bx, base).deref(bx.cx) + self.codegen_consume(bx, base).deref(bx.cx()) } mir::Place::Projection(ref projection) => { let cg_base = self.codegen_place(bx, &projection.base); @@ -502,33 +490,33 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => { - let lloffset = C_usize(bx.cx, offset as u64); + let lloffset = bx.cx().const_usize(offset as u64); cg_base.project_index(bx, lloffset) } mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => { - let lloffset = C_usize(bx.cx, offset as u64); - let lllen = cg_base.len(bx.cx); + let lloffset = bx.cx().const_usize(offset as u64); + let lllen = cg_base.len(bx.cx()); let llindex = bx.sub(lllen, lloffset); cg_base.project_index(bx, llindex) } mir::ProjectionElem::Subslice { from, to } => { let mut subslice = cg_base.project_index(bx, - C_usize(bx.cx, from as u64)); + bx.cx().const_usize(from as u64)); let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty } - .projection_ty(tcx, &projection.elem).to_ty(bx.tcx()); - subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty)); + .projection_ty(*tcx, &projection.elem).to_ty(bx.tcx()); + subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty)); if subslice.layout.is_unsized() { subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(), - C_usize(bx.cx, (from as u64) + (to as u64)))); + bx.cx().const_usize((from as u64) + (to as u64)))); } // Cast the place pointer type to the new // array or slice type (*[%_; new_len]). 
subslice.llval = bx.pointercast(subslice.llval, - subslice.layout.llvm_type(bx.cx).ptr_to()); + bx.cx().type_ptr_to(bx.cx().backend_type(&subslice.layout))); subslice } @@ -543,8 +531,8 @@ impl FunctionCx<'a, 'll, 'tcx> { } pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> { - let tcx = self.cx.tcx; - let place_ty = place.ty(self.mir, tcx); - self.monomorphize(&place_ty.to_ty(tcx)) + let tcx = self.cx.tcx(); + let place_ty = place.ty(self.mir, *tcx); + self.monomorphize(&place_ty.to_ty(*tcx)) } } diff --git a/src/librustc_codegen_llvm/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs similarity index 68% rename from src/librustc_codegen_llvm/mir/rvalue.rs rename to src/librustc_codegen_ssa/mir/rvalue.rs index fa22bdff94ddd..7a97bf20c02a1 100644 --- a/src/librustc_codegen_llvm/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -8,57 +8,55 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -use llvm; use rustc::ty::{self, Ty}; use rustc::ty::cast::{CastTy, IntTy}; -use rustc::ty::layout::{self, LayoutOf}; +use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, TyLayout}; use rustc::mir; use rustc::middle::lang_items::ExchangeMallocFnLangItem; use rustc_apfloat::{ieee, Float, Status, Round}; use std::{u128, i128}; use base; -use builder::Builder; use callee; -use common::{self, val_ty}; -use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize, C_uint, C_uint_big}; -use consts; -use monomorphize; -use type_::Type; -use type_of::LayoutLlvmExt; -use value::Value; +use common::{self, RealPredicate, IntPredicate}; +use rustc_mir::monomorphize; + +use interfaces::*; use super::{FunctionCx, LocalRef}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_rvalue(&mut self, - bx: Builder<'a, 'll, 'tcx>, - dest: PlaceRef<'ll, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx> - { +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn codegen_rvalue>( + &mut self, + mut bx: Bx, + dest: PlaceRef<'tcx, Cx::Value>, + rvalue: &mir::Rvalue<'tcx> + ) -> Bx { debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})", dest.llval, rvalue); match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&bx, operand); + let cg_operand = self.codegen_operand(&mut bx, operand); // FIXME: consider not copying constants through stack. (fixable by codegenning // constants into OperandValue::Ref, why don’t we do that yet if we don’t?) - cg_operand.val.store(&bx, dest); + cg_operand.val.store(&mut bx, dest); bx } mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => { // The destination necessarily contains a fat pointer, so if // it's a scalar pair, it's a fat pointer or newtype thereof. - if dest.layout.is_llvm_scalar_pair() { + if bx.cx().is_backend_scalar_pair(&dest.layout) { // into-coerce of a thin pointer to a fat pointer - just // use the operand path. - let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&bx, dest); + let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(&mut bx, dest); return bx; } @@ -66,7 +64,7 @@ impl FunctionCx<'a, 'll, 'tcx> { // this to be eliminated by MIR building, but // `CoerceUnsized` can be passed by a where-clause, // so the (generic) MIR may not be able to expand it. 
- let operand = self.codegen_operand(&bx, source); + let operand = self.codegen_operand(&mut bx, source); match operand.val { OperandValue::Pair(..) | OperandValue::Immediate(_) => { @@ -77,15 +75,15 @@ impl FunctionCx<'a, 'll, 'tcx> { // index into the struct, and this case isn't // important enough for it. debug!("codegen_rvalue: creating ugly alloca"); - let scratch = PlaceRef::alloca(&bx, operand.layout, "__unsize_temp"); - scratch.storage_live(&bx); - operand.val.store(&bx, scratch); - base::coerce_unsized_into(&bx, scratch, dest); - scratch.storage_dead(&bx); + let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp"); + scratch.storage_live(&mut bx); + operand.val.store(&mut bx, scratch); + base::coerce_unsized_into(&mut bx, scratch, dest); + scratch.storage_dead(&mut bx); } OperandValue::Ref(llref, None, align) => { let source = PlaceRef::new_sized(llref, operand.layout, align); - base::coerce_unsized_into(&bx, source, dest); + base::coerce_unsized_into(&mut bx, source, dest); } OperandValue::Ref(_, Some(_), _) => { bug!("unsized coercion on an unsized rvalue") @@ -95,51 +93,51 @@ impl FunctionCx<'a, 'll, 'tcx> { } mir::Rvalue::Repeat(ref elem, count) => { - let cg_elem = self.codegen_operand(&bx, elem); + let cg_elem = self.codegen_operand(&mut bx, elem); // Do not generate the loop for zero-sized elements or empty arrays. if dest.layout.is_zst() { return bx; } - - let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval; + let zero = bx.cx().const_usize(0); + let start = dest.project_index(&mut bx, zero).llval; if let OperandValue::Immediate(v) = cg_elem.val { - let align = C_i32(bx.cx, dest.align.abi() as i32); - let size = C_usize(bx.cx, dest.layout.size.bytes()); + let align = bx.cx().const_i32(dest.align.abi() as i32); + let size = bx.cx().const_usize(dest.layout.size.bytes()); // Use llvm.memset.p0i8.* to initialize all zero arrays - if common::is_const_integral(v) && common::const_to_uint(v) == 0 { - let fill = C_u8(bx.cx, 0); - base::call_memset(&bx, start, fill, size, align, false); + if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 { + let fill = bx.cx().const_u8(0); + bx.call_memset(start, fill, size, align, false); return bx; } // Use llvm.memset.p0i8.* to initialize byte arrays - let v = base::from_immediate(&bx, v); - if common::val_ty(v) == Type::i8(bx.cx) { - base::call_memset(&bx, start, v, size, align, false); + let v = base::from_immediate(&mut bx, v); + if bx.cx().val_ty(v) == bx.cx().type_i8() { + bx.call_memset(start, v, size, align, false); return bx; } } - let count = C_usize(bx.cx, count); - let end = dest.project_index(&bx, count).llval; + let count = bx.cx().const_usize(count); + let end = dest.project_index(&mut bx, count).llval; - let header_bx = bx.build_sibling_block("repeat_loop_header"); - let body_bx = bx.build_sibling_block("repeat_loop_body"); + let mut header_bx = bx.build_sibling_block("repeat_loop_header"); + let mut body_bx = bx.build_sibling_block("repeat_loop_body"); let next_bx = bx.build_sibling_block("repeat_loop_next"); bx.br(header_bx.llbb()); - let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]); + let current = header_bx.phi(bx.cx().val_ty(start), &[start], &[bx.llbb()]); - let keep_going = header_bx.icmp(llvm::IntNE, current, end); + let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end); header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb()); - cg_elem.val.store(&body_bx, + cg_elem.val.store(&mut body_bx, PlaceRef::new_sized(current, cg_elem.layout, 
dest.align)); - let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]); + let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]); body_bx.br(header_bx.llbb()); header_bx.add_incoming_to_phi(current, next, body_bx.llbb()); @@ -149,9 +147,9 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::Rvalue::Aggregate(ref kind, ref operands) => { let (dest, active_field_index) = match **kind { mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => { - dest.codegen_set_discr(&bx, variant_index); + dest.codegen_set_discr(&mut bx, variant_index); if adt_def.is_enum() { - (dest.project_downcast(&bx, variant_index), active_field_index) + (dest.project_downcast(&mut bx, variant_index), active_field_index) } else { (dest, active_field_index) } @@ -159,11 +157,12 @@ impl FunctionCx<'a, 'll, 'tcx> { _ => (dest, None) }; for (i, operand) in operands.iter().enumerate() { - let op = self.codegen_operand(&bx, operand); + let op = self.codegen_operand(&mut bx, operand); // Do not generate stores and GEPis for zero-sized fields. if !op.layout.is_zst() { let field_index = active_field_index.unwrap_or(i); - op.val.store(&bx, dest.project_field(&bx, field_index)); + let field = dest.project_field(&mut bx, field_index); + op.val.store(&mut bx, field); } } bx @@ -171,26 +170,26 @@ impl FunctionCx<'a, 'll, 'tcx> { _ => { assert!(self.rvalue_creates_operand(rvalue)); - let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue); - temp.val.store(&bx, dest); + let (mut bx, temp) = self.codegen_rvalue_operand(bx, rvalue); + temp.val.store(&mut bx, dest); bx } } } - pub fn codegen_rvalue_unsized(&mut self, - bx: Builder<'a, 'll, 'tcx>, - indirect_dest: PlaceRef<'ll, 'tcx>, + pub fn codegen_rvalue_unsized>(&mut self, + mut bx: Bx, + indirect_dest: PlaceRef<'tcx, Cx::Value>, rvalue: &mir::Rvalue<'tcx>) - -> Builder<'a, 'll, 'tcx> + -> Bx { debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})", indirect_dest.llval, rvalue); match *rvalue { mir::Rvalue::Use(ref operand) => { - let cg_operand = self.codegen_operand(&bx, operand); - cg_operand.val.store_unsized(&bx, indirect_dest); + let cg_operand = self.codegen_operand(&mut bx, operand); + cg_operand.val.store_unsized(&mut bx, indirect_dest); bx } @@ -198,29 +197,29 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - pub fn codegen_rvalue_operand(&mut self, - bx: Builder<'a, 'll, 'tcx>, - rvalue: &mir::Rvalue<'tcx>) - -> (Builder<'a, 'll, 'tcx>, OperandRef<'ll, 'tcx>) - { + pub fn codegen_rvalue_operand>( + &mut self, + mut bx: Bx, + rvalue: &mir::Rvalue<'tcx> + ) -> (Bx, OperandRef<'tcx, Cx::Value>) { assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue); match *rvalue { mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => { - let operand = self.codegen_operand(&bx, source); + let operand = self.codegen_operand(&mut bx, source); debug!("cast operand is {:?}", operand); - let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty)); + let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty)); let val = match *kind { mir::CastKind::ReifyFnPointer => { match operand.layout.ty.sty { ty::FnDef(def_id, substs) => { - if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") { + if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") { bug!("reifying a fn ptr that requires \ const arguments"); } OperandValue::Immediate( - callee::resolve_and_get_fn(bx.cx, def_id, substs)) + callee::resolve_and_get_fn(bx.cx(), def_id, substs)) } _ => { bug!("{} cannot be reified to a fn ptr", operand.layout.ty) @@ -231,8 
+230,8 @@ impl FunctionCx<'a, 'll, 'tcx> { match operand.layout.ty.sty { ty::Closure(def_id, substs) => { let instance = monomorphize::resolve_closure( - bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce); - OperandValue::Immediate(callee::get_fn(bx.cx, instance)) + *bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce); + OperandValue::Immediate(bx.cx().get_fn(instance)) } _ => { bug!("{} cannot be cast to a fn ptr", operand.layout.ty) @@ -244,7 +243,7 @@ impl FunctionCx<'a, 'll, 'tcx> { operand.val } mir::CastKind::Unsize => { - assert!(cast.is_llvm_scalar_pair()); + assert!(bx.cx().is_backend_scalar_pair(&cast)); match operand.val { OperandValue::Pair(lldata, llextra) => { // unsize from a fat pointer - this is a @@ -255,12 +254,12 @@ impl FunctionCx<'a, 'll, 'tcx> { // HACK(eddyb) have to bitcast pointers // until LLVM removes pointee types. let lldata = bx.pointercast(lldata, - cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); + bx.cx().scalar_pair_element_backend_type(&cast, 0, true)); OperandValue::Pair(lldata, llextra) } OperandValue::Immediate(lldata) => { // "standard" unsize - let (lldata, llextra) = base::unsize_thin_ptr(&bx, lldata, + let (lldata, llextra) = base::unsize_thin_ptr(&mut bx, lldata, operand.layout.ty, cast.ty); OperandValue::Pair(lldata, llextra) } @@ -270,16 +269,16 @@ impl FunctionCx<'a, 'll, 'tcx> { } } } - mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => { + mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(&operand.layout) => { if let OperandValue::Pair(data_ptr, meta) = operand.val { - if cast.is_llvm_scalar_pair() { + if bx.cx().is_backend_scalar_pair(&cast) { let data_cast = bx.pointercast(data_ptr, - cast.scalar_pair_element_llvm_type(bx.cx, 0, true)); + bx.cx().scalar_pair_element_backend_type(&cast, 0, true)); OperandValue::Pair(data_cast, meta) } else { // cast to thin-ptr // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and // pointer-cast of that pointer to desired pointer type. - let llcast_ty = cast.immediate_llvm_type(bx.cx); + let llcast_ty = bx.cx().immediate_backend_type(&cast); let llval = bx.pointercast(data_ptr, llcast_ty); OperandValue::Immediate(llval) } @@ -288,25 +287,26 @@ impl FunctionCx<'a, 'll, 'tcx> { } } mir::CastKind::Misc => { - assert!(cast.is_llvm_immediate()); - let ll_t_out = cast.immediate_llvm_type(bx.cx); + assert!(bx.cx().is_backend_immediate(&cast)); + let ll_t_out = bx.cx().immediate_backend_type(&cast); if operand.layout.abi.is_uninhabited() { + let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out)); return (bx, OperandRef { - val: OperandValue::Immediate(C_undef(ll_t_out)), + val, layout: cast, }); } let r_t_in = CastTy::from_ty(operand.layout.ty) .expect("bad input type for cast"); let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast"); - let ll_t_in = operand.layout.immediate_llvm_type(bx.cx); + let ll_t_in = bx.cx().immediate_backend_type(&operand.layout); match operand.layout.variants { layout::Variants::Single { index } => { if let Some(def) = operand.layout.ty.ty_adt_def() { let discr_val = def - .discriminant_for_variant(bx.cx.tcx, index) + .discriminant_for_variant(*bx.cx().tcx(), index) .val; - let discr = C_uint_big(ll_t_out, discr_val); + let discr = bx.cx().const_uint_big(ll_t_out, discr_val); return (bx, OperandRef { val: OperandValue::Immediate(discr), layout: cast, @@ -327,18 +327,22 @@ impl FunctionCx<'a, 'll, 'tcx> { // then `i1 1` (i.e. E::B) is effectively `i8 -1`. 
signed = !scalar.is_bool() && s; - let er = scalar.valid_range_exclusive(bx.cx); + let er = scalar.valid_range_exclusive(bx.cx()); if er.end != er.start && scalar.valid_range.end() > scalar.valid_range.start() { // We want `table[e as usize]` to not // have bound checks, and this is the most // convenient place to put the `assume`. - - base::call_assume(&bx, bx.icmp( - llvm::IntULE, + let ll_t_in_const = bx.cx().const_uint_big( + ll_t_in, + *scalar.valid_range.end() + ); + let cmp = bx.icmp( + IntPredicate::IntULE, llval, - C_uint_big(ll_t_in, *scalar.valid_range.end()) - )); + ll_t_in_const + ); + base::call_assume(&mut bx, cmp); } } } @@ -348,8 +352,8 @@ impl FunctionCx<'a, 'll, 'tcx> { bx.intcast(llval, ll_t_out, signed) } (CastTy::Float, CastTy::Float) => { - let srcsz = ll_t_in.float_width(); - let dstsz = ll_t_out.float_width(); + let srcsz = bx.cx().float_width(ll_t_in); + let dstsz = bx.cx().float_width(ll_t_out); if dstsz > srcsz { bx.fpext(llval, ll_t_out) } else if srcsz > dstsz { @@ -366,15 +370,15 @@ impl FunctionCx<'a, 'll, 'tcx> { (CastTy::FnPtr, CastTy::Int(_)) => bx.ptrtoint(llval, ll_t_out), (CastTy::Int(_), CastTy::Ptr(_)) => { - let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed); + let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed); bx.inttoptr(usize_llval, ll_t_out) } (CastTy::Int(_), CastTy::Float) => - cast_int_to_float(&bx, signed, llval, ll_t_in, ll_t_out), + cast_int_to_float(&mut bx, signed, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(IntTy::I)) => - cast_float_to_int(&bx, true, llval, ll_t_in, ll_t_out), + cast_float_to_int(&mut bx, true, llval, ll_t_in, ll_t_out), (CastTy::Float, CastTy::Int(_)) => - cast_float_to_int(&bx, false, llval, ll_t_in, ll_t_out), + cast_float_to_int(&mut bx, false, llval, ll_t_in, ll_t_out), _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty) }; OperandValue::Immediate(newval) @@ -387,42 +391,42 @@ impl FunctionCx<'a, 'll, 'tcx> { } mir::Rvalue::Ref(_, bk, ref place) => { - let cg_place = self.codegen_place(&bx, place); + let cg_place = self.codegen_place(&mut bx, place); let ty = cg_place.layout.ty; // Note: places are indirect, so storing the `llval` into the // destination effectively creates a reference. 
- let val = if !bx.cx.type_has_metadata(ty) { + let val = if !bx.cx().type_has_metadata(ty) { OperandValue::Immediate(cg_place.llval) } else { OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap()) }; (bx, OperandRef { val, - layout: self.cx.layout_of(self.cx.tcx.mk_ref( - self.cx.tcx.types.re_erased, + layout: self.cx.layout_of(self.cx.tcx().mk_ref( + self.cx.tcx().types.re_erased, ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() } )), }) } mir::Rvalue::Len(ref place) => { - let size = self.evaluate_array_len(&bx, place); + let size = self.evaluate_array_len(&mut bx, place); let operand = OperandRef { val: OperandValue::Immediate(size), - layout: bx.cx.layout_of(bx.tcx().types.usize), + layout: bx.cx().layout_of(bx.tcx().types.usize), }; (bx, operand) } mir::Rvalue::BinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.codegen_operand(&bx, lhs); - let rhs = self.codegen_operand(&bx, rhs); + let lhs = self.codegen_operand(&mut bx, lhs); + let rhs = self.codegen_operand(&mut bx, rhs); let llresult = match (lhs.val, rhs.val) { (OperandValue::Pair(lhs_addr, lhs_extra), OperandValue::Pair(rhs_addr, rhs_extra)) => { - self.codegen_fat_ptr_binop(&bx, op, + self.codegen_fat_ptr_binop(&mut bx, op, lhs_addr, lhs_extra, rhs_addr, rhs_extra, lhs.layout.ty) @@ -430,36 +434,36 @@ impl FunctionCx<'a, 'll, 'tcx> { (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => { - self.codegen_scalar_binop(&bx, op, lhs_val, rhs_val, lhs.layout.ty) + self.codegen_scalar_binop(&mut bx, op, lhs_val, rhs_val, lhs.layout.ty) } _ => bug!() }; let operand = OperandRef { val: OperandValue::Immediate(llresult), - layout: bx.cx.layout_of( + layout: bx.cx().layout_of( op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)), }; (bx, operand) } mir::Rvalue::CheckedBinaryOp(op, ref lhs, ref rhs) => { - let lhs = self.codegen_operand(&bx, lhs); - let rhs = self.codegen_operand(&bx, rhs); - let result = self.codegen_scalar_checked_binop(&bx, op, + let lhs = self.codegen_operand(&mut bx, lhs); + let rhs = self.codegen_operand(&mut bx, rhs); + let result = self.codegen_scalar_checked_binop(&mut bx, op, lhs.immediate(), rhs.immediate(), lhs.layout.ty); let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty); let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]); let operand = OperandRef { val: result, - layout: bx.cx.layout_of(operand_ty) + layout: bx.cx().layout_of(operand_ty) }; (bx, operand) } mir::Rvalue::UnaryOp(op, ref operand) => { - let operand = self.codegen_operand(&bx, operand); + let operand = self.codegen_operand(&mut bx, operand); let lloperand = operand.immediate(); let is_float = operand.layout.ty.is_fp(); let llval = match op { @@ -478,8 +482,8 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::Rvalue::Discriminant(ref place) => { let discr_ty = rvalue.ty(&*self.mir, bx.tcx()); - let discr = self.codegen_place(&bx, place) - .codegen_get_discr(&bx, discr_ty); + let discr = self.codegen_place(&mut bx, place) + .codegen_get_discr(&mut bx, discr_ty); (bx, OperandRef { val: OperandValue::Immediate(discr), layout: self.cx.layout_of(discr_ty) @@ -487,8 +491,8 @@ impl FunctionCx<'a, 'll, 'tcx> { } mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => { - assert!(bx.cx.type_is_sized(ty)); - let val = C_usize(bx.cx, bx.cx.size_of(ty).bytes()); + assert!(bx.cx().type_is_sized(ty)); + let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes()); let tcx = bx.tcx(); (bx, OperandRef { val: OperandValue::Immediate(val), @@ -498,22 +502,23 @@ impl FunctionCx<'a, 'll, 'tcx> { 
mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); - let (size, align) = bx.cx.size_and_align_of(content_ty); - let llsize = C_usize(bx.cx, size.bytes()); - let llalign = C_usize(bx.cx, align.abi()); - let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty)); - let llty_ptr = box_layout.llvm_type(bx.cx); + let (size, align) = bx.cx().layout_of(content_ty).size_and_align(); + let llsize = bx.cx().const_usize(size.bytes()); + let llalign = bx.cx().const_usize(align.abi()); + let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); + let llty_ptr = bx.cx().backend_type(&box_layout); // Allocate space: let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) { Ok(id) => id, Err(s) => { - bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); + bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s)); } }; let instance = ty::Instance::mono(bx.tcx(), def_id); - let r = callee::get_fn(bx.cx, instance); - let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr); + let r = bx.cx().get_fn(instance); + let call = bx.call(r, &[llsize, llalign], None); + let val = bx.pointercast(call, llty_ptr); let operand = OperandRef { val: OperandValue::Immediate(val), @@ -522,48 +527,48 @@ impl FunctionCx<'a, 'll, 'tcx> { (bx, operand) } mir::Rvalue::Use(ref operand) => { - let operand = self.codegen_operand(&bx, operand); + let operand = self.codegen_operand(&mut bx, operand); (bx, operand) } mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => { // According to `rvalue_creates_operand`, only ZST // aggregate rvalues are allowed to be operands. - let ty = rvalue.ty(self.mir, self.cx.tcx); + let ty = rvalue.ty(self.mir, *self.cx.tcx()); (bx, OperandRef::new_zst(self.cx, self.cx.layout_of(self.monomorphize(&ty)))) } } } - fn evaluate_array_len( + fn evaluate_array_len>( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, place: &mir::Place<'tcx>, - ) -> &'ll Value { + ) -> Cx::Value { // ZST are passed as operands and require special handling // because codegen_place() panics if Local is operand. 
if let mir::Place::Local(index) = *place { if let LocalRef::Operand(Some(op)) = self.locals[index] { if let ty::Array(_, n) = op.layout.ty.sty { - let n = n.unwrap_usize(bx.cx.tcx); - return common::C_usize(bx.cx, n); + let n = n.unwrap_usize(*bx.cx().tcx()); + return bx.cx().const_usize(n); } } } // use common size calculation for non zero-sized types - let cg_value = self.codegen_place(&bx, place); - return cg_value.len(bx.cx); + let cg_value = self.codegen_place(bx, place); + return cg_value.len(bx.cx()); } - pub fn codegen_scalar_binop( + pub fn codegen_scalar_binop>( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, op: mir::BinOp, - lhs: &'ll Value, - rhs: &'ll Value, + lhs: Cx::Value, + rhs: Cx::Value, input_ty: Ty<'tcx>, - ) -> &'ll Value { + ) -> Cx::Value { let is_float = input_ty.is_fp(); let is_signed = input_ty.is_signed(); let is_unit = input_ty.is_unit(); @@ -605,7 +610,7 @@ impl FunctionCx<'a, 'll, 'tcx> { mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs), mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt | mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit { - C_bool(bx.cx, match op { + bx.cx().const_bool(match op { mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false, mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true, _ => unreachable!() @@ -624,47 +629,42 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - pub fn codegen_fat_ptr_binop( + pub fn codegen_fat_ptr_binop>( &mut self, - bx: &Builder<'a, 'll, 'tcx>, + bx: &mut Bx, op: mir::BinOp, - lhs_addr: &'ll Value, - lhs_extra: &'ll Value, - rhs_addr: &'ll Value, - rhs_extra: &'ll Value, + lhs_addr: Cx::Value, + lhs_extra: Cx::Value, + rhs_addr: Cx::Value, + rhs_extra: Cx::Value, _input_ty: Ty<'tcx>, - ) -> &'ll Value { + ) -> Cx::Value { match op { mir::BinOp::Eq => { - bx.and( - bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), - bx.icmp(llvm::IntEQ, lhs_extra, rhs_extra) - ) + let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr); + let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra); + bx.and(lhs, rhs) } mir::BinOp::Ne => { - bx.or( - bx.icmp(llvm::IntNE, lhs_addr, rhs_addr), - bx.icmp(llvm::IntNE, lhs_extra, rhs_extra) - ) + let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr); + let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra); + bx.or(lhs, rhs) } mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => { // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP a.1) let (op, strict_op) = match op { - mir::BinOp::Lt => (llvm::IntULT, llvm::IntULT), - mir::BinOp::Le => (llvm::IntULE, llvm::IntULT), - mir::BinOp::Gt => (llvm::IntUGT, llvm::IntUGT), - mir::BinOp::Ge => (llvm::IntUGE, llvm::IntUGT), + mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT), + mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT), + mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT), + mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT), _ => bug!(), }; - - bx.or( - bx.icmp(strict_op, lhs_addr, rhs_addr), - bx.and( - bx.icmp(llvm::IntEQ, lhs_addr, rhs_addr), - bx.icmp(op, lhs_extra, rhs_extra) - ) - ) + let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr); + let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr); + let and_rhs = bx.icmp(op, lhs_extra, rhs_extra); + let rhs = bx.and(and_lhs, and_rhs); + bx.or(lhs, rhs) } _ => { bug!("unexpected fat ptr binop"); @@ -672,19 +672,21 @@ impl FunctionCx<'a, 'll, 'tcx> { } } - pub fn codegen_scalar_checked_binop(&mut self, - bx: &Builder<'a, 'll, 'tcx>, - op: mir::BinOp, - lhs: 
&'ll Value, - rhs: &'ll Value, - input_ty: Ty<'tcx>) -> OperandValue<'ll> { + pub fn codegen_scalar_checked_binop>( + &mut self, + bx: &mut Bx, + op: mir::BinOp, + lhs: Cx::Value, + rhs: Cx::Value, + input_ty: Ty<'tcx> + ) -> OperandValue { // This case can currently arise only from functions marked // with #[rustc_inherit_overflow_checks] and inlined from // another crate (mostly core::num generic/#[inline] fns), // while the current crate doesn't use overflow checks. - if !bx.cx.check_overflow { + if !bx.cx().check_overflow() { let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); - return OperandValue::Pair(val, C_bool(bx.cx, false)); + return OperandValue::Pair(val, bx.cx().const_bool(false)); } let (val, of) = match op { @@ -703,12 +705,12 @@ impl FunctionCx<'a, 'll, 'tcx> { bx.extract_value(res, 1)) } mir::BinOp::Shl | mir::BinOp::Shr => { - let lhs_llty = val_ty(lhs); - let rhs_llty = val_ty(rhs); - let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true); + let lhs_llty = bx.cx().val_ty(lhs); + let rhs_llty = bx.cx().val_ty(rhs); + let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true); let outer_bits = bx.and(rhs, invert_mask); - let of = bx.icmp(llvm::IntNE, outer_bits, C_null(rhs_llty)); + let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty)); let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty); (val, of) @@ -720,7 +722,12 @@ impl FunctionCx<'a, 'll, 'tcx> { OperandValue::Pair(val, of) } +} +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx : LayoutOf, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool { match *rvalue { mir::Rvalue::Ref(..) | @@ -735,7 +742,7 @@ impl FunctionCx<'a, 'll, 'tcx> { true, mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => { - let ty = rvalue.ty(self.mir, self.cx.tcx); + let ty = rvalue.ty(self.mir, *self.cx.tcx()); let ty = self.monomorphize(&ty); self.cx.layout_of(ty).is_zst() } @@ -750,7 +757,13 @@ enum OverflowOp { Add, Sub, Mul } -fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> &'ll Value { +fn get_overflow_intrinsic<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + oop: OverflowOp, + bx: &mut Bx, + ty: Ty +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ use syntax::ast::IntTy::*; use syntax::ast::UintTy::*; use rustc::ty::{Int, Uint}; @@ -812,18 +825,24 @@ fn get_overflow_intrinsic(oop: OverflowOp, bx: &Builder<'_, 'll, '_>, ty: Ty) -> }, }; - bx.cx.get_intrinsic(&name) + bx.cx().get_intrinsic(&name) } -fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, - signed: bool, - x: &'ll Value, - int_ty: &'ll Type, - float_ty: &'ll Type) -> &'ll Value { +fn cast_int_to_float<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + signed: bool, + x: >::Value, + int_ty: >::Type, + float_ty: >::Type +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ // Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding. // It's only u128 -> f32 that can cause overflows (i.e., should yield infinity). // LLVM's uitofp produces undef in those cases, so we manually check for that case. 
- let is_u128_to_f32 = !signed && int_ty.int_width() == 128 && float_ty.float_width() == 32; + let is_u128_to_f32 = !signed && + bx.cx().int_width(int_ty) == 128 && + bx.cx().float_width(float_ty) == 32; if is_u128_to_f32 { // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity, // and for everything else LLVM's uitofp works just fine. @@ -831,11 +850,12 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, use rustc_apfloat::Float; const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1) << (Single::MAX_EXP - Single::PRECISION as i16); - let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); - let overflow = bx.icmp(llvm::IntUGE, x, max); - let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32); - let infinity = consts::bitcast(infinity_bits, float_ty); - bx.select(overflow, infinity, bx.uitofp(x, float_ty)) + let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP); + let overflow = bx.icmp(IntPredicate::IntUGE, x, max); + let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32); + let infinity = bx.bitcast(infinity_bits, float_ty); + let fp = bx.uitofp(x, float_ty); + bx.select(overflow, infinity, fp) } else { if signed { bx.sitofp(x, float_ty) @@ -845,20 +865,27 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>, } } -fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, - signed: bool, - x: &'ll Value, - float_ty: &'ll Type, - int_ty: &'ll Type) -> &'ll Value { +fn cast_float_to_int<'a, 'll: 'a, 'tcx: 'll, Bx: BuilderMethods<'a, 'll, 'tcx>>( + bx: &mut Bx, + signed: bool, + x: >::Value, + float_ty: >::Type, + int_ty: >::Type +) -> >::Value + where &'a Bx::CodegenCx : LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ let fptosui_result = if signed { bx.fptosi(x, int_ty) } else { bx.fptoui(x, int_ty) }; - if !bx.sess().opts.debugging_opts.saturating_float_casts { + if !bx.cx().sess().opts.debugging_opts.saturating_float_casts { return fptosui_result; } + + let int_width = bx.cx().int_width(int_ty); + let float_width = bx.cx().float_width(float_ty); // LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the // destination integer type after rounding towards zero. This `undef` value can cause UB in // safe code (see issue #10184), so we implement a saturating conversion on top of it: @@ -878,39 +905,50 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, // On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because // we're rounding towards zero, we just get float_ty::MAX (which is always an integer). // This already happens today with u128::MAX = 2^128 - 1 > f32::MAX. 
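
The `u128 -> f32` guard above can be stated without any builder machinery. A standalone sketch in plain Rust (purely illustrative; Rust's own `as` casts have saturated since 1.45, this just makes the backend-level check explicit):

```rust
// f32::MAX + 0.5 ULP is (2^25 - 1) * 2^103: everything at or above it
// rounds to +infinity, so it is mapped there explicitly instead of
// relying on the backend's uitofp behaviour.
fn saturating_u128_to_f32(x: u128) -> f32 {
    const MAX_F32_PLUS_HALF_ULP: u128 = ((1u128 << 25) - 1) << 103;
    if x >= MAX_F32_PLUS_HALF_ULP {
        f32::INFINITY
    } else {
        x as f32
    }
}

fn main() {
    assert_eq!(saturating_u128_to_f32(u128::MAX), f32::INFINITY);
    assert_eq!(saturating_u128_to_f32(12_345), 12_345.0);
}
```
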
- fn compute_clamp_bounds(signed: bool, int_ty: &Type) -> (u128, u128) { - let rounded_min = F::from_i128_r(int_min(signed, int_ty), Round::TowardZero); - assert_eq!(rounded_min.status, Status::OK); - let rounded_max = F::from_u128_r(int_max(signed, int_ty), Round::TowardZero); - assert!(rounded_max.value.is_finite()); - (rounded_min.value.to_bits(), rounded_max.value.to_bits()) - } - fn int_max(signed: bool, int_ty: &Type) -> u128 { - let shift_amount = 128 - int_ty.int_width(); + let int_max = |signed: bool, int_width: u64| -> u128 { + let shift_amount = 128 - int_width; if signed { i128::MAX as u128 >> shift_amount } else { u128::MAX >> shift_amount } - } - fn int_min(signed: bool, int_ty: &Type) -> i128 { + }; + let int_min = |signed: bool, int_width: u64| -> i128 { if signed { - i128::MIN >> (128 - int_ty.int_width()) + i128::MIN >> (128 - int_width) } else { 0 } - } - let float_bits_to_llval = |bits| { - let bits_llval = match float_ty.float_width() { - 32 => C_u32(bx.cx, bits as u32), - 64 => C_u64(bx.cx, bits as u64), + }; + + let compute_clamp_bounds_single = + |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + let compute_clamp_bounds_double = + |signed: bool, int_width: u64| -> (u128, u128) { + let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_width), Round::TowardZero); + assert_eq!(rounded_min.status, Status::OK); + let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_width), Round::TowardZero); + assert!(rounded_max.value.is_finite()); + (rounded_min.value.to_bits(), rounded_max.value.to_bits()) + }; + + let mut float_bits_to_llval = |bits| { + let bits_llval = match float_width { + 32 => bx.cx().const_u32(bits as u32), + 64 => bx.cx().const_u64(bits as u64), n => bug!("unsupported float width {}", n), }; - consts::bitcast(bits_llval, float_ty) + bx.bitcast(bits_llval, float_ty) }; - let (f_min, f_max) = match float_ty.float_width() { - 32 => compute_clamp_bounds::(signed, int_ty), - 64 => compute_clamp_bounds::(signed, int_ty), + let (f_min, f_max) = match float_width { + 32 => compute_clamp_bounds_single(signed, int_width), + 64 => compute_clamp_bounds_double(signed, int_width), n => bug!("unsupported float width {}", n), }; let f_min = float_bits_to_llval(f_min); @@ -956,10 +994,10 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, // negation, and the negation can be merged into the select. Therefore, it not necessarily any // more expensive than a ordered ("normal") comparison. Whether these optimizations will be // performed is ultimately up to the backend, but at least x86 does perform them. 
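
The select sequence built below implements a simple clamp. A plain-Rust sketch of the same scheme for `f32 -> i32` (illustrative only; the real code computes the bounds per float/int width via rustc_apfloat):

```rust
// NaN goes to 0, values below the rounded minimum clamp to i32::MIN,
// values above the rounded maximum clamp to i32::MAX, everything else
// truncates toward zero.
fn saturating_f32_to_i32(x: f32) -> i32 {
    // `x != x` is the same isNaN trick the generated code uses,
    // since there is no dedicated NaN predicate.
    if x != x {
        return 0;
    }
    // Stand-ins for the compile-time clamp bounds: i32::MIN is exactly
    // representable, i32::MAX rounded toward zero into f32 is 2^31 - 128.
    let f_min = i32::MIN as f32;
    let f_max = 2_147_483_520.0_f32;
    if x < f_min {
        i32::MIN
    } else if x > f_max {
        i32::MAX
    } else {
        x as i32
    }
}

fn main() {
    assert_eq!(saturating_f32_to_i32(f32::NAN), 0);
    assert_eq!(saturating_f32_to_i32(1e10), i32::MAX);
    assert_eq!(saturating_f32_to_i32(-1e10), i32::MIN);
    assert_eq!(saturating_f32_to_i32(-3.7), -3);
}
```
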
- let less_or_nan = bx.fcmp(llvm::RealULT, x, f_min); - let greater = bx.fcmp(llvm::RealOGT, x, f_max); - let int_max = C_uint_big(int_ty, int_max(signed, int_ty)); - let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128); + let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min); + let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max); + let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_width)); + let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_width) as u128); let s0 = bx.select(less_or_nan, int_min, fptosui_result); let s1 = bx.select(greater, int_max, s0); @@ -968,7 +1006,9 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>, // Therefore we only need to execute this step for signed integer types. if signed { // LLVM has no isNaN predicate, so we use (x == x) instead - bx.select(bx.fcmp(llvm::RealOEQ, x, x), s1, C_uint(int_ty, 0)) + let zero = bx.cx().const_uint(int_ty, 0); + let cmp = bx.fcmp(RealPredicate::RealOEQ, x, x); + bx.select(cmp, s1, zero) } else { s1 } diff --git a/src/librustc_codegen_llvm/mir/statement.rs b/src/librustc_codegen_ssa/mir/statement.rs similarity index 75% rename from src/librustc_codegen_llvm/mir/statement.rs rename to src/librustc_codegen_ssa/mir/statement.rs index 93be0074f6e9b..e3f598f842fc1 100644 --- a/src/librustc_codegen_llvm/mir/statement.rs +++ b/src/librustc_codegen_ssa/mir/statement.rs @@ -10,21 +10,25 @@ use rustc::mir; -use asm; -use builder::Builder; - use super::FunctionCx; use super::LocalRef; use super::OperandValue; +use rustc::ty::Ty; +use rustc::ty::layout::{TyLayout, HasTyCtxt, LayoutOf}; +use interfaces::*; -impl FunctionCx<'a, 'll, 'tcx> { - pub fn codegen_statement(&mut self, - bx: Builder<'a, 'll, 'tcx>, - statement: &mir::Statement<'tcx>) - -> Builder<'a, 'll, 'tcx> { +impl<'a, 'f, 'll: 'a + 'f, 'tcx: 'll, Cx: 'a + CodegenMethods<'a, 'll, 'tcx>> + FunctionCx<'a, 'f, 'll, 'tcx, Cx> where + &'a Cx: LayoutOf, TyLayout = TyLayout<'tcx>> + HasTyCtxt<'tcx> +{ + pub fn codegen_statement>( + &mut self, + mut bx: Bx, + statement: &mir::Statement<'tcx> + ) -> Bx { debug!("codegen_statement(statement={:?})", statement); - self.set_debug_loc(&bx, statement.source_info); + self.set_debug_loc(&mut bx, statement.source_info); match statement.kind { mir::StatementKind::Assign(ref place, ref rvalue) => { if let mir::Place::Local(index) = *place { @@ -53,39 +57,39 @@ impl FunctionCx<'a, 'll, 'tcx> { } } } else { - let cg_dest = self.codegen_place(&bx, place); + let cg_dest = self.codegen_place(&mut bx, place); self.codegen_rvalue(bx, cg_dest, rvalue) } } mir::StatementKind::SetDiscriminant{ref place, variant_index} => { - self.codegen_place(&bx, place) - .codegen_set_discr(&bx, variant_index); + self.codegen_place(&mut bx, place) + .codegen_set_discr(&mut bx, variant_index); bx } mir::StatementKind::StorageLive(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_live(&bx); + cg_place.storage_live(&mut bx); } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_live(&bx); + cg_indirect_place.storage_live(&mut bx); } bx } mir::StatementKind::StorageDead(local) => { if let LocalRef::Place(cg_place) = self.locals[local] { - cg_place.storage_dead(&bx); + cg_place.storage_dead(&mut bx); } else if let LocalRef::UnsizedPlace(cg_indirect_place) = self.locals[local] { - cg_indirect_place.storage_dead(&bx); + cg_indirect_place.storage_dead(&mut bx); } bx } mir::StatementKind::InlineAsm { ref asm, ref outputs, ref inputs } => { 
let outputs = outputs.iter().map(|output| { - self.codegen_place(&bx, output) + self.codegen_place(&mut bx, output) }).collect(); let input_vals = inputs.iter() .try_fold(Vec::with_capacity(inputs.len()), |mut acc, input| { - let op = self.codegen_operand(&bx, input); + let op = self.codegen_operand(&mut bx, input); if let OperandValue::Immediate(_) = op.val { acc.push(op.immediate()); Ok(acc) @@ -95,13 +99,13 @@ impl FunctionCx<'a, 'll, 'tcx> { }); if input_vals.is_err() { - span_err!(bx.sess(), statement.source_info.span, E0669, + span_err!(bx.cx().sess(), statement.source_info.span, E0669, "invalid value for constraint in inline assembly"); } else { let input_vals = input_vals.unwrap(); - let res = asm::codegen_inline_asm(&bx, asm, outputs, input_vals); + let res = bx.codegen_inline_asm(asm, outputs, input_vals); if !res { - span_err!(bx.sess(), statement.source_info.span, E0668, + span_err!(bx.cx().sess(), statement.source_info.span, E0668, "malformed inline assembly"); } } diff --git a/src/librustc_codegen_ssa/mono_item.rs b/src/librustc_codegen_ssa/mono_item.rs new file mode 100644 index 0000000000000..03f0da08c4bf7 --- /dev/null +++ b/src/librustc_codegen_ssa/mono_item.rs @@ -0,0 +1,124 @@ +// Copyright 2016 The Rust Project Developers. See the COPYRIGHT +// file at the top-level directory of this distribution and at +// http://rust-lang.org/COPYRIGHT. +// +// Licensed under the Apache License, Version 2.0 or the MIT license +// , at your +// option. This file may not be copied, modified, or distributed +// except according to those terms. + +//! Walks the crate looking for items/impl-items/trait-items that have +//! either a `rustc_symbol_name` or `rustc_item_path` attribute and +//! generates an error giving, respectively, the symbol name or +//! item-path. This is used for unit testing the code that generates +//! paths etc in all kinds of annoying scenarios. 
+
+use base;
+use rustc::hir;
+use rustc::hir::def::Def;
+use rustc::mir::mono::{Linkage, Visibility};
+use rustc::ty::Ty;
+use rustc::ty::layout::{LayoutOf, HasTyCtxt, TyLayout};
+use std::fmt;
+use interfaces::*;
+
+pub use rustc::mir::mono::MonoItem;
+
+pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt;
+
+pub trait MonoItemExt<'a, 'll: 'a, 'tcx: 'll> : fmt::Debug + BaseMonoItemExt<'ll, 'tcx>
+{
+    fn define<Bx: BuilderMethods<'a, 'll, 'tcx>>(&self, cx: &'a Bx::CodegenCx) where
+        &'a Bx::CodegenCx : LayoutOf<Ty = Ty<'tcx>, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx>
+    {
+        debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}",
+               self.to_string(*cx.tcx()),
+               self.to_raw_string(),
+               cx.codegen_unit().name());
+
+        match *self.as_mono_item() {
+            MonoItem::Static(def_id) => {
+                let tcx = *cx.tcx();
+                let is_mutable = match tcx.describe_def(def_id) {
+                    Some(Def::Static(_, is_mutable)) => is_mutable,
+                    Some(other) => {
+                        bug!("Expected Def::Static, found {:?}", other)
+                    }
+                    None => {
+                        bug!("Expected Def::Static for {:?}, found nothing", def_id)
+                    }
+                };
+                cx.codegen_static(def_id, is_mutable);
+            }
+            MonoItem::GlobalAsm(node_id) => {
+                let item = cx.tcx().hir.expect_item(node_id);
+                if let hir::ItemKind::GlobalAsm(ref ga) = item.node {
+                    cx.codegen_global_asm(ga);
+                } else {
+                    span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
+                }
+            }
+            MonoItem::Fn(instance) => {
+                base::codegen_instance::<'a, 'll, 'tcx, Bx>(&cx, instance);
+            }
+        }
+
+        debug!("END IMPLEMENTING '{} ({})' in cgu {}",
+               self.to_string(*cx.tcx()),
+               self.to_raw_string(),
+               cx.codegen_unit().name());
+    }
+
+    fn predefine<Bx: BuilderMethods<'a, 'll, 'tcx>>(
+        &self,
+        cx: &'a Bx::CodegenCx,
+        linkage: Linkage,
+        visibility: Visibility
+    ) where
+        &'a Bx::CodegenCx : LayoutOf<Ty = Ty<'tcx>, TyLayout=TyLayout<'tcx>> + HasTyCtxt<'tcx>
+    {
+        debug!("BEGIN PREDEFINING '{} ({})' in cgu {}",
+               self.to_string(*cx.tcx()),
+               self.to_raw_string(),
+               cx.codegen_unit().name());
+
+        let symbol_name = self.symbol_name(*cx.tcx()).as_str();
+
+        debug!("symbol {}", &symbol_name);
+
+        match *self.as_mono_item() {
+            MonoItem::Static(def_id) => {
+                cx.predefine_static(def_id, linkage, visibility, &symbol_name);
+            }
+            MonoItem::Fn(instance) => {
+                cx.predefine_fn(instance, linkage, visibility, &symbol_name);
+            }
+            MonoItem::GlobalAsm(..) => {}
+        }
+
+        debug!("END PREDEFINING '{} ({})' in cgu {}",
+               self.to_string(*cx.tcx()),
+               self.to_raw_string(),
+               cx.codegen_unit().name());
+    }
+
+    fn to_raw_string(&self) -> String {
+        match *self.as_mono_item() {
+            MonoItem::Fn(instance) => {
+                format!("Fn({:?}, {})",
+                        instance.def,
+                        instance.substs.as_ptr() as usize)
+            }
+            MonoItem::Static(id) => {
+                format!("Static({:?})", id)
+            }
+            MonoItem::GlobalAsm(id) => {
+                format!("GlobalAsm({:?})", id)
+            }
+        }
+    }
+}
+
+impl<'a, 'll:'a, 'tcx: 'll> MonoItemExt<'a, 'll, 'tcx>
+    for MonoItem<'tcx> {}
diff --git a/src/librustc_codegen_utils/lib.rs b/src/librustc_codegen_utils/lib.rs
index 03b3b20a4e772..2d1f12b2d278d 100644
--- a/src/librustc_codegen_utils/lib.rs
+++ b/src/librustc_codegen_utils/lib.rs
@@ -21,6 +21,7 @@
 #![feature(custom_attribute)]
 #![feature(nll)]
 #![allow(unused_attributes)]
+#![allow(dead_code)]
 #![feature(quote)]
 #![feature(rustc_diagnostic_macros)]
 
diff --git a/src/librustc_driver/Cargo.toml b/src/librustc_driver/Cargo.toml
index 470c8b03d0bca..2587a707510f6 100644
--- a/src/librustc_driver/Cargo.toml
+++ b/src/librustc_driver/Cargo.toml
@@ -32,6 +32,7 @@ rustc_resolve = { path = "../librustc_resolve" }
 rustc_save_analysis = { path = "../librustc_save_analysis" }
 rustc_traits = { path = "../librustc_traits" }
 rustc_codegen_utils = { path = "../librustc_codegen_utils" }
+rustc_codegen_ssa = { path = "../librustc_codegen_ssa" }
 rustc_typeck = { path = "../librustc_typeck" }
 serialize = { path = "../libserialize" }
 syntax = { path = "../libsyntax" }
diff --git a/src/librustc_driver/lib.rs b/src/librustc_driver/lib.rs
index 276b7290c2ef0..58539846abff8 100644
--- a/src/librustc_driver/lib.rs
+++ b/src/librustc_driver/lib.rs
@@ -54,6 +54,7 @@ extern crate rustc_resolve;
 extern crate rustc_save_analysis;
 extern crate rustc_traits;
 extern crate rustc_codegen_utils;
+extern crate rustc_codegen_ssa;
 extern crate rustc_typeck;
 extern crate scoped_tls;
 extern crate serialize;
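
Note on the cast_float_to_int hunk above: the fcmp/select sequence implements a saturating float-to-int conversion. The stand-alone sketch below is not code from this patch; it mirrors the same logic in ordinary Rust for the concrete case f64 to i32, where both integer bounds are exactly representable as floats, so the carefully rounded f_min/f_max of the real code reduce to plain casts.

// Sketch of the saturating conversion that `cast_float_to_int` emits,
// specialised to f64 -> i32. Not part of the patch.
fn saturating_cast(x: f64) -> i32 {
    // Comparison bounds; for f64/i32 both are exactly representable.
    let f_min = i32::MIN as f64;
    let f_max = i32::MAX as f64;

    // The raw `fptosi` result; it is only selected when x is in range.
    let fptosui_result = x as i32;

    // RealULT: "unordered or less than", so this is also true for NaN.
    let less_or_nan = !(x >= f_min);
    // RealOGT: "ordered greater than".
    let greater = x > f_max;

    // The two `select`s: clamp to the integer minimum / maximum.
    let s0 = if less_or_nan { i32::MIN } else { fptosui_result };
    let s1 = if greater { i32::MAX } else { s0 };

    // Signed case only: map NaN to 0. (x == x) is false exactly for NaN,
    // standing in for the missing `isNaN` predicate.
    if x == x { s1 } else { 0 }
}

fn main() {
    assert_eq!(saturating_cast(1.9), 1);
    assert_eq!(saturating_cast(1e12), i32::MAX);
    assert_eq!(saturating_cast(-1e12), i32::MIN);
    assert_eq!(saturating_cast(f64::NAN), 0);
    println!("saturating cast sketch ok");
}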
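The statement.rs move shows the central pattern of this series: codegen_statement is no longer tied to the LLVM Builder but is generic over a builder type from rustc_codegen_ssa's interfaces module, reaching the codegen context through bx.cx() and threading the builder by value. The toy model below only illustrates that shape; every name in it (MiniCx, MiniBuilder, TextBuilder, ...) is invented for the example and is not the real rustc_codegen_ssa API.

// Invented names throughout: this only models the "generic over a
// builder with an associated context" pattern, not the real traits.
trait MiniCx {
    fn warn(&self, msg: &str);
}

trait MiniBuilder: Sized {
    type Cx: MiniCx;
    type Value;

    fn cx(&self) -> &Self::Cx;
    fn const_int(&mut self, v: i64) -> Self::Value;
    fn add(&mut self, a: Self::Value, b: Self::Value) -> Self::Value;
}

enum Stmt {
    AddConsts(i64, i64),
    Nop,
}

// The analogue of `codegen_statement`: backend-independent driving code
// that takes the builder by value and hands it back to the caller.
fn codegen_stmt<Bx: MiniBuilder>(mut bx: Bx, stmt: &Stmt) -> Bx {
    match *stmt {
        Stmt::AddConsts(a, b) => {
            let lhs = bx.const_int(a);
            let rhs = bx.const_int(b);
            let _sum = bx.add(lhs, rhs);
        }
        Stmt::Nop => bx.cx().warn("statement has no effect"),
    }
    bx
}

// One concrete "backend" that just pretty-prints instructions.
struct TextCx;
impl MiniCx for TextCx {
    fn warn(&self, msg: &str) { eprintln!("warning: {}", msg); }
}

struct TextBuilder { cx: TextCx, code: Vec<String> }
impl MiniBuilder for TextBuilder {
    type Cx = TextCx;
    type Value = String;

    fn cx(&self) -> &TextCx { &self.cx }
    fn const_int(&mut self, v: i64) -> String { format!("{}", v) }
    fn add(&mut self, a: String, b: String) -> String {
        let instr = format!("add {}, {}", a, b);
        self.code.push(instr.clone());
        instr
    }
}

fn main() {
    let bx = TextBuilder { cx: TextCx, code: Vec::new() };
    let bx = codegen_stmt(bx, &Stmt::AddConsts(2, 3));
    let bx = codegen_stmt(bx, &Stmt::Nop);
    println!("{:?}", bx.code); // ["add 2, 3"]
}

Passing the builder by value and returning it mirrors how the patched codegen_statement returns bx, which lets the caller keep using the same builder across statements regardless of which backend implements it.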
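mono_item.rs splits the per-item backend work into predefine (declare the symbol with the requested linkage and visibility; GlobalAsm declares nothing) and define (emit the body, dispatching to codegen_static, codegen_global_asm or codegen_instance). A codegen unit is normally driven in two phases, with every predefine issued before any define, so that bodies can refer to any symbol of the unit. The sketch below shows only that ordering; Backend and Recorder are hypothetical stand-ins, not the CodegenMethods entry points used above.

// Hypothetical stand-ins: only the predefine-then-define ordering is the
// point here, not the real rustc_codegen_ssa types.
trait Backend {
    type Item: Copy;

    fn predefine(&mut self, item: Self::Item); // declare the symbol
    fn define(&mut self, item: Self::Item);    // emit its body
}

fn codegen_unit<B: Backend>(backend: &mut B, items: &[B::Item]) {
    // Phase 1: declare every item of the unit first...
    for &item in items {
        backend.predefine(item);
    }
    // ...Phase 2: then emit bodies, which may reference any declaration
    // made in phase 1.
    for &item in items {
        backend.define(item);
    }
}

// A trivial backend that just records the order of calls.
#[derive(Default)]
struct Recorder { events: Vec<String> }

impl Backend for Recorder {
    type Item = &'static str;

    fn predefine(&mut self, item: &'static str) {
        self.events.push(format!("predefine {}", item));
    }
    fn define(&mut self, item: &'static str) {
        self.events.push(format!("define {}", item));
    }
}

fn main() {
    let mut backend = Recorder::default();
    codegen_unit(&mut backend, &["foo", "bar"]);
    assert_eq!(backend.events,
               ["predefine foo", "predefine bar", "define foo", "define bar"]);
}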