diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs
index 5675a5d981241..d8fa7786c3780 100644
--- a/compiler/rustc_codegen_llvm/src/builder.rs
+++ b/compiler/rustc_codegen_llvm/src/builder.rs
@@ -497,9 +497,10 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
         } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
             let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
+            let pair_ty = place.layout.llvm_type(self);
 
             let mut load = |i, scalar: &abi::Scalar, align| {
-                let llptr = self.struct_gep(place.llval, i as u64);
+                let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
                 scalar_load_metadata(self, load, scalar);
@@ -543,7 +544,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             .val
             .store(&mut body_bx, PlaceRef::new_sized_aligned(current, cg_elem.layout, align));
 
-        let next = body_bx.inbounds_gep(current, &[self.const_usize(1)]);
+        let next = body_bx.inbounds_gep(
+            self.backend_type(cg_elem.layout),
+            current,
+            &[self.const_usize(1)],
+        );
         body_bx.br(header_bx.llbb());
         header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
 
@@ -639,10 +644,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
+    fn gep(&mut self, ty: &'ll Type, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
         unsafe {
-            llvm::LLVMBuildGEP(
+            llvm::LLVMBuildGEP2(
                 self.llbuilder,
+                ty,
                 ptr,
                 indices.as_ptr(),
                 indices.len() as c_uint,
@@ -651,10 +657,16 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn inbounds_gep(&mut self, ptr: &'ll Value, indices: &[&'ll Value]) -> &'ll Value {
+    fn inbounds_gep(
+        &mut self,
+        ty: &'ll Type,
+        ptr: &'ll Value,
+        indices: &[&'ll Value],
+    ) -> &'ll Value {
         unsafe {
-            llvm::LLVMBuildInBoundsGEP(
+            llvm::LLVMBuildInBoundsGEP2(
                 self.llbuilder,
+                ty,
                 ptr,
                 indices.as_ptr(),
                 indices.len() as c_uint,
@@ -663,9 +675,9 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn struct_gep(&mut self, ptr: &'ll Value, idx: u64) -> &'ll Value {
+    fn struct_gep(&mut self, ty: &'ll Type, ptr: &'ll Value, idx: u64) -> &'ll Value {
         assert_eq!(idx as c_uint as u64, idx);
-        unsafe { llvm::LLVMBuildStructGEP(self.llbuilder, ptr, idx as c_uint, UNNAMED) }
+        unsafe { llvm::LLVMBuildStructGEP2(self.llbuilder, ty, ptr, idx as c_uint, UNNAMED) }
     }
 
     /* Casts */
diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs
index 35e72621c565d..5532f53e40823 100644
--- a/compiler/rustc_codegen_llvm/src/common.rs
+++ b/compiler/rustc_codegen_llvm/src/common.rs
@@ -268,7 +268,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                     }
                 };
                 let llval = unsafe {
-                    llvm::LLVMConstInBoundsGEP(
+                    llvm::LLVMRustConstInBoundsGEP2(
+                        self.type_i8(),
                         self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
                         &self.const_usize(offset.bytes()),
                         1,
@@ -303,7 +304,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         let base_addr = self.static_addr_of(init, alloc.align, None);
 
         let llval = unsafe {
-            llvm::LLVMConstInBoundsGEP(
+            llvm::LLVMRustConstInBoundsGEP2(
+                self.type_i8(),
                 self.const_bitcast(base_addr, self.type_i8p()),
                 &self.const_usize(offset.bytes()),
                 1,
diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
index de3f719b8163c..c33d35cc285d3 100644
--- a/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
+++ b/compiler/rustc_codegen_llvm/src/debuginfo/gdb.rs
@@ -15,12 +15,11 @@ use rustc_span::symbol::sym;
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
 pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
     if needs_gdb_debug_scripts_section(bx) {
-        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
+        let gdb_debug_scripts_section =
+            bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p());
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
-        let indices = [bx.const_i32(0), bx.const_i32(0)];
-        let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
-        let volative_load_instruction = bx.volatile_load(bx.type_i8(), element);
+        let volative_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);
         unsafe {
             llvm::LLVMSetAlignment(volative_load_instruction, 1);
         }
diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs
index a48a694b630f0..56563668de6b0 100644
--- a/compiler/rustc_codegen_llvm/src/intrinsic.rs
+++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs
@@ -686,11 +686,19 @@ fn codegen_emcc_try(
         // create an alloca and pass a pointer to that.
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let i8_align = bx.tcx().data_layout.i8_align.abi;
-        let catch_data =
-            catch.alloca(bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false), ptr_align);
-        let catch_data_0 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
+        let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
+        let catch_data = catch.alloca(catch_data_type, ptr_align);
+        let catch_data_0 = catch.inbounds_gep(
+            catch_data_type,
+            catch_data,
+            &[bx.const_usize(0), bx.const_usize(0)],
+        );
         catch.store(ptr, catch_data_0, ptr_align);
-        let catch_data_1 = catch.inbounds_gep(catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
+        let catch_data_1 = catch.inbounds_gep(
+            catch_data_type,
+            catch_data,
+            &[bx.const_usize(0), bx.const_usize(1)],
+        );
         catch.store(is_rust_panic, catch_data_1, i8_align);
 
         let catch_data = catch.bitcast(catch_data, bx.type_i8p());
diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
index 68d566cca095c..e803ad6d88e3c 100644
--- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
+++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs
@@ -1011,7 +1011,8 @@ extern "C" {
     pub fn LLVMConstVector(ScalarConstantVals: *const &Value, Size: c_uint) -> &Value;
 
     // Constant expressions
-    pub fn LLVMConstInBoundsGEP(
+    pub fn LLVMRustConstInBoundsGEP2(
+        ty: &'a Type,
         ConstantVal: &'a Value,
         ConstantIndices: *const &'a Value,
         NumIndices: c_uint,
@@ -1394,22 +1395,25 @@ extern "C" {
 
     pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
 
-    pub fn LLVMBuildGEP(
+    pub fn LLVMBuildGEP2(
         B: &Builder<'a>,
+        Ty: &'a Type,
         Pointer: &'a Value,
         Indices: *const &'a Value,
         NumIndices: c_uint,
         Name: *const c_char,
     ) -> &'a Value;
-    pub fn LLVMBuildInBoundsGEP(
+    pub fn LLVMBuildInBoundsGEP2(
         B: &Builder<'a>,
+        Ty: &'a Type,
         Pointer: &'a Value,
         Indices: *const &'a Value,
         NumIndices: c_uint,
         Name: *const c_char,
     ) -> &'a Value;
-    pub fn LLVMBuildStructGEP(
+    pub fn LLVMBuildStructGEP2(
         B: &Builder<'a>,
+        Ty: &'a Type,
         Pointer: &'a Value,
         Idx: c_uint,
         Name: *const c_char,
diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs
index 9df1bd7d1d9bb..c9fb09570c35a 100644
--- a/compiler/rustc_codegen_llvm/src/va_arg.rs
+++ b/compiler/rustc_codegen_llvm/src/va_arg.rs
@@ -50,12 +50,12 @@ fn emit_direct_ptr_va_arg(
 
     let aligned_size = size.align_to(slot_size).bytes() as i32;
     let full_direct_size = bx.cx().const_i32(aligned_size);
-    let next = bx.inbounds_gep(addr, &[full_direct_size]);
+    let next = bx.inbounds_gep(bx.type_i8(), addr, &[full_direct_size]);
     bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
     if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
         let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
-        let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
+        let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
         (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
     } else {
         (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
@@ -98,6 +98,7 @@ fn emit_aapcs_va_arg(
     // Implementation of the AAPCS64 calling convention for va_args see
     // https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
     let va_list_addr = list.immediate();
+    let va_list_ty = list.deref(bx.cx).layout.llvm_type(bx);
     let layout = bx.cx.layout_of(target_ty);
 
     let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
@@ -109,11 +110,11 @@ fn emit_aapcs_va_arg(
 
     let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
     let (reg_off, reg_top_index, slot_size) = if gr_type {
-        let gr_offs = bx.struct_gep(va_list_addr, 7);
+        let gr_offs = bx.struct_gep(va_list_ty, va_list_addr, 7);
         let nreg = (layout.size.bytes() + 7) / 8;
         (gr_offs, 3, nreg * 8)
     } else {
-        let vr_off = bx.struct_gep(va_list_addr, 9);
+        let vr_off = bx.struct_gep(va_list_ty, va_list_addr, 9);
         let nreg = (layout.size.bytes() + 15) / 16;
         (vr_off, 5, nreg * 16)
     };
@@ -141,15 +142,15 @@ fn emit_aapcs_va_arg(
     maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
 
     let top_type = bx.type_i8p();
-    let top = in_reg.struct_gep(va_list_addr, reg_top_index);
+    let top = in_reg.struct_gep(va_list_ty, va_list_addr, reg_top_index);
     let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
 
     // reg_value = *(@top + reg_off_v);
-    let mut reg_addr = in_reg.gep(top, &[reg_off_v]);
+    let mut reg_addr = in_reg.gep(bx.type_i8(), top, &[reg_off_v]);
     if bx.tcx().sess.target.endian == Endian::Big && layout.size.bytes() != slot_size {
         // On big-endian systems the value is right-aligned in its slot.
         let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
-        reg_addr = in_reg.gep(reg_addr, &[offset]);
+        reg_addr = in_reg.gep(bx.type_i8(), reg_addr, &[offset]);
     }
     let reg_type = layout.llvm_type(bx);
     let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs
index 87b342e844391..b60677267849a 100644
--- a/compiler/rustc_codegen_ssa/src/base.rs
+++ b/compiler/rustc_codegen_ssa/src/base.rs
@@ -168,8 +168,11 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             let ptr_ty = cx.type_i8p();
             let ptr_align = cx.tcx().data_layout.pointer_align.abi;
             let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
-            let gep =
-                bx.inbounds_gep(llvtable, &[bx.const_usize(u64::try_from(entry_idx).unwrap())]);
+            let gep = bx.inbounds_gep(
+                ptr_ty,
+                llvtable,
+                &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
+            );
             let new_vptr = bx.load(ptr_ty, gep, ptr_align);
             bx.nonnull_metadata(new_vptr);
             // Vtable loads are invariant.
diff --git a/compiler/rustc_codegen_ssa/src/meth.rs b/compiler/rustc_codegen_ssa/src/meth.rs
index b392b2c4ab8ac..efeec5b728413 100644
--- a/compiler/rustc_codegen_ssa/src/meth.rs
+++ b/compiler/rustc_codegen_ssa/src/meth.rs
@@ -23,7 +23,7 @@ impl<'a, 'tcx> VirtualIndex {
         let llty = bx.fn_ptr_backend_type(fn_abi);
         let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
+        let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(llty, gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant.
@@ -42,7 +42,7 @@ impl<'a, 'tcx> VirtualIndex {
         let llty = bx.type_isize();
         let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
-        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
+        let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(llty, gep, usize_align);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
index 56ff1b3934c13..75999225c031d 100644
--- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs
@@ -116,14 +116,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx)
             }
             sym::offset => {
+                let ty = substs.type_at(0);
+                let layout = bx.layout_of(ty);
                 let ptr = args[0].immediate();
                 let offset = args[1].immediate();
-                bx.inbounds_gep(ptr, &[offset])
+                bx.inbounds_gep(bx.backend_type(layout), ptr, &[offset])
             }
             sym::arith_offset => {
+                let ty = substs.type_at(0);
+                let layout = bx.layout_of(ty);
                 let ptr = args[0].immediate();
                 let offset = args[1].immediate();
-                bx.gep(ptr, &[offset])
+                bx.gep(bx.backend_type(layout), ptr, &[offset])
             }
             sym::copy => {
                 copy_intrinsic(
diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs
index 3e8386bc88fed..cfb2befdf9137 100644
--- a/compiler/rustc_codegen_ssa/src/mir/operand.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs
@@ -311,14 +311,15 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                     Abi::ScalarPair(ref a, ref b) => (a, b),
                     _ => bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout),
                 };
+                let ty = bx.backend_type(dest.layout);
                 let b_offset = a_scalar.value.size(bx).align_to(b_scalar.value.align(bx).abi);
 
-                let llptr = bx.struct_gep(dest.llval, 0);
+                let llptr = bx.struct_gep(ty, dest.llval, 0);
                 let val = bx.from_immediate(a);
                 let align = dest.align;
                 bx.store_with_flags(val, llptr, align, flags);
 
-                let llptr = bx.struct_gep(dest.llval, 1);
+                let llptr = bx.struct_gep(ty, dest.llval, 1);
                 let val = bx.from_immediate(b);
                 let align = dest.align.restrict_for_offset(b_offset);
                 bx.store_with_flags(val, llptr, align, flags);
diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs
index 66d9d1a1e0c49..20be46606a0aa 100644
--- a/compiler/rustc_codegen_ssa/src/mir/place.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/place.rs
@@ -103,12 +103,13 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                 if offset == a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi) =>
             {
                 // Offset matches second field.
-                bx.struct_gep(self.llval, 1)
+                let ty = bx.backend_type(self.layout);
+                bx.struct_gep(ty, self.llval, 1)
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                 // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                 let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
-                bx.gep(byte_ptr, &[bx.const_usize(offset.bytes())])
+                bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) => {
                 // All fields of Scalar and ScalarPair layouts must have been handled by this point.
@@ -119,7 +120,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
                     self.layout
                 );
             }
-            _ => bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)),
+            _ => {
+                let ty = bx.backend_type(self.layout);
+                bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
+            }
         };
         PlaceRef {
             // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
@@ -185,7 +189,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
 
         // Cast and adjust pointer.
         let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
-        let byte_ptr = bx.gep(byte_ptr, &[offset]);
+        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
 
         // Finally, cast back to the type expected.
         let ll_fty = bx.cx().backend_type(field);
@@ -380,7 +384,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         };
 
         PlaceRef {
-            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
+            llval: bx.inbounds_gep(
+                bx.cx().backend_type(self.layout),
+                self.llval,
+                &[bx.cx().const_usize(0), llindex],
+            ),
             llextra: None,
             layout,
             align: self.align.restrict_for_offset(offset),
diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
index 7e1dfeb2457eb..cbb401c63d152 100644
--- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
+++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs
@@ -636,7 +636,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::BinOp::BitOr => bx.or(lhs, rhs),
             mir::BinOp::BitAnd => bx.and(lhs, rhs),
             mir::BinOp::BitXor => bx.xor(lhs, rhs),
-            mir::BinOp::Offset => bx.inbounds_gep(lhs, &[rhs]),
+            mir::BinOp::Offset => {
+                let pointee_type = input_ty
+                    .builtin_deref(true)
+                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
+                    .ty;
+                let llty = bx.cx().backend_type(bx.cx().layout_of(pointee_type));
+                bx.inbounds_gep(llty, lhs, &[rhs])
+            }
             mir::BinOp::Shl => common::build_unchecked_lshift(bx, lhs, rhs),
             mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
             mir::BinOp::Ne
diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs
index f0c232a97bc94..f2c523148530f 100644
--- a/compiler/rustc_codegen_ssa/src/traits/builder.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs
@@ -176,9 +176,14 @@ pub trait BuilderMethods<'a, 'tcx>:
         size: Size,
     );
 
-    fn gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
-    fn inbounds_gep(&mut self, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
-    fn struct_gep(&mut self, ptr: Self::Value, idx: u64) -> Self::Value;
+    fn gep(&mut self, ty: Self::Type, ptr: Self::Value, indices: &[Self::Value]) -> Self::Value;
+    fn inbounds_gep(
+        &mut self,
+        ty: Self::Type,
+        ptr: Self::Value,
+        indices: &[Self::Value],
+    ) -> Self::Value;
+    fn struct_gep(&mut self, ty: Self::Type, ptr: Self::Value, idx: u64) -> Self::Value;
 
     fn trunc(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
     fn sext(&mut self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
index 4cdc8a4155bcc..7666803911e0b 100644
--- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
+++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp
@@ -1551,6 +1551,16 @@ extern "C" void LLVMRustSetLinkage(LLVMValueRef V,
   LLVMSetLinkage(V, fromRust(RustLinkage));
 }
 
+extern "C" LLVMValueRef LLVMRustConstInBoundsGEP2(LLVMTypeRef Ty,
+                                                  LLVMValueRef ConstantVal,
+                                                  LLVMValueRef *ConstantIndices,
+                                                  unsigned NumIndices) {
+  ArrayRef<Constant *> IdxList(unwrap<Constant>(ConstantIndices, NumIndices),
+                               NumIndices);
+  Constant *Val = unwrap<Constant>(ConstantVal);
+  return wrap(ConstantExpr::getInBoundsGetElementPtr(unwrap(Ty), Val, IdxList));
+}
+
 // Returns true if both high and low were successfully set. Fails in case constant wasn't any of
 // the common sizes (1, 8, 16, 32, 64, 128 bits)
 extern "C" bool LLVMRustConstInt128Get(LLVMValueRef CV, bool sext, uint64_t *high, uint64_t *low)
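
Note on the API this patch adopts: every GEP above moves from LLVM's deprecated typed-pointer builders (LLVMBuildGEP, LLVMBuildInBoundsGEP, LLVMBuildStructGEP, LLVMConstInBoundsGEP) to the explicit-type "GEP2" forms, which take the element type as an argument instead of deriving it from the pointer operand's pointee type. That is why each Rust-side caller now has to know the type it is indexing (bx.backend_type(layout), or bx.type_i8() for raw byte offsets). The standalone C sketch below shows the shape of the new calls against the public llvm-c API; it is illustrative only — the file name, function name, and compile line are assumptions, not part of this patch.

// gep2_demo.c -- minimal sketch of the explicit-type GEP2 llvm-c API.
// Assumed build line (requires an LLVM dev install with llvm-config):
//   cc gep2_demo.c $(llvm-config --cflags --ldflags --libs core) -o gep2_demo
#include <llvm-c/Core.h>

int main(void) {
    LLVMContextRef cx = LLVMContextCreate();
    LLVMModuleRef m = LLVMModuleCreateWithNameInContext("gep2_demo", cx);
    LLVMBuilderRef b = LLVMCreateBuilderInContext(cx);

    // Build `i32 demo(i32 *p)` that returns p[1]. Under the GEP2 API the
    // element type (i32 here) is passed explicitly to the GEP and the load.
    LLVMTypeRef i32ty = LLVMInt32TypeInContext(cx);
    LLVMTypeRef pty = LLVMPointerType(i32ty, 0);
    LLVMTypeRef fnty = LLVMFunctionType(i32ty, &pty, 1, 0);
    LLVMValueRef f = LLVMAddFunction(m, "demo", fnty);
    LLVMBasicBlockRef bb = LLVMAppendBasicBlockInContext(cx, f, "entry");
    LLVMPositionBuilderAtEnd(b, bb);

    LLVMValueRef idx = LLVMConstInt(LLVMInt64TypeInContext(cx), 1, 0);
    // Old form (what the patch removes): LLVMBuildInBoundsGEP(b, p, &idx, 1, "elt")
    LLVMValueRef gep =
        LLVMBuildInBoundsGEP2(b, i32ty, LLVMGetParam(f, 0), &idx, 1, "elt");
    LLVMBuildRet(b, LLVMBuildLoad2(b, i32ty, gep, "val"));

    LLVMDumpModule(m); // prints the IR so the emitted `getelementptr` is visible

    LLVMDisposeBuilder(b);
    LLVMDisposeModule(m);
    LLVMContextDispose(cx);
    return 0;
}

Carrying the element type alongside the pointer, rather than reading it out of the pointer's type, is the groundwork for LLVM's opaque-pointer transition, where pointer values no longer record a pointee type at all.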