diff --git a/.mailmap b/.mailmap index da8044de405a6..9587aaab35945 100644 --- a/.mailmap +++ b/.mailmap @@ -184,6 +184,7 @@ Neil Pankey Nick Platt Nicole Mazzuca Nif Ward +Oliver Middleton Oliver Scherer Oliver Scherer Oliver Scherer diff --git a/Cargo.lock b/Cargo.lock index f2eaf470658b3..57e4a22742759 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3256,13 +3256,9 @@ version = "1.0.0" dependencies = [ "byteorder", "crossbeam-utils 0.6.5", - "parking_lot 0.7.1", - "rand 0.6.1", - "scopeguard 0.3.3", "serde", "serde_json", "smallvec", - "syn 0.15.35", "winapi 0.3.6", ] diff --git a/src/liballoc/collections/linked_list/tests.rs b/src/liballoc/collections/linked_list/tests.rs index 9a6c57d286970..ecb5948f11b36 100644 --- a/src/liballoc/collections/linked_list/tests.rs +++ b/src/liballoc/collections/linked_list/tests.rs @@ -102,8 +102,8 @@ fn test_append() { assert_eq!(m.pop_front(), Some(elt)) } assert_eq!(n.len(), 0); - // let's make sure it's working properly, since we - // did some direct changes to private members + // Let's make sure it's working properly, since we + // did some direct changes to private members. n.push_back(3); assert_eq!(n.len(), 1); assert_eq!(n.pop_front(), Some(3)); diff --git a/src/liballoc/raw_vec.rs b/src/liballoc/raw_vec.rs index bc8a38f6b3aad..cf025eee4358b 100644 --- a/src/liballoc/raw_vec.rs +++ b/src/liballoc/raw_vec.rs @@ -19,26 +19,26 @@ mod tests; /// involved. This type is excellent for building your own data structures like Vec and VecDeque. /// In particular: /// -/// * Produces Unique::empty() on zero-sized types -/// * Produces Unique::empty() on zero-length allocations -/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics) -/// * Guards against 32-bit systems allocating more than isize::MAX bytes -/// * Guards against overflowing your length -/// * Aborts on OOM or calls handle_alloc_error as applicable -/// * Avoids freeing Unique::empty() -/// * Contains a ptr::Unique and thus endows the user with all related benefits +/// * Produces `Unique::empty()` on zero-sized types. +/// * Produces `Unique::empty()` on zero-length allocations. +/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics). +/// * Guards against 32-bit systems allocating more than isize::MAX bytes. +/// * Guards against overflowing your length. +/// * Aborts on OOM or calls `handle_alloc_error` as applicable. +/// * Avoids freeing `Unique::empty()`. +/// * Contains a `ptr::Unique` and thus endows the user with all related benefits. /// /// This type does not in anyway inspect the memory that it manages. When dropped it *will* -/// free its memory, but it *won't* try to Drop its contents. It is up to the user of RawVec -/// to handle the actual things *stored* inside of a RawVec. +/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec` +/// to handle the actual things *stored* inside of a `RawVec`. /// -/// Note that a RawVec always forces its capacity to be usize::MAX for zero-sized types. -/// This enables you to use capacity growing logic catch the overflows in your length +/// Note that a `RawVec` always forces its capacity to be `usize::MAX` for zero-sized types. +/// This enables you to use capacity-growing logic catch the overflows in your length /// that might occur with zero-sized types. /// -/// However this means that you need to be careful when round-tripping this type -/// with a `Box<[T]>`: `capacity()` won't yield the len. 
However `with_capacity`, -/// `shrink_to_fit`, and `from_box` will actually set RawVec's private capacity +/// The above means that you need to be careful when round-tripping this type with a +/// `Box<[T]>`, since `capacity()` won't yield the length. However, `with_capacity`, +/// `shrink_to_fit`, and `from_box` will actually set `RawVec`'s private capacity /// field. This allows zero-sized types to not be special-cased by consumers of /// this type. #[allow(missing_debug_implementations)] @@ -49,14 +49,14 @@ pub struct RawVec { } impl RawVec { - /// Like `new` but parameterized over the choice of allocator for - /// the returned RawVec. + /// Like `new`, but parameterized over the choice of allocator for + /// the returned `RawVec`. pub const fn new_in(a: A) -> Self { - // !0 is usize::MAX. This branch should be stripped at compile time. - // FIXME(mark-i-m): use this line when `if`s are allowed in `const` + // `!0` is `usize::MAX`. This branch should be stripped at compile time. + // FIXME(mark-i-m): use this line when `if`s are allowed in `const`: //let cap = if mem::size_of::() == 0 { !0 } else { 0 }; - // Unique::empty() doubles as "unallocated" and "zero-sized allocation" + // `Unique::empty()` doubles as "unallocated" and "zero-sized allocation". RawVec { ptr: Unique::empty(), // FIXME(mark-i-m): use `cap` when ifs are allowed in const @@ -65,15 +65,15 @@ impl RawVec { } } - /// Like `with_capacity` but parameterized over the choice of - /// allocator for the returned RawVec. + /// Like `with_capacity`, but parameterized over the choice of + /// allocator for the returned `RawVec`. #[inline] pub fn with_capacity_in(capacity: usize, a: A) -> Self { RawVec::allocate_in(capacity, false, a) } - /// Like `with_capacity_zeroed` but parameterized over the choice - /// of allocator for the returned RawVec. + /// Like `with_capacity_zeroed`, but parameterized over the choice + /// of allocator for the returned `RawVec`. #[inline] pub fn with_capacity_zeroed_in(capacity: usize, a: A) -> Self { RawVec::allocate_in(capacity, true, a) @@ -86,7 +86,7 @@ impl RawVec { let alloc_size = capacity.checked_mul(elem_size).unwrap_or_else(|| capacity_overflow()); alloc_guard(alloc_size).unwrap_or_else(|_| capacity_overflow()); - // handles ZSTs and `capacity = 0` alike + // Handles ZSTs and `capacity == 0` alike. let ptr = if alloc_size == 0 { NonNull::::dangling() } else { @@ -113,20 +113,20 @@ impl RawVec { } impl RawVec { - /// Creates the biggest possible RawVec (on the system heap) - /// without allocating. If T has positive size, then this makes a - /// RawVec with capacity 0. If T has 0 size, then it makes a - /// RawVec with capacity `usize::MAX`. Useful for implementing + /// Creates the biggest possible `RawVec` (on the system heap) + /// without allocating. If `T` has positive size, then this makes a + /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a + /// `RawVec` with capacity `usize::MAX`. Useful for implementing /// delayed allocation. pub const fn new() -> Self { Self::new_in(Global) } - /// Creates a RawVec (on the system heap) with exactly the + /// Creates a `RawVec` (on the system heap) with exactly the /// capacity and alignment requirements for a `[T; capacity]`. This is - /// equivalent to calling RawVec::new when `capacity` is 0 or T is + /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is /// zero-sized. Note that if `T` is zero-sized this means you will - /// *not* get a RawVec with the requested capacity! 
+ /// *not* get a `RawVec` with the requested capacity. /// /// # Panics /// @@ -136,13 +136,13 @@ impl RawVec { /// /// # Aborts /// - /// Aborts on OOM + /// Aborts on OOM. #[inline] pub fn with_capacity(capacity: usize) -> Self { RawVec::allocate_in(capacity, false, Global) } - /// Like `with_capacity` but guarantees the buffer is zeroed. + /// Like `with_capacity`, but guarantees the buffer is zeroed. #[inline] pub fn with_capacity_zeroed(capacity: usize) -> Self { RawVec::allocate_in(capacity, true, Global) @@ -150,13 +150,13 @@ impl RawVec { } impl RawVec { - /// Reconstitutes a RawVec from a pointer, capacity, and allocator. + /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator. /// /// # Undefined Behavior /// - /// The ptr must be allocated (via the given allocator `a`), and with the given capacity. The - /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems). - /// If the ptr and capacity come from a RawVec created via `a`, then this is guaranteed. + /// The `ptr` must be allocated (via the given allocator `a`), and with the given `capacity`. + /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems). + /// If the `ptr` and `capacity` come from a `RawVec` created via `a`, then this is guaranteed. pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, a: A) -> Self { RawVec { ptr: Unique::new_unchecked(ptr), @@ -167,13 +167,13 @@ impl RawVec { } impl RawVec { - /// Reconstitutes a RawVec from a pointer, capacity. + /// Reconstitutes a `RawVec` from a pointer and capacity. /// /// # Undefined Behavior /// - /// The ptr must be allocated (on the system heap), and with the given capacity. The - /// capacity cannot exceed `isize::MAX` (only a concern on 32-bit systems). - /// If the ptr and capacity come from a RawVec, then this is guaranteed. + /// The `ptr` must be allocated (on the system heap), and with the given `capacity`. + /// The `capacity` cannot exceed `isize::MAX` (only a concern on 32-bit systems). + /// If the `ptr` and `capacity` come from a `RawVec`, then this is guaranteed. pub unsafe fn from_raw_parts(ptr: *mut T, capacity: usize) -> Self { RawVec { ptr: Unique::new_unchecked(ptr), @@ -194,7 +194,7 @@ impl RawVec { impl RawVec { /// Gets a raw pointer to the start of the allocation. Note that this is - /// Unique::empty() if `capacity = 0` or T is zero-sized. In the former case, you must + /// `Unique::empty()` if `capacity == 0` or `T` is zero-sized. In the former case, you must /// be careful. pub fn ptr(&self) -> *mut T { self.ptr.as_ptr() @@ -212,12 +212,12 @@ impl RawVec { } } - /// Returns a shared reference to the allocator backing this RawVec. + /// Returns a shared reference to the allocator backing this `RawVec`. pub fn alloc(&self) -> &A { &self.a } - /// Returns a mutable reference to the allocator backing this RawVec. + /// Returns a mutable reference to the allocator backing this `RawVec`. pub fn alloc_mut(&mut self) -> &mut A { &mut self.a } @@ -247,7 +247,7 @@ impl RawVec { /// /// # Panics /// - /// * Panics if T is zero-sized on the assumption that you managed to exhaust + /// * Panics if `T` is zero-sized on the assumption that you managed to exhaust /// all `usize::MAX` slots in your imaginary buffer. /// * Panics on 32-bit platforms if the requested capacity exceeds /// `isize::MAX` bytes. 
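As a quick illustration of the zero-sized-type behavior documented above: `RawVec` itself is internal to `liballoc`, but the capacity rule it describes (capacity forced to `usize::MAX` for zero-sized types, with no allocation) is observable through `Vec`, which is built on top of it. A minimal sketch, assuming only the public `Vec` API:

```rust
fn main() {
    // For a zero-sized element type, `Vec` (backed by `RawVec`) reports
    // capacity `usize::MAX` without ever touching the allocator.
    let zst: Vec<()> = Vec::new();
    assert_eq!(zst.capacity(), usize::MAX);

    // For a sized element type, an empty `Vec` has capacity 0 and likewise
    // performs no allocation until it is first grown.
    let bytes: Vec<u8> = Vec::new();
    assert_eq!(bytes.capacity(), 0);
}
```

This is also why the growth paths below can treat being reached with a zero-sized type as a "capacity overflow": a zero-sized `RawVec` starts at `usize::MAX` capacity, so it can never be legitimately full.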
@@ -290,20 +290,20 @@ impl RawVec { unsafe { let elem_size = mem::size_of::(); - // since we set the capacity to usize::MAX when elem_size is - // 0, getting to here necessarily means the RawVec is overfull. + // Since we set the capacity to `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the `RawVec` is overfull. assert!(elem_size != 0, "capacity overflow"); let (new_cap, uniq) = match self.current_layout() { Some(cur) => { // Since we guarantee that we never allocate more than - // isize::MAX bytes, `elem_size * self.cap <= isize::MAX` as + // `isize::MAX` bytes, `elem_size * self.cap <= isize::MAX` as // a precondition, so this can't overflow. Additionally the // alignment will never be too large as to "not be // satisfiable", so `Layout::from_size_align` will always // return `Some`. // - // tl;dr; we bypass runtime checks due to dynamic assertions + // TL;DR, we bypass runtime checks due to dynamic assertions // in this module, allowing us to use // `from_size_align_unchecked`. let new_cap = 2 * self.cap; @@ -320,8 +320,8 @@ impl RawVec { } } None => { - // skip to 4 because tiny Vec's are dumb; but not if that - // would cause overflow + // Skip to 4 because tiny `Vec`'s are dumb; but not if that + // would cause overflow. let new_cap = if elem_size > (!0) / 8 { 1 } else { 4 }; match self.a.alloc_array::(new_cap) { Ok(ptr) => (new_cap, ptr.into()), @@ -342,7 +342,7 @@ impl RawVec { /// /// # Panics /// - /// * Panics if T is zero-sized on the assumption that you managed to exhaust + /// * Panics if `T` is zero-sized on the assumption that you managed to exhaust /// all `usize::MAX` slots in your imaginary buffer. /// * Panics on 32-bit platforms if the requested capacity exceeds /// `isize::MAX` bytes. @@ -356,15 +356,15 @@ impl RawVec { None => return false, // nothing to double }; - // since we set the capacity to usize::MAX when elem_size is - // 0, getting to here necessarily means the RawVec is overfull. + // Since we set the capacity to `usize::MAX` when `elem_size` is + // 0, getting to here necessarily means the `RawVec` is overfull. assert!(elem_size != 0, "capacity overflow"); - // Since we guarantee that we never allocate more than isize::MAX + // Since we guarantee that we never allocate more than `isize::MAX` // bytes, `elem_size * self.cap <= isize::MAX` as a precondition, so // this can't overflow. // - // Similarly like with `double` above we can go straight to + // Similarly to with `double` above, we can go straight to // `Layout::from_size_align_unchecked` as we know this won't // overflow and the alignment is sufficiently small. let new_cap = 2 * self.cap; @@ -409,7 +409,7 @@ impl RawVec { /// /// # Aborts /// - /// Aborts on OOM + /// Aborts on OOM. pub fn reserve_exact(&mut self, used_capacity: usize, needed_extra_capacity: usize) { match self.reserve_internal(used_capacity, needed_extra_capacity, Infallible, Exact) { Err(CapacityOverflow) => capacity_overflow(), @@ -424,7 +424,7 @@ impl RawVec { fn amortized_new_size(&self, used_capacity: usize, needed_extra_capacity: usize) -> Result { - // Nothing we can really do about these checks :( + // Nothing we can really do about these checks, sadly. let required_cap = used_capacity.checked_add(needed_extra_capacity) .ok_or(CapacityOverflow)?; // Cannot overflow, because `cap <= isize::MAX`, and type of `cap` is `usize`. @@ -459,7 +459,7 @@ impl RawVec { /// /// # Aborts /// - /// Aborts on OOM + /// Aborts on OOM. 
/// /// # Examples /// @@ -538,7 +538,7 @@ impl RawVec { // Here, `cap < used_capacity + needed_extra_capacity <= new_cap` // (regardless of whether `self.cap - used_capacity` wrapped). - // Therefore we can safely call grow_in_place. + // Therefore, we can safely call `grow_in_place`. let new_layout = Layout::new::().repeat(new_cap).unwrap().0; // FIXME: may crash and burn on over-reserve @@ -576,14 +576,14 @@ impl RawVec { return; } - // This check is my waterloo; it's the only thing Vec wouldn't have to do. + // This check is my waterloo; it's the only thing `Vec` wouldn't have to do. assert!(self.cap >= amount, "Tried to shrink to a larger capacity"); if amount == 0 { // We want to create a new zero-length vector within the - // same allocator. We use ptr::write to avoid an + // same allocator. We use `ptr::write` to avoid an // erroneous attempt to drop the contents, and we use - // ptr::read to sidestep condition against destructuring + // `ptr::read` to sidestep condition against destructuring // types that implement Drop. unsafe { @@ -600,7 +600,7 @@ impl RawVec { // // We also know that `self.cap` is greater than `amount`, and // consequently we don't need runtime checks for creating either - // layout + // layout. let old_size = elem_size * self.cap; let new_size = elem_size * amount; let align = mem::align_of::(); @@ -653,7 +653,7 @@ impl RawVec { return Ok(()); } - // Nothing we can really do about these checks :( + // Nothing we can really do about these checks, sadly. let new_cap = match strategy { Exact => used_capacity.checked_add(needed_extra_capacity).ok_or(CapacityOverflow)?, Amortized => self.amortized_new_size(used_capacity, needed_extra_capacity)?, @@ -692,7 +692,7 @@ impl RawVec { /// Converts the entire buffer into `Box<[T]>`. /// /// Note that this will correctly reconstitute any `cap` changes - /// that may have been performed. (see description of type for details) + /// that may have been performed. (See description of type for details.) /// /// # Undefined Behavior /// @@ -700,7 +700,7 @@ impl RawVec { /// the rules around uninitialized boxed values are not finalized yet, /// but until they are, it is advisable to avoid them. pub unsafe fn into_box(self) -> Box<[T]> { - // NOTE: not calling `capacity()` here, actually using the real `cap` field! + // NOTE: not calling `capacity()` here; actually using the real `cap` field! let slice = slice::from_raw_parts_mut(self.ptr(), self.cap); let output: Box<[T]> = Box::from_raw(slice); mem::forget(self); @@ -709,7 +709,7 @@ impl RawVec { } impl RawVec { - /// Frees the memory owned by the RawVec *without* trying to Drop its contents. + /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. pub unsafe fn dealloc_buffer(&mut self) { let elem_size = mem::size_of::(); if elem_size != 0 { @@ -721,22 +721,20 @@ impl RawVec { } unsafe impl<#[may_dangle] T, A: Alloc> Drop for RawVec { - /// Frees the memory owned by the RawVec *without* trying to Drop its contents. + /// Frees the memory owned by the `RawVec` *without* trying to drop its contents. fn drop(&mut self) { unsafe { self.dealloc_buffer(); } } } - - // We need to guarantee the following: -// * We don't ever allocate `> isize::MAX` byte-size objects -// * We don't overflow `usize::MAX` and actually allocate too little +// * We don't ever allocate `> isize::MAX` byte-size objects. +// * We don't overflow `usize::MAX` and actually allocate too little. 
// // On 64-bit we just need to check for overflow since trying to allocate // `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add // an extra guard for this in case we're running on a platform which can use -// all 4GB in user-space. e.g., PAE or x32 +// all 4GB in user-space, e.g., PAE or x32. #[inline] fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { @@ -751,5 +749,5 @@ fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { // ensure that the code generation related to these panics is minimal as there's // only one location which panics rather than a bunch throughout the module. fn capacity_overflow() -> ! { - panic!("capacity overflow") + panic!("capacity overflow"); } diff --git a/src/liballoc/raw_vec/tests.rs b/src/liballoc/raw_vec/tests.rs index c389898d1ef04..d35b62fc1ef15 100644 --- a/src/liballoc/raw_vec/tests.rs +++ b/src/liballoc/raw_vec/tests.rs @@ -5,12 +5,12 @@ fn allocator_param() { use crate::alloc::AllocErr; // Writing a test of integration between third-party - // allocators and RawVec is a little tricky because the RawVec + // allocators and `RawVec` is a little tricky because the `RawVec` // API does not expose fallible allocation methods, so we // cannot check what happens when allocator is exhausted // (beyond detecting a panic). // - // Instead, this just checks that the RawVec methods do at + // Instead, this just checks that the `RawVec` methods do at // least go through the Allocator API when it reserves // storage. @@ -44,7 +44,7 @@ fn allocator_param() { fn reserve_does_not_overallocate() { { let mut v: RawVec = RawVec::new(); - // First `reserve` allocates like `reserve_exact` + // First, `reserve` allocates like `reserve_exact`. v.reserve(0, 9); assert_eq!(9, v.capacity()); } diff --git a/src/liballoc/rc.rs b/src/liballoc/rc.rs index 2b222caf13f3d..439f24e95ca0d 100644 --- a/src/liballoc/rc.rs +++ b/src/liballoc/rc.rs @@ -567,7 +567,7 @@ impl Rc { /// let x = Rc::from_raw(x_ptr); /// assert_eq!(&*x, "hello"); /// - /// // Further calls to `Rc::from_raw(x_ptr)` would be memory unsafe. + /// // Further calls to `Rc::from_raw(x_ptr)` would be memory-unsafe. /// } /// /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! diff --git a/src/liballoc/sync.rs b/src/liballoc/sync.rs index 9ffc1673e5ab8..3bf22a87feaa7 100644 --- a/src/liballoc/sync.rs +++ b/src/liballoc/sync.rs @@ -547,7 +547,7 @@ impl Arc { /// let x = Arc::from_raw(x_ptr); /// assert_eq!(&*x, "hello"); /// - /// // Further calls to `Arc::from_raw(x_ptr)` would be memory unsafe. + /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe. /// } /// /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling! diff --git a/src/liballoc/vec.rs b/src/liballoc/vec.rs index d5dc2d4b8688d..c513658c842e5 100644 --- a/src/liballoc/vec.rs +++ b/src/liballoc/vec.rs @@ -685,21 +685,25 @@ impl Vec { /// [`drain`]: #method.drain #[stable(feature = "rust1", since = "1.0.0")] pub fn truncate(&mut self, len: usize) { - let current_len = self.len; - unsafe { - let mut ptr = self.as_mut_ptr().add(self.len); - // Set the final length at the end, keeping in mind that - // dropping an element might panic. 
Works around a missed - // optimization, as seen in the following issue: - // https://github.com/rust-lang/rust/issues/51802 - let mut local_len = SetLenOnDrop::new(&mut self.len); + if mem::needs_drop::() { + let current_len = self.len; + unsafe { + let mut ptr = self.as_mut_ptr().add(self.len); + // Set the final length at the end, keeping in mind that + // dropping an element might panic. Works around a missed + // optimization, as seen in the following issue: + // https://github.com/rust-lang/rust/issues/51802 + let mut local_len = SetLenOnDrop::new(&mut self.len); - // drop any extra elements - for _ in len..current_len { - local_len.decrement_len(1); - ptr = ptr.offset(-1); - ptr::drop_in_place(ptr); + // drop any extra elements + for _ in len..current_len { + local_len.decrement_len(1); + ptr = ptr.offset(-1); + ptr::drop_in_place(ptr); + } } + } else if len <= self.len { + self.len = len; } } diff --git a/src/libcore/any.rs b/src/libcore/any.rs index e8a0a88f12a7e..0afbf4f134679 100644 --- a/src/libcore/any.rs +++ b/src/libcore/any.rs @@ -153,13 +153,13 @@ impl dyn Any { #[stable(feature = "rust1", since = "1.0.0")] #[inline] pub fn is(&self) -> bool { - // Get TypeId of the type this function is instantiated with + // Get `TypeId` of the type this function is instantiated with. let t = TypeId::of::(); - // Get TypeId of the type in the trait object + // Get `TypeId` of the type in the trait object. let concrete = self.type_id(); - // Compare both TypeIds on equality + // Compare both `TypeId`s on equality. t == concrete } diff --git a/src/libcore/marker.rs b/src/libcore/marker.rs index 89af2528c052a..347e7dce6e67d 100644 --- a/src/libcore/marker.rs +++ b/src/libcore/marker.rs @@ -602,10 +602,10 @@ unsafe impl Freeze for *mut T {} unsafe impl Freeze for &T {} unsafe impl Freeze for &mut T {} -/// Types which can be safely moved after being pinned. +/// Types that can be safely moved after being pinned. /// /// Since Rust itself has no notion of immovable types, and considers moves -/// (e.g. through assignment or [`mem::replace`]) to always be safe, +/// (e.g., through assignment or [`mem::replace`]) to always be safe, /// this trait cannot prevent types from moving by itself. /// /// Instead it is used to prevent moves through the type system, diff --git a/src/libcore/ptr/mod.rs b/src/libcore/ptr/mod.rs index f5fbd1a6b1325..13ccc9b252a77 100644 --- a/src/libcore/ptr/mod.rs +++ b/src/libcore/ptr/mod.rs @@ -1042,7 +1042,7 @@ impl *const T { (self as *const u8) == null() } - /// Cast to a pointer to a different type + /// Casts to a pointer of another type. #[stable(feature = "ptr_cast", since = "1.38.0")] #[inline] pub const fn cast(self) -> *const U { @@ -1726,7 +1726,7 @@ impl *mut T { (self as *mut u8) == null_mut() } - /// Cast to a pointer to a different type + /// Casts to a pointer of another type. #[stable(feature = "ptr_cast", since = "1.38.0")] #[inline] pub const fn cast(self) -> *mut U { diff --git a/src/libcore/ptr/non_null.rs b/src/libcore/ptr/non_null.rs index ad3d1ce396ab7..7dcd57f1f9858 100644 --- a/src/libcore/ptr/non_null.rs +++ b/src/libcore/ptr/non_null.rs @@ -125,7 +125,7 @@ impl NonNull { &mut *self.as_ptr() } - /// Cast to a pointer of another type + /// Casts to a pointer of another type. 
#[stable(feature = "nonnull_cast", since = "1.27.0")] #[inline] pub const fn cast(self) -> NonNull { diff --git a/src/librustc/hir/mod.rs b/src/librustc/hir/mod.rs index f5e644625729b..2c8590aa4e3fa 100644 --- a/src/librustc/hir/mod.rs +++ b/src/librustc/hir/mod.rs @@ -190,7 +190,7 @@ pub enum ParamName { Fresh(usize), /// Indicates an illegal name was given and an error has been - /// repored (so we should squelch other derived errors). Occurs + /// reported (so we should squelch other derived errors). Occurs /// when, e.g., `'_` is used in the wrong place. Error, } diff --git a/src/librustc/infer/error_reporting/mod.rs b/src/librustc/infer/error_reporting/mod.rs index 5883be6e26883..ab24b3f2f059f 100644 --- a/src/librustc/infer/error_reporting/mod.rs +++ b/src/librustc/infer/error_reporting/mod.rs @@ -55,7 +55,8 @@ use crate::hir::def_id::DefId; use crate::hir::Node; use crate::infer::opaque_types; use crate::middle::region; -use crate::traits::{ObligationCause, ObligationCauseCode}; +use crate::traits::{IfExpressionCause, MatchExpressionArmCause, ObligationCause}; +use crate::traits::{ObligationCauseCode}; use crate::ty::error::TypeError; use crate::ty::{self, subst::{Subst, SubstsRef}, Region, Ty, TyCtxt, TypeFoldable}; use errors::{Applicability, DiagnosticBuilder, DiagnosticStyledString}; @@ -624,13 +625,13 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { } } } - ObligationCauseCode::MatchExpressionArm { + ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause { source, ref prior_arms, last_ty, discrim_hir_id, .. - } => match source { + }) => match source { hir::MatchSource::IfLetDesugar { .. } => { let msg = "`if let` arms have incompatible types"; err.span_label(cause.span, msg); @@ -681,7 +682,7 @@ impl<'a, 'tcx> InferCtxt<'a, 'tcx> { } } }, - ObligationCauseCode::IfExpression { then, outer, semicolon } => { + ObligationCauseCode::IfExpression(box IfExpressionCause { then, outer, semicolon }) => { err.span_label(then, "expected because of this"); outer.map(|sp| err.span_label(sp, "if and else have incompatible types")); if let Some(sp) = semicolon { @@ -1622,13 +1623,15 @@ impl<'tcx> ObligationCause<'tcx> { use crate::traits::ObligationCauseCode::*; match self.code { CompareImplMethodObligation { .. } => Error0308("method not compatible with trait"), - MatchExpressionArm { source, .. } => Error0308(match source { - hir::MatchSource::IfLetDesugar { .. } => "`if let` arms have incompatible types", - hir::MatchSource::TryDesugar => { - "try expression alternatives have incompatible types" - } - _ => "match arms have incompatible types", - }), + MatchExpressionArm(box MatchExpressionArmCause { source, .. }) => + Error0308(match source { + hir::MatchSource::IfLetDesugar { .. } => + "`if let` arms have incompatible types", + hir::MatchSource::TryDesugar => { + "try expression alternatives have incompatible types" + } + _ => "match arms have incompatible types", + }), IfExpression { .. } => Error0308("if and else have incompatible types"), IfExpressionWithNoElse => Error0317("if may be missing an else clause"), MainFunctionType => Error0580("main function has wrong type"), @@ -1656,7 +1659,7 @@ impl<'tcx> ObligationCause<'tcx> { match self.code { CompareImplMethodObligation { .. } => "method type is compatible with trait", ExprAssignable => "expression is assignable", - MatchExpressionArm { source, .. } => match source { + MatchExpressionArm(box MatchExpressionArmCause { source, .. }) => match source { hir::MatchSource::IfLetDesugar { .. 
} => "`if let` arms have compatible types", _ => "match arms have compatible types", }, diff --git a/src/librustc/mir/interpret/value.rs b/src/librustc/mir/interpret/value.rs index d72d879059369..b8bc741419738 100644 --- a/src/librustc/mir/interpret/value.rs +++ b/src/librustc/mir/interpret/value.rs @@ -17,8 +17,8 @@ pub struct RawConst<'tcx> { pub ty: Ty<'tcx>, } -/// Represents a constant value in Rust. `Scalar` and `ScalarPair` are optimizations that -/// match the `LocalState` optimizations for easy conversions between `Value` and `ConstValue`. +/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for +/// array length computations, enum discriminants and the pattern matching logic. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, RustcEncodable, RustcDecodable, Hash, HashStable)] pub enum ConstValue<'tcx> { diff --git a/src/librustc/traits/fulfill.rs b/src/librustc/traits/fulfill.rs index c1de4939c1d91..4494c034d51e2 100644 --- a/src/librustc/traits/fulfill.rs +++ b/src/librustc/traits/fulfill.rs @@ -68,6 +68,10 @@ pub struct PendingPredicateObligation<'tcx> { pub stalled_on: Vec>, } +// `PendingPredicateObligation` is used a lot. Make sure it doesn't unintentionally get bigger. +#[cfg(target_arch = "x86_64")] +static_assert_size!(PendingPredicateObligation<'_>, 136); + impl<'a, 'tcx> FulfillmentContext<'tcx> { /// Creates a new fulfillment context. pub fn new() -> FulfillmentContext<'tcx> { diff --git a/src/librustc/traits/mod.rs b/src/librustc/traits/mod.rs index 1ca92d79fa5f6..d2683090add40 100644 --- a/src/librustc/traits/mod.rs +++ b/src/librustc/traits/mod.rs @@ -123,6 +123,10 @@ pub struct Obligation<'tcx, T> { pub type PredicateObligation<'tcx> = Obligation<'tcx, ty::Predicate<'tcx>>; pub type TraitObligation<'tcx> = Obligation<'tcx, ty::PolyTraitPredicate<'tcx>>; +// `PredicateObligation` is used a lot. Make sure it doesn't unintentionally get bigger. +#[cfg(target_arch = "x86_64")] +static_assert_size!(PredicateObligation<'_>, 112); + /// The reason why we incurred this obligation; used for error reporting. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct ObligationCause<'tcx> { @@ -147,7 +151,8 @@ impl<'tcx> ObligationCause<'tcx> { ObligationCauseCode::StartFunctionType => { tcx.sess.source_map().def_span(self.span) } - ObligationCauseCode::MatchExpressionArm { arm_span, .. } => arm_span, + ObligationCauseCode::MatchExpressionArm( + box MatchExpressionArmCause { arm_span, .. }) => arm_span, _ => self.span, } } @@ -223,23 +228,13 @@ pub enum ObligationCauseCode<'tcx> { ExprAssignable, /// Computing common supertype in the arms of a match expression - MatchExpressionArm { - arm_span: Span, - source: hir::MatchSource, - prior_arms: Vec, - last_ty: Ty<'tcx>, - discrim_hir_id: hir::HirId, - }, + MatchExpressionArm(Box>), /// Computing common supertype in the pattern guard for the arms of a match expression MatchExpressionArmPattern { span: Span, ty: Ty<'tcx> }, /// Computing common supertype in an if expression - IfExpression { - then: Span, - outer: Option, - semicolon: Option, - }, + IfExpression(Box), /// Computing common supertype of an if expression with no else counter-part IfExpressionWithNoElse, @@ -269,6 +264,26 @@ pub enum ObligationCauseCode<'tcx> { TrivialBound, } +// `ObligationCauseCode` is used a lot. Make sure it doesn't unintentionally get bigger. 
+#[cfg(target_arch = "x86_64")] +static_assert_size!(ObligationCauseCode<'_>, 32); + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct MatchExpressionArmCause<'tcx> { + pub arm_span: Span, + pub source: hir::MatchSource, + pub prior_arms: Vec, + pub last_ty: Ty<'tcx>, + pub discrim_hir_id: hir::HirId, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct IfExpressionCause { + pub then: Span, + pub outer: Option, + pub semicolon: Option, +} + #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct DerivedObligationCause<'tcx> { /// The trait reference of the parent obligation that led to the diff --git a/src/librustc/traits/structural_impls.rs b/src/librustc/traits/structural_impls.rs index 05b698eb4c4ea..6930c9368282b 100644 --- a/src/librustc/traits/structural_impls.rs +++ b/src/librustc/traits/structural_impls.rs @@ -508,31 +508,33 @@ impl<'a, 'tcx> Lift<'tcx> for traits::ObligationCauseCode<'a> { trait_item_def_id, }), super::ExprAssignable => Some(super::ExprAssignable), - super::MatchExpressionArm { + super::MatchExpressionArm(box super::MatchExpressionArmCause { arm_span, source, ref prior_arms, last_ty, discrim_hir_id, - } => { + }) => { tcx.lift(&last_ty).map(|last_ty| { - super::MatchExpressionArm { + super::MatchExpressionArm(box super::MatchExpressionArmCause { arm_span, source, prior_arms: prior_arms.clone(), last_ty, discrim_hir_id, - } + }) }) } super::MatchExpressionArmPattern { span, ty } => { tcx.lift(&ty).map(|ty| super::MatchExpressionArmPattern { span, ty }) } - super::IfExpression { then, outer, semicolon } => Some(super::IfExpression { - then, - outer, - semicolon, - }), + super::IfExpression(box super::IfExpressionCause { then, outer, semicolon }) => { + Some(super::IfExpression(box super::IfExpressionCause { + then, + outer, + semicolon, + })) + } super::IfExpressionWithNoElse => Some(super::IfExpressionWithNoElse), super::MainFunctionType => Some(super::MainFunctionType), super::StartFunctionType => Some(super::StartFunctionType), diff --git a/src/librustc/ty/context.rs b/src/librustc/ty/context.rs index 8e8472a5aacc9..25d921b7cea4c 100644 --- a/src/librustc/ty/context.rs +++ b/src/librustc/ty/context.rs @@ -2396,9 +2396,9 @@ impl<'tcx> TyCtxt<'tcx> { } #[inline] - pub fn mk_lang_item(self, ty: Ty<'tcx>, item: lang_items::LangItem) -> Ty<'tcx> { - let def_id = self.require_lang_item(item, None); - self.mk_generic_adt(def_id, ty) + pub fn mk_lang_item(self, ty: Ty<'tcx>, item: lang_items::LangItem) -> Option> { + let def_id = self.lang_items().require(item).ok()?; + Some(self.mk_generic_adt(def_id, ty)) } #[inline] diff --git a/src/librustc_codegen_llvm/abi.rs b/src/librustc_codegen_llvm/abi.rs index ff87afe0c444b..2ca517dc3b1a7 100644 --- a/src/librustc_codegen_llvm/abi.rs +++ b/src/librustc_codegen_llvm/abi.rs @@ -229,7 +229,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> { // We instead thus allocate some scratch space... let scratch_size = cast.size(bx); let scratch_align = cast.align(bx); - let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align); + let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align); bx.lifetime_start(llscratch, scratch_size); // ...where we first store the value... 
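The `static_assert_size!` additions and the new `MatchExpressionArmCause`/`IfExpressionCause` boxes above are all aimed at keeping heavily used enums small. A minimal sketch of that technique, using made-up types rather than the real `ObligationCauseCode`, to show how boxing a large variant shrinks the enum itself:

```rust
use std::mem::size_of;

// Illustrative payload standing in for a large cause struct.
#[allow(dead_code)]
struct BigPayload {
    spans: Vec<u32>,
    a: u64,
    b: u64,
    c: u64,
}

// Every value of this enum pays for its largest variant inline.
#[allow(dead_code)]
enum Inline {
    Small,
    Big(BigPayload),
}

// Boxing moves the payload behind a pointer, so the enum stays small.
#[allow(dead_code)]
enum Boxed {
    Small,
    Big(Box<BigPayload>),
}

fn main() {
    assert!(size_of::<Boxed>() < size_of::<Inline>());
    println!(
        "inline: {} bytes, boxed: {} bytes",
        size_of::<Inline>(),
        size_of::<Boxed>()
    );
}
```

The `static_assert_size!` calls then pin the resulting sizes (136, 112, and 32 bytes on x86_64) so that a later change cannot grow these types unnoticed.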
diff --git a/src/librustc_codegen_llvm/builder.rs b/src/librustc_codegen_llvm/builder.rs index e13a5ecc2ebfd..423a01ad1f937 100644 --- a/src/librustc_codegen_llvm/builder.rs +++ b/src/librustc_codegen_llvm/builder.rs @@ -387,23 +387,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { ) } - fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { let mut bx = Builder::with_cx(self.cx); bx.position_at_start(unsafe { llvm::LLVMGetFirstBasicBlock(self.llfn()) }); - bx.dynamic_alloca(ty, name, align) + bx.dynamic_alloca(ty, align) } - fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value { + fn dynamic_alloca(&mut self, ty: &'ll Type, align: Align) -> &'ll Value { unsafe { - let alloca = if name.is_empty() { - llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED) - } else { - let name = SmallCStr::new(name); - llvm::LLVMBuildAlloca(self.llbuilder, ty, - name.as_ptr()) - }; + let alloca = llvm::LLVMBuildAlloca(self.llbuilder, ty, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } @@ -412,16 +406,9 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { fn array_alloca(&mut self, ty: &'ll Type, len: &'ll Value, - name: &str, align: Align) -> &'ll Value { unsafe { - let alloca = if name.is_empty() { - llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED) - } else { - let name = SmallCStr::new(name); - llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, - name.as_ptr()) - }; + let alloca = llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len, UNNAMED); llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint); alloca } diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs index fc0b9ffd11d83..5fbfe9138f2a4 100644 --- a/src/librustc_codegen_llvm/intrinsic.rs +++ b/src/librustc_codegen_llvm/intrinsic.rs @@ -871,7 +871,7 @@ fn codegen_msvc_try( // More information can be found in libstd's seh.rs implementation. let i64p = bx.type_ptr_to(bx.type_i64()); let ptr_align = bx.tcx().data_layout.pointer_align.abi; - let slot = bx.alloca(i64p, "slot", ptr_align); + let slot = bx.alloca(i64p, ptr_align); bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None); normal.ret(bx.const_i32(0)); diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs index 915006938fe56..8829a33992ae3 100644 --- a/src/librustc_codegen_ssa/mir/block.rs +++ b/src/librustc_codegen_ssa/mir/block.rs @@ -276,7 +276,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let llslot = match op.val { Immediate(_) | Pair(..) => { let scratch = - PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout, "ret"); + PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout); op.val.store(&mut bx, scratch); scratch.llval } @@ -767,7 +767,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { match (arg, op.val) { (&mir::Operand::Copy(_), Ref(_, None, _)) | (&mir::Operand::Constant(_), Ref(_, None, _)) => { - let tmp = PlaceRef::alloca(&mut bx, op.layout, "const"); + let tmp = PlaceRef::alloca(&mut bx, op.layout); op.val.store(&mut bx, tmp); op.val = Ref(tmp.llval, None, tmp.align); } @@ -925,7 +925,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { Immediate(_) | Pair(..) => { match arg.mode { PassMode::Indirect(..) 
| PassMode::Cast(_) => { - let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); + let scratch = PlaceRef::alloca(bx, arg.layout); op.val.store(bx, scratch); (scratch.llval, scratch.align, true) } @@ -940,7 +940,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't // have scary latent bugs around. - let scratch = PlaceRef::alloca(bx, arg.layout, "arg"); + let scratch = PlaceRef::alloca(bx, arg.layout); base::memcpy_ty(bx, scratch.llval, scratch.align, llval, align, op.layout, MemFlags::empty()); (scratch.llval, scratch.align, true) @@ -1017,7 +1017,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { cx.tcx().mk_mut_ptr(cx.tcx().types.u8), cx.tcx().types.i32 ])); - let slot = PlaceRef::alloca(bx, layout, "personalityslot"); + let slot = PlaceRef::alloca(bx, layout); self.personality_slot = Some(slot); slot } @@ -1116,7 +1116,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { return if fn_ret.is_indirect() { // Odd, but possible, case, we have an operand temporary, // but the calling convention has an indirect return. - let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret"); + let tmp = PlaceRef::alloca(bx, fn_ret.layout); tmp.storage_live(bx); llargs.push(tmp.llval); ReturnDest::IndirectOperand(tmp, index) @@ -1124,7 +1124,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // Currently, intrinsics always need a location to store // the result, so we create a temporary `alloca` for the // result. - let tmp = PlaceRef::alloca(bx, fn_ret.layout, "tmp_ret"); + let tmp = PlaceRef::alloca(bx, fn_ret.layout); tmp.storage_live(bx); ReturnDest::IndirectOperand(tmp, index) } else { @@ -1174,7 +1174,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { LocalRef::Operand(None) => { let dst_layout = bx.layout_of(self.monomorphized_place_ty(&dst.as_ref())); assert!(!dst_layout.ty.has_erasable_regions()); - let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp"); + let place = PlaceRef::alloca(bx, dst_layout); place.storage_live(bx); self.codegen_transmute_into(bx, src, place); let op = bx.load_operand(place); @@ -1227,7 +1227,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { DirectOperand(index) => { // If there is a cast, we have to store and reload. 
let op = if let PassMode::Cast(_) = ret_ty.mode { - let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret"); + let tmp = PlaceRef::alloca(bx, ret_ty.layout); tmp.storage_live(bx); bx.store_arg_ty(&ret_ty, llval, tmp); let op = bx.load_operand(tmp); diff --git a/src/librustc_codegen_ssa/mir/mod.rs b/src/librustc_codegen_ssa/mir/mod.rs index 00e9ca01f4dd2..aa3971a1da81a 100644 --- a/src/librustc_codegen_ssa/mir/mod.rs +++ b/src/librustc_codegen_ssa/mir/mod.rs @@ -268,11 +268,13 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( debug!("alloc: {:?} ({}) -> place", local, name); if layout.is_unsized() { let indirect_place = - PlaceRef::alloca_unsized_indirect(&mut bx, layout, &name.as_str()); + PlaceRef::alloca_unsized_indirect(&mut bx, layout); + bx.set_var_name(indirect_place.llval, name); // FIXME: add an appropriate debuginfo LocalRef::UnsizedPlace(indirect_place) } else { - let place = PlaceRef::alloca(&mut bx, layout, &name.as_str()); + let place = PlaceRef::alloca(&mut bx, layout); + bx.set_var_name(place.llval, name); if dbg { let (scope, span) = fx.debug_loc(mir::SourceInfo { span: decl.source_info.span, @@ -293,14 +295,13 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( } else if memory_locals.contains(local) { debug!("alloc: {:?} -> place", local); if layout.is_unsized() { - let indirect_place = PlaceRef::alloca_unsized_indirect( - &mut bx, - layout, - &format!("{:?}", local), - ); + let indirect_place = PlaceRef::alloca_unsized_indirect(&mut bx, layout); + bx.set_var_name(indirect_place.llval, format_args!("{:?}", local)); LocalRef::UnsizedPlace(indirect_place) } else { - LocalRef::Place(PlaceRef::alloca(&mut bx, layout, &format!("{:?}", local))) + let place = PlaceRef::alloca(&mut bx, layout); + bx.set_var_name(place.llval, format_args!("{:?}", local)); + LocalRef::Place(place) } } else { // If this is an immediate local, we do not create an @@ -452,10 +453,11 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( mir.args_iter().enumerate().map(|(arg_index, local)| { let arg_decl = &mir.local_decls[local]; + // FIXME(eddyb) don't allocate a `String` unless it gets used. 
let name = if let Some(name) = arg_decl.name { name.as_str().to_string() } else { - format!("arg{}", arg_index) + format!("{:?}", local) }; if Some(local) == mir.spread_arg { @@ -470,7 +472,8 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( _ => bug!("spread argument isn't a tuple?!") }; - let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty), &name); + let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty)); + bx.set_var_name(place.llval, name); for i in 0..tupled_arg_tys.len() { let arg = &fx.fn_ty.args[idx]; idx += 1; @@ -558,11 +561,13 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( llarg_idx += 1; let indirect_operand = OperandValue::Pair(llarg, llextra); - let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout, &name); + let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout); + bx.set_var_name(tmp.llval, name); indirect_operand.store(bx, tmp); tmp } else { - let tmp = PlaceRef::alloca(bx, arg.layout, &name); + let tmp = PlaceRef::alloca(bx, arg.layout); + bx.set_var_name(tmp.llval, name); if fx.fn_ty.c_variadic && last_arg_idx.map(|idx| arg_index == idx).unwrap_or(false) { let va_list_did = match tcx.lang_items().va_list() { Some(did) => did, diff --git a/src/librustc_codegen_ssa/mir/operand.rs b/src/librustc_codegen_ssa/mir/operand.rs index 58a13d685ddf9..daa25b2ea0591 100644 --- a/src/librustc_codegen_ssa/mir/operand.rs +++ b/src/librustc_codegen_ssa/mir/operand.rs @@ -367,7 +367,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue { // Allocate an appropriate region on the stack, and copy the value into it let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); - let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align); + let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, max_align); bx.memcpy(lldst, max_align, llptr, min_align, llsize, flags); // Store the allocated region and the extra to the indirect place. diff --git a/src/librustc_codegen_ssa/mir/place.rs b/src/librustc_codegen_ssa/mir/place.rs index ef9fc36b0f173..a4b4cb53bb1fb 100644 --- a/src/librustc_codegen_ssa/mir/place.rs +++ b/src/librustc_codegen_ssa/mir/place.rs @@ -71,11 +71,9 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { pub fn alloca>( bx: &mut Bx, layout: TyLayout<'tcx>, - name: &str ) -> Self { - debug!("alloca({:?}: {:?})", name, layout); assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); - let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi); + let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi); Self::new_sized(tmp, layout) } @@ -83,13 +81,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { pub fn alloca_unsized_indirect>( bx: &mut Bx, layout: TyLayout<'tcx>, - name: &str, ) -> Self { - debug!("alloca_unsized_indirect({:?}: {:?})", name, layout); assert!(layout.is_unsized(), "tried to allocate indirect place for sized values"); let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty); let ptr_layout = bx.cx().layout_of(ptr_ty); - Self::alloca(bx, ptr_layout, name) + Self::alloca(bx, ptr_layout) } pub fn len>( diff --git a/src/librustc_codegen_ssa/mir/rvalue.rs b/src/librustc_codegen_ssa/mir/rvalue.rs index 0a932bc3e1574..f21836a953c22 100644 --- a/src/librustc_codegen_ssa/mir/rvalue.rs +++ b/src/librustc_codegen_ssa/mir/rvalue.rs @@ -64,7 +64,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // index into the struct, and this case isn't // important enough for it. 
debug!("codegen_rvalue: creating ugly alloca"); - let scratch = PlaceRef::alloca(&mut bx, operand.layout, "__unsize_temp"); + let scratch = PlaceRef::alloca(&mut bx, operand.layout); scratch.storage_live(&mut bx); operand.val.store(&mut bx, scratch); base::coerce_unsized_into(&mut bx, scratch, dest); diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs index 3a144f0b0e0aa..1886701fb3a88 100644 --- a/src/librustc_codegen_ssa/traits/builder.rs +++ b/src/librustc_codegen_ssa/traits/builder.rs @@ -109,13 +109,12 @@ pub trait BuilderMethods<'a, 'tcx>: rhs: Self::Value, ) -> (Self::Value, Self::Value); - fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; - fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value; + fn alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value; + fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value; fn array_alloca( &mut self, ty: Self::Type, len: Self::Value, - name: &str, align: Align, ) -> Self::Value; diff --git a/src/librustc_data_structures/obligation_forest/mod.rs b/src/librustc_data_structures/obligation_forest/mod.rs index 04d2b23ab1a1c..6c52e626ababd 100644 --- a/src/librustc_data_structures/obligation_forest/mod.rs +++ b/src/librustc_data_structures/obligation_forest/mod.rs @@ -559,13 +559,20 @@ impl ObligationForest { trace } - #[inline] - fn mark_neighbors_as_waiting_from(&self, node: &Node) { + // This always-inlined function is for the hot call site. + #[inline(always)] + fn inlined_mark_neighbors_as_waiting_from(&self, node: &Node) { for dependent in node.parent.iter().chain(node.dependents.iter()) { self.mark_as_waiting_from(&self.nodes[dependent.get()]); } } + // This never-inlined function is for the cold call site. + #[inline(never)] + fn uninlined_mark_neighbors_as_waiting_from(&self, node: &Node) { + self.inlined_mark_neighbors_as_waiting_from(node) + } + /// Marks all nodes that depend on a pending node as `NodeState::Waiting`. fn mark_as_waiting(&self) { for node in &self.nodes { @@ -576,7 +583,8 @@ impl ObligationForest { for node in &self.nodes { if node.state.get() == NodeState::Pending { - self.mark_neighbors_as_waiting_from(node); + // This call site is hot. + self.inlined_mark_neighbors_as_waiting_from(node); } } } @@ -588,7 +596,8 @@ impl ObligationForest { NodeState::Pending | NodeState::Done => {}, } - self.mark_neighbors_as_waiting_from(node); + // This call site is cold. + self.uninlined_mark_neighbors_as_waiting_from(node); } /// Compresses the vector, removing all popped nodes. This adjusts diff --git a/src/librustc_errors/annotate_snippet_emitter_writer.rs b/src/librustc_errors/annotate_snippet_emitter_writer.rs index 39b906141f74c..c626dd0434d52 100644 --- a/src/librustc_errors/annotate_snippet_emitter_writer.rs +++ b/src/librustc_errors/annotate_snippet_emitter_writer.rs @@ -37,7 +37,7 @@ impl Emitter for AnnotateSnippetEmitterWriter { &mut primary_span, &mut children, &db.level, - db.handler.flags.external_macro_backtrace); + db.handler().flags.external_macro_backtrace); self.emit_messages_default(&db.level, db.message(), diff --git a/src/librustc_errors/diagnostic_builder.rs b/src/librustc_errors/diagnostic_builder.rs index 41d0638f7c6bd..7b8902f125aee 100644 --- a/src/librustc_errors/diagnostic_builder.rs +++ b/src/librustc_errors/diagnostic_builder.rs @@ -18,8 +18,17 @@ use log::debug; /// extending `HandlerFlags`, accessed via `self.handler.flags`. 
#[must_use] #[derive(Clone)] -pub struct DiagnosticBuilder<'a> { - pub handler: &'a Handler, +pub struct DiagnosticBuilder<'a>(Box>); + +/// This is a large type, and often used as a return value, especially within +/// the frequently-used `PResult` type. In theory, return value optimization +/// (RVO) should avoid unnecessary copying. In practice, it does not (at the +/// time of writing). The split between `DiagnosticBuilder` and +/// `DiagnosticBuilderInner` exists to avoid many `memcpy` calls. +#[must_use] +#[derive(Clone)] +struct DiagnosticBuilderInner<'a> { + handler: &'a Handler, diagnostic: Diagnostic, allow_suggestions: bool, } @@ -52,7 +61,7 @@ macro_rules! forward { ) => { $(#[$attrs])* pub fn $n(&mut self, $($name: $ty),*) -> &mut Self { - self.diagnostic.$n($($name),*); + self.0.diagnostic.$n($($name),*); self } }; @@ -69,7 +78,7 @@ macro_rules! forward { ) => { $(#[$attrs])* pub fn $n>(&mut self, $($name: $ty),*) -> &mut Self { - self.diagnostic.$n($($name),*); + self.0.diagnostic.$n($($name),*); self } }; @@ -79,24 +88,28 @@ impl<'a> Deref for DiagnosticBuilder<'a> { type Target = Diagnostic; fn deref(&self) -> &Diagnostic { - &self.diagnostic + &self.0.diagnostic } } impl<'a> DerefMut for DiagnosticBuilder<'a> { fn deref_mut(&mut self) -> &mut Diagnostic { - &mut self.diagnostic + &mut self.0.diagnostic } } impl<'a> DiagnosticBuilder<'a> { + pub fn handler(&self) -> &'a Handler{ + self.0.handler + } + /// Emit the diagnostic. pub fn emit(&mut self) { if self.cancelled() { return; } - self.handler.emit_db(&self); + self.0.handler.emit_db(&self); self.cancel(); } @@ -115,8 +128,8 @@ impl<'a> DiagnosticBuilder<'a> { /// Buffers the diagnostic for later emission, unless handler /// has disabled such buffering. pub fn buffer(mut self, buffered_diagnostics: &mut Vec) { - if self.handler.flags.dont_buffer_diagnostics || - self.handler.flags.treat_err_as_bug.is_some() + if self.0.handler.flags.dont_buffer_diagnostics || + self.0.handler.flags.treat_err_as_bug.is_some() { self.emit(); return; @@ -126,7 +139,7 @@ impl<'a> DiagnosticBuilder<'a> { // implements `Drop`. let diagnostic; unsafe { - diagnostic = std::ptr::read(&self.diagnostic); + diagnostic = std::ptr::read(&self.0.diagnostic); std::mem::forget(self); }; // Logging here is useful to help track down where in logs an error was @@ -144,7 +157,7 @@ impl<'a> DiagnosticBuilder<'a> { span: Option, ) -> &mut Self { let span = span.map(|s| s.into()).unwrap_or_else(|| MultiSpan::new()); - self.diagnostic.sub(level, message, span, None); + self.0.diagnostic.sub(level, message, span, None); self } @@ -160,7 +173,7 @@ impl<'a> DiagnosticBuilder<'a> { /// locally in whichever way makes the most sense. pub fn delay_as_bug(&mut self) { self.level = Level::Bug; - self.handler.delay_as_bug(self.diagnostic.clone()); + self.0.handler.delay_as_bug(self.0.diagnostic.clone()); self.cancel(); } @@ -171,7 +184,7 @@ impl<'a> DiagnosticBuilder<'a> { /// then the snippet will just include that `Span`, which is /// called the primary span. 
pub fn span_label>(&mut self, span: Span, label: T) -> &mut Self { - self.diagnostic.span_label(span, label); + self.0.diagnostic.span_label(span, label); self } @@ -208,10 +221,10 @@ impl<'a> DiagnosticBuilder<'a> { suggestion: Vec<(Span, String)>, applicability: Applicability, ) -> &mut Self { - if !self.allow_suggestions { + if !self.0.allow_suggestions { return self } - self.diagnostic.multipart_suggestion( + self.0.diagnostic.multipart_suggestion( msg, suggestion, applicability, @@ -225,10 +238,10 @@ impl<'a> DiagnosticBuilder<'a> { suggestion: Vec<(Span, String)>, applicability: Applicability, ) -> &mut Self { - if !self.allow_suggestions { + if !self.0.allow_suggestions { return self } - self.diagnostic.tool_only_multipart_suggestion( + self.0.diagnostic.tool_only_multipart_suggestion( msg, suggestion, applicability, @@ -236,7 +249,6 @@ impl<'a> DiagnosticBuilder<'a> { self } - pub fn span_suggestion( &mut self, sp: Span, @@ -244,10 +256,10 @@ impl<'a> DiagnosticBuilder<'a> { suggestion: String, applicability: Applicability, ) -> &mut Self { - if !self.allow_suggestions { + if !self.0.allow_suggestions { return self } - self.diagnostic.span_suggestion( + self.0.diagnostic.span_suggestion( sp, msg, suggestion, @@ -263,10 +275,10 @@ impl<'a> DiagnosticBuilder<'a> { suggestions: impl Iterator, applicability: Applicability, ) -> &mut Self { - if !self.allow_suggestions { + if !self.0.allow_suggestions { return self } - self.diagnostic.span_suggestions( + self.0.diagnostic.span_suggestions( sp, msg, suggestions, @@ -282,10 +294,10 @@ impl<'a> DiagnosticBuilder<'a> { suggestion: String, applicability: Applicability, ) -> &mut Self { - if !self.allow_suggestions { + if !self.0.allow_suggestions { return self } - self.diagnostic.span_suggestion_short( + self.0.diagnostic.span_suggestion_short( sp, msg, suggestion, @@ -301,10 +313,10 @@ impl<'a> DiagnosticBuilder<'a> { suggestion: String, applicability: Applicability, ) -> &mut Self { - if !self.allow_suggestions { + if !self.0.allow_suggestions { return self } - self.diagnostic.span_suggestion_hidden( + self.0.diagnostic.span_suggestion_hidden( sp, msg, suggestion, @@ -320,10 +332,10 @@ impl<'a> DiagnosticBuilder<'a> { suggestion: String, applicability: Applicability, ) -> &mut Self { - if !self.allow_suggestions { + if !self.0.allow_suggestions { return self } - self.diagnostic.tool_only_span_suggestion( + self.0.diagnostic.tool_only_span_suggestion( sp, msg, suggestion, @@ -336,7 +348,7 @@ impl<'a> DiagnosticBuilder<'a> { forward!(pub fn code(&mut self, s: DiagnosticId) -> &mut Self); pub fn allow_suggestions(&mut self, allow: bool) -> &mut Self { - self.allow_suggestions = allow; + self.0.allow_suggestions = allow; self } @@ -359,19 +371,18 @@ impl<'a> DiagnosticBuilder<'a> { /// Creates a new `DiagnosticBuilder` with an already constructed /// diagnostic. 
- pub fn new_diagnostic(handler: &'a Handler, diagnostic: Diagnostic) - -> DiagnosticBuilder<'a> { - DiagnosticBuilder { + pub fn new_diagnostic(handler: &'a Handler, diagnostic: Diagnostic) -> DiagnosticBuilder<'a> { + DiagnosticBuilder(Box::new(DiagnosticBuilderInner { handler, diagnostic, allow_suggestions: true, - } + })) } } impl<'a> Debug for DiagnosticBuilder<'a> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.diagnostic.fmt(f) + self.0.diagnostic.fmt(f) } } @@ -381,7 +392,7 @@ impl<'a> Drop for DiagnosticBuilder<'a> { fn drop(&mut self) { if !panicking() && !self.cancelled() { let mut db = DiagnosticBuilder::new( - self.handler, + self.0.handler, Level::Bug, "the following error was constructed but not emitted", ); diff --git a/src/librustc_errors/emitter.rs b/src/librustc_errors/emitter.rs index 0ce69eecc6b1e..66608361c8d44 100644 --- a/src/librustc_errors/emitter.rs +++ b/src/librustc_errors/emitter.rs @@ -385,7 +385,7 @@ impl Emitter for EmitterWriter { &mut primary_span, &mut children, &db.level, - db.handler.flags.external_macro_backtrace); + db.handler().flags.external_macro_backtrace); self.emit_messages_default(&db.level, &db.styled_message(), diff --git a/src/librustc_mir/const_eval.rs b/src/librustc_mir/const_eval.rs index 3e02497947aa8..57ddaa4eff038 100644 --- a/src/librustc_mir/const_eval.rs +++ b/src/librustc_mir/const_eval.rs @@ -589,7 +589,7 @@ pub fn const_eval_provider<'tcx>( tcx: TyCtxt<'tcx>, key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>, ) -> ::rustc::mir::interpret::ConstEvalResult<'tcx> { - // see comment in const_eval_provider for what we're doing here + // see comment in const_eval_raw_provider for what we're doing here if key.param_env.reveal == Reveal::All { let mut key = key.clone(); key.param_env.reveal = Reveal::UserFacing; diff --git a/src/librustc_mir/hair/pattern/mod.rs b/src/librustc_mir/hair/pattern/mod.rs index 6caccfddfa422..4aaa5e8ee259a 100644 --- a/src/librustc_mir/hair/pattern/mod.rs +++ b/src/librustc_mir/hair/pattern/mod.rs @@ -1229,7 +1229,13 @@ fn search_for_adt_without_structural_match<'tcx>(tcx: TyCtxt<'tcx>, ty::RawPtr(..) => { // `#[structural_match]` ignores substructure of // `*const _`/`*mut _`, so skip super_visit_with - + // + // (But still tell caller to continue search.) + return false; + } + ty::FnDef(..) | ty::FnPtr(..) => { + // types of formals and return in `fn(_) -> _` are also irrelevant + // // (But still tell caller to continue search.) return false; } diff --git a/src/librustc_typeck/check/_match.rs b/src/librustc_typeck/check/_match.rs index 7427ae9ce8de3..308a3d8ebc2cf 100644 --- a/src/librustc_typeck/check/_match.rs +++ b/src/librustc_typeck/check/_match.rs @@ -2,7 +2,8 @@ use crate::check::{FnCtxt, Expectation, Diverges, Needs}; use crate::check::coercion::CoerceMany; use rustc::hir::{self, ExprKind}; use rustc::infer::type_variable::{TypeVariableOrigin, TypeVariableOriginKind}; -use rustc::traits::{ObligationCause, ObligationCauseCode}; +use rustc::traits::{IfExpressionCause, MatchExpressionArmCause, ObligationCause}; +use rustc::traits::{ObligationCauseCode}; use rustc::ty::Ty; use syntax_pos::Span; @@ -146,13 +147,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // The reason for the first arm to fail is not that the match arms diverge, // but rather that there's a prior obligation that doesn't hold. 
0 => (arm_span, ObligationCauseCode::BlockTailExpression(arm.body.hir_id)), - _ => (expr.span, ObligationCauseCode::MatchExpressionArm { - arm_span, - source: match_src, - prior_arms: other_arms.clone(), - last_ty: prior_arm_ty.unwrap(), - discrim_hir_id: discrim.hir_id, - }), + _ => (expr.span, + ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause { + arm_span, + source: match_src, + prior_arms: other_arms.clone(), + last_ty: prior_arm_ty.unwrap(), + discrim_hir_id: discrim.hir_id, + }) + ), }; let cause = self.cause(span, code); coercion.coerce(self, &cause, &arm.body, arm_ty); @@ -345,11 +348,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }; // Finally construct the cause: - self.cause(error_sp, ObligationCauseCode::IfExpression { + self.cause(error_sp, ObligationCauseCode::IfExpression(box IfExpressionCause { then: then_sp, outer: outer_sp, semicolon: remove_semicolon, - }) + })) } fn demand_discriminant_type( diff --git a/src/librustc_typeck/check/closure.rs b/src/librustc_typeck/check/closure.rs index e9370429f3f55..d626bff150020 100644 --- a/src/librustc_typeck/check/closure.rs +++ b/src/librustc_typeck/check/closure.rs @@ -529,11 +529,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ); // recreated from (*) above // Check that E' = S'. - let cause = &self.misc(hir_ty.span); + let cause = self.misc(hir_ty.span); let InferOk { value: (), obligations, - } = self.at(cause, self.param_env) + } = self.at(&cause, self.param_env) .eq(*expected_ty, supplied_ty)?; all_obligations.extend(obligations); @@ -549,7 +549,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ); all_obligations.push( Obligation::new( - cause.clone(), + cause, self.param_env, ty::Predicate::TypeOutlives( ty::Binder::dummy( diff --git a/src/librustc_typeck/check/compare_method.rs b/src/librustc_typeck/check/compare_method.rs index f22499f547272..51adf501b4db8 100644 --- a/src/librustc_typeck/check/compare_method.rs +++ b/src/librustc_typeck/check/compare_method.rs @@ -309,7 +309,7 @@ fn compare_predicate_entailment<'tcx>( let cause = ObligationCause { span: impl_err_span, - ..cause.clone() + ..cause }; let mut diag = struct_span_err!(tcx.sess, diff --git a/src/librustc_typeck/check/expr.rs b/src/librustc_typeck/check/expr.rs index da72dfd155182..56bd903040ab4 100644 --- a/src/librustc_typeck/check/expr.rs +++ b/src/librustc_typeck/check/expr.rs @@ -813,18 +813,20 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { error: MethodError<'tcx> ) { let rcvr = &args[0]; - let try_alt_rcvr = |err: &mut DiagnosticBuilder<'_>, new_rcvr_t| { - if let Ok(pick) = self.lookup_probe( - span, - segment.ident, - new_rcvr_t, - rcvr, - probe::ProbeScope::AllTraits, - ) { - err.span_label( - pick.item.ident.span, - &format!("the method is available for `{}` here", new_rcvr_t), - ); + let try_alt_rcvr = |err: &mut DiagnosticBuilder<'_>, rcvr_t, lang_item| { + if let Some(new_rcvr_t) = self.tcx.mk_lang_item(rcvr_t, lang_item) { + if let Ok(pick) = self.lookup_probe( + span, + segment.ident, + new_rcvr_t, + rcvr, + probe::ProbeScope::AllTraits, + ) { + err.span_label( + pick.item.ident.span, + &format!("the method is available for `{}` here", new_rcvr_t), + ); + } } }; @@ -840,17 +842,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Try alternative arbitrary self types that could fulfill this call. // FIXME: probe for all types that *could* be arbitrary self-types, not // just this whitelist. 
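The `MatchExpressionArm(box MatchExpressionArmCause { .. })` and `IfExpression(box IfExpressionCause { .. })` rewrites above move the span-heavy payloads behind a `box` (rustc's nightly-only box expression) because an enum is as large as its largest variant; boxing the bulky variants keeps `ObligationCauseCode`, and everything that embeds it, small. A sketch of the effect with hypothetical stand-in types and plain `Box::new`:

```rust
#![allow(dead_code)]
use std::mem::size_of;

// Stand-in for the span-heavy data that `MatchExpressionArmCause` carries.
struct MatchArmCause {
    _spans: [u64; 6],
}

// Inline payload: the whole enum is as big as this one variant.
enum CauseInline {
    Misc,
    MatchExpressionArm(MatchArmCause),
}

// Boxed payload, as in the hunk above: the enum shrinks to pointer size.
enum CauseBoxed {
    Misc,
    MatchExpressionArm(Box<MatchArmCause>),
}

fn main() {
    assert!(size_of::<CauseBoxed>() < size_of::<CauseInline>());
    println!(
        "inline: {} bytes, boxed: {} bytes",
        size_of::<CauseInline>(),
        size_of::<CauseBoxed>()
    );
}
```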
- let box_rcvr_t = self.tcx.mk_box(rcvr_t); - try_alt_rcvr(&mut err, box_rcvr_t); - let pin_rcvr_t = self.tcx.mk_lang_item( - rcvr_t, - lang_items::PinTypeLangItem, - ); - try_alt_rcvr(&mut err, pin_rcvr_t); - let arc_rcvr_t = self.tcx.mk_lang_item(rcvr_t, lang_items::Arc); - try_alt_rcvr(&mut err, arc_rcvr_t); - let rc_rcvr_t = self.tcx.mk_lang_item(rcvr_t, lang_items::Rc); - try_alt_rcvr(&mut err, rc_rcvr_t); + try_alt_rcvr(&mut err, rcvr_t, lang_items::OwnedBoxLangItem); + try_alt_rcvr(&mut err, rcvr_t, lang_items::PinTypeLangItem); + try_alt_rcvr(&mut err, rcvr_t, lang_items::Arc); + try_alt_rcvr(&mut err, rcvr_t, lang_items::Rc); } err.emit(); } diff --git a/src/libstd/env.rs b/src/libstd/env.rs index eca93399e5807..b89893692698c 100644 --- a/src/libstd/env.rs +++ b/src/libstd/env.rs @@ -290,7 +290,7 @@ impl Error for VarError { /// /// Note that while concurrent access to environment variables is safe in Rust, /// some platforms only expose inherently unsafe non-threadsafe APIs for -/// inspecting the environment. As a result extra care needs to be taken when +/// inspecting the environment. As a result, extra care needs to be taken when /// auditing calls to unsafe external FFI functions to ensure that any external /// environment accesses are properly synchronized with accesses in Rust. /// diff --git a/src/libstd/error.rs b/src/libstd/error.rs index 998d59f90a255..4a1bb75d588c9 100644 --- a/src/libstd/error.rs +++ b/src/libstd/error.rs @@ -197,10 +197,10 @@ pub trait Error: Debug + Display { #[stable(feature = "error_source", since = "1.30.0")] fn source(&self) -> Option<&(dyn Error + 'static)> { None } - /// Gets the `TypeId` of `self` + /// Gets the `TypeId` of `self`. #[doc(hidden)] #[unstable(feature = "error_type_id", - reason = "this is memory unsafe to override in user code", + reason = "this is memory-unsafe to override in user code", issue = "60784")] fn type_id(&self, _: private::Internal) -> TypeId where Self: 'static { TypeId::of::() @@ -616,19 +616,19 @@ impl Error for char::ParseCharError { } } -// copied from any.rs +// Copied from `any.rs`. impl dyn Error + 'static { /// Returns `true` if the boxed type is the same as `T` #[stable(feature = "error_downcast", since = "1.3.0")] #[inline] pub fn is(&self) -> bool { - // Get TypeId of the type this function is instantiated with + // Get `TypeId` of the type this function is instantiated with. let t = TypeId::of::(); - // Get TypeId of the type in the trait object + // Get `TypeId` of the type in the trait object. let boxed = self.type_id(private::Internal); - // Compare both TypeIds on equality + // Compare both `TypeId`s on equality. t == boxed } @@ -662,21 +662,21 @@ impl dyn Error + 'static { } impl dyn Error + 'static + Send { - /// Forwards to the method defined on the type `Any`. + /// Forwards to the method defined on the type `dyn Error`. #[stable(feature = "error_downcast", since = "1.3.0")] #[inline] pub fn is(&self) -> bool { ::is::(self) } - /// Forwards to the method defined on the type `Any`. + /// Forwards to the method defined on the type `dyn Error`. #[stable(feature = "error_downcast", since = "1.3.0")] #[inline] pub fn downcast_ref(&self) -> Option<&T> { ::downcast_ref::(self) } - /// Forwards to the method defined on the type `Any`. + /// Forwards to the method defined on the type `dyn Error`. 
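The `is`, `downcast_ref`, `downcast_mut`, and `downcast` impls whose comments are being cleaned up here are the public face of the `TypeId` comparison described above. A short usage sketch of how they fit together from the caller's side:

```rust
use std::error::Error;
use std::fmt;

#[derive(Debug)]
struct MyError;

impl fmt::Display for MyError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "my error")
    }
}

impl Error for MyError {}

fn main() {
    let err: Box<dyn Error + Send + Sync> = Box::new(MyError);

    // `is` and `downcast_ref` forward to the `dyn Error + 'static` impls,
    // which compare `TypeId`s exactly as the comments above describe.
    assert!(err.is::<MyError>());
    assert!(err.downcast_ref::<MyError>().is_some());

    // `downcast` consumes the box; on failure it reapplies the `Send + Sync`
    // markers, which is what the `transmute`s later in this hunk are for.
    let concrete: Box<MyError> = err.downcast::<MyError>().expect("type matches");
    println!("{}", concrete);
}
```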
#[stable(feature = "error_downcast", since = "1.3.0")] #[inline] pub fn downcast_mut(&mut self) -> Option<&mut T> { @@ -685,21 +685,21 @@ impl dyn Error + 'static + Send { } impl dyn Error + 'static + Send + Sync { - /// Forwards to the method defined on the type `Any`. + /// Forwards to the method defined on the type `dyn Error`. #[stable(feature = "error_downcast", since = "1.3.0")] #[inline] pub fn is(&self) -> bool { ::is::(self) } - /// Forwards to the method defined on the type `Any`. + /// Forwards to the method defined on the type `dyn Error`. #[stable(feature = "error_downcast", since = "1.3.0")] #[inline] pub fn downcast_ref(&self) -> Option<&T> { ::downcast_ref::(self) } - /// Forwards to the method defined on the type `Any`. + /// Forwards to the method defined on the type `dyn Error`. #[stable(feature = "error_downcast", since = "1.3.0")] #[inline] pub fn downcast_mut(&mut self) -> Option<&mut T> { @@ -710,7 +710,7 @@ impl dyn Error + 'static + Send + Sync { impl dyn Error { #[inline] #[stable(feature = "error_downcast", since = "1.3.0")] - /// Attempt to downcast the box to a concrete type. + /// Attempts to downcast the box to a concrete type. pub fn downcast(self: Box) -> Result, Box> { if self.is::() { unsafe { @@ -878,12 +878,12 @@ impl<'a> Iterator for ErrorIter<'a> { impl dyn Error + Send { #[inline] #[stable(feature = "error_downcast", since = "1.3.0")] - /// Attempt to downcast the box to a concrete type. + /// Attempts to downcast the box to a concrete type. pub fn downcast(self: Box) -> Result, Box> { let err: Box = self; ::downcast(err).map_err(|s| unsafe { - // reapply the Send marker + // Reapply the `Send` marker. transmute::, Box>(s) }) } @@ -892,12 +892,12 @@ impl dyn Error + Send { impl dyn Error + Send + Sync { #[inline] #[stable(feature = "error_downcast", since = "1.3.0")] - /// Attempt to downcast the box to a concrete type. + /// Attempts to downcast the box to a concrete type. pub fn downcast(self: Box) -> Result, Box> { let err: Box = self; ::downcast(err).map_err(|s| unsafe { - // reapply the Send+Sync marker + // Reapply the `Send + Sync` marker. transmute::, Box>(s) }) } diff --git a/src/libstd/ffi/c_str.rs b/src/libstd/ffi/c_str.rs index bb346fb4db515..d7f4cc5d1fdaa 100644 --- a/src/libstd/ffi/c_str.rs +++ b/src/libstd/ffi/c_str.rs @@ -615,7 +615,7 @@ impl CString { } // Turns this `CString` into an empty string to prevent -// memory unsafe code from working by accident. Inline +// memory-unsafe code from working by accident. Inline // to prevent LLVM from optimizing it away in debug builds. #[stable(feature = "cstring_drop", since = "1.13.0")] impl Drop for CString { diff --git a/src/libstd/io/stdio.rs b/src/libstd/io/stdio.rs index 990c0eb8955e4..c798ee0e2209a 100644 --- a/src/libstd/io/stdio.rs +++ b/src/libstd/io/stdio.rs @@ -201,9 +201,9 @@ pub struct StdinLock<'a> { /// /// Each handle returned is a reference to a shared global buffer whose access /// is synchronized via a mutex. If you need more explicit control over -/// locking, see the [`lock() method`][lock]. +/// locking, see the [`Stdin::lock`] method. /// -/// [lock]: struct.Stdin.html#method.lock +/// [`Stdin::lock`]: struct.Stdin.html#method.lock /// /// ### Note: Windows Portability Consideration /// When operating in a console, the Windows implementation of this stream does not support @@ -425,9 +425,9 @@ pub struct StdoutLock<'a> { /// /// Each handle returned is a reference to a shared global buffer whose access /// is synchronized via a mutex. 
If you need more explicit control over -/// locking, see the [Stdout::lock] method. +/// locking, see the [`Stdout::lock`] method. /// -/// [Stdout::lock]: struct.Stdout.html#method.lock +/// [`Stdout::lock`]: struct.Stdout.html#method.lock /// /// ### Note: Windows Portability Consideration /// When operating in a console, the Windows implementation of this stream does not support diff --git a/src/libstd/process.rs b/src/libstd/process.rs index 000f80f99e7a9..c50025ab7d1de 100644 --- a/src/libstd/process.rs +++ b/src/libstd/process.rs @@ -1595,7 +1595,7 @@ pub fn id() -> u32 { /// A trait for implementing arbitrary return types in the `main` function. /// -/// The c-main function only supports to return integers as return type. +/// The C-main function only supports to return integers as return type. /// So, every type implementing the `Termination` trait has to be converted /// to an integer. /// diff --git a/src/libstd/sys/vxworks/process/mod.rs b/src/libstd/sys/vxworks/process/mod.rs index 4dc706006f4ce..1fc88fbde742f 100644 --- a/src/libstd/sys/vxworks/process/mod.rs +++ b/src/libstd/sys/vxworks/process/mod.rs @@ -1,5 +1,6 @@ pub use self::process_common::{Command, ExitStatus, ExitCode, Stdio, StdioPipes}; pub use self::process_inner::Process; +pub use crate::ffi::OsString as EnvKey; mod process_common; #[path = "process_vxworks.rs"] diff --git a/src/libstd/sys/vxworks/process/process_common.rs b/src/libstd/sys/vxworks/process/process_common.rs index 509140229fd3a..13648abd1e447 100644 --- a/src/libstd/sys/vxworks/process/process_common.rs +++ b/src/libstd/sys/vxworks/process/process_common.rs @@ -12,8 +12,6 @@ use crate::collections::BTreeMap; use libc::{c_int, gid_t, uid_t, c_char, EXIT_SUCCESS, EXIT_FAILURE}; -pub use crate::ffi::OsString as EnvKey; - //////////////////////////////////////////////////////////////////////////////// // Command //////////////////////////////////////////////////////////////////////////////// diff --git a/src/libstd/sys/vxworks/rand.rs b/src/libstd/sys/vxworks/rand.rs index 1ec0cbe4dcf5b..c22880db2bf03 100644 --- a/src/libstd/sys/vxworks/rand.rs +++ b/src/libstd/sys/vxworks/rand.rs @@ -14,17 +14,24 @@ pub fn hashmap_random_keys() -> (u64, u64) { mod imp { use libc; use crate::io; - - extern "C" { - fn randBytes (randBuf: *mut libc::c_uchar, - numOfBytes: libc::c_int) -> libc::c_int; - } + use core::sync::atomic::{AtomicBool, Ordering::Relaxed}; pub fn fill_bytes(v: &mut [u8]) { + static RNG_INIT: AtomicBool = AtomicBool::new(false); + while !RNG_INIT.load(Relaxed) { + let ret = unsafe { libc::randSecure() }; + if ret < 0 { + panic!("couldn't generate random bytes: {}", io::Error::last_os_error()); + } else if ret > 0 { + RNG_INIT.store(true, Relaxed); + break; + } + unsafe { libc::usleep(10) }; + } let ret = unsafe { - randBytes(v.as_mut_ptr() as *mut libc::c_uchar, v.len() as libc::c_int) + libc::randABytes(v.as_mut_ptr() as *mut libc::c_uchar, v.len() as libc::c_int) }; - if ret == -1 { + if ret < 0 { panic!("couldn't generate random bytes: {}", io::Error::last_os_error()); } } diff --git a/src/libstd/time.rs b/src/libstd/time.rs index dbec4da24f96a..3bf2b8be1fe8e 100644 --- a/src/libstd/time.rs +++ b/src/libstd/time.rs @@ -59,6 +59,30 @@ pub use core::time::Duration; /// println!("{}", now.elapsed().as_secs()); /// } /// ``` +/// +/// # Underlying System calls +/// Currently, the following system calls are being used to get the current time using `now()`: +/// +/// | Platform | System call | +/// 
|:---------:|:--------------------------------------------------------------------:| +/// | Cloud ABI | [clock_time_get (Monotonic Clock)] | +/// | SGX | [`insecure_time` usercall]. More information on [timekeeping in SGX] | +/// | UNIX | [clock_time_get (Monotonic Clock)] | +/// | Darwin | [mach_absolute_time] | +/// | VXWorks | [clock_gettime (Monotonic Clock)] | +/// | WASI | [__wasi_clock_time_get (Monotonic Clock)] | +/// | Windows | [QueryPerformanceCounter] | +/// +/// [QueryPerformanceCounter]: https://docs.microsoft.com/en-us/windows/win32/api/profileapi/nf-profileapi-queryperformancecounter +/// [`insecure_time` usercall]: https://edp.fortanix.com/docs/api/fortanix_sgx_abi/struct.Usercalls.html#method.insecure_time +/// [timekeeping in SGX]: https://edp.fortanix.com/docs/concepts/rust-std/#codestdtimecode +/// [__wasi_clock_time_get (Monotonic Clock)]: https://github.com/CraneStation/wasmtime/blob/master/docs/WASI-api.md#clock_time_get +/// [clock_gettime (Monotonic Clock)]: https://linux.die.net/man/3/clock_gettime +/// [mach_absolute_time]: https://developer.apple.com/library/archive/documentation/Darwin/Conceptual/KernelProgramming/services/services.html +/// [clock_time_get (Monotonic Clock)]: https://github.com/NuxiNL/cloudabi/blob/master/cloudabi.txt +/// +/// **Disclaimer:** These system calls might change over time. +/// #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[stable(feature = "time2", since = "1.8.0")] pub struct Instant(time::Instant); @@ -114,6 +138,28 @@ pub struct Instant(time::Instant); /// } /// } /// ``` +/// +/// # Underlying System calls +/// Currently, the following system calls are being used to get the current time using `now()`: +/// +/// | Platform | System call | +/// |:---------:|:--------------------------------------------------------------------:| +/// | Cloud ABI | [clock_time_get (Realtime Clock)] | +/// | SGX | [`insecure_time` usercall]. More information on [timekeeping in SGX] | +/// | UNIX | [clock_gettime (Realtime Clock)] | +/// | DARWIN | [gettimeofday] | +/// | VXWorks | [clock_gettime (Realtime Clock)] | +/// | WASI | [__wasi_clock_time_get (Realtime Clock)] | +/// | Windows | [GetSystemTimeAsFileTime] | +/// +/// [clock_time_get (Realtime Clock)]: https://github.com/NuxiNL/cloudabi/blob/master/cloudabi.txt +/// [gettimeofday]: http://man7.org/linux/man-pages/man2/gettimeofday.2.html +/// [clock_gettime (Realtime Clock)]: https://linux.die.net/man/3/clock_gettime +/// [__wasi_clock_time_get (Realtime Clock)]: https://github.com/CraneStation/wasmtime/blob/master/docs/WASI-api.md#clock_time_get +/// [GetSystemTimeAsFileTime]: https://docs.microsoft.com/en-us/windows/win32/api/sysinfoapi/nf-sysinfoapi-getsystemtimeasfiletime +/// +/// **Disclaimer:** These system calls might change over time. 
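The two "Underlying System calls" tables added here document which clock backs `Instant::now()` (monotonic) and `SystemTime::now()` (realtime) on each platform. A small usage sketch showing the two side by side:

```rust
use std::thread;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};

fn main() {
    // `Instant::now()` reads the monotonic clock listed in the first table,
    // so elapsed measurements never go backwards.
    let start = Instant::now();
    thread::sleep(Duration::from_millis(10));
    println!("elapsed: {:?}", start.elapsed());

    // `SystemTime::now()` reads the realtime (wall) clock from the second
    // table, which can be adjusted externally and may therefore jump.
    let since_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("system clock is set after the UNIX epoch");
    println!("seconds since the epoch: {}", since_epoch.as_secs());
}
```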
+/// #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)] #[stable(feature = "time2", since = "1.8.0")] pub struct SystemTime(time::SystemTime); diff --git a/src/libsyntax/parse/mod.rs b/src/libsyntax/parse/mod.rs index bc1bc00ac8405..2441a027f9940 100644 --- a/src/libsyntax/parse/mod.rs +++ b/src/libsyntax/parse/mod.rs @@ -13,6 +13,8 @@ use crate::symbol::Symbol; use errors::{Applicability, FatalError, Level, Handler, ColorConfig, Diagnostic, DiagnosticBuilder}; use rustc_data_structures::fx::{FxHashSet, FxHashMap}; +#[cfg(target_arch = "x86_64")] +use rustc_data_structures::static_assert_size; use rustc_data_structures::sync::{Lrc, Lock, Once}; use syntax_pos::{Span, SourceFile, FileName, MultiSpan}; use syntax_pos::edition::Edition; @@ -38,6 +40,11 @@ crate mod unescape_error_reporting; pub type PResult<'a, T> = Result>; +// `PResult` is used a lot. Make sure it doesn't unintentionally get bigger. +// (See also the comment on `DiagnosticBuilderInner`.) +#[cfg(target_arch = "x86_64")] +static_assert_size!(PResult<'_, bool>, 16); + /// Collected spans during parsing for places where a certain feature was /// used and should be feature gated accordingly in `check_crate`. #[derive(Default)] diff --git a/src/test/codegen/adjustments.rs b/src/test/codegen/adjustments.rs index ae2ff9994fdf0..ded310d0aebb1 100644 --- a/src/test/codegen/adjustments.rs +++ b/src/test/codegen/adjustments.rs @@ -3,7 +3,7 @@ #![crate_type = "lib"] // Hack to get the correct size for the length part in slices -// CHECK: @helper([[USIZE:i[0-9]+]] %arg0) +// CHECK: @helper([[USIZE:i[0-9]+]] %_1) #[no_mangle] pub fn helper(_: usize) { } diff --git a/src/test/codegen/fastcall-inreg.rs b/src/test/codegen/fastcall-inreg.rs index e152e6e9d1333..f67487c83ba23 100644 --- a/src/test/codegen/fastcall-inreg.rs +++ b/src/test/codegen/fastcall-inreg.rs @@ -49,27 +49,27 @@ #![crate_type = "lib"] pub mod tests { - // CHECK: @f1(i32 inreg %arg0, i32 inreg %arg1, i32 %arg2) + // CHECK: @f1(i32 inreg %_1, i32 inreg %_2, i32 %_3) #[no_mangle] pub extern "fastcall" fn f1(_: i32, _: i32, _: i32) {} - // CHECK: @f2(i32* inreg %arg0, i32* inreg %arg1, i32* %arg2) + // CHECK: @f2(i32* inreg %_1, i32* inreg %_2, i32* %_3) #[no_mangle] pub extern "fastcall" fn f2(_: *const i32, _: *const i32, _: *const i32) {} - // CHECK: @f3(float %arg0, i32 inreg %arg1, i32 inreg %arg2, i32 %arg3) + // CHECK: @f3(float %_1, i32 inreg %_2, i32 inreg %_3, i32 %_4) #[no_mangle] pub extern "fastcall" fn f3(_: f32, _: i32, _: i32, _: i32) {} - // CHECK: @f4(i32 inreg %arg0, float %arg1, i32 inreg %arg2, i32 %arg3) + // CHECK: @f4(i32 inreg %_1, float %_2, i32 inreg %_3, i32 %_4) #[no_mangle] pub extern "fastcall" fn f4(_: i32, _: f32, _: i32, _: i32) {} - // CHECK: @f5(i64 %arg0, i32 %arg1) + // CHECK: @f5(i64 %_1, i32 %_2) #[no_mangle] pub extern "fastcall" fn f5(_: i64, _: i32) {} - // CHECK: @f6(i1 inreg zeroext %arg0, i32 inreg %arg1, i32 %arg2) + // CHECK: @f6(i1 inreg zeroext %_1, i32 inreg %_2, i32 %_3) #[no_mangle] pub extern "fastcall" fn f6(_: bool, _: i32, _: i32) {} } diff --git a/src/test/codegen/function-arguments.rs b/src/test/codegen/function-arguments.rs index bd121ef24adae..7e1791cd4f296 100644 --- a/src/test/codegen/function-arguments.rs +++ b/src/test/codegen/function-arguments.rs @@ -18,48 +18,48 @@ pub fn boolean(x: bool) -> bool { x } -// CHECK: @readonly_borrow(i32* noalias readonly align 4 dereferenceable(4) %arg0) +// CHECK: @readonly_borrow(i32* noalias readonly align 4 dereferenceable(4) %_1) // FIXME #25759 This should also 
have `nocapture` #[no_mangle] pub fn readonly_borrow(_: &i32) { } -// CHECK: @static_borrow(i32* noalias readonly align 4 dereferenceable(4) %arg0) +// CHECK: @static_borrow(i32* noalias readonly align 4 dereferenceable(4) %_1) // static borrow may be captured #[no_mangle] pub fn static_borrow(_: &'static i32) { } -// CHECK: @named_borrow(i32* noalias readonly align 4 dereferenceable(4) %arg0) +// CHECK: @named_borrow(i32* noalias readonly align 4 dereferenceable(4) %_1) // borrow with named lifetime may be captured #[no_mangle] pub fn named_borrow<'r>(_: &'r i32) { } -// CHECK: @unsafe_borrow(i16* align 2 dereferenceable(2) %arg0) +// CHECK: @unsafe_borrow(i16* align 2 dereferenceable(2) %_1) // unsafe interior means this isn't actually readonly and there may be aliases ... #[no_mangle] pub fn unsafe_borrow(_: &UnsafeInner) { } -// CHECK: @mutable_unsafe_borrow(i16* align 2 dereferenceable(2) %arg0) +// CHECK: @mutable_unsafe_borrow(i16* align 2 dereferenceable(2) %_1) // ... unless this is a mutable borrow, those never alias #[no_mangle] pub fn mutable_unsafe_borrow(_: &mut UnsafeInner) { } -// CHECK: @mutable_borrow(i32* align 4 dereferenceable(4) %arg0) +// CHECK: @mutable_borrow(i32* align 4 dereferenceable(4) %_1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn mutable_borrow(_: &mut i32) { } -// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %arg0) +// CHECK: @indirect_struct(%S* noalias nocapture dereferenceable(32) %_1) #[no_mangle] pub fn indirect_struct(_: S) { } -// CHECK: @borrowed_struct(%S* noalias readonly align 4 dereferenceable(32) %arg0) +// CHECK: @borrowed_struct(%S* noalias readonly align 4 dereferenceable(32) %_1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn borrowed_struct(_: &S) { @@ -80,36 +80,36 @@ pub fn struct_return() -> S { } // Hack to get the correct size for the length part in slices -// CHECK: @helper([[USIZE:i[0-9]+]] %arg0) +// CHECK: @helper([[USIZE:i[0-9]+]] %_1) #[no_mangle] pub fn helper(_: usize) { } -// CHECK: @slice([0 x i8]* noalias nonnull readonly align 1 %arg0.0, [[USIZE]] %arg0.1) +// CHECK: @slice([0 x i8]* noalias nonnull readonly align 1 %_1.0, [[USIZE]] %_1.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn slice(_: &[u8]) { } -// CHECK: @mutable_slice([0 x i8]* nonnull align 1 %arg0.0, [[USIZE]] %arg0.1) +// CHECK: @mutable_slice([0 x i8]* nonnull align 1 %_1.0, [[USIZE]] %_1.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn mutable_slice(_: &mut [u8]) { } -// CHECK: @unsafe_slice([0 x i16]* nonnull align 2 %arg0.0, [[USIZE]] %arg0.1) +// CHECK: @unsafe_slice([0 x i16]* nonnull align 2 %_1.0, [[USIZE]] %_1.1) // unsafe interior means this isn't actually readonly and there may be aliases ... 
#[no_mangle] pub fn unsafe_slice(_: &[UnsafeInner]) { } -// CHECK: @str([0 x i8]* noalias nonnull readonly align 1 %arg0.0, [[USIZE]] %arg0.1) +// CHECK: @str([0 x i8]* noalias nonnull readonly align 1 %_1.0, [[USIZE]] %_1.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn str(_: &[u8]) { } -// CHECK: @trait_borrow({}* nonnull align 1 %arg0.0, [3 x [[USIZE]]]* noalias readonly align {{.*}} dereferenceable({{.*}}) %arg0.1) +// CHECK: @trait_borrow({}* nonnull align 1 %_1.0, [3 x [[USIZE]]]* noalias readonly align {{.*}} dereferenceable({{.*}}) %_1.1) // FIXME #25759 This should also have `nocapture` #[no_mangle] pub fn trait_borrow(_: &Drop) { diff --git a/src/test/codegen/personality_lifetimes.rs b/src/test/codegen/personality_lifetimes.rs index 0d3d537a2723d..05888c0e733ad 100644 --- a/src/test/codegen/personality_lifetimes.rs +++ b/src/test/codegen/personality_lifetimes.rs @@ -20,12 +20,13 @@ pub fn test() { let _s = S; // Check that the personality slot alloca gets a lifetime start in each cleanup block, not just // in the first one. + // CHECK: [[SLOT:%[0-9]+]] = alloca { i8*, i32 } // CHECK-LABEL: cleanup: - // CHECK: bitcast{{.*}}personalityslot - // CHECK-NEXT: call void @llvm.lifetime.start + // CHECK: [[BITCAST:%[0-9]+]] = bitcast { i8*, i32 }* [[SLOT]] to i8* + // CHECK-NEXT: call void @llvm.lifetime.start.{{.*}}({{.*}}, i8* [[BITCAST]]) // CHECK-LABEL: cleanup1: - // CHECK: bitcast{{.*}}personalityslot - // CHECK-NEXT: call void @llvm.lifetime.start + // CHECK: [[BITCAST1:%[0-9]+]] = bitcast { i8*, i32 }* [[SLOT]] to i8* + // CHECK-NEXT: call void @llvm.lifetime.start.{{.*}}({{.*}}, i8* [[BITCAST1]]) might_unwind(); let _t = S; might_unwind(); diff --git a/src/test/codegen/refs.rs b/src/test/codegen/refs.rs index cbb9942347673..15f99fd0c22a0 100644 --- a/src/test/codegen/refs.rs +++ b/src/test/codegen/refs.rs @@ -3,7 +3,7 @@ #![crate_type = "lib"] // Hack to get the correct size for the length part in slices -// CHECK: @helper([[USIZE:i[0-9]+]] %arg0) +// CHECK: @helper([[USIZE:i[0-9]+]] %_1) #[no_mangle] pub fn helper(_: usize) { } diff --git a/src/test/codegen/repeat-trusted-len.rs b/src/test/codegen/repeat-trusted-len.rs index c348a8f7b8b8f..87f29f6047c6a 100644 --- a/src/test/codegen/repeat-trusted-len.rs +++ b/src/test/codegen/repeat-trusted-len.rs @@ -6,7 +6,7 @@ use std::iter; -// CHECK: @helper([[USIZE:i[0-9]+]] %arg0) +// CHECK: @helper([[USIZE:i[0-9]+]] %_1) #[no_mangle] pub fn helper(_: usize) { } diff --git a/src/test/codegen/repr-transparent.rs b/src/test/codegen/repr-transparent.rs index c9f3837565808..e705d5ce3cd72 100644 --- a/src/test/codegen/repr-transparent.rs +++ b/src/test/codegen/repr-transparent.rs @@ -14,21 +14,21 @@ pub struct Zst2(()); #[repr(transparent)] pub struct F32(f32); -// CHECK: define float @test_F32(float %arg0) +// CHECK: define float @test_F32(float %_1) #[no_mangle] pub extern fn test_F32(_: F32) -> F32 { loop {} } #[repr(transparent)] pub struct Ptr(*mut u8); -// CHECK: define i8* @test_Ptr(i8* %arg0) +// CHECK: define i8* @test_Ptr(i8* %_1) #[no_mangle] pub extern fn test_Ptr(_: Ptr) -> Ptr { loop {} } #[repr(transparent)] pub struct WithZst(u64, Zst1); -// CHECK: define i64 @test_WithZst(i64 %arg0) +// CHECK: define i64 @test_WithZst(i64 %_1) #[no_mangle] pub extern fn test_WithZst(_: WithZst) -> WithZst { loop {} } @@ -36,14 +36,14 @@ pub extern fn test_WithZst(_: WithZst) -> WithZst { loop {} } pub struct WithZeroSizedArray(*const f32, [i8; 0]); // Apparently we use i32* when newtype-unwrapping f32 pointers. 
Whatever. -// CHECK: define i32* @test_WithZeroSizedArray(i32* %arg0) +// CHECK: define i32* @test_WithZeroSizedArray(i32* %_1) #[no_mangle] pub extern fn test_WithZeroSizedArray(_: WithZeroSizedArray) -> WithZeroSizedArray { loop {} } #[repr(transparent)] pub struct Generic(T); -// CHECK: define double @test_Generic(double %arg0) +// CHECK: define double @test_Generic(double %_1) #[no_mangle] pub extern fn test_Generic(_: Generic) -> Generic { loop {} } @@ -53,14 +53,14 @@ pub struct GenericPlusZst(T, Zst2); #[repr(u8)] pub enum Bool { True, False, FileNotFound } -// CHECK: define{{( zeroext)?}} i8 @test_Gpz(i8{{( zeroext)?}} %arg0) +// CHECK: define{{( zeroext)?}} i8 @test_Gpz(i8{{( zeroext)?}} %_1) #[no_mangle] pub extern fn test_Gpz(_: GenericPlusZst) -> GenericPlusZst { loop {} } #[repr(transparent)] pub struct LifetimePhantom<'a, T: 'a>(*const T, PhantomData<&'a T>); -// CHECK: define i16* @test_LifetimePhantom(i16* %arg0) +// CHECK: define i16* @test_LifetimePhantom(i16* %_1) #[no_mangle] pub extern fn test_LifetimePhantom(_: LifetimePhantom) -> LifetimePhantom { loop {} } @@ -70,28 +70,28 @@ pub struct UnitPhantom { val: T, unit: PhantomData } pub struct Px; -// CHECK: define float @test_UnitPhantom(float %arg0) +// CHECK: define float @test_UnitPhantom(float %_1) #[no_mangle] pub extern fn test_UnitPhantom(_: UnitPhantom) -> UnitPhantom { loop {} } #[repr(transparent)] pub struct TwoZsts(Zst1, i8, Zst2); -// CHECK: define{{( signext)?}} i8 @test_TwoZsts(i8{{( signext)?}} %arg0) +// CHECK: define{{( signext)?}} i8 @test_TwoZsts(i8{{( signext)?}} %_1) #[no_mangle] pub extern fn test_TwoZsts(_: TwoZsts) -> TwoZsts { loop {} } #[repr(transparent)] pub struct Nested1(Zst2, Generic); -// CHECK: define double @test_Nested1(double %arg0) +// CHECK: define double @test_Nested1(double %_1) #[no_mangle] pub extern fn test_Nested1(_: Nested1) -> Nested1 { loop {} } #[repr(transparent)] pub struct Nested2(Nested1, Zst1); -// CHECK: define double @test_Nested2(double %arg0) +// CHECK: define double @test_Nested2(double %_1) #[no_mangle] pub extern fn test_Nested2(_: Nested2) -> Nested2 { loop {} } @@ -101,7 +101,7 @@ struct f32x4(f32, f32, f32, f32); #[repr(transparent)] pub struct Vector(f32x4); -// CHECK: define <4 x float> @test_Vector(<4 x float> %arg0) +// CHECK: define <4 x float> @test_Vector(<4 x float> %_1) #[no_mangle] pub extern fn test_Vector(_: Vector) -> Vector { loop {} } @@ -111,7 +111,7 @@ impl Mirror for T { type It = Self; } #[repr(transparent)] pub struct StructWithProjection(::It); -// CHECK: define float @test_Projection(float %arg0) +// CHECK: define float @test_Projection(float %_1) #[no_mangle] pub extern fn test_Projection(_: StructWithProjection) -> StructWithProjection { loop {} } @@ -120,7 +120,7 @@ pub enum EnumF32 { Variant(F32) } -// CHECK: define float @test_EnumF32(float %arg0) +// CHECK: define float @test_EnumF32(float %_1) #[no_mangle] pub extern fn test_EnumF32(_: EnumF32) -> EnumF32 { loop {} } @@ -129,7 +129,7 @@ pub enum EnumF32WithZsts { Variant(Zst1, F32, Zst2) } -// CHECK: define float @test_EnumF32WithZsts(float %arg0) +// CHECK: define float @test_EnumF32WithZsts(float %_1) #[no_mangle] pub extern fn test_EnumF32WithZsts(_: EnumF32WithZsts) -> EnumF32WithZsts { loop {} } @@ -138,7 +138,7 @@ pub union UnionF32 { field: F32, } -// CHECK: define float @test_UnionF32(float %arg0) +// CHECK: define float @test_UnionF32(float %_1) #[no_mangle] pub extern fn test_UnionF32(_: UnionF32) -> UnionF32 { loop {} } @@ -149,7 +149,7 @@ pub union UnionF32WithZsts { 
zst2: Zst2, } -// CHECK: define float @test_UnionF32WithZsts(float %arg0) +// CHECK: define float @test_UnionF32WithZsts(float %_1) #[no_mangle] pub extern fn test_UnionF32WithZsts(_: UnionF32WithZsts) -> UnionF32WithZsts { loop {} } diff --git a/src/test/codegen/scalar-pair-bool.rs b/src/test/codegen/scalar-pair-bool.rs index 78d1025b13c77..d91ee7f816ded 100644 --- a/src/test/codegen/scalar-pair-bool.rs +++ b/src/test/codegen/scalar-pair-bool.rs @@ -20,24 +20,24 @@ pub fn pair_i32_bool(pair: (i32, bool)) -> (i32, bool) { pair } -// CHECK: define { i8, i8 } @pair_and_or(i1 zeroext %arg0.0, i1 zeroext %arg0.1) +// CHECK: define { i8, i8 } @pair_and_or(i1 zeroext %_1.0, i1 zeroext %_1.1) #[no_mangle] pub fn pair_and_or((a, b): (bool, bool)) -> (bool, bool) { // Make sure it can operate directly on the unpacked args - // CHECK: and i1 %arg0.0, %arg0.1 - // CHECK: or i1 %arg0.0, %arg0.1 + // CHECK: and i1 %_1.0, %_1.1 + // CHECK: or i1 %_1.0, %_1.1 (a && b, a || b) } -// CHECK: define void @pair_branches(i1 zeroext %arg0.0, i1 zeroext %arg0.1) +// CHECK: define void @pair_branches(i1 zeroext %_1.0, i1 zeroext %_1.1) #[no_mangle] pub fn pair_branches((a, b): (bool, bool)) { // Make sure it can branch directly on the unpacked bool args - // CHECK: br i1 %arg0.0 + // CHECK: br i1 %_1.0 if a { println!("Hello!"); } - // CHECK: br i1 %arg0.1 + // CHECK: br i1 %_1.1 if b { println!("Goodbye!"); } diff --git a/src/test/codegen/union-abi.rs b/src/test/codegen/union-abi.rs index b7baffe16695a..7339df17b057a 100644 --- a/src/test/codegen/union-abi.rs +++ b/src/test/codegen/union-abi.rs @@ -16,38 +16,38 @@ pub struct i64x4(i64, i64, i64, i64); #[derive(Copy, Clone)] pub union UnionI64x4{ a:(), b: i64x4 } -// CHECK: define void @test_UnionI64x4(<4 x i64>* {{.*}} %arg0) +// CHECK: define void @test_UnionI64x4(<4 x i64>* {{.*}} %_1) #[no_mangle] pub fn test_UnionI64x4(_: UnionI64x4) { loop {} } pub union UnionI64x4_{ a: i64x4, b: (), c:i64x4, d: Unhab, e: ((),()), f: UnionI64x4 } -// CHECK: define void @test_UnionI64x4_(<4 x i64>* {{.*}} %arg0) +// CHECK: define void @test_UnionI64x4_(<4 x i64>* {{.*}} %_1) #[no_mangle] pub fn test_UnionI64x4_(_: UnionI64x4_) { loop {} } pub union UnionI64x4I64{ a: i64x4, b: i64 } -// CHECK: define void @test_UnionI64x4I64(%UnionI64x4I64* {{.*}} %arg0) +// CHECK: define void @test_UnionI64x4I64(%UnionI64x4I64* {{.*}} %_1) #[no_mangle] pub fn test_UnionI64x4I64(_: UnionI64x4I64) { loop {} } pub union UnionI64x4Tuple{ a: i64x4, b: (i64, i64, i64, i64) } -// CHECK: define void @test_UnionI64x4Tuple(%UnionI64x4Tuple* {{.*}} %arg0) +// CHECK: define void @test_UnionI64x4Tuple(%UnionI64x4Tuple* {{.*}} %_1) #[no_mangle] pub fn test_UnionI64x4Tuple(_: UnionI64x4Tuple) { loop {} } pub union UnionF32{a:f32} -// CHECK: define float @test_UnionF32(float %arg0) +// CHECK: define float @test_UnionF32(float %_1) #[no_mangle] pub fn test_UnionF32(_: UnionF32) -> UnionF32 { loop {} } pub union UnionF32F32{a:f32, b:f32} -// CHECK: define float @test_UnionF32F32(float %arg0) +// CHECK: define float @test_UnionF32F32(float %_1) #[no_mangle] pub fn test_UnionF32F32(_: UnionF32F32) -> UnionF32F32 { loop {} } @@ -58,13 +58,13 @@ pub union UnionF32U32{a:f32, b:u32} pub fn test_UnionF32U32(_: UnionF32U32) -> UnionF32U32 { loop {} } pub union UnionU128{a:u128} -// CHECK: define i128 @test_UnionU128(i128 %arg0) +// CHECK: define i128 @test_UnionU128(i128 %_1) #[no_mangle] pub fn test_UnionU128(_: UnionU128) -> UnionU128 { loop {} } #[repr(C)] pub union CUnionU128{a:u128} -// CHECK: define void 
@test_CUnionU128(%CUnionU128* {{.*}} %arg0) +// CHECK: define void @test_CUnionU128(%CUnionU128* {{.*}} %_1) #[no_mangle] pub fn test_CUnionU128(_: CUnionU128) { loop {} } diff --git a/src/test/ui/issues/issue-64430.rs b/src/test/ui/issues/issue-64430.rs new file mode 100644 index 0000000000000..0bc66e06e6731 --- /dev/null +++ b/src/test/ui/issues/issue-64430.rs @@ -0,0 +1,14 @@ +// compile-flags:-C panic=abort + +#![no_std] +pub struct Foo; + +fn main() { + Foo.bar() + //~^ ERROR E0599 +} + +#[panic_handler] +fn panic(_info: &core::panic::PanicInfo) -> ! { + loop{} +} diff --git a/src/test/ui/issues/issue-64430.stderr b/src/test/ui/issues/issue-64430.stderr new file mode 100644 index 0000000000000..f1b2de8d8b36f --- /dev/null +++ b/src/test/ui/issues/issue-64430.stderr @@ -0,0 +1,12 @@ +error[E0599]: no method named `bar` found for type `Foo` in the current scope + --> $DIR/issue-64430.rs:7:9 + | +LL | pub struct Foo; + | --------------- method `bar` not found for this +... +LL | Foo.bar() + | ^^^ method not found in `Foo` + +error: aborting due to previous error + +For more information about this error, try `rustc --explain E0599`. diff --git a/src/test/ui/rfc1445/fn-ptr-is-structurally-matchable.rs b/src/test/ui/rfc1445/fn-ptr-is-structurally-matchable.rs new file mode 100644 index 0000000000000..5b378fb2a5928 --- /dev/null +++ b/src/test/ui/rfc1445/fn-ptr-is-structurally-matchable.rs @@ -0,0 +1,135 @@ +// run-pass + +// This file checks that fn ptrs are considered structurally matchable. +// See also rust-lang/rust#63479. + +fn main() { + let mut count = 0; + + // A type which is not structurally matchable: + struct NotSM; + + // And one that is: + #[derive(PartialEq, Eq)] + struct SM; + + fn trivial() {} + + fn sm_to(_: SM) {} + fn not_sm_to(_: NotSM) {} + fn to_sm() -> SM { SM } + fn to_not_sm() -> NotSM { NotSM } + + // To recreate the scenario of interest in #63479, we need to add + // a ref-level-of-indirection so that we descend into the type. + + fn r_sm_to(_: &SM) {} + fn r_not_sm_to(_: &NotSM) {} + fn r_to_r_sm(_: &()) -> &SM { &SM } + fn r_to_r_not_sm(_: &()) -> &NotSM { &NotSM } + + #[derive(PartialEq, Eq)] + struct Wrap(T); + + // In the code below, we put the match input into a local so that + // we can assign it an explicit type that is an fn ptr instead of + // a singleton type of the fn itself that the type inference would + // otherwise assign. + + // Check that fn() is #[structural_match] + const CFN1: Wrap = Wrap(trivial); + let input: Wrap = Wrap(trivial); + match Wrap(input) { + Wrap(CFN1) => count += 1, + Wrap(_) => {} + }; + + // Check that fn(T) is #[structural_match] when T is too. + const CFN2: Wrap = Wrap(sm_to); + let input: Wrap = Wrap(sm_to); + match Wrap(input) { + Wrap(CFN2) => count += 1, + Wrap(_) => {} + }; + + // Check that fn() -> T is #[structural_match] when T is too. + const CFN3: Wrap SM> = Wrap(to_sm); + let input: Wrap SM> = Wrap(to_sm); + match Wrap(input) { + Wrap(CFN3) => count += 1, + Wrap(_) => {} + }; + + // Check that fn(T) is #[structural_match] even if T is not. + const CFN4: Wrap = Wrap(not_sm_to); + let input: Wrap = Wrap(not_sm_to); + match Wrap(input) { + Wrap(CFN4) => count += 1, + Wrap(_) => {} + }; + + // Check that fn() -> T is #[structural_match] even if T is not. + const CFN5: Wrap NotSM> = Wrap(to_not_sm); + let input: Wrap NotSM> = Wrap(to_not_sm); + match Wrap(input) { + Wrap(CFN5) => count += 1, + Wrap(_) => {} + }; + + // Check that fn(&T) is #[structural_match] when T is too. 
+ const CFN6: Wrap = Wrap(r_sm_to); + let input: Wrap = Wrap(r_sm_to); + match Wrap(input) { + Wrap(CFN6) => count += 1, + Wrap(_) => {} + }; + + // Check that fn() -> &T is #[structural_match] when T is too. + const CFN7: Wrap &SM> = Wrap(r_to_r_sm); + let input: Wrap &SM> = Wrap(r_to_r_sm); + match Wrap(input) { + Wrap(CFN7) => count += 1, + Wrap(_) => {} + }; + + // Check that fn(T) is #[structural_match] even if T is not. + const CFN8: Wrap = Wrap(r_not_sm_to); + let input: Wrap = Wrap(r_not_sm_to); + match Wrap(input) { + Wrap(CFN8) => count += 1, + Wrap(_) => {} + }; + + // Check that fn() -> T is #[structural_match] even if T is not. + const CFN9: Wrap &NotSM> = Wrap(r_to_r_not_sm); + let input: Wrap &NotSM> = Wrap(r_to_r_not_sm); + match Wrap(input) { + Wrap(CFN9) => count += 1, + Wrap(_) => {} + }; + + // Check that a type which has fn ptrs is `#[structural_match]`. + #[derive(PartialEq, Eq)] + struct Foo { + alpha: fn(NotSM), + beta: fn() -> NotSM, + gamma: fn(SM), + delta: fn() -> SM, + } + + const CFOO: Foo = Foo { + alpha: not_sm_to, + beta: to_not_sm, + gamma: sm_to, + delta: to_sm, + }; + + let input = Foo { alpha: not_sm_to, beta: to_not_sm, gamma: sm_to, delta: to_sm }; + match input { + CFOO => count += 1, + Foo { .. } => {} + }; + + // Final count must be 10 now if all + assert_eq!(count, 10); +} diff --git a/src/test/ui/rfc1445/issue-63479-match-fnptr.rs b/src/test/ui/rfc1445/issue-63479-match-fnptr.rs new file mode 100644 index 0000000000000..b3c91cec580bf --- /dev/null +++ b/src/test/ui/rfc1445/issue-63479-match-fnptr.rs @@ -0,0 +1,36 @@ +// run-pass + +// The actual regression test from #63479. (Including this because my +// first draft at fn-ptr-is-structurally-matchable.rs failed to actually +// cover the case this hit; I've since expanded it accordingly, but the +// experience left me wary of leaving this regression test out.) + +#[derive(Eq)] +struct A { + a: i64 +} + +impl PartialEq for A { + #[inline] + fn eq(&self, other: &Self) -> bool { + self.a.eq(&other.a) + } +} + +type Fn = fn(&[A]); + +fn my_fn(_args: &[A]) { + println!("hello world"); +} + +const TEST: Fn = my_fn; + +struct B(Fn); + +fn main() { + let s = B(my_fn); + match s { + B(TEST) => println!("matched"), + _ => panic!("didn't match") + }; +} diff --git a/src/tools/error_index_generator/build.rs b/src/tools/error_index_generator/build.rs index 832aa3b1c8dfd..592b3f14c85af 100644 --- a/src/tools/error_index_generator/build.rs +++ b/src/tools/error_index_generator/build.rs @@ -35,7 +35,7 @@ fn register_all() -> Vec<(&'static str, Option<&'static str>)> { ($($ecode:ident: $message:expr,)* ; $($code:ident,)*) => ( $( {long_codes.extend([ - (stringify!($ecode), Some(stringify!($message))), + (stringify!($ecode), Some($message)), ].iter());} )* $( diff --git a/src/tools/rustc-workspace-hack/Cargo.toml b/src/tools/rustc-workspace-hack/Cargo.toml index 4ce411223de43..d068e1cf307b8 100644 --- a/src/tools/rustc-workspace-hack/Cargo.toml +++ b/src/tools/rustc-workspace-hack/Cargo.toml @@ -14,12 +14,6 @@ path = "lib.rs" # For documentation about what this is and why in the world these dependencies # are appearing, see `README.md`. -[build-dependencies] -# Currently Cargo/RLS depend on `failure` which depends on `synstructure` which -# enables this feature. Clippy, however, does not depend on anything that -# enables this feature. Enable it unconditionally. 
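The `error_index_generator` hunk above drops the `stringify!` around `$message`: `stringify!` captures the literal source tokens of the expression, quotes and all, whereas passing `$message` through keeps the string's actual value. A sketch with a simplified stand-in macro (not the real `register_diagnostics!` definition):

```rust
// Simplified stand-in for the real macro; only the part relevant to the fix.
macro_rules! register {
    ($code:ident: $message:expr) => {
        (stringify!($code), Some($message))
    };
}

fn main() {
    let (code, msg) = register!(E0001: "a long error description");
    assert_eq!(code, "E0001");
    assert_eq!(msg, Some("a long error description"));
    // With the previous `Some(stringify!($message))`, `msg` would have carried
    // the source tokens of the literal, quotes included, instead of its value.
}
```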
-syn = { version = "0.15", features = ['extra-traits'] } - [target.'cfg(windows)'.dependencies.winapi] version = "0.3" features = [ @@ -65,14 +59,10 @@ features = [ [dependencies] curl-sys = { version = "0.4.13", features = ["http2", "libnghttp2-sys"], optional = true } crossbeam-utils = { version = "0.6.5", features = ["nightly"] } -parking_lot = { version = "0.7", features = ['nightly'] } -rand = { version = "0.6.1", features = ["i128_support"] } serde = { version = "1.0.82", features = ['derive'] } serde_json = { version = "1.0.31", features = ["raw_value"] } smallvec = { version = "0.6", features = ['union', 'may_dangle'] } -scopeguard = { version = "0.3.3", features = ["use_std", "default"] } byteorder = { version = "1.2.7", features = ["i128"] } -syn = { version = "0.15.35", features = ["extra-traits", "full"] } [target.'cfg(not(windows))'.dependencies]