From 16af5c80982cb74131f1a045cf38181c7e949487 Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Thu, 25 May 2023 15:00:47 -0600 Subject: [PATCH 01/15] fallible allocator experiment --- library/alloc/src/boxed.rs | 44 +- .../alloc/src/collections/vec_deque/mod.rs | 374 ++++++++--------- library/alloc/src/falloc.rs | 95 +++++ library/alloc/src/lib.rs | 4 + library/alloc/src/raw_vec.rs | 162 ++------ library/alloc/src/raw_vec/tests.rs | 60 +-- library/alloc/src/vec/mod.rs | 380 ++++++++++-------- library/alloc/src/vec/spec_extend.rs | 19 +- library/alloc/src/vec/spec_from_elem.rs | 66 ++- library/alloc/src/vec/spec_from_iter.rs | 6 +- .../alloc/src/vec/spec_from_iter_nested.rs | 8 +- library/alloc/src/vec/splice.rs | 20 +- 12 files changed, 663 insertions(+), 575 deletions(-) create mode 100644 library/alloc/src/falloc.rs diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 1768687e8cd02..32e9d44487d9e 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -167,9 +167,9 @@ use core::task::{Context, Poll}; #[cfg(not(no_global_oom_handling))] use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw}; -use crate::alloc::{AllocError, Allocator, Global, Layout}; #[cfg(not(no_global_oom_handling))] use crate::borrow::Cow; +use crate::falloc::{AllocError, Allocator, Global, Layout}; use crate::raw_vec::RawVec; #[cfg(not(no_global_oom_handling))] use crate::str::from_boxed_utf8_unchecked; @@ -624,7 +624,9 @@ impl Box<[T]> { #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit]> { - unsafe { RawVec::with_capacity(len).into_box(len) } + unsafe { + ::map_result(RawVec::with_capacity_in(len, Global)).into_box(len) + } } /// Constructs a new boxed slice with uninitialized contents, with the memory @@ -649,7 +651,10 @@ impl Box<[T]> { #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit]> { - 
unsafe { RawVec::with_capacity_zeroed(len).into_box(len) } + unsafe { + ::map_result(RawVec::with_capacity_zeroed_in(len, Global)) + .into_box(len) + } } /// Constructs a new boxed slice with uninitialized contents. Returns an error if @@ -675,14 +680,7 @@ impl Box<[T]> { #[unstable(feature = "allocator_api", issue = "32838")] #[inline] pub fn try_new_uninit_slice(len: usize) -> Result]>, AllocError> { - unsafe { - let layout = match Layout::array::>(len) { - Ok(l) => l, - Err(_) => return Err(AllocError), - }; - let ptr = Global.allocate(layout)?; - Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len)) - } + unsafe { Ok(RawVec::with_capacity_in(len, Global).map_err(|_| AllocError)?.into_box(len)) } } /// Constructs a new boxed slice with uninitialized contents, with the memory @@ -708,12 +706,7 @@ impl Box<[T]> { #[inline] pub fn try_new_zeroed_slice(len: usize) -> Result]>, AllocError> { unsafe { - let layout = match Layout::array::>(len) { - Ok(l) => l, - Err(_) => return Err(AllocError), - }; - let ptr = Global.allocate_zeroed(layout)?; - Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len)) + Ok(RawVec::with_capacity_zeroed_in(len, Global).map_err(|_| AllocError)?.into_box(len)) } } } @@ -741,12 +734,11 @@ impl Box<[T], A> { /// /// assert_eq!(*values, [1, 2, 3]) /// ``` - #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] - pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { - unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) } + pub fn new_uninit_slice_in(len: usize, alloc: A) -> A::Result], A>> { + unsafe { A::map_result(RawVec::with_capacity_in(len, alloc).map(|r| r.into_box(len))) } } /// Constructs a new boxed slice with uninitialized contents in the provided allocator, @@ -769,12 +761,13 @@ impl Box<[T], A> { /// ``` /// /// [zeroed]: 
mem::MaybeUninit::zeroed - #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] - pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit], A> { - unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) } + pub fn new_zeroed_slice_in(len: usize, alloc: A) -> A::Result], A>> { + unsafe { + A::map_result(RawVec::with_capacity_zeroed_in(len, alloc).map(|r| r.into_box(len))) + } } } @@ -1474,7 +1467,7 @@ impl BoxFromSlice for Box<[T]> { #[inline] fn from_slice(slice: &[T]) -> Self { let len = slice.len(); - let buf = RawVec::with_capacity(len); + let buf = ::map_result(RawVec::with_capacity_in(len, Global)); unsafe { ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len); buf.into_box(slice.len()).assume_init() @@ -2014,9 +2007,8 @@ impl FromIterator for Box<[I]> { } } -#[cfg(not(no_global_oom_handling))] #[stable(feature = "box_slice_clone", since = "1.3.0")] -impl Clone for Box<[T], A> { +impl = Self> + Clone> Clone for Box<[T], A> { fn clone(&self) -> Self { let alloc = Box::allocator(self).clone(); self.to_vec_in(alloc).into_boxed_slice() diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 896da37f94c02..c7deff635cd73 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -22,9 +22,9 @@ use core::slice; #[allow(unused_imports)] use core::mem; -use crate::alloc::{Allocator, Global}; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; +use crate::falloc::{Allocator, Global}; use crate::raw_vec::RawVec; use crate::vec::Vec; @@ -106,7 +106,10 @@ pub struct VecDeque< } #[stable(feature = "rust1", since = "1.0.0")] -impl Clone for VecDeque { +impl Clone for VecDeque +where + A: Allocator = Self> + Clone, +{ fn clone(&self) -> Self { let mut deq = Self::with_capacity_in(self.len(), 
self.allocator().clone()); deq.extend(self.iter().cloned()); @@ -577,6 +580,10 @@ impl VecDeque { VecDeque { head: 0, len: 0, buf: RawVec::new_in(alloc) } } + fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { + Ok(VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc)? }) + } + /// Creates an empty deque with space for at least `capacity` elements. /// /// # Examples @@ -587,8 +594,8 @@ impl VecDeque { /// let deque: VecDeque = VecDeque::with_capacity(10); /// ``` #[unstable(feature = "allocator_api", issue = "32838")] - pub fn with_capacity_in(capacity: usize, alloc: A) -> VecDeque { - VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc) } + pub fn with_capacity_in(capacity: usize, alloc: A) -> A::Result> { + A::map_result(Self::try_with_capacity_in(capacity, alloc)) } /// Creates a `VecDeque` from a raw allocation, when the initialized @@ -751,16 +758,8 @@ impl VecDeque { /// /// [`reserve`]: VecDeque::reserve #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve_exact(&mut self, additional: usize) { - let new_cap = self.len.checked_add(additional).expect("capacity overflow"); - let old_cap = self.capacity(); - - if new_cap > old_cap { - self.buf.reserve_exact(self.len, additional); - unsafe { - self.handle_capacity_increase(old_cap); - } - } + pub fn reserve_exact(&mut self, additional: usize) -> A::Result<()> { + A::map_result(self.try_reserve_exact(additional)) } /// Reserves capacity for at least `additional` more elements to be inserted in the given @@ -780,18 +779,8 @@ impl VecDeque { /// assert!(buf.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: usize) { - let new_cap = self.len.checked_add(additional).expect("capacity overflow"); - let old_cap = self.capacity(); - - if new_cap > old_cap { - // we don't need to reserve_exact(), as the size doesn't have - // to be a power of 2. 
- self.buf.reserve(self.len, additional); - unsafe { - self.handle_capacity_increase(old_cap); - } - } + pub fn reserve(&mut self, additional: usize) -> A::Result<()> { + A::map_result(self.try_reserve(additional)) } /// Tries to reserve the minimum capacity for at least `additional` more elements to @@ -838,7 +827,7 @@ impl VecDeque { let old_cap = self.capacity(); if new_cap > old_cap { - self.buf.try_reserve_exact(self.len, additional)?; + self.buf.reserve_exact(self.len, additional)?; unsafe { self.handle_capacity_increase(old_cap); } @@ -886,7 +875,7 @@ impl VecDeque { let old_cap = self.capacity(); if new_cap > old_cap { - self.buf.try_reserve(self.len, additional)?; + self.buf.reserve(self.len, additional)?; unsafe { self.handle_capacity_increase(old_cap); } @@ -936,80 +925,85 @@ impl VecDeque { /// assert!(buf.capacity() >= 4); /// ``` #[stable(feature = "shrink_to", since = "1.56.0")] - pub fn shrink_to(&mut self, min_capacity: usize) { - let target_cap = min_capacity.max(self.len); - - // never shrink ZSTs - if T::IS_ZST || self.capacity() <= target_cap { - return; - } + pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<()> { + A::map_result((|| { + // Substitute for try block + let target_cap = min_capacity.max(self.len); + + // never shrink ZSTs + if T::IS_ZST || self.capacity() <= target_cap { + return Ok(()); + } - // There are three cases of interest: - // All elements are out of desired bounds - // Elements are contiguous, and tail is out of desired bounds - // Elements are discontiguous - // - // At all other times, element positions are unaffected. + // There are three cases of interest: + // All elements are out of desired bounds + // Elements are contiguous, and tail is out of desired bounds + // Elements are discontiguous + // + // At all other times, element positions are unaffected. - // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can - // overflow. 
- let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len)); + // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can + // overflow. + let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len)); - if self.len == 0 { - self.head = 0; - } else if self.head >= target_cap && tail_outside { - // Head and tail are both out of bounds, so copy all of them to the front. - // - // H := head - // L := last element - // H L - // [. . . . . . . . o o o o o o o . ] - // H L - // [o o o o o o o . ] - unsafe { - // nonoverlapping because `self.head >= target_cap >= self.len`. - self.copy_nonoverlapping(self.head, 0, self.len); - } - self.head = 0; - } else if self.head < target_cap && tail_outside { - // Head is in bounds, tail is out of bounds. - // Copy the overflowing part to the beginning of the - // buffer. This won't overlap because `target_cap >= self.len`. - // - // H := head - // L := last element - // H L - // [. . . o o o o o o o . . . . . . ] - // L H - // [o o . o o o o o ] - let len = self.head + self.len - target_cap; - unsafe { - self.copy_nonoverlapping(target_cap, 0, len); - } - } else if !self.is_contiguous() { - // The head slice is at least partially out of bounds, tail is in bounds. - // Copy the head backwards so it lines up with the target capacity. - // This won't overlap because `target_cap >= self.len`. - // - // H := head - // L := last element - // L H - // [o o o o o . . . . . . . . . o o ] - // L H - // [o o o o o . o o ] - let head_len = self.capacity() - self.head; - let new_head = target_cap - head_len; - unsafe { - // can't use `copy_nonoverlapping()` here because the new and old - // regions for the head might overlap. - self.copy(self.head, new_head, head_len); + if self.len == 0 { + self.head = 0; + } else if self.head >= target_cap && tail_outside { + // Head and tail are both out of bounds, so copy all of them to the front. 
+ // + // H := head + // L := last element + // H L + // [. . . . . . . . o o o o o o o . ] + // H L + // [o o o o o o o . ] + unsafe { + // nonoverlapping because `self.head >= target_cap >= self.len`. + self.copy_nonoverlapping(self.head, 0, self.len); + } + self.head = 0; + } else if self.head < target_cap && tail_outside { + // Head is in bounds, tail is out of bounds. + // Copy the overflowing part to the beginning of the + // buffer. This won't overlap because `target_cap >= self.len`. + // + // H := head + // L := last element + // H L + // [. . . o o o o o o o . . . . . . ] + // L H + // [o o . o o o o o ] + let len = self.head + self.len - target_cap; + unsafe { + self.copy_nonoverlapping(target_cap, 0, len); + } + } else if !self.is_contiguous() { + // The head slice is at least partially out of bounds, tail is in bounds. + // Copy the head backwards so it lines up with the target capacity. + // This won't overlap because `target_cap >= self.len`. + // + // H := head + // L := last element + // L H + // [o o o o o . . . . . . . . . o o ] + // L H + // [o o o o o . o o ] + let head_len = self.capacity() - self.head; + let new_head = target_cap - head_len; + unsafe { + // can't use `copy_nonoverlapping()` here because the new and old + // regions for the head might overlap. 
+ self.copy(self.head, new_head, head_len); + } + self.head = new_head; } - self.head = new_head; - } - self.buf.shrink_to_fit(target_cap); + self.buf.shrink_to(target_cap)?; - debug_assert!(self.head < self.capacity() || self.capacity() == 0); - debug_assert!(self.len <= self.capacity()); + debug_assert!(self.head < self.capacity() || self.capacity() == 0); + debug_assert!(self.len <= self.capacity()); + + Ok(()) + })()) } /// Shortens the deque, keeping the first `len` elements and dropping @@ -1628,17 +1622,22 @@ impl VecDeque { /// assert_eq!(d.front(), Some(&2)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn push_front(&mut self, value: T) { - if self.is_full() { - self.grow(); - } + pub fn push_front(&mut self, value: T) -> A::Result<()> { + A::map_result((|| { + // Substitute for try block + if self.is_full() { + self.grow()?; + } - self.head = self.wrap_sub(self.head, 1); - self.len += 1; + self.head = self.wrap_sub(self.head, 1); + self.len += 1; - unsafe { - self.buffer_write(self.head, value); - } + unsafe { + self.buffer_write(self.head, value); + } + + Ok(()) + })()) } /// Appends an element to the back of the deque. 
@@ -1654,13 +1653,18 @@ impl VecDeque { /// assert_eq!(3, *buf.back().unwrap()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn push_back(&mut self, value: T) { - if self.is_full() { - self.grow(); - } + pub fn push_back(&mut self, value: T) -> A::Result<()> { + A::map_result((|| { + // Substitute for try block + if self.is_full() { + self.grow()?; + } - unsafe { self.buffer_write(self.to_physical_idx(self.len), value) } - self.len += 1; + unsafe { self.buffer_write(self.to_physical_idx(self.len), value) } + self.len += 1; + + Ok(()) + })()) } #[inline] @@ -1763,32 +1767,37 @@ impl VecDeque { /// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']); /// ``` #[stable(feature = "deque_extras_15", since = "1.5.0")] - pub fn insert(&mut self, index: usize, value: T) { - assert!(index <= self.len(), "index out of bounds"); - if self.is_full() { - self.grow(); - } - - let k = self.len - index; - if k < index { - // `index + 1` can't overflow, because if index was usize::MAX, then either the - // assert would've failed, or the deque would've tried to grow past usize::MAX - // and panicked. - unsafe { - // see `remove()` for explanation why this wrap_copy() call is safe.
- self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k); - self.buffer_write(self.to_physical_idx(index), value); - self.len += 1; + pub fn insert(&mut self, index: usize, value: T) -> A::Result<()> { + A::map_result((|| { + // Substitute for try block + assert!(index <= self.len(), "index out of bounds"); + if self.is_full() { + self.grow()?; } - } else { - let old_head = self.head; - self.head = self.wrap_sub(self.head, 1); - unsafe { - self.wrap_copy(old_head, self.head, index); - self.buffer_write(self.to_physical_idx(index), value); - self.len += 1; + + let k = self.len - index; + if k < index { + // `index + 1` can't overflow, because if index was usize::MAX, then either the + // assert would've failed, or the deque would've tried to grow past usize::MAX + // and panicked. + unsafe { + // see `remove()` for explanation why this wrap_copy() call is safe. + self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k); + self.buffer_write(self.to_physical_idx(index), value); + self.len += 1; + } + } else { + let old_head = self.head; + self.head = self.wrap_sub(self.head, 1); + unsafe { + self.wrap_copy(old_head, self.head, index); + self.buffer_write(self.to_physical_idx(index), value); + self.len += 1; + } } - } + + Ok(()) + })()) } /// Removes and returns the element at `index` from the deque. 
@@ -1865,51 +1874,57 @@ impl VecDeque { #[inline] #[must_use = "use `.truncate()` if you don't need the other half"] #[stable(feature = "split_off", since = "1.4.0")] - pub fn split_off(&mut self, at: usize) -> Self + pub fn split_off(&mut self, at: usize) -> A::Result where A: Clone, { - let len = self.len; - assert!(at <= len, "`at` out of bounds"); - - let other_len = len - at; - let mut other = VecDeque::with_capacity_in(other_len, self.allocator().clone()); - - unsafe { - let (first_half, second_half) = self.as_slices(); - - let first_len = first_half.len(); - let second_len = second_half.len(); - if at < first_len { - // `at` lies in the first half. - let amount_in_first = first_len - at; + A::map_result((|| { + let len = self.len; + assert!(at <= len, "`at` out of bounds"); - ptr::copy_nonoverlapping(first_half.as_ptr().add(at), other.ptr(), amount_in_first); + let other_len = len - at; + let mut other = VecDeque::try_with_capacity_in(other_len, self.allocator().clone())?; - // just take all of the second half. - ptr::copy_nonoverlapping( - second_half.as_ptr(), - other.ptr().add(amount_in_first), - second_len, - ); - } else { - // `at` lies in the second half, need to factor in the elements we skipped - // in the first half. - let offset = at - first_len; - let amount_in_second = second_len - offset; - ptr::copy_nonoverlapping( - second_half.as_ptr().add(offset), - other.ptr(), - amount_in_second, - ); + unsafe { + let (first_half, second_half) = self.as_slices(); + + let first_len = first_half.len(); + let second_len = second_half.len(); + if at < first_len { + // `at` lies in the first half. + let amount_in_first = first_len - at; + + ptr::copy_nonoverlapping( + first_half.as_ptr().add(at), + other.ptr(), + amount_in_first, + ); + + // just take all of the second half. 
+ ptr::copy_nonoverlapping( + second_half.as_ptr(), + other.ptr().add(amount_in_first), + second_len, + ); + } else { + // `at` lies in the second half, need to factor in the elements we skipped + // in the first half. + let offset = at - first_len; + let amount_in_second = second_len - offset; + ptr::copy_nonoverlapping( + second_half.as_ptr().add(offset), + other.ptr(), + amount_in_second, + ); + } } - } - // Cleanup where the ends of the buffers are - self.len = at; - other.len = other_len; + // Cleanup where the ends of the buffers are + self.len = at; + other.len = other_len; - other + Ok(other) + })()) } /// Moves all the elements of `other` into `self`, leaving `other` empty. @@ -2053,16 +2068,17 @@ impl VecDeque { // be called in cold paths. // This may panic or abort #[inline(never)] - fn grow(&mut self) { + fn grow(&mut self) -> Result<(), TryReserveError> { // Extend or possibly remove this assertion when valid use-cases for growing the // buffer without it being full emerge debug_assert!(self.is_full()); let old_cap = self.capacity(); - self.buf.reserve_for_push(old_cap); - unsafe { - self.handle_capacity_increase(old_cap); - } - debug_assert!(!self.is_full()); + self.buf.reserve_for_push(old_cap).map(|_| { + unsafe { + self.handle_capacity_increase(old_cap); + } + debug_assert!(!self.is_full()); + }) } /// Modifies the deque in-place so that `len()` is equal to `new_len`, diff --git a/library/alloc/src/falloc.rs b/library/alloc/src/falloc.rs new file mode 100644 index 0000000000000..04428d807cf00 --- /dev/null +++ b/library/alloc/src/falloc.rs @@ -0,0 +1,95 @@ +pub use crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError}; + +/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of +/// data described via [`Layout`][]. 
+/// +/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because having +/// an allocator like `MyAlloc([u8; N])` cannot be moved, without updating the pointers to the +/// allocated memory. +/// +/// Unlike [`GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an underlying +/// allocator does not support this (like jemalloc) or return a null pointer (such as +/// `libc::malloc`), this must be caught by the implementation. +/// +/// ### Currently allocated memory +/// +/// Some of the methods require that a memory block be *currently allocated* via an allocator. This +/// means that: +/// +/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or +/// [`shrink`], and +/// +/// * the memory block has not been subsequently deallocated, where blocks are either deallocated +/// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or +/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer +/// remains valid. +/// +/// [`allocate`]: Allocator::allocate +/// [`grow`]: Allocator::grow +/// [`shrink`]: Allocator::shrink +/// [`deallocate`]: Allocator::deallocate +/// +/// ### Memory fitting +/// +/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to +/// "fit" a memory block means (or equivalently, for a memory block to "fit" a layout) is that the +/// following conditions must hold: +/// +/// * The block must be allocated with the same alignment as [`layout.align()`], and +/// +/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where: +/// - `min` is the size of the layout most recently used to allocate the block, and +/// - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`]. 
+/// +/// [`layout.align()`]: Layout::align +/// [`layout.size()`]: Layout::size +/// +/// # Safety +/// +/// * Memory blocks returned from an allocator must point to valid memory and retain their validity +/// until the instance and all of its copies and clones are dropped, +/// +/// * copying, cloning, or moving the allocator must not invalidate memory blocks returned from this +/// allocator. A copied or cloned allocator must behave like the same allocator, and +/// +/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other +/// method of the allocator. +/// +/// [*currently allocated*]: #currently-allocated-memory +#[unstable(feature = "allocator_api", issue = "32838")] +pub unsafe trait Allocator: crate::alloc::Allocator { + #[must_use] // Doesn't actually work + type Result; + + #[must_use] + fn map_result(result: Result) -> Self::Result; +} + +#[cfg(not(no_global_oom_handling))] +use crate::alloc::handle_alloc_error; +#[cfg(not(no_global_oom_handling))] +use crate::collections::{TryReserveError, TryReserveErrorKind}; + +// One central function responsible for reporting capacity overflows. This'll +// ensure that the code generation related to these panics is minimal as there's +// only one location which panics rather than a bunch throughout the module. +#[cfg(not(no_global_oom_handling))] +pub(crate) fn capacity_overflow() -> ! { + panic!("capacity overflow"); +} + +#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(not(no_global_oom_handling))] +unsafe impl Allocator for X { + type Result = T; + + fn map_result(result: Result) -> Self::Result { + match result { + Err(error) => match error.kind() { + TryReserveErrorKind::CapacityOverflow => capacity_overflow(), + TryReserveErrorKind::AllocError { layout, .. 
} => handle_alloc_error(layout), + }, + Ok(x) => x, + } + } +} diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 59fa91c1066dc..2774aa250928f 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -226,6 +226,10 @@ mod raw_vec; pub mod alloc; +// Fallible allocation experiment + +mod falloc; + // Primitive types using the heaps above // Need to conditionally define the mod from `boxed.rs` to avoid diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index dfd30d99cf041..841c3151b5479 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -7,17 +7,14 @@ use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; use core::ptr::{self, NonNull, Unique}; use core::slice; -#[cfg(not(no_global_oom_handling))] -use crate::alloc::handle_alloc_error; -use crate::alloc::{Allocator, Global, Layout}; use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind::*; +use crate::falloc::{Allocator, Global, Layout}; #[cfg(test)] mod tests; -#[cfg(not(no_global_oom_handling))] enum AllocInit { /// The contents of the new memory are uninitialized. Uninitialized, @@ -71,34 +68,6 @@ impl RawVec { pub const fn new() -> Self { Self::new_in(Global) } - - /// Creates a `RawVec` (on the system heap) with exactly the - /// capacity and alignment requirements for a `[T; capacity]`. This is - /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is - /// zero-sized. Note that if `T` is zero-sized this means you will - /// *not* get a `RawVec` with the requested capacity. - /// - /// # Panics - /// - /// Panics if the requested capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. 
- #[cfg(not(any(no_global_oom_handling, test)))] - #[must_use] - #[inline] - pub fn with_capacity(capacity: usize) -> Self { - Self::with_capacity_in(capacity, Global) - } - - /// Like `with_capacity`, but guarantees the buffer is zeroed. - #[cfg(not(any(no_global_oom_handling, test)))] - #[must_use] - #[inline] - pub fn with_capacity_zeroed(capacity: usize) -> Self { - Self::with_capacity_zeroed_in(capacity, Global) - } } impl RawVec { @@ -122,19 +91,19 @@ impl RawVec { Self { ptr: Unique::dangling(), cap: 0, alloc } } - /// Like `with_capacity`, but parameterized over the choice of - /// allocator for the returned `RawVec`. - #[cfg(not(no_global_oom_handling))] + /// Creates a `RawVec` (with the given allocator) with exactly the + /// capacity and alignment requirements for a `[T; capacity]`. This is + /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is + /// zero-sized. Note that if `T` is zero-sized this means you will + /// *not* get a `RawVec` with the requested capacity. #[inline] - pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + pub fn with_capacity_in(capacity: usize, alloc: A) -> Result { Self::allocate_in(capacity, AllocInit::Uninitialized, alloc) } - /// Like `with_capacity_zeroed`, but parameterized over the choice - /// of allocator for the returned `RawVec`. - #[cfg(not(no_global_oom_handling))] + /// Like `with_capacity_in`, but guarantees the buffer is zeroed. #[inline] - pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self { + pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Result { Self::allocate_in(capacity, AllocInit::Zeroed, alloc) } @@ -164,39 +133,33 @@ impl RawVec { } } - #[cfg(not(no_global_oom_handling))] - fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self { + fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result { // Don't allocate here because `Drop` will not deallocate when `capacity` is 0. 
if T::IS_ZST || capacity == 0 { - Self::new_in(alloc) + Ok(Self::new_in(alloc)) } else { // We avoid `unwrap_or_else` here because it bloats the amount of // LLVM IR generated. let layout = match Layout::array::(capacity) { Ok(layout) => layout, - Err(_) => capacity_overflow(), + Err(_) => Err(CapacityOverflow)?, }; - match alloc_guard(layout.size()) { - Ok(_) => {} - Err(_) => capacity_overflow(), - } + alloc_guard(layout.size())?; let result = match init { AllocInit::Uninitialized => alloc.allocate(layout), AllocInit::Zeroed => alloc.allocate_zeroed(layout), }; - let ptr = match result { - Ok(ptr) => ptr, - Err(_) => handle_alloc_error(layout), - }; + + let ptr = result.map_err(|_| AllocError { layout, non_exhaustive: () })?; // Allocators currently return a `NonNull<[u8]>` whose length // matches the size requested. If that ever changes, the capacity // here should change to `ptr.len() / mem::size_of::()`. - Self { + Ok(Self { ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) }, cap: capacity, alloc, - } + }) } } @@ -265,50 +228,20 @@ impl RawVec { /// code *you* write that relies on the behavior of this function may break. /// /// This is ideal for implementing a bulk-push operation like `extend`. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] #[inline] - pub fn reserve(&mut self, len: usize, additional: usize) { - // Callers expect this function to be very cheap when there is already sufficient capacity. - // Therefore, we move all the resizing and error-handling logic from grow_amortized and - // handle_reserve behind a call, while making sure that this function is likely to be - // inlined as just a comparison and a call if the comparison fails. 
- #[cold] - fn do_reserve_and_handle( - slf: &mut RawVec, - len: usize, - additional: usize, - ) { - handle_reserve(slf.grow_amortized(len, additional)); - } - + pub fn reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { if self.needs_to_grow(len, additional) { - do_reserve_and_handle(self, len, additional); + self.grow_amortized(len, additional) + } else { + Ok(()) } } /// A specialized version of `reserve()` used only by the hot and /// oft-instantiated `Vec::push()`, which does its own capacity check. - #[cfg(not(no_global_oom_handling))] #[inline(never)] - pub fn reserve_for_push(&mut self, len: usize) { - handle_reserve(self.grow_amortized(len, 1)); - } - - /// The same as `reserve`, but returns on errors instead of panicking or aborting. - pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { - if self.needs_to_grow(len, additional) { - self.grow_amortized(len, additional) - } else { - Ok(()) - } + pub fn reserve_for_push(&mut self, len: usize) -> Result<(), TryReserveError> { + self.grow_amortized(len, 1) } /// Ensures that the buffer contains at least enough space to hold `len + @@ -320,25 +253,7 @@ impl RawVec { /// If `len` exceeds `self.capacity()`, this may fail to actually allocate /// the requested space. This is not really unsafe, but the unsafe code /// *you* write that relies on the behavior of this function may break. - /// - /// # Panics - /// - /// Panics if the new capacity exceeds `isize::MAX` bytes. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - pub fn reserve_exact(&mut self, len: usize, additional: usize) { - handle_reserve(self.try_reserve_exact(len, additional)); - } - - /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting. 
- pub fn try_reserve_exact( - &mut self, - len: usize, - additional: usize, - ) -> Result<(), TryReserveError> { + pub fn reserve_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> { if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) } } @@ -348,13 +263,8 @@ impl RawVec { /// # Panics /// /// Panics if the given amount is *larger* than the current capacity. - /// - /// # Aborts - /// - /// Aborts on OOM. - #[cfg(not(no_global_oom_handling))] - pub fn shrink_to_fit(&mut self, cap: usize) { - handle_reserve(self.shrink(cap)); + pub fn shrink_to(&mut self, cap: usize) -> Result<(), TryReserveError> { + self.shrink(cap) } } @@ -425,7 +335,6 @@ impl RawVec { Ok(()) } - #[cfg(not(no_global_oom_handling))] fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> { assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity"); @@ -487,17 +396,6 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec { } } -// Central function for reserve error handling. -#[cfg(not(no_global_oom_handling))] -#[inline] -fn handle_reserve(result: Result<(), TryReserveError>) { - match result.map_err(|e| e.kind()) { - Err(CapacityOverflow) => capacity_overflow(), - Err(AllocError { layout, .. }) => handle_alloc_error(layout), - Ok(()) => { /* yay */ } - } -} - // We need to guarantee the following: // * We don't ever allocate `> isize::MAX` byte-size objects. // * We don't overflow `usize::MAX` and actually allocate too little. @@ -515,11 +413,3 @@ fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> { Ok(()) } } - -// One central function responsible for reporting capacity overflows. This'll -// ensure that the code generation related to these panics is minimal as there's -// only one location which panics rather than a bunch throughout the module. -#[cfg(not(no_global_oom_handling))] -fn capacity_overflow() -> ! 
{ - panic!("capacity overflow"); -} diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs index ff322f0da97c6..4b87b65eed57d 100644 --- a/library/alloc/src/raw_vec/tests.rs +++ b/library/alloc/src/raw_vec/tests.rs @@ -2,7 +2,7 @@ use super::*; use std::cell::Cell; #[test] -fn allocator_param() { +fn allocator_param() -> Result<(), TryReserveError> { use crate::alloc::AllocError; // Writing a test of integration between third-party @@ -20,7 +20,7 @@ fn allocator_param() { struct BoundedAlloc { fuel: Cell, } - unsafe impl Allocator for BoundedAlloc { + unsafe impl core::alloc::Allocator for BoundedAlloc { fn allocate(&self, layout: Layout) -> Result, AllocError> { let size = layout.size(); if size > self.fuel.get() { @@ -40,42 +40,46 @@ fn allocator_param() { } let a = BoundedAlloc { fuel: Cell::new(500) }; - let mut v: RawVec = RawVec::with_capacity_in(50, a); + let mut v: RawVec = RawVec::with_capacity_in(50, a)?; assert_eq!(v.alloc.fuel.get(), 450); - v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel) + v.reserve(50, 150)?; // (causes a realloc, thus using 50 + 150 = 200 units of fuel) assert_eq!(v.alloc.fuel.get(), 250); + + Ok(()) } #[test] -fn reserve_does_not_overallocate() { +fn reserve_does_not_overallocate() -> Result<(), TryReserveError> { { let mut v: RawVec = RawVec::new(); // First, `reserve` allocates like `reserve_exact`. - v.reserve(0, 9); + v.reserve(0, 9)?; assert_eq!(9, v.capacity()); } { let mut v: RawVec = RawVec::new(); - v.reserve(0, 7); + v.reserve(0, 7)?; assert_eq!(7, v.capacity()); // 97 is more than double of 7, so `reserve` should work // like `reserve_exact`. - v.reserve(7, 90); + v.reserve(7, 90)?; assert_eq!(97, v.capacity()); } { let mut v: RawVec = RawVec::new(); - v.reserve(0, 12); + v.reserve(0, 12)?; assert_eq!(12, v.capacity()); - v.reserve(12, 3); + v.reserve(12, 3)?; // 3 is less than half of 12, so `reserve` must grow // exponentially. 
At the time of writing this test grow // factor is 2, so new capacity is 24, however, grow factor // of 1.5 is OK too. Hence `>= 18` in assert. assert!(v.capacity() >= 12 + 12 / 2); } + + Ok(()) } struct ZST; @@ -88,7 +92,7 @@ fn zst_sanity(v: &RawVec) { } #[test] -fn zst() { +fn zst() -> Result<(), TryReserveError> { let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into()); assert_eq!(std::mem::size_of::(), 0); @@ -98,19 +102,19 @@ fn zst() { let v: RawVec = RawVec::new(); zst_sanity(&v); - let v: RawVec = RawVec::with_capacity_in(100, Global); + let v: RawVec = RawVec::with_capacity_in(100, Global)?; zst_sanity(&v); - let v: RawVec = RawVec::with_capacity_in(100, Global); + let v: RawVec = RawVec::with_capacity_in(100, Global)?; zst_sanity(&v); - let v: RawVec = RawVec::allocate_in(0, AllocInit::Uninitialized, Global); + let v: RawVec = RawVec::allocate_in(0, AllocInit::Uninitialized, Global)?; zst_sanity(&v); - let v: RawVec = RawVec::allocate_in(100, AllocInit::Uninitialized, Global); + let v: RawVec = RawVec::allocate_in(100, AllocInit::Uninitialized, Global)?; zst_sanity(&v); - let mut v: RawVec = RawVec::allocate_in(usize::MAX, AllocInit::Uninitialized, Global); + let mut v: RawVec = RawVec::allocate_in(usize::MAX, AllocInit::Uninitialized, Global)?; zst_sanity(&v); // Check all these operations work as expected with zero-sized elements. 
@@ -119,20 +123,20 @@ fn zst() { assert!(v.needs_to_grow(101, usize::MAX - 100)); zst_sanity(&v); - v.reserve(100, usize::MAX - 100); + v.reserve(100, usize::MAX - 100)?; //v.reserve(101, usize::MAX - 100); // panics, in `zst_reserve_panic` below zst_sanity(&v); - v.reserve_exact(100, usize::MAX - 100); + v.reserve_exact(100, usize::MAX - 100)?; //v.reserve_exact(101, usize::MAX - 100); // panics, in `zst_reserve_exact_panic` below zst_sanity(&v); - assert_eq!(v.try_reserve(100, usize::MAX - 100), Ok(())); - assert_eq!(v.try_reserve(101, usize::MAX - 100), cap_err); + assert_eq!(v.reserve(100, usize::MAX - 100), Ok(())); + assert_eq!(v.reserve(101, usize::MAX - 100), cap_err); zst_sanity(&v); - assert_eq!(v.try_reserve_exact(100, usize::MAX - 100), Ok(())); - assert_eq!(v.try_reserve_exact(101, usize::MAX - 100), cap_err); + assert_eq!(v.reserve_exact(100, usize::MAX - 100), Ok(())); + assert_eq!(v.reserve_exact(101, usize::MAX - 100), cap_err); zst_sanity(&v); assert_eq!(v.grow_amortized(100, usize::MAX - 100), cap_err); @@ -142,22 +146,26 @@ fn zst() { assert_eq!(v.grow_exact(100, usize::MAX - 100), cap_err); assert_eq!(v.grow_exact(101, usize::MAX - 100), cap_err); zst_sanity(&v); + + Ok(()) } #[test] -#[should_panic(expected = "capacity overflow")] fn zst_reserve_panic() { + let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into()); + let mut v: RawVec = RawVec::new(); zst_sanity(&v); - v.reserve(101, usize::MAX - 100); + assert_eq!(v.reserve(101, usize::MAX - 100), cap_err); } #[test] -#[should_panic(expected = "capacity overflow")] fn zst_reserve_exact_panic() { + let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into()); + let mut v: RawVec = RawVec::new(); zst_sanity(&v); - v.reserve_exact(101, usize::MAX - 100); + assert_eq!(v.reserve_exact(101, usize::MAX - 100), cap_err); } diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index d89cdff8e366c..190b9d867cf08 100644 --- 
a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -65,10 +65,10 @@ use core::ops::{self, Index, IndexMut, Range, RangeBounds}; use core::ptr::{self, NonNull}; use core::slice::{self, SliceIndex}; -use crate::alloc::{Allocator, Global}; use crate::borrow::{Cow, ToOwned}; use crate::boxed::Box; -use crate::collections::TryReserveError; +use crate::collections::{TryReserveError, TryReserveErrorKind}; +use crate::falloc::{Allocator, Global}; use crate::raw_vec::RawVec; #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] @@ -98,7 +98,6 @@ pub use self::into_iter::IntoIter; mod into_iter; -#[cfg(not(no_global_oom_handling))] use self::is_zero::IsZero; mod is_zero; @@ -108,10 +107,8 @@ mod in_place_collect; mod partial_eq; -#[cfg(not(no_global_oom_handling))] use self::spec_from_elem::SpecFromElem; -#[cfg(not(no_global_oom_handling))] mod spec_from_elem; #[cfg(not(no_global_oom_handling))] @@ -138,10 +135,8 @@ use self::spec_from_iter::SpecFromIter; #[cfg(not(no_global_oom_handling))] mod spec_from_iter; -#[cfg(not(no_global_oom_handling))] use self::spec_extend::SpecExtend; -#[cfg(not(no_global_oom_handling))] mod spec_extend; /// A contiguous growable array type, written as `Vec`, short for 'vector'. @@ -610,6 +605,17 @@ impl Vec { Vec { buf: RawVec::new_in(alloc), len: 0 } } + pub(crate) fn try_with_capacity_in(capacity: usize, alloc: A) -> Result { + Ok(Vec { buf: RawVec::with_capacity_in(capacity, alloc)?, len: 0 }) + } + + pub(crate) fn try_with_capacity_zeroed_in( + capacity: usize, + alloc: A, + ) -> Result { + Ok(Vec { buf: RawVec::with_capacity_zeroed_in(capacity, alloc)?, len: 0 }) + } + /// Constructs a new, empty `Vec` with at least the specified capacity /// with the provided allocator. 
/// @@ -665,11 +671,10 @@ impl Vec { /// let vec_units = Vec::<(), System>::with_capacity_in(10, System); /// assert_eq!(vec_units.capacity(), usize::MAX); /// ``` - #[cfg(not(no_global_oom_handling))] #[inline] #[unstable(feature = "allocator_api", issue = "32838")] - pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 } + pub fn with_capacity_in(capacity: usize, alloc: A) -> A::Result { + A::map_result(Self::try_with_capacity_in(capacity, alloc)) } /// Creates a `Vec` directly from a pointer, a capacity, a length, @@ -902,10 +907,9 @@ impl Vec { /// vec.reserve(10); /// assert!(vec.capacity() >= 11); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: usize) { - self.buf.reserve(self.len, additional); + pub fn reserve(&mut self, additional: usize) -> A::Result<()> { + A::map_result(self.try_reserve(additional)) } /// Reserves the minimum capacity for at least `additional` more elements to @@ -932,10 +936,9 @@ impl Vec { /// vec.reserve_exact(10); /// assert!(vec.capacity() >= 11); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve_exact(&mut self, additional: usize) { - self.buf.reserve_exact(self.len, additional); + pub fn reserve_exact(&mut self, additional: usize) -> A::Result<()> { + A::map_result(self.try_reserve_exact(additional)) } /// Tries to reserve capacity for at least `additional` more elements to be inserted @@ -972,7 +975,7 @@ impl Vec { /// ``` #[stable(feature = "try_reserve", since = "1.57.0")] pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.buf.try_reserve(self.len, additional) + self.buf.reserve(self.len, additional) } /// Tries to reserve the minimum capacity for at least `additional` @@ -1015,7 +1018,18 @@ impl Vec { /// ``` #[stable(feature = "try_reserve", since = "1.57.0")] pub fn 
try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> { - self.buf.try_reserve_exact(self.len, additional) + self.buf.reserve_exact(self.len, additional) + } + + fn try_shrink_to_fit(&mut self) -> Result<(), TryReserveError> { + // The capacity is never less than the length, and there's nothing to do when + // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit` + // by only calling it with a greater capacity. + if self.capacity() > self.len { + self.buf.shrink_to(self.len)?; + } + + Ok(()) } /// Shrinks the capacity of the vector as much as possible. @@ -1032,15 +1046,9 @@ impl Vec { /// vec.shrink_to_fit(); /// assert!(vec.capacity() >= 3); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn shrink_to_fit(&mut self) { - // The capacity is never less than the length, and there's nothing to do when - // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit` - // by only calling it with a greater capacity. - if self.capacity() > self.len { - self.buf.shrink_to_fit(self.len); - } + pub fn shrink_to_fit(&mut self) -> A::Result<()> { + A::map_result(self.try_shrink_to_fit()) } /// Shrinks the capacity of the vector with a lower bound. @@ -1061,12 +1069,13 @@ impl Vec { /// vec.shrink_to(0); /// assert!(vec.capacity() >= 3); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "shrink_to", since = "1.56.0")] - pub fn shrink_to(&mut self, min_capacity: usize) { - if self.capacity() > min_capacity { - self.buf.shrink_to_fit(cmp::max(self.len, min_capacity)); - } + pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<()> { + A::map_result(if self.capacity() > min_capacity { + self.buf.shrink_to(cmp::max(self.len, min_capacity)) + } else { + Ok(()) + }) } /// Converts the vector into [`Box<[T]>`][owned slice]. 
@@ -1094,16 +1103,18 @@ impl Vec { /// let slice = vec.into_boxed_slice(); /// assert_eq!(slice.into_vec().capacity(), 3); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn into_boxed_slice(mut self) -> Box<[T], A> { - unsafe { - self.shrink_to_fit(); - let me = ManuallyDrop::new(self); - let buf = ptr::read(&me.buf); - let len = me.len(); - buf.into_box(len).assume_init() - } + pub fn into_boxed_slice(mut self) -> A::Result> { + A::map_result((|| { + // Substitute for try block + self.try_shrink_to_fit()?; + unsafe { + let me = ManuallyDrop::new(self); + let buf = ptr::read(&me.buf); + let len = me.len(); + Ok(buf.into_box(len).assume_init()) + } + })()) } /// Shortens the vector, keeping the first `len` elements and dropping @@ -1430,42 +1441,46 @@ impl Vec { /// vec.insert(4, 5); /// assert_eq!(vec, [1, 4, 2, 3, 5]); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] - pub fn insert(&mut self, index: usize, element: T) { - #[cold] - #[inline(never)] - fn assert_failed(index: usize, len: usize) -> ! { - panic!("insertion index (is {index}) should be <= len (is {len})"); - } + pub fn insert(&mut self, index: usize, element: T) -> A::Result<()> { + A::map_result((|| { + // Substitute for try block + #[cold] + #[inline(never)] + fn assert_failed(index: usize, len: usize) -> ! { + panic!("insertion index (is {index}) should be <= len (is {len})"); + } - let len = self.len(); + let len = self.len(); - // space for the new element - if len == self.buf.capacity() { - self.reserve(1); - } + // space for the new element + if len == self.buf.capacity() { + self.buf.reserve_for_push(len)? + } - unsafe { - // infallible - // The spot to put the new value - { - let p = self.as_mut_ptr().add(index); - if index < len { - // Shift everything over to make space. (Duplicating the - // `index`th element into two consecutive places.) 
- ptr::copy(p, p.add(1), len - index); - } else if index == len { - // No elements need shifting. - } else { - assert_failed(index, len); + unsafe { + // infallible + // The spot to put the new value + { + let p = self.as_mut_ptr().add(index); + if index < len { + // Shift everything over to make space. (Duplicating the + // `index`th element into two consecutive places.) + ptr::copy(p, p.add(1), len - index); + } else if index == len { + // No elements need shifting. + } else { + assert_failed(index, len); + } + // Write it in, overwriting the first copy of the `index`th + // element. + ptr::write(p, element); } - // Write it in, overwriting the first copy of the `index`th - // element. - ptr::write(p, element); + self.set_len(len + 1); } - self.set_len(len + 1); - } + + Ok(()) + })()) } /// Removes and returns the element at position `index` within the vector, @@ -1818,20 +1833,24 @@ impl Vec { /// vec.push(3); /// assert_eq!(vec, [1, 2, 3]); /// ``` - #[cfg(not(no_global_oom_handling))] #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn push(&mut self, value: T) { - // This will panic or abort if we would allocate > isize::MAX bytes - // or if the length increment would overflow for zero-sized types. - if self.len == self.buf.capacity() { - self.buf.reserve_for_push(self.len); - } - unsafe { - let end = self.as_mut_ptr().add(self.len); - ptr::write(end, value); - self.len += 1; - } + pub fn push(&mut self, value: T) -> A::Result<()> { + A::map_result((|| { + // Substitute for try block + // This will panic or abort if we would allocate > isize::MAX bytes + // or if the length increment would overflow for zero-sized types. + if self.len == self.buf.capacity() { + self.buf.reserve_for_push(self.len)? 
+ } + unsafe { + let end = self.as_mut_ptr().add(self.len); + ptr::write(end, value); + self.len += 1; + } + + Ok(()) + })()) } /// Appends an element if there is sufficient spare capacity, otherwise an error is returned @@ -1922,25 +1941,30 @@ impl Vec { /// assert_eq!(vec, [1, 2, 3, 4, 5, 6]); /// assert_eq!(vec2, []); /// ``` - #[cfg(not(no_global_oom_handling))] #[inline] #[stable(feature = "append", since = "1.4.0")] - pub fn append(&mut self, other: &mut Self) { - unsafe { - self.append_elements(other.as_slice() as _); - other.set_len(0); - } + pub fn append(&mut self, other: &mut Self) -> A::Result<()> { + A::map_result((|| { + // Substitute for try block + unsafe { + self.append_elements(other.as_slice() as _)?; + other.set_len(0); + } + + Ok(()) + })()) } /// Appends elements to `self` from other buffer. - #[cfg(not(no_global_oom_handling))] #[inline] - unsafe fn append_elements(&mut self, other: *const [T]) { + unsafe fn append_elements(&mut self, other: *const [T]) -> Result<(), TryReserveError> { let count = unsafe { (*other).len() }; - self.reserve(count); + self.try_reserve(count)?; let len = self.len(); unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) }; self.len += count; + + Ok(()) } /// Removes the specified range from the vector in bulk, returning all @@ -2084,43 +2108,45 @@ impl Vec { /// assert_eq!(vec, [1]); /// assert_eq!(vec2, [2, 3]); /// ``` - #[cfg(not(no_global_oom_handling))] #[inline] #[must_use = "use `.truncate()` if you don't need the other half"] #[stable(feature = "split_off", since = "1.4.0")] - pub fn split_off(&mut self, at: usize) -> Self + pub fn split_off(&mut self, at: usize) -> A::Result where A: Clone, { - #[cold] - #[inline(never)] - fn assert_failed(at: usize, len: usize) -> ! { - panic!("`at` split index (is {at}) should be <= len (is {len})"); - } + A::map_result((|| { + // Substitute for try block + #[cold] + #[inline(never)] + fn assert_failed(at: usize, len: usize) -> ! 
{ + panic!("`at` split index (is {at}) should be <= len (is {len})"); + } - if at > self.len() { - assert_failed(at, self.len()); - } + if at > self.len() { + assert_failed(at, self.len()); + } - if at == 0 { - // the new vector can take over the original buffer and avoid the copy - return mem::replace( - self, - Vec::with_capacity_in(self.capacity(), self.allocator().clone()), - ); - } + if at == 0 { + // the new vector can take over the original buffer and avoid the copy + return Ok(mem::replace( + self, + Vec::try_with_capacity_in(self.capacity(), self.allocator().clone())?, + )); + } - let other_len = self.len - at; - let mut other = Vec::with_capacity_in(other_len, self.allocator().clone()); + let other_len = self.len - at; + let mut other = Vec::try_with_capacity_in(other_len, self.allocator().clone())?; - // Unsafely `set_len` and copy items to `other`. - unsafe { - self.set_len(at); - other.set_len(other_len); + // Unsafely `set_len` and copy items to `other`. + unsafe { + self.set_len(at); + other.set_len(other_len); - ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len()); - } - other + ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len()); + } + Ok(other) + })()) } /// Resizes the `Vec` in-place so that `len` is equal to `new_len`. 
@@ -2149,18 +2175,22 @@ impl Vec { /// vec.resize_with(4, || { p *= 2; p }); /// assert_eq!(vec, [2, 4, 8, 16]); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_resize_with", since = "1.33.0")] - pub fn resize_with(&mut self, new_len: usize, f: F) + pub fn resize_with(&mut self, new_len: usize, f: F) -> A::Result<()> where F: FnMut() -> T, { - let len = self.len(); - if new_len > len { - self.extend_trusted(iter::repeat_with(f).take(new_len - len)); - } else { - self.truncate(new_len); - } + A::map_result((|| { + // Substitute for try block + let len = self.len(); + if new_len > len { + self.extend_trusted(iter::repeat_with(f).take(new_len - len))?; + } else { + self.truncate(new_len); + } + + Ok(()) + })()) } /// Consumes and leaks the `Vec`, returning a mutable reference to the contents, @@ -2349,16 +2379,20 @@ impl Vec { /// vec.resize(2, 0); /// assert_eq!(vec, [1, 2]); /// ``` - #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_resize", since = "1.5.0")] - pub fn resize(&mut self, new_len: usize, value: T) { - let len = self.len(); + pub fn resize(&mut self, new_len: usize, value: T) -> A::Result<()> { + A::map_result((|| { + // Substitute for try block + let len = self.len(); - if new_len > len { - self.extend_with(new_len - len, value) - } else { - self.truncate(new_len); - } + if new_len > len { + self.extend_with(new_len - len, value)?; + } else { + self.truncate(new_len); + } + + Ok(()) + })()) } /// Clones and appends all elements in a slice to the `Vec`. @@ -2382,8 +2416,8 @@ impl Vec { /// [`extend`]: Vec::extend #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_extend_from_slice", since = "1.6.0")] - pub fn extend_from_slice(&mut self, other: &[T]) { - self.spec_extend(other.iter()) + pub fn extend_from_slice(&mut self, other: &[T]) -> A::Result<()> { + A::map_result(self.spec_extend(other.iter())) } /// Copies elements from `src` range to the end of the vector. 
@@ -2409,18 +2443,23 @@ impl Vec { /// ``` #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_extend_from_within", since = "1.53.0")] - pub fn extend_from_within(&mut self, src: R) + pub fn extend_from_within(&mut self, src: R) -> A::Result<()> where R: RangeBounds, { - let range = slice::range(src, ..self.len()); - self.reserve(range.len()); + A::map_result((|| { + // Substitute for try block + let range = slice::range(src, ..self.len()); + self.try_reserve(range.len())?; - // SAFETY: - // - `slice::range` guarantees that the given range is valid for indexing self - unsafe { - self.spec_extend_from_within(range); - } + // SAFETY: + // - `slice::range` guarantees that the given range is valid for indexing self + unsafe { + self.spec_extend_from_within(range); + } + + Ok(()) + })()) } } @@ -2472,8 +2511,8 @@ impl Vec<[T; N], A> { impl Vec { #[cfg(not(no_global_oom_handling))] /// Extend the vector by `n` clones of value. - fn extend_with(&mut self, n: usize, value: T) { - self.reserve(n); + fn extend_with(&mut self, n: usize, value: T) -> Result<(), TryReserveError> { + self.try_reserve(n)?; unsafe { let mut ptr = self.as_mut_ptr().add(self.len()); @@ -2498,6 +2537,8 @@ impl Vec { // len set by scope guard } + + Ok(()) } } @@ -2528,17 +2569,15 @@ impl Vec { //////////////////////////////////////////////////////////////////////////////// #[doc(hidden)] -#[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] -pub fn from_elem(elem: T, n: usize) -> Vec { - ::from_elem(elem, n, Global) +pub fn from_elem(elem: T, n: usize) -> ::Result> { + ::map_result(::from_elem(elem, n, Global)) } #[doc(hidden)] -#[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] -pub fn from_elem_in(elem: T, n: usize, alloc: A) -> Vec { - ::from_elem(elem, n, alloc) +pub fn from_elem_in(elem: T, n: usize, alloc: A) -> A::Result> { + A::map_result(::from_elem(elem, n, alloc)) } trait ExtendFromWithinSpec { @@ -2761,12 
+2800,11 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec { } } -#[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] -impl Extend for Vec { +impl = ()>> Extend for Vec { #[inline] fn extend>(&mut self, iter: I) { - >::spec_extend(self, iter.into_iter()) + A::map_result(>::spec_extend(self, iter.into_iter())) } #[inline] @@ -2783,8 +2821,10 @@ impl Extend for Vec { impl Vec { // leaf method to which various SpecFrom/SpecExtend implementations delegate when // they have no further optimizations to apply - #[cfg(not(no_global_oom_handling))] - fn extend_desugared>(&mut self, mut iterator: I) { + fn extend_desugared>( + &mut self, + mut iterator: I, + ) -> Result<(), TryReserveError> { // This is the case for a general iterator. // // This function should be the moral equivalent of: @@ -2796,7 +2836,7 @@ impl Vec { let len = self.len(); if len == self.capacity() { let (lower, _) = iterator.size_hint(); - self.reserve(lower.saturating_add(1)); + self.try_reserve(lower.saturating_add(1))?; } unsafe { ptr::write(self.as_mut_ptr().add(len), element); @@ -2806,12 +2846,16 @@ impl Vec { self.set_len(len + 1); } } + + Ok(()) } // specific extend for `TrustedLen` iterators, called both by the specializations // and internal places where resolving specialization makes compilation slower - #[cfg(not(no_global_oom_handling))] - fn extend_trusted(&mut self, iterator: impl iter::TrustedLen) { + fn extend_trusted( + &mut self, + iterator: impl iter::TrustedLen, + ) -> Result<(), TryReserveError> { let (low, high) = iterator.size_hint(); if let Some(additional) = high { debug_assert_eq!( @@ -2820,7 +2864,7 @@ impl Vec { "TrustedLen iterator's size hint is not exact: {:?}", (low, high) ); - self.reserve(additional); + self.try_reserve(additional)?; unsafe { let ptr = self.as_mut_ptr(); let mut local_len = SetLenOnDrop::new(&mut self.len); @@ -2832,13 +2876,15 @@ impl Vec { local_len.increment_len(1); }); } + + Ok(()) } else { // Per TrustedLen 
contract a `None` upper bound means that the iterator length // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway. // Since the other branch already panics eagerly (via `reserve()`) we do the same here. // This avoids additional codegen for a fallback code path which would eventually // panic anyway. - panic!("capacity overflow"); + Err(TryReserveErrorKind::CapacityOverflow.into()) } } @@ -2882,6 +2928,7 @@ impl Vec { where R: RangeBounds, I: IntoIterator, + A: Allocator = ()>, { Splice { drain: self.drain(range), replace_with: replace_with.into_iter() } } @@ -2952,11 +2999,10 @@ impl Vec { /// append the entire slice at once. /// /// [`copy_from_slice`]: slice::copy_from_slice -#[cfg(not(no_global_oom_handling))] #[stable(feature = "extend_ref", since = "1.2.0")] -impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec { +impl<'a, T: Copy + 'a, A: Allocator = ()> + 'a> Extend<&'a T> for Vec { fn extend>(&mut self, iter: I) { - self.spec_extend(iter.into_iter()) + A::map_result(self.spec_extend(iter.into_iter())) } #[inline] @@ -3153,10 +3199,12 @@ impl From> for Vec { } // note: test pulls in std, which causes errors here -#[cfg(not(no_global_oom_handling))] #[cfg(not(test))] #[stable(feature = "box_from_vec", since = "1.20.0")] -impl From> for Box<[T], A> { +impl From> for Box<[T], A> +where + A: Allocator = Self>, +{ /// Convert a vector into a boxed slice. 
/// /// If `v` has excess capacity, its items will be moved into a diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs index 56065ce565bfc..6b976ba6df12b 100644 --- a/library/alloc/src/vec/spec_extend.rs +++ b/library/alloc/src/vec/spec_extend.rs @@ -1,4 +1,5 @@ use crate::alloc::Allocator; +use crate::collections::TryReserveError; use core::iter::TrustedLen; use core::slice::{self}; @@ -6,14 +7,14 @@ use super::{IntoIter, Vec}; // Specialization trait used for Vec::extend pub(super) trait SpecExtend { - fn spec_extend(&mut self, iter: I); + fn spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>; } impl SpecExtend for Vec where I: Iterator, { - default fn spec_extend(&mut self, iter: I) { + default fn spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> { self.extend_desugared(iter) } } @@ -22,17 +23,19 @@ impl SpecExtend for Vec where I: TrustedLen, { - default fn spec_extend(&mut self, iterator: I) { + default fn spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> { self.extend_trusted(iterator) } } impl SpecExtend> for Vec { - fn spec_extend(&mut self, mut iterator: IntoIter) { + fn spec_extend(&mut self, mut iterator: IntoIter) -> Result<(), TryReserveError> { unsafe { - self.append_elements(iterator.as_slice() as _); + self.append_elements(iterator.as_slice() as _)?; } iterator.forget_remaining_elements(); + + Ok(()) } } @@ -41,7 +44,7 @@ where I: Iterator, T: Clone, { - default fn spec_extend(&mut self, iterator: I) { + default fn spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> { self.spec_extend(iterator.cloned()) } } @@ -50,8 +53,8 @@ impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec where T: Copy, { - fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) { + fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> { let slice = iterator.as_slice(); - unsafe { self.append_elements(slice) }; + unsafe { 
self.append_elements(slice) } } } diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs index da43d17bf3624..e059bb292eef4 100644 --- a/library/alloc/src/vec/spec_from_elem.rs +++ b/library/alloc/src/vec/spec_from_elem.rs @@ -1,61 +1,87 @@ use core::ptr; -use crate::alloc::Allocator; -use crate::raw_vec::RawVec; +use crate::collections::TryReserveError; +use crate::falloc::Allocator; use super::{IsZero, Vec}; // Specialization trait used for Vec::from_elem pub(super) trait SpecFromElem: Sized { - fn from_elem(elem: Self, n: usize, alloc: A) -> Vec; + fn from_elem( + elem: Self, + n: usize, + alloc: A, + ) -> Result, TryReserveError>; } impl SpecFromElem for T { - default fn from_elem(elem: Self, n: usize, alloc: A) -> Vec { - let mut v = Vec::with_capacity_in(n, alloc); - v.extend_with(n, elem); - v + default fn from_elem( + elem: Self, + n: usize, + alloc: A, + ) -> Result, TryReserveError> { + let mut v = Vec::try_with_capacity_in(n, alloc)?; + v.extend_with(n, elem)?; + Ok(v) } } impl SpecFromElem for T { #[inline] - default fn from_elem(elem: T, n: usize, alloc: A) -> Vec { + default fn from_elem( + elem: T, + n: usize, + alloc: A, + ) -> Result, TryReserveError> { if elem.is_zero() { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; + let mut v = Vec::try_with_capacity_zeroed_in(n, alloc)?; + unsafe { v.set_len(n) }; + return Ok(v); } - let mut v = Vec::with_capacity_in(n, alloc); - v.extend_with(n, elem); - v + let mut v = Vec::try_with_capacity_in(n, alloc)?; + v.extend_with(n, elem)?; + Ok(v) } } impl SpecFromElem for i8 { #[inline] - fn from_elem(elem: i8, n: usize, alloc: A) -> Vec { + fn from_elem( + elem: i8, + n: usize, + alloc: A, + ) -> Result, TryReserveError> { if elem == 0 { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; + let mut v = Vec::try_with_capacity_zeroed_in(n, alloc)?; + unsafe { v.set_len(n) }; + return Ok(v); } unsafe { - let mut v = 
Vec::with_capacity_in(n, alloc); + let mut v = Vec::try_with_capacity_in(n, alloc)?; ptr::write_bytes(v.as_mut_ptr(), elem as u8, n); v.set_len(n); - v + Ok(v) } } } impl SpecFromElem for u8 { #[inline] - fn from_elem(elem: u8, n: usize, alloc: A) -> Vec { + fn from_elem( + elem: u8, + n: usize, + alloc: A, + ) -> Result, TryReserveError> { if elem == 0 { - return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n }; + let mut v = Vec::try_with_capacity_zeroed_in(n, alloc)?; + unsafe { v.set_len(n) }; + return Ok(v); } unsafe { - let mut v = Vec::with_capacity_in(n, alloc); + let mut v = Vec::try_with_capacity_in(n, alloc)?; ptr::write_bytes(v.as_mut_ptr(), elem, n); v.set_len(n); - v + Ok(v) } } } diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs index efa6868473e49..e9e37e27eeb15 100644 --- a/library/alloc/src/vec/spec_from_iter.rs +++ b/library/alloc/src/vec/spec_from_iter.rs @@ -1,5 +1,7 @@ use core::mem::ManuallyDrop; -use core::ptr::{self}; +use core::ptr; + +use crate::falloc::{Allocator, Global}; use super::{IntoIter, SpecExtend, SpecFromIterNested, Vec}; @@ -58,7 +60,7 @@ impl SpecFromIter> for Vec { let mut vec = Vec::new(); // must delegate to spec_extend() since extend() itself delegates // to spec_from for empty Vecs - vec.spec_extend(iterator); + let () = ::map_result(vec.spec_extend(iterator)); vec } } diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs index f915ebb86e5a5..3c61fcf66c47b 100644 --- a/library/alloc/src/vec/spec_from_iter_nested.rs +++ b/library/alloc/src/vec/spec_from_iter_nested.rs @@ -2,6 +2,7 @@ use core::cmp; use core::iter::TrustedLen; use core::ptr; +use crate::falloc::{Allocator, Global}; use crate::raw_vec::RawVec; use super::{SpecExtend, Vec}; @@ -40,7 +41,10 @@ where }; // must delegate to spec_extend() since extend() itself delegates // to spec_from for empty Vecs - as SpecExtend>::spec_extend(&mut vector, 
iterator); + let () = ::map_result( as SpecExtend>::spec_extend( + &mut vector, + iterator, + )); vector } } @@ -59,7 +63,7 @@ where _ => panic!("capacity overflow"), }; // reuse extend specialization for TrustedLen - vector.spec_extend(iterator); + let () = ::map_result(vector.spec_extend(iterator)); vector } } diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs index 852fdcc3f5ce7..f0eccb7d9fedc 100644 --- a/library/alloc/src/vec/splice.rs +++ b/library/alloc/src/vec/splice.rs @@ -1,6 +1,6 @@ -use crate::alloc::{Allocator, Global}; -use core::ptr::{self}; -use core::slice::{self}; +use crate::falloc::{Allocator, Global}; +use core::ptr; +use core::slice; use super::{Drain, Vec}; @@ -21,14 +21,14 @@ use super::{Drain, Vec}; pub struct Splice< 'a, I: Iterator + 'a, - #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global, + #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = ()> + 'a = Global, > { pub(super) drain: Drain<'a, I::Item, A>, pub(super) replace_with: I, } #[stable(feature = "vec_splice", since = "1.21.0")] -impl Iterator for Splice<'_, I, A> { +impl = ()>> Iterator for Splice<'_, I, A> { type Item = I::Item; fn next(&mut self) -> Option { @@ -41,17 +41,17 @@ impl Iterator for Splice<'_, I, A> { } #[stable(feature = "vec_splice", since = "1.21.0")] -impl DoubleEndedIterator for Splice<'_, I, A> { +impl = ()>> DoubleEndedIterator for Splice<'_, I, A> { fn next_back(&mut self) -> Option { self.drain.next_back() } } #[stable(feature = "vec_splice", since = "1.21.0")] -impl ExactSizeIterator for Splice<'_, I, A> {} +impl = ()>> ExactSizeIterator for Splice<'_, I, A> {} #[stable(feature = "vec_splice", since = "1.21.0")] -impl Drop for Splice<'_, I, A> { +impl = ()>> Drop for Splice<'_, I, A> { fn drop(&mut self) { self.drain.by_ref().for_each(drop); // At this point draining is done and the only remaining tasks are splicing @@ -98,7 +98,7 @@ impl Drop for Splice<'_, I, A> { } /// 
Private helper methods for `Splice::drop` -impl Drain<'_, T, A> { +impl = ()>> Drain<'_, T, A> { /// The range from `self.vec.len` to `self.tail_start` contains elements /// that have been moved out. /// Fill that range as much as possible with new elements from the `replace_with` iterator. @@ -126,7 +126,7 @@ impl Drain<'_, T, A> { unsafe fn move_tail(&mut self, additional: usize) { let vec = unsafe { self.vec.as_mut() }; let len = self.tail_start + self.tail_len; - vec.buf.reserve(len, additional); + let () = A::map_result(vec.buf.reserve(len, additional)); let new_tail_start = self.tail_start + additional; unsafe { From 8528f6613ac79b6f3425c6f780fb3ad84f0c58be Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Thu, 25 May 2023 22:56:04 -0600 Subject: [PATCH 02/15] parameterize Allocator::Result on error type --- library/alloc/src/boxed.rs | 13 +++-- .../alloc/src/collections/vec_deque/mod.rs | 21 ++++---- library/alloc/src/falloc.rs | 44 ++++++++++++----- library/alloc/src/raw_vec.rs | 2 +- library/alloc/src/vec/mod.rs | 49 +++++++++++-------- library/alloc/src/vec/splice.rs | 18 ++++--- 6 files changed, 97 insertions(+), 50 deletions(-) diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 32e9d44487d9e..a507d75a544ea 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -169,6 +169,7 @@ use core::task::{Context, Poll}; use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw}; #[cfg(not(no_global_oom_handling))] use crate::borrow::Cow; +use crate::collections::TryReserveError; use crate::falloc::{AllocError, Allocator, Global, Layout}; use crate::raw_vec::RawVec; #[cfg(not(no_global_oom_handling))] @@ -737,7 +738,10 @@ impl Box<[T], A> { #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] - pub fn new_uninit_slice_in(len: usize, alloc: A) -> A::Result], A>> { + pub fn new_uninit_slice_in( + len: usize, + alloc: A, + ) -> A::Result], A>, 
TryReserveError> { unsafe { A::map_result(RawVec::with_capacity_in(len, alloc).map(|r| r.into_box(len))) } } @@ -764,7 +768,10 @@ impl Box<[T], A> { #[unstable(feature = "allocator_api", issue = "32838")] // #[unstable(feature = "new_uninit", issue = "63291")] #[must_use] - pub fn new_zeroed_slice_in(len: usize, alloc: A) -> A::Result], A>> { + pub fn new_zeroed_slice_in( + len: usize, + alloc: A, + ) -> A::Result], A>, TryReserveError> { unsafe { A::map_result(RawVec::with_capacity_zeroed_in(len, alloc).map(|r| r.into_box(len))) } @@ -2008,7 +2015,7 @@ impl FromIterator for Box<[I]> { } #[stable(feature = "box_slice_clone", since = "1.3.0")] -impl = Self> + Clone> Clone for Box<[T], A> { +impl = Self> + Clone> Clone for Box<[T], A> { fn clone(&self) -> Self { let alloc = Box::allocator(self).clone(); self.to_vec_in(alloc).into_boxed_slice() diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index c7deff635cd73..3152c5fde7d01 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -108,7 +108,7 @@ pub struct VecDeque< #[stable(feature = "rust1", since = "1.0.0")] impl Clone for VecDeque where - A: Allocator = Self> + Clone, + A: Allocator = Self> + Clone, { fn clone(&self) -> Self { let mut deq = Self::with_capacity_in(self.len(), self.allocator().clone()); @@ -594,7 +594,10 @@ impl VecDeque { /// let deque: VecDeque = VecDeque::with_capacity(10); /// ``` #[unstable(feature = "allocator_api", issue = "32838")] - pub fn with_capacity_in(capacity: usize, alloc: A) -> A::Result> { + pub fn with_capacity_in( + capacity: usize, + alloc: A, + ) -> A::Result, TryReserveError> { A::map_result(Self::try_with_capacity_in(capacity, alloc)) } @@ -758,7 +761,7 @@ impl VecDeque { /// /// [`reserve`]: VecDeque::reserve #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve_exact(&mut self, additional: usize) -> A::Result<()> { + pub fn reserve_exact(&mut 
self, additional: usize) -> A::Result<(), TryReserveError> { A::map_result(self.try_reserve_exact(additional)) } @@ -779,7 +782,7 @@ impl VecDeque { /// assert!(buf.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: usize) -> A::Result<()> { + pub fn reserve(&mut self, additional: usize) -> A::Result<(), TryReserveError> { A::map_result(self.try_reserve(additional)) } @@ -925,7 +928,7 @@ impl VecDeque { /// assert!(buf.capacity() >= 4); /// ``` #[stable(feature = "shrink_to", since = "1.56.0")] - pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<()> { + pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<(), TryReserveError> { A::map_result((|| { // Substitute for try block let target_cap = min_capacity.max(self.len); @@ -1622,7 +1625,7 @@ impl VecDeque { /// assert_eq!(d.front(), Some(&2)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn push_front(&mut self, value: T) -> A::Result<()> { + pub fn push_front(&mut self, value: T) -> A::Result<(), TryReserveError> { A::map_result((|| { // Substitute for try block if self.is_full() { @@ -1653,7 +1656,7 @@ impl VecDeque { /// assert_eq!(3, *buf.back().unwrap()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn push_back(&mut self, value: T) -> A::Result<()> { + pub fn push_back(&mut self, value: T) -> A::Result<(), TryReserveError> { A::map_result((|| { // Substsitute for try block if self.is_full() { @@ -1767,7 +1770,7 @@ impl VecDeque { /// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']); /// ``` #[stable(feature = "deque_extras_15", since = "1.5.0")] - pub fn insert(&mut self, index: usize, value: T) -> A::Result<()> { + pub fn insert(&mut self, index: usize, value: T) -> A::Result<(), TryReserveError> { A::map_result((|| { // Substitute for try block assert!(index <= self.len(), "index out of bounds"); @@ -1874,7 +1877,7 @@ impl VecDeque { #[inline] #[must_use = "use `.truncate()` if you don't need the 
other half"] #[stable(feature = "split_off", since = "1.4.0")] - pub fn split_off(&mut self, at: usize) -> A::Result + pub fn split_off(&mut self, at: usize) -> A::Result where A: Clone, { diff --git a/library/alloc/src/falloc.rs b/library/alloc/src/falloc.rs index 04428d807cf00..73295c159761d 100644 --- a/library/alloc/src/falloc.rs +++ b/library/alloc/src/falloc.rs @@ -1,3 +1,5 @@ +use core::error::Error; + pub use crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError}; /// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of @@ -59,10 +61,14 @@ pub use crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError}; #[unstable(feature = "allocator_api", issue = "32838")] pub unsafe trait Allocator: crate::alloc::Allocator { #[must_use] // Doesn't actually work - type Result; + type Result + where + E: Error + IntoLayout; #[must_use] - fn map_result(result: Result) -> Self::Result; + fn map_result(result: Result) -> Self::Result + where + E: Error + IntoLayout; } #[cfg(not(no_global_oom_handling))] @@ -78,18 +84,34 @@ pub(crate) fn capacity_overflow() -> ! { panic!("capacity overflow"); } +#[unstable(feature = "allocator_api", issue = "32838")] +pub trait IntoLayout { + #[cfg(not(no_global_oom_handling))] + fn into_layout(self) -> Layout; +} + +#[unstable(feature = "allocator_api", issue = "32838")] +impl IntoLayout for TryReserveError { + #[cfg(not(no_global_oom_handling))] + fn into_layout(self) -> Layout { + match self.kind() { + TryReserveErrorKind::CapacityOverflow => capacity_overflow(), + TryReserveErrorKind::AllocError { layout, .. 
} => layout, + } + } +} + #[unstable(feature = "allocator_api", issue = "32838")] #[cfg(not(no_global_oom_handling))] unsafe impl Allocator for X { - type Result = T; + type Result = T + where + E: Error + IntoLayout; - fn map_result(result: Result) -> Self::Result { - match result { - Err(error) => match error.kind() { - TryReserveErrorKind::CapacityOverflow => capacity_overflow(), - TryReserveErrorKind::AllocError { layout, .. } => handle_alloc_error(layout), - }, - Ok(x) => x, - } + fn map_result(result: Result) -> Self::Result + where + E: Error + IntoLayout, + { + result.unwrap_or_else(|error| handle_alloc_error(error.into_layout())) } } diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index 841c3151b5479..6dd274f374d3f 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -7,10 +7,10 @@ use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; use core::ptr::{self, NonNull, Unique}; use core::slice; +use crate::alloc::{Allocator, Global, Layout}; use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind::*; -use crate::falloc::{Allocator, Global, Layout}; #[cfg(test)] mod tests; diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 190b9d867cf08..9c8fc123de1c3 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -673,7 +673,7 @@ impl Vec { /// ``` #[inline] #[unstable(feature = "allocator_api", issue = "32838")] - pub fn with_capacity_in(capacity: usize, alloc: A) -> A::Result { + pub fn with_capacity_in(capacity: usize, alloc: A) -> A::Result { A::map_result(Self::try_with_capacity_in(capacity, alloc)) } @@ -908,7 +908,7 @@ impl Vec { /// assert!(vec.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: usize) -> A::Result<()> { + pub fn reserve(&mut self, additional: usize) -> A::Result<(), TryReserveError> { 
A::map_result(self.try_reserve(additional)) } @@ -937,7 +937,7 @@ impl Vec { /// assert!(vec.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve_exact(&mut self, additional: usize) -> A::Result<()> { + pub fn reserve_exact(&mut self, additional: usize) -> A::Result<(), TryReserveError> { A::map_result(self.try_reserve_exact(additional)) } @@ -1047,7 +1047,7 @@ impl Vec { /// assert!(vec.capacity() >= 3); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn shrink_to_fit(&mut self) -> A::Result<()> { + pub fn shrink_to_fit(&mut self) -> A::Result<(), TryReserveError> { A::map_result(self.try_shrink_to_fit()) } @@ -1070,7 +1070,7 @@ impl Vec { /// assert!(vec.capacity() >= 3); /// ``` #[stable(feature = "shrink_to", since = "1.56.0")] - pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<()> { + pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<(), TryReserveError> { A::map_result(if self.capacity() > min_capacity { self.buf.shrink_to(cmp::max(self.len, min_capacity)) } else { @@ -1104,7 +1104,7 @@ impl Vec { /// assert_eq!(slice.into_vec().capacity(), 3); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn into_boxed_slice(mut self) -> A::Result> { + pub fn into_boxed_slice(mut self) -> A::Result, TryReserveError> { A::map_result((|| { // Substitute for try block self.try_shrink_to_fit()?; @@ -1442,7 +1442,7 @@ impl Vec { /// assert_eq!(vec, [1, 4, 2, 3, 5]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn insert(&mut self, index: usize, element: T) -> A::Result<()> { + pub fn insert(&mut self, index: usize, element: T) -> A::Result<(), TryReserveError> { A::map_result((|| { // Substitute for try block #[cold] @@ -1835,7 +1835,7 @@ impl Vec { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn push(&mut self, value: T) -> A::Result<()> { + pub fn push(&mut self, value: T) -> A::Result<(), TryReserveError> { A::map_result((|| { // Substitute for try 
block // This will panic or abort if we would allocate > isize::MAX bytes @@ -1943,7 +1943,7 @@ impl Vec { /// ``` #[inline] #[stable(feature = "append", since = "1.4.0")] - pub fn append(&mut self, other: &mut Self) -> A::Result<()> { + pub fn append(&mut self, other: &mut Self) -> A::Result<(), TryReserveError> { A::map_result((|| { // Substitute for try block unsafe { @@ -2111,7 +2111,7 @@ impl Vec { #[inline] #[must_use = "use `.truncate()` if you don't need the other half"] #[stable(feature = "split_off", since = "1.4.0")] - pub fn split_off(&mut self, at: usize) -> A::Result + pub fn split_off(&mut self, at: usize) -> A::Result where A: Clone, { @@ -2176,7 +2176,7 @@ impl Vec { /// assert_eq!(vec, [2, 4, 8, 16]); /// ``` #[stable(feature = "vec_resize_with", since = "1.33.0")] - pub fn resize_with(&mut self, new_len: usize, f: F) -> A::Result<()> + pub fn resize_with(&mut self, new_len: usize, f: F) -> A::Result<(), TryReserveError> where F: FnMut() -> T, { @@ -2380,7 +2380,7 @@ impl Vec { /// assert_eq!(vec, [1, 2]); /// ``` #[stable(feature = "vec_resize", since = "1.5.0")] - pub fn resize(&mut self, new_len: usize, value: T) -> A::Result<()> { + pub fn resize(&mut self, new_len: usize, value: T) -> A::Result<(), TryReserveError> { A::map_result((|| { // Substitute for try block let len = self.len(); @@ -2416,7 +2416,7 @@ impl Vec { /// [`extend`]: Vec::extend #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_extend_from_slice", since = "1.6.0")] - pub fn extend_from_slice(&mut self, other: &[T]) -> A::Result<()> { + pub fn extend_from_slice(&mut self, other: &[T]) -> A::Result<(), TryReserveError> { A::map_result(self.spec_extend(other.iter())) } @@ -2443,7 +2443,7 @@ impl Vec { /// ``` #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_extend_from_within", since = "1.53.0")] - pub fn extend_from_within(&mut self, src: R) -> A::Result<()> + pub fn extend_from_within(&mut self, src: R) -> A::Result<(), TryReserveError> where R: 
RangeBounds, { @@ -2570,13 +2570,20 @@ impl Vec { #[doc(hidden)] #[stable(feature = "rust1", since = "1.0.0")] -pub fn from_elem(elem: T, n: usize) -> ::Result> { +pub fn from_elem( + elem: T, + n: usize, +) -> ::Result, TryReserveError> { ::map_result(::from_elem(elem, n, Global)) } #[doc(hidden)] #[unstable(feature = "allocator_api", issue = "32838")] -pub fn from_elem_in(elem: T, n: usize, alloc: A) -> A::Result> { +pub fn from_elem_in( + elem: T, + n: usize, + alloc: A, +) -> A::Result, TryReserveError> { A::map_result(::from_elem(elem, n, alloc)) } @@ -2801,7 +2808,7 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec { } #[stable(feature = "rust1", since = "1.0.0")] -impl = ()>> Extend for Vec { +impl = ()>> Extend for Vec { #[inline] fn extend>(&mut self, iter: I) { A::map_result(>::spec_extend(self, iter.into_iter())) @@ -2928,7 +2935,7 @@ impl Vec { where R: RangeBounds, I: IntoIterator, - A: Allocator = ()>, + A: Allocator = ()>, { Splice { drain: self.drain(range), replace_with: replace_with.into_iter() } } @@ -3000,7 +3007,9 @@ impl Vec { /// /// [`copy_from_slice`]: slice::copy_from_slice #[stable(feature = "extend_ref", since = "1.2.0")] -impl<'a, T: Copy + 'a, A: Allocator = ()> + 'a> Extend<&'a T> for Vec { +impl<'a, T: Copy + 'a, A: Allocator = ()> + 'a> Extend<&'a T> + for Vec +{ fn extend>(&mut self, iter: I) { A::map_result(self.spec_extend(iter.into_iter())) } @@ -3203,7 +3212,7 @@ impl From> for Vec { #[stable(feature = "box_from_vec", since = "1.20.0")] impl From> for Box<[T], A> where - A: Allocator = Self>, + A: Allocator = Self>, { /// Convert a vector into a boxed slice. 
/// diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs index f0eccb7d9fedc..24b0a4bcba1e9 100644 --- a/library/alloc/src/vec/splice.rs +++ b/library/alloc/src/vec/splice.rs @@ -1,3 +1,4 @@ +use crate::collections::TryReserveError; use crate::falloc::{Allocator, Global}; use core::ptr; use core::slice; @@ -21,14 +22,14 @@ use super::{Drain, Vec}; pub struct Splice< 'a, I: Iterator + 'a, - #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = ()> + 'a = Global, + #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = ()> + 'a = Global, > { pub(super) drain: Drain<'a, I::Item, A>, pub(super) replace_with: I, } #[stable(feature = "vec_splice", since = "1.21.0")] -impl = ()>> Iterator for Splice<'_, I, A> { +impl = ()>> Iterator for Splice<'_, I, A> { type Item = I::Item; fn next(&mut self) -> Option { @@ -41,17 +42,22 @@ impl = ()>> Iterator for Splice<'_, I, A> { } #[stable(feature = "vec_splice", since = "1.21.0")] -impl = ()>> DoubleEndedIterator for Splice<'_, I, A> { +impl = ()>> DoubleEndedIterator + for Splice<'_, I, A> +{ fn next_back(&mut self) -> Option { self.drain.next_back() } } #[stable(feature = "vec_splice", since = "1.21.0")] -impl = ()>> ExactSizeIterator for Splice<'_, I, A> {} +impl = ()>> ExactSizeIterator + for Splice<'_, I, A> +{ +} #[stable(feature = "vec_splice", since = "1.21.0")] -impl = ()>> Drop for Splice<'_, I, A> { +impl = ()>> Drop for Splice<'_, I, A> { fn drop(&mut self) { self.drain.by_ref().for_each(drop); // At this point draining is done and the only remaining tasks are splicing @@ -98,7 +104,7 @@ impl = ()>> Drop for Splice<'_, I, A> { } /// Private helper methods for `Splice::drop` -impl = ()>> Drain<'_, T, A> { +impl = ()>> Drain<'_, T, A> { /// The range from `self.vec.len` to `self.tail_start` contains elements /// that have been moved out. /// Fill that range as much as possible with new elements from the `replace_with` iterator. 
From bbc87578ac9285986b7f39cf9c251fe0f979b60a Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Thu, 25 May 2023 22:56:50 -0600 Subject: [PATCH 03/15] fix tests changes fixed codegen/vec-shrink-panik --- tests/codegen/vec-shrink-panik.rs | 19 ------------------- tests/ui/error-codes/E0401.stderr | 2 +- .../e0119/conflict-with-std.stderr | 2 +- tests/ui/hygiene/panic-location.run.stderr | 2 +- 4 files changed, 3 insertions(+), 22 deletions(-) diff --git a/tests/codegen/vec-shrink-panik.rs b/tests/codegen/vec-shrink-panik.rs index 606d68ff3ab38..5ffd79993d26c 100644 --- a/tests/codegen/vec-shrink-panik.rs +++ b/tests/codegen/vec-shrink-panik.rs @@ -22,14 +22,6 @@ pub fn shrink_to_fit(vec: &mut Vec) { // CHECK-LABEL: @issue71861 #[no_mangle] pub fn issue71861(vec: Vec) -> Box<[u32]> { - // CHECK-NOT: panic - - // Call to panic_cannot_unwind in case of double-panic is expected - // on LLVM 16 and older, but other panics are not. - // old: filter - // old-NEXT: ; call core::panicking::panic_cannot_unwind - // old-NEXT: panic_cannot_unwind - // CHECK-NOT: panic vec.into_boxed_slice() } @@ -37,17 +29,6 @@ pub fn issue71861(vec: Vec) -> Box<[u32]> { // CHECK-LABEL: @issue75636 #[no_mangle] pub fn issue75636<'a>(iter: &[&'a str]) -> Box<[&'a str]> { - // CHECK-NOT: panic - - // Call to panic_cannot_unwind in case of double-panic is expected, - // on LLVM 16 and older, but other panics are not. 
- // old: filter - // old-NEXT: ; call core::panicking::panic_cannot_unwind - // old-NEXT: panic_cannot_unwind - // CHECK-NOT: panic iter.iter().copied().collect() } - -// old: ; core::panicking::panic_cannot_unwind -// old: declare void @{{.*}}panic_cannot_unwind diff --git a/tests/ui/error-codes/E0401.stderr b/tests/ui/error-codes/E0401.stderr index fa4b91cacef72..2e65ef27a5879 100644 --- a/tests/ui/error-codes/E0401.stderr +++ b/tests/ui/error-codes/E0401.stderr @@ -53,7 +53,7 @@ LL | bfnr(x); - impl Fn for &F where A: Tuple, F: Fn, F: ?Sized; - impl Fn for Box - where Args: Tuple, F: Fn, A: Allocator, F: ?Sized; + where Args: Tuple, F: Fn, A: alloc::falloc::Allocator, F: ?Sized; note: required by a bound in `bfnr` --> $DIR/E0401.rs:4:30 | diff --git a/tests/ui/error-codes/e0119/conflict-with-std.stderr b/tests/ui/error-codes/e0119/conflict-with-std.stderr index ef888a1c2871e..9634a9e3282f9 100644 --- a/tests/ui/error-codes/e0119/conflict-with-std.stderr +++ b/tests/ui/error-codes/e0119/conflict-with-std.stderr @@ -6,7 +6,7 @@ LL | impl AsRef for Box { | = note: conflicting implementation in crate `alloc`: - impl AsRef for Box - where A: Allocator, T: ?Sized; + where A: alloc::falloc::Allocator, T: ?Sized; error[E0119]: conflicting implementations of trait `From` for type `S` --> $DIR/conflict-with-std.rs:12:1 diff --git a/tests/ui/hygiene/panic-location.run.stderr b/tests/ui/hygiene/panic-location.run.stderr index a7252a4002770..c5c4587c93eb1 100644 --- a/tests/ui/hygiene/panic-location.run.stderr +++ b/tests/ui/hygiene/panic-location.run.stderr @@ -1,2 +1,2 @@ -thread 'main' panicked at 'capacity overflow', library/alloc/src/raw_vec.rs:524:5 +thread 'main' panicked at 'capacity overflow', library/alloc/src/falloc.rs:84:5 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace From b9af8ee483b5c8fc8f042fafed2086a4122f1b9b Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Fri, 26 May 2023 20:42:26 -0600 Subject: [PATCH 04/15] fix tidy 
check --- library/alloc/src/falloc.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/library/alloc/src/falloc.rs b/library/alloc/src/falloc.rs index 73295c159761d..09fb98f8b7082 100644 --- a/library/alloc/src/falloc.rs +++ b/library/alloc/src/falloc.rs @@ -26,10 +26,10 @@ pub use crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError}; /// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer /// remains valid. /// -/// [`allocate`]: Allocator::allocate -/// [`grow`]: Allocator::grow -/// [`shrink`]: Allocator::shrink -/// [`deallocate`]: Allocator::deallocate +/// [`allocate`]: crate::alloc::Allocator::allocate +/// [`grow`]: crate::alloc::Allocator::grow +/// [`shrink`]: crate::alloc::Allocator::shrink +/// [`deallocate`]: crate::alloc::Allocator::deallocate /// /// ### Memory fitting /// From 320b870be2879e9a9d6217bf63fbcc6e794780f7 Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Sat, 27 May 2023 10:40:36 -0600 Subject: [PATCH 05/15] fix no_global_oom_handling --- library/alloc/src/boxed.rs | 1 + library/alloc/src/falloc.rs | 40 +++++++++++++++++++++++++++--------- library/alloc/src/vec/mod.rs | 3 --- 3 files changed, 31 insertions(+), 13 deletions(-) diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index a507d75a544ea..89ef44a266e16 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -2014,6 +2014,7 @@ impl FromIterator for Box<[I]> { } } +#[cfg(not(no_global_oom_handling))] #[stable(feature = "box_slice_clone", since = "1.3.0")] impl = Self> + Clone> Clone for Box<[T], A> { fn clone(&self) -> Self { diff --git a/library/alloc/src/falloc.rs b/library/alloc/src/falloc.rs index 09fb98f8b7082..7e6ef00c78f72 100644 --- a/library/alloc/src/falloc.rs +++ b/library/alloc/src/falloc.rs @@ -60,15 +60,25 @@ pub use crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError}; /// [*currently allocated*]: #currently-allocated-memory 
#[unstable(feature = "allocator_api", issue = "32838")] pub unsafe trait Allocator: crate::alloc::Allocator { + #[cfg(not(no_global_oom_handling))] #[must_use] // Doesn't actually work - type Result + type Result where - E: Error + IntoLayout; + E: IntoLayout; + #[cfg(not(no_global_oom_handling))] #[must_use] - fn map_result(result: Result) -> Self::Result + fn map_result(result: Result) -> Self::Result where - E: Error + IntoLayout; + E: IntoLayout; + + #[cfg(no_global_oom_handling)] + #[must_use] // Doesn't actually work + type Result; + + #[cfg(no_global_oom_handling)] + #[must_use] + fn map_result(result: Result) -> Self::Result; } #[cfg(not(no_global_oom_handling))] @@ -84,15 +94,15 @@ pub(crate) fn capacity_overflow() -> ! { panic!("capacity overflow"); } +#[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] pub trait IntoLayout { - #[cfg(not(no_global_oom_handling))] fn into_layout(self) -> Layout; } +#[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] impl IntoLayout for TryReserveError { - #[cfg(not(no_global_oom_handling))] fn into_layout(self) -> Layout { match self.kind() { TryReserveErrorKind::CapacityOverflow => capacity_overflow(), @@ -104,14 +114,24 @@ impl IntoLayout for TryReserveError { #[unstable(feature = "allocator_api", issue = "32838")] #[cfg(not(no_global_oom_handling))] unsafe impl Allocator for X { - type Result = T + type Result = T where - E: Error + IntoLayout; + E: IntoLayout; - fn map_result(result: Result) -> Self::Result + fn map_result(result: Result) -> Self::Result where - E: Error + IntoLayout, + E: IntoLayout, { result.unwrap_or_else(|error| handle_alloc_error(error.into_layout())) } } + +#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(no_global_oom_handling)] +unsafe impl Allocator for X { + type Result = Result; + + fn map_result(result: Result) -> Self::Result { + result + } +} diff --git a/library/alloc/src/vec/mod.rs 
b/library/alloc/src/vec/mod.rs index 9c8fc123de1c3..29a6b78760b7a 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -53,7 +53,6 @@ #![stable(feature = "rust1", since = "1.0.0")] -#[cfg(not(no_global_oom_handling))] use core::cmp; use core::cmp::Ordering; use core::fmt; @@ -111,10 +110,8 @@ use self::spec_from_elem::SpecFromElem; mod spec_from_elem; -#[cfg(not(no_global_oom_handling))] use self::set_len_on_drop::SetLenOnDrop; -#[cfg(not(no_global_oom_handling))] mod set_len_on_drop; #[cfg(not(no_global_oom_handling))] From 2b151545ff0e046f8627c13ef9ad7e0aba0b90d7 Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Sat, 27 May 2023 13:38:52 -0600 Subject: [PATCH 06/15] fix tests --- tests/ui/hygiene/panic-location.run.stderr | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ui/hygiene/panic-location.run.stderr b/tests/ui/hygiene/panic-location.run.stderr index c5c4587c93eb1..d1271ffb9a135 100644 --- a/tests/ui/hygiene/panic-location.run.stderr +++ b/tests/ui/hygiene/panic-location.run.stderr @@ -1,2 +1,2 @@ -thread 'main' panicked at 'capacity overflow', library/alloc/src/falloc.rs:84:5 +thread 'main' panicked at 'capacity overflow', library/alloc/src/falloc.rs:94:5 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace From dfb32b207546471ba4740eda73b53c9ef31a737e Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Thu, 1 Jun 2023 23:27:11 -0600 Subject: [PATCH 07/15] replace Allocator trait, add adapters --- compiler/rustc_serialize/src/serialize.rs | 12 +- library/alloc/src/alloc.rs | 31 +- library/alloc/src/boxed.rs | 10 +- library/alloc/src/collections/btree/append.rs | 2 +- library/alloc/src/collections/btree/fix.rs | 2 +- library/alloc/src/collections/btree/remove.rs | 2 +- library/alloc/src/collections/btree/split.rs | 2 +- .../src/collections/vec_deque/into_iter.rs | 11 +- library/alloc/src/falloc.rs | 541 +++++++++++++++++- library/alloc/src/raw_vec.rs | 3 +- 
library/alloc/src/raw_vec/tests.rs | 2 +- library/alloc/src/slice.rs | 32 +- library/alloc/src/vec/in_place_collect.rs | 2 +- library/alloc/src/vec/into_iter.rs | 9 +- library/alloc/src/vec/mod.rs | 9 +- library/alloc/tests/vec.rs | 14 +- library/std/src/alloc.rs | 11 + 17 files changed, 653 insertions(+), 42 deletions(-) diff --git a/compiler/rustc_serialize/src/serialize.rs b/compiler/rustc_serialize/src/serialize.rs index 06166cabc1872..3945612038832 100644 --- a/compiler/rustc_serialize/src/serialize.rs +++ b/compiler/rustc_serialize/src/serialize.rs @@ -3,6 +3,7 @@ use std::alloc::Allocator; use std::borrow::Cow; use std::cell::{Cell, RefCell}; +use std::collections::TryReserveError; use std::marker::PhantomData; use std::path; use std::rc::Rc; @@ -273,7 +274,11 @@ impl Decodable for PhantomData { } } -impl> Decodable for Box<[T], A> { +impl> Decodable for Box<[T], A> +where + A: Allocator, TryReserveError> = Vec>, + A: Allocator, TryReserveError> = Box<[T], A>>, +{ fn decode(d: &mut D) -> Box<[T], A> { let v: Vec = Decodable::decode(d); v.into_boxed_slice() @@ -308,7 +313,10 @@ impl> Encodable for Vec { } } -impl, A: Allocator + Default> Decodable for Vec { +impl, A: Default> Decodable for Vec +where + A: Allocator, TryReserveError> = Vec>, +{ default fn decode(d: &mut D) -> Vec { let len = d.read_usize(); let allocator = A::default(); diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs index 8c4f6a73d7fea..09b99316f54c7 100644 --- a/library/alloc/src/alloc.rs +++ b/library/alloc/src/alloc.rs @@ -2,6 +2,8 @@ #![stable(feature = "alloc_module", since = "1.28.0")] +#[cfg(not(test))] +use core::error::Error; #[cfg(not(test))] use core::intrinsics; use core::intrinsics::{min_align_of_val, size_of_val}; @@ -10,9 +12,15 @@ use core::ptr::Unique; #[cfg(not(test))] use core::ptr::{self, NonNull}; +#[unstable(feature = "allocator_api", issue = "32838")] +pub use crate::falloc::{Allocator, FallibleAdapter}; +#[unstable(feature = "allocator_api", issue 
= "32838")] +#[cfg(not(no_global_oom_handling))] +pub use crate::falloc::{InfallibleAdapter, IntoLayout}; #[stable(feature = "alloc_module", since = "1.28.0")] #[doc(inline)] -pub use core::alloc::*; +#[allow(deprecated)] +pub use core::alloc::{AllocError, GlobalAlloc, Layout, LayoutErr, LayoutError}; #[cfg(test)] mod tests; @@ -321,6 +329,27 @@ unsafe impl Allocator for Global { }, } } + + #[cfg(not(no_global_oom_handling))] + type Result = T + where + E: IntoLayout; + + #[cfg(not(no_global_oom_handling))] + fn map_result(result: Result) -> Self::Result + where + E: IntoLayout, + { + result.unwrap_or_else(|e| handle_alloc_error(e.into_layout())) + } + + #[cfg(no_global_oom_handling)] + type Result = Result; + + #[cfg(no_global_oom_handling)] + fn map_result(result: Result) -> Self::Result { + result + } } /// The allocator for unique pointers. diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 89ef44a266e16..c150748b26bba 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -2016,7 +2016,15 @@ impl FromIterator for Box<[I]> { #[cfg(not(no_global_oom_handling))] #[stable(feature = "box_slice_clone", since = "1.3.0")] -impl = Self> + Clone> Clone for Box<[T], A> { +impl Clone for Box<[T], A> +where + // Would like to see something like this work eventually, + // using `feature(non_lifetime_binders)` (#108185), but + // for now we'll have to enumerate each case that's needed. 
+ // A: for Allocator = X>, + A: Allocator, TryReserveError> = Vec>, + A: Allocator = Self>, +{ fn clone(&self) -> Self { let alloc = Box::allocator(self).clone(); self.to_vec_in(alloc).into_boxed_slice() diff --git a/library/alloc/src/collections/btree/append.rs b/library/alloc/src/collections/btree/append.rs index b6989afb6255d..b6e29e4f543c5 100644 --- a/library/alloc/src/collections/btree/append.rs +++ b/library/alloc/src/collections/btree/append.rs @@ -1,6 +1,6 @@ use super::merge_iter::MergeIterInner; use super::node::{self, Root}; -use core::alloc::Allocator; +use crate::alloc::Allocator; use core::iter::FusedIterator; impl Root { diff --git a/library/alloc/src/collections/btree/fix.rs b/library/alloc/src/collections/btree/fix.rs index 91b61218005a6..afc3ce93232a2 100644 --- a/library/alloc/src/collections/btree/fix.rs +++ b/library/alloc/src/collections/btree/fix.rs @@ -1,6 +1,6 @@ use super::map::MIN_LEN; use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef, Root}; -use core::alloc::Allocator; +use crate::alloc::Allocator; impl<'a, K: 'a, V: 'a> NodeRef, K, V, marker::LeafOrInternal> { /// Stocks up a possibly underfull node by merging with or stealing from a diff --git a/library/alloc/src/collections/btree/remove.rs b/library/alloc/src/collections/btree/remove.rs index 0904299254f0a..8666287948d6a 100644 --- a/library/alloc/src/collections/btree/remove.rs +++ b/library/alloc/src/collections/btree/remove.rs @@ -1,6 +1,6 @@ use super::map::MIN_LEN; use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef}; -use core::alloc::Allocator; +use crate::alloc::Allocator; impl<'a, K: 'a, V: 'a> Handle, K, V, marker::LeafOrInternal>, marker::KV> { /// Removes a key-value pair from the tree, and returns that pair, as well as diff --git a/library/alloc/src/collections/btree/split.rs b/library/alloc/src/collections/btree/split.rs index 638dc98fc3e41..7747c613c9401 100644 --- a/library/alloc/src/collections/btree/split.rs +++ 
b/library/alloc/src/collections/btree/split.rs @@ -1,6 +1,6 @@ use super::node::{ForceResult::*, Root}; use super::search::SearchResult::*; -use core::alloc::Allocator; +use crate::alloc::Allocator; use core::borrow::Borrow; impl Root { diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs index d9e274df0f5f2..150b116727968 100644 --- a/library/alloc/src/collections/vec_deque/into_iter.rs +++ b/library/alloc/src/collections/vec_deque/into_iter.rs @@ -12,7 +12,6 @@ use super::VecDeque; /// (provided by the [`IntoIterator`] trait). See its documentation for more. /// /// [`into_iter`]: VecDeque::into_iter -#[derive(Clone)] #[stable(feature = "rust1", since = "1.0.0")] pub struct IntoIter< T, @@ -21,6 +20,16 @@ pub struct IntoIter< inner: VecDeque, } +#[stable(feature = "rust1", since = "1.0.0")] +impl Clone for IntoIter +where + VecDeque: Clone, +{ + fn clone(&self) -> Self { + Self { inner: self.inner.clone() } + } +} + impl IntoIter { pub(super) fn new(inner: VecDeque) -> Self { IntoIter { inner } diff --git a/library/alloc/src/falloc.rs b/library/alloc/src/falloc.rs index 7e6ef00c78f72..b400d993e6364 100644 --- a/library/alloc/src/falloc.rs +++ b/library/alloc/src/falloc.rs @@ -1,5 +1,8 @@ use core::error::Error; +use core::ptr::{self, NonNull}; +#[cfg(not(no_global_oom_handling))] +pub use crate::alloc::handle_alloc_error; pub use crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError}; /// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of @@ -26,10 +29,10 @@ pub use crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError}; /// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer /// remains valid. 
/// -/// [`allocate`]: crate::alloc::Allocator::allocate -/// [`grow`]: crate::alloc::Allocator::grow -/// [`shrink`]: crate::alloc::Allocator::shrink -/// [`deallocate`]: crate::alloc::Allocator::deallocate +/// [`allocate`]: crate::falloc::Allocator::allocate +/// [`grow`]: crate::falloc::Allocator::grow +/// [`shrink`]: crate::falloc::Allocator::shrink +/// [`deallocate`]: crate::falloc::Allocator::deallocate /// /// ### Memory fitting /// @@ -59,32 +62,380 @@ pub use crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError}; /// /// [*currently allocated*]: #currently-allocated-memory #[unstable(feature = "allocator_api", issue = "32838")] -pub unsafe trait Allocator: crate::alloc::Allocator { +pub unsafe trait Allocator { + /// Attempts to allocate a block of memory. + /// + /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`. + /// + /// The returned block may have a larger size than specified by `layout.size()`, and may or may + /// not have its contents initialized. + /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet + /// allocator's size or alignment constraints. + /// + /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or + /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement + /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to an allocation error are encouraged to + /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + fn allocate(&self, layout: Layout) -> Result, AllocError>; + + /// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized. 
+ /// + /// # Errors + /// + /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet + /// allocator's size or alignment constraints. + /// + /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or + /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement + /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to an allocation error are encouraged to + /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { + let ptr = self.allocate(layout)?; + // SAFETY: `alloc` returns a valid memory block + unsafe { ptr.as_non_null_ptr().as_ptr().write_bytes(0, ptr.len()) } + Ok(ptr) + } + + /// Deallocates the memory referenced by `ptr`. + /// + /// # Safety + /// + /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and + /// * `layout` must [*fit*] that block of memory. + /// + /// [*currently allocated*]: #currently-allocated-memory + /// [*fit*]: #memory-fitting + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout); + + /// Attempts to extend the memory block. + /// + /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated + /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish + /// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout. + /// + /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been + /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the + /// allocation was grown in-place. 
The newly returned pointer is the only valid pointer + /// for accessing this memory now. + /// + /// If this method returns `Err`, then ownership of the memory block has not been transferred to + /// this allocator, and the contents of the memory block are unaltered. + /// + /// # Safety + /// + /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. + /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). + /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. + /// + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. + /// + /// [*currently allocated*]: #currently-allocated-memory + /// [*fit*]: #memory-fitting + /// + /// # Errors + /// + /// Returns `Err` if the new layout does not meet the allocator's size and alignment + /// constraints of the allocator, or if growing otherwise fails. + /// + /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or + /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement + /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to an allocation error are encouraged to + /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. 
+ /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn grow( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + debug_assert!( + new_layout.size() >= old_layout.size(), + "`new_layout.size()` must be greater than or equal to `old_layout.size()`" + ); + + let new_ptr = self.allocate(new_layout)?; + + // SAFETY: because `new_layout.size()` must be greater than or equal to + // `old_layout.size()`, both the old and new memory allocation are valid for reads and + // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet + // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is + // safe. The safety contract for `dealloc` must be upheld by the caller. + unsafe { + ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size()); + self.deallocate(ptr, old_layout); + } + + Ok(new_ptr) + } + + /// Behaves like `grow`, but also ensures that the new contents are set to zero before being + /// returned. + /// + /// The memory block will contain the following contents after a successful call to + /// `grow_zeroed`: + /// * Bytes `0..old_layout.size()` are preserved from the original allocation. + /// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on + /// the allocator implementation. `old_size` refers to the size of the memory block prior + /// to the `grow_zeroed` call, which may be larger than the size that was originally + /// requested when it was allocated. + /// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory + /// block returned by the `grow_zeroed` call. + /// + /// # Safety + /// + /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. + /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). 
+ /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. + /// + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. + /// + /// [*currently allocated*]: #currently-allocated-memory + /// [*fit*]: #memory-fitting + /// + /// # Errors + /// + /// Returns `Err` if the new layout does not meet the allocator's size and alignment + /// constraints of the allocator, or if growing otherwise fails. + /// + /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or + /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement + /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to an allocation error are encouraged to + /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. + /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn grow_zeroed( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + debug_assert!( + new_layout.size() >= old_layout.size(), + "`new_layout.size()` must be greater than or equal to `old_layout.size()`" + ); + + let new_ptr = self.allocate_zeroed(new_layout)?; + + // SAFETY: because `new_layout.size()` must be greater than or equal to + // `old_layout.size()`, both the old and new memory allocation are valid for reads and + // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet + // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is + // safe. The safety contract for `dealloc` must be upheld by the caller. + unsafe { + ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size()); + self.deallocate(ptr, old_layout); + } + + Ok(new_ptr) + } + + /// Attempts to shrink the memory block. 
+ /// + /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated + /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish + /// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout. + /// + /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been + /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the + /// allocation was shrunk in-place. The newly returned pointer is the only valid pointer + /// for accessing this memory now. + /// + /// If this method returns `Err`, then ownership of the memory block has not been transferred to + /// this allocator, and the contents of the memory block are unaltered. + /// + /// # Safety + /// + /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. + /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). + /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`. + /// + /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. + /// + /// [*currently allocated*]: #currently-allocated-memory + /// [*fit*]: #memory-fitting + /// + /// # Errors + /// + /// Returns `Err` if the new layout does not meet the allocator's size and alignment + /// constraints of the allocator, or if shrinking otherwise fails. + /// + /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or + /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement + /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) + /// + /// Clients wishing to abort computation in response to an allocation error are encouraged to + /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. 
+ /// + /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html + unsafe fn shrink( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + debug_assert!( + new_layout.size() <= old_layout.size(), + "`new_layout.size()` must be smaller than or equal to `old_layout.size()`" + ); + + let new_ptr = self.allocate(new_layout)?; + + // SAFETY: because `new_layout.size()` must be lower than or equal to + // `old_layout.size()`, both the old and new memory allocation are valid for reads and + // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet + // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is + // safe. The safety contract for `dealloc` must be upheld by the caller. + unsafe { + ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_layout.size()); + self.deallocate(ptr, old_layout); + } + + Ok(new_ptr) + } + + /// Creates a "by reference" adapter for this instance of `Allocator`. + /// + /// The returned adapter also implements `Allocator` and will simply borrow this. + #[inline(always)] + fn by_ref(&self) -> &Self + where + Self: Sized, + { + self + } + + /// Result type returned by functions that are conditionally fallible. + /// + /// - "Infallible" allocators set `type Result = T` + /// - "Fallible" allocators set `type Result = Result` #[cfg(not(no_global_oom_handling))] #[must_use] // Doesn't actually work type Result where E: IntoLayout; + /// Function to map allocation results into `Self::Result`. + /// + /// - For "Infallible" allocators, this should call [`handle_alloc_error`] + /// - For "Fallible" allocators, this is just the identity function #[cfg(not(no_global_oom_handling))] #[must_use] fn map_result(result: Result) -> Self::Result where E: IntoLayout; + /// Result type returned by functions that are conditionally fallible. 
+ /// + /// - "Infallible" allocators set `type Result = T` + /// - "Fallible" allocators set `type Result = Result` #[cfg(no_global_oom_handling)] #[must_use] // Doesn't actually work type Result; + /// Function to map allocation results into `Self::Result`. + /// + /// - For "Infallible" allocators, this should call [`handle_alloc_error`] + /// - For "Fallible" allocators, this is just the identity function #[cfg(no_global_oom_handling)] #[must_use] fn map_result(result: Result) -> Self::Result; } +#[unstable(feature = "allocator_api", issue = "32838")] +unsafe impl Allocator for &A +where + A: Allocator + ?Sized, +{ + #[inline] + fn allocate(&self, layout: Layout) -> Result, AllocError> { + (**self).allocate(layout) + } + + #[inline] + fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { + (**self).allocate_zeroed(layout) + } + + #[inline] + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + // SAFETY: the safety contract must be upheld by the caller + unsafe { (**self).deallocate(ptr, layout) } + } + + #[inline] + unsafe fn grow( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { (**self).grow(ptr, old_layout, new_layout) } + } + + #[inline] + unsafe fn grow_zeroed( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) } + } + + #[inline] + unsafe fn shrink( + &self, + ptr: NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { (**self).shrink(ptr, old_layout, new_layout) } + } + + #[cfg(not(no_global_oom_handling))] + type Result = A::Result + where + E: IntoLayout; + + #[cfg(not(no_global_oom_handling))] + fn map_result(result: Result) -> Self::Result + 
where + E: IntoLayout, + { + A::map_result(result) + } + + #[cfg(no_global_oom_handling)] + type Result = A::Result; + + #[cfg(no_global_oom_handling)] + fn map_result(result: Result) -> Self::Result { + A::map_result(result) + } +} + +use crate::collections::TryReserveError; #[cfg(not(no_global_oom_handling))] -use crate::alloc::handle_alloc_error; -#[cfg(not(no_global_oom_handling))] -use crate::collections::{TryReserveError, TryReserveErrorKind}; +use crate::collections::TryReserveErrorKind; // One central function responsible for reporting capacity overflows. This'll // ensure that the code generation related to these panics is minimal as there's @@ -94,9 +445,12 @@ pub(crate) fn capacity_overflow() -> ! { panic!("capacity overflow"); } +/// Trait for converting an error into a `Layout` struct, +/// used for passing the layout to [`handle_alloc_error`]. #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] pub trait IntoLayout { + /// Convert into a `Layout` struct. fn into_layout(self) -> Layout; } @@ -111,9 +465,74 @@ impl IntoLayout for TryReserveError { } } +/// Wrapper around an existing allocator allowing one to +/// use a fallible allocator as an infallible one. +#[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] +#[derive(Debug)] +pub struct InfallibleAdapter(A); + +#[cfg(not(no_global_oom_handling))] +impl InfallibleAdapter { + /// Unwrap the adapter, returning the original allocator. 
+ #[unstable(feature = "allocator_api", issue = "32838")] + pub fn into_inner(self) -> A { + self.0 + } +} + #[cfg(not(no_global_oom_handling))] -unsafe impl Allocator for X { +#[unstable(feature = "allocator_api", issue = "32838")] +unsafe impl Allocator for InfallibleAdapter +where + A: Allocator = Result<(), TryReserveError>>, +{ + fn allocate(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate(layout) + } + + unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { + unsafe { self.0.deallocate(ptr, layout) } + } + + fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate_zeroed(layout) + } + + unsafe fn grow( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + unsafe { self.0.grow(ptr, old_layout, new_layout) } + } + + unsafe fn grow_zeroed( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } + } + + unsafe fn shrink( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + unsafe { self.0.shrink(ptr, old_layout, new_layout) } + } + + fn by_ref(&self) -> &Self + where + Self: Sized, + { + self + } + type Result = T where E: IntoLayout; @@ -122,16 +541,114 @@ unsafe impl Allocator for X { where E: IntoLayout, { - result.unwrap_or_else(|error| handle_alloc_error(error.into_layout())) + result.unwrap_or_else(|e| handle_alloc_error(e.into_layout())) + } +} + +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +impl From for InfallibleAdapter +where + A: Allocator = Result<(), TryReserveError>>, +{ + fn from(value: A) -> Self { + InfallibleAdapter(value) } } +/// Wrapper around an existing allocator allowing one to +/// use an infallible allocator as a fallible one. 
#[unstable(feature = "allocator_api", issue = "32838")] -#[cfg(no_global_oom_handling)] -unsafe impl Allocator for X { +#[derive(Debug)] +pub struct FallibleAdapter(A); + +impl FallibleAdapter { + /// Unwrap the adapter, returning the original allocator. + #[unstable(feature = "allocator_api", issue = "32838")] + pub fn into_inner(self) -> A { + self.0 + } +} + +#[unstable(feature = "allocator_api", issue = "32838")] +unsafe impl Allocator for FallibleAdapter +where + A: Allocator = ()>, +{ + fn allocate(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate(layout) + } + + unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { + unsafe { self.0.deallocate(ptr, layout) } + } + + fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate_zeroed(layout) + } + + unsafe fn grow( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + unsafe { self.0.grow(ptr, old_layout, new_layout) } + } + + unsafe fn grow_zeroed( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } + } + + unsafe fn shrink( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + unsafe { self.0.shrink(ptr, old_layout, new_layout) } + } + + fn by_ref(&self) -> &Self + where + Self: Sized, + { + self + } + + #[cfg(not(no_global_oom_handling))] + type Result = Result + where + E: IntoLayout; + + #[cfg(not(no_global_oom_handling))] + fn map_result(result: Result) -> Self::Result + where + E: IntoLayout, + { + result + } + + #[cfg(no_global_oom_handling)] type Result = Result; + #[cfg(no_global_oom_handling)] fn map_result(result: Result) -> Self::Result { result } } + +#[unstable(feature = "allocator_api", issue = "32838")] +impl From for FallibleAdapter +where + A: Allocator = ()>, +{ + fn from(value: A) -> Self { + 
FallibleAdapter(value) + } +} diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs index 6dd274f374d3f..6b5af1963909f 100644 --- a/library/alloc/src/raw_vec.rs +++ b/library/alloc/src/raw_vec.rs @@ -1,13 +1,12 @@ #![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")] -use core::alloc::LayoutError; use core::cmp; use core::intrinsics; use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties}; use core::ptr::{self, NonNull, Unique}; use core::slice; -use crate::alloc::{Allocator, Global, Layout}; +use crate::alloc::{Allocator, Global, Layout, LayoutError}; use crate::boxed::Box; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind::*; diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs index 4b87b65eed57d..bc78afc123fea 100644 --- a/library/alloc/src/raw_vec/tests.rs +++ b/library/alloc/src/raw_vec/tests.rs @@ -20,7 +20,7 @@ fn allocator_param() -> Result<(), TryReserveError> { struct BoundedAlloc { fuel: Cell, } - unsafe impl core::alloc::Allocator for BoundedAlloc { + unsafe impl crate::alloc::Allocator for BoundedAlloc { fn allocate(&self, layout: Layout) -> Result, AllocError> { let size = layout.size(); if size > self.fuel.get() { diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index 093dcbbe8bf77..db325aeb50217 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -28,6 +28,8 @@ use crate::alloc::{self, Global}; #[cfg(not(no_global_oom_handling))] use crate::borrow::ToOwned; use crate::boxed::Box; +#[cfg(not(no_global_oom_handling))] +use crate::collections::TryReserveError; use crate::vec::Vec; #[cfg(test)] @@ -89,9 +91,11 @@ pub use hack::to_vec; // `core::slice::SliceExt` - we need to supply these functions for the // `test_permutations` test pub(crate) mod hack { - use core::alloc::Allocator; + use crate::alloc::Allocator; use crate::boxed::Box; + 
#[cfg(not(no_global_oom_handling))] + use crate::collections::TryReserveError; use crate::vec::Vec; // We shouldn't add inline attribute to this since this is used in @@ -107,13 +111,16 @@ pub(crate) mod hack { #[cfg(not(no_global_oom_handling))] #[inline] - pub fn to_vec(s: &[T], alloc: A) -> Vec { + pub fn to_vec( + s: &[T], + alloc: A, + ) -> Result, TryReserveError> { T::to_vec(s, alloc) } #[cfg(not(no_global_oom_handling))] pub trait ConvertVec { - fn to_vec(s: &[Self], alloc: A) -> Vec + fn to_vec(s: &[Self], alloc: A) -> Result, TryReserveError> where Self: Sized; } @@ -121,7 +128,10 @@ pub(crate) mod hack { #[cfg(not(no_global_oom_handling))] impl ConvertVec for T { #[inline] - default fn to_vec(s: &[Self], alloc: A) -> Vec { + default fn to_vec( + s: &[Self], + alloc: A, + ) -> Result, TryReserveError> { struct DropGuard<'a, T, A: Allocator> { vec: &'a mut Vec, num_init: usize, @@ -136,7 +146,7 @@ pub(crate) mod hack { } } } - let mut vec = Vec::with_capacity_in(s.len(), alloc); + let mut vec = Vec::try_with_capacity_in(s.len(), alloc)?; let mut guard = DropGuard { vec: &mut vec, num_init: 0 }; let slots = guard.vec.spare_capacity_mut(); // .take(slots.len()) is necessary for LLVM to remove bounds checks @@ -151,15 +161,15 @@ pub(crate) mod hack { unsafe { vec.set_len(s.len()); } - vec + Ok(vec) } } #[cfg(not(no_global_oom_handling))] impl ConvertVec for T { #[inline] - fn to_vec(s: &[Self], alloc: A) -> Vec { - let mut v = Vec::with_capacity_in(s.len(), alloc); + fn to_vec(s: &[Self], alloc: A) -> Result, TryReserveError> { + let mut v = Vec::try_with_capacity_in(s.len(), alloc)?; // SAFETY: // allocated above with the capacity of `s`, and initialize to `s.len()` in // ptr::copy_to_non_overlapping below. 
@@ -167,7 +177,7 @@ pub(crate) mod hack { s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len()); v.set_len(s.len()); } - v + Ok(v) } } } @@ -433,12 +443,12 @@ impl [T] { #[rustc_allow_incoherent_impl] #[inline] #[unstable(feature = "allocator_api", issue = "32838")] - pub fn to_vec_in(&self, alloc: A) -> Vec + pub fn to_vec_in(&self, alloc: A) -> A::Result, TryReserveError> where T: Clone, { // N.B., see the `hack` module in this file for more details. - hack::to_vec(self, alloc) + A::map_result(hack::to_vec(self, alloc)) } /// Converts `self` into a vector without clones or allocation. diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs index 5ecd0479971ea..86b8a32f94416 100644 --- a/library/alloc/src/vec/in_place_collect.rs +++ b/library/alloc/src/vec/in_place_collect.rs @@ -43,7 +43,7 @@ //! Additionally this specialization doesn't make sense for ZSTs as there is no reallocation to //! avoid and it would make pointer arithmetic more difficult. //! -//! [`Allocator`]: core::alloc::Allocator +//! [`Allocator`]: crate::alloc::Allocator //! //! # Drop- and panic-safety //! 
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index b2db2fdfd18f1..bf5c9e9434680 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -1,7 +1,11 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; +#[cfg(not(no_global_oom_handling))] +use super::Vec; use crate::alloc::{Allocator, Global}; #[cfg(not(no_global_oom_handling))] +use crate::collections::TryReserveError; +#[cfg(not(no_global_oom_handling))] use crate::collections::VecDeque; use crate::raw_vec::RawVec; use core::array; @@ -383,7 +387,10 @@ where #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_into_iter_clone", since = "1.8.0")] -impl Clone for IntoIter { +impl Clone for IntoIter +where + A: Allocator, TryReserveError> = Vec>, +{ #[cfg(not(test))] fn clone(&self) -> Self { self.as_slice().to_vec_in(self.alloc.deref().clone()).into_iter() diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 29a6b78760b7a..6dd0f785c15e9 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -1732,7 +1732,7 @@ impl Vec { } /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */ - struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> { + struct FillGapOnDrop<'a, T, A: crate::alloc::Allocator> { /* Offset of the element we want to check if it is duplicate */ read: usize, @@ -1744,7 +1744,7 @@ impl Vec { vec: &'a mut Vec, } - impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> { + impl<'a, T, A: crate::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> { fn drop(&mut self) { /* This code gets executed when `same_bucket` panics */ @@ -2663,7 +2663,10 @@ impl ops::DerefMut for Vec { #[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] -impl Clone for Vec { +impl Clone for Vec +where + A: Allocator = Self>, +{ #[cfg(not(test))] fn clone(&self) -> Self { let alloc = self.allocator().clone(); diff --git a/library/alloc/tests/vec.rs 
b/library/alloc/tests/vec.rs index cc4c1f1272865..116a89b278482 100644 --- a/library/alloc/tests/vec.rs +++ b/library/alloc/tests/vec.rs @@ -1,9 +1,8 @@ -use core::alloc::{Allocator, Layout}; use core::assert_eq; use core::iter::IntoIterator; use core::num::NonZeroUsize; use core::ptr::NonNull; -use std::alloc::System; +use std::alloc::{Allocator, Layout, System, FallibleAdapter, IntoLayout, handle_alloc_error}; use std::assert_matches::assert_matches; use std::borrow::Cow; use std::cell::Cell; @@ -1096,6 +1095,17 @@ fn test_into_iter_drop_allocator() { // Safety: Invariants passed to caller. unsafe { System.deallocate(ptr, layout) } } + + type Result = T + where + E: IntoLayout; + + fn map_result(result: Result) -> Self::Result + where + E: IntoLayout + { + result.unwrap_or_else(|e| handle_alloc_error(e.into_layout())) + } } let mut drop_count = 0; diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs index ec774e62debbf..1099805d15064 100644 --- a/library/std/src/alloc.rs +++ b/library/std/src/alloc.rs @@ -284,6 +284,17 @@ unsafe impl Allocator for System { }, } } + + type Result = T + where + E: IntoLayout; + + fn map_result(result: Result) -> Self::Result + where + E: IntoLayout, + { + result.unwrap_or_else(|e| handle_alloc_error(e.into_layout())) + } } static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut()); From adc0771a8c328814d9454850a0c868954d6faf22 Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Thu, 1 Jun 2023 23:53:54 -0600 Subject: [PATCH 08/15] refactor IntoLayout to HandleAllocError --- library/alloc/src/alloc.rs | 8 ++++---- library/alloc/src/falloc.rs | 34 +++++++++++++++++----------------- library/std/src/alloc.rs | 6 +++--- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs index 09b99316f54c7..76da3f4b9624e 100644 --- a/library/alloc/src/alloc.rs +++ b/library/alloc/src/alloc.rs @@ -16,7 +16,7 @@ use core::ptr::{self, NonNull}; pub use 
crate::falloc::{Allocator, FallibleAdapter}; #[unstable(feature = "allocator_api", issue = "32838")] #[cfg(not(no_global_oom_handling))] -pub use crate::falloc::{InfallibleAdapter, IntoLayout}; +pub use crate::falloc::{HandleAllocError, InfallibleAdapter}; #[stable(feature = "alloc_module", since = "1.28.0")] #[doc(inline)] #[allow(deprecated)] @@ -333,14 +333,14 @@ unsafe impl Allocator for Global { #[cfg(not(no_global_oom_handling))] type Result = T where - E: IntoLayout; + E: HandleAllocError; #[cfg(not(no_global_oom_handling))] fn map_result(result: Result) -> Self::Result where - E: IntoLayout, + E: HandleAllocError, { - result.unwrap_or_else(|e| handle_alloc_error(e.into_layout())) + result.unwrap_or_else(|e| e.handle_alloc_error()) } #[cfg(no_global_oom_handling)] diff --git a/library/alloc/src/falloc.rs b/library/alloc/src/falloc.rs index b400d993e6364..df609ca370a8d 100644 --- a/library/alloc/src/falloc.rs +++ b/library/alloc/src/falloc.rs @@ -328,7 +328,7 @@ pub unsafe trait Allocator { #[must_use] // Doesn't actually work type Result where - E: IntoLayout; + E: HandleAllocError; /// Function to map allocation results into `Self::Result`. /// @@ -338,7 +338,7 @@ pub unsafe trait Allocator { #[must_use] fn map_result(result: Result) -> Self::Result where - E: IntoLayout; + E: HandleAllocError; /// Result type returned by functions that are conditionally fallible. /// @@ -414,12 +414,12 @@ where #[cfg(not(no_global_oom_handling))] type Result = A::Result where - E: IntoLayout; + E: HandleAllocError; #[cfg(not(no_global_oom_handling))] fn map_result(result: Result) -> Self::Result where - E: IntoLayout, + E: HandleAllocError, { A::map_result(result) } @@ -445,22 +445,22 @@ pub(crate) fn capacity_overflow() -> ! { panic!("capacity overflow"); } -/// Trait for converting an error into a `Layout` struct, -/// used for passing the layout to [`handle_alloc_error`]. 
+/// Trait for handling alloc errors for allocators which +/// panic or abort instead of returning errors. #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] -pub trait IntoLayout { - /// Convert into a `Layout` struct. - fn into_layout(self) -> Layout; +pub trait HandleAllocError { + /// Globally handle this allocation error, using [`handle_alloc_error`] + fn handle_alloc_error(self) -> !; } #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] -impl IntoLayout for TryReserveError { - fn into_layout(self) -> Layout { +impl HandleAllocError for TryReserveError { + fn handle_alloc_error(self) -> ! { match self.kind() { TryReserveErrorKind::CapacityOverflow => capacity_overflow(), - TryReserveErrorKind::AllocError { layout, .. } => layout, + TryReserveErrorKind::AllocError { layout, .. } => handle_alloc_error(layout), } } } @@ -535,13 +535,13 @@ where type Result = T where - E: IntoLayout; + E: HandleAllocError; fn map_result(result: Result) -> Self::Result where - E: IntoLayout, + E: HandleAllocError, { - result.unwrap_or_else(|e| handle_alloc_error(e.into_layout())) + result.unwrap_or_else(|e| e.handle_alloc_error()) } } @@ -624,12 +624,12 @@ where #[cfg(not(no_global_oom_handling))] type Result = Result where - E: IntoLayout; + E: HandleAllocError; #[cfg(not(no_global_oom_handling))] fn map_result(result: Result) -> Self::Result where - E: IntoLayout, + E: HandleAllocError, { result } diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs index 1099805d15064..c9d07f927ca16 100644 --- a/library/std/src/alloc.rs +++ b/library/std/src/alloc.rs @@ -287,13 +287,13 @@ unsafe impl Allocator for System { type Result = T where - E: IntoLayout; + E: HandleAllocError; fn map_result(result: Result) -> Self::Result where - E: IntoLayout, + E: HandleAllocError, { - result.unwrap_or_else(|e| handle_alloc_error(e.into_layout())) + result.unwrap_or_else(|e| e.handle_alloc_error()) } } From 
4b5eb49918058f576749e2f0fc79038fc1d87a73 Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Mon, 5 Jun 2023 16:56:48 -0600 Subject: [PATCH 09/15] refactor for object safety uses separate trait so there's no generic associated types --- compiler/rustc_serialize/src/serialize.rs | 8 +- library/alloc/src/alloc.rs | 26 +-- library/alloc/src/boxed.rs | 37 ++-- .../alloc/src/collections/vec_deque/mod.rs | 36 ++-- library/alloc/src/falloc.rs | 182 ++++++++---------- library/alloc/src/slice.rs | 6 +- library/alloc/src/vec/into_iter.rs | 8 +- library/alloc/src/vec/mod.rs | 99 +++++----- library/alloc/src/vec/spec_from_iter.rs | 4 +- .../alloc/src/vec/spec_from_iter_nested.rs | 11 +- library/alloc/src/vec/splice.rs | 22 +-- library/std/src/alloc.rs | 11 +- 12 files changed, 206 insertions(+), 244 deletions(-) diff --git a/compiler/rustc_serialize/src/serialize.rs b/compiler/rustc_serialize/src/serialize.rs index 3945612038832..7c50d9391431c 100644 --- a/compiler/rustc_serialize/src/serialize.rs +++ b/compiler/rustc_serialize/src/serialize.rs @@ -1,9 +1,8 @@ //! Support code for encoding and decoding types. 
-use std::alloc::Allocator; +use std::alloc::{Allocator, Fatal}; use std::borrow::Cow; use std::cell::{Cell, RefCell}; -use std::collections::TryReserveError; use std::marker::PhantomData; use std::path; use std::rc::Rc; @@ -276,8 +275,7 @@ impl Decodable for PhantomData { impl> Decodable for Box<[T], A> where - A: Allocator, TryReserveError> = Vec>, - A: Allocator, TryReserveError> = Box<[T], A>>, + A: Allocator, { fn decode(d: &mut D) -> Box<[T], A> { let v: Vec = Decodable::decode(d); @@ -315,7 +313,7 @@ impl> Encodable for Vec { impl, A: Default> Decodable for Vec where - A: Allocator, TryReserveError> = Vec>, + A: Allocator, { default fn decode(d: &mut D) -> Vec { let len = d.read_usize(); diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs index 76da3f4b9624e..fd6b0d56d02ab 100644 --- a/library/alloc/src/alloc.rs +++ b/library/alloc/src/alloc.rs @@ -2,8 +2,6 @@ #![stable(feature = "alloc_module", since = "1.28.0")] -#[cfg(not(test))] -use core::error::Error; #[cfg(not(test))] use core::intrinsics; use core::intrinsics::{min_align_of_val, size_of_val}; @@ -13,10 +11,10 @@ use core::ptr::Unique; use core::ptr::{self, NonNull}; #[unstable(feature = "allocator_api", issue = "32838")] -pub use crate::falloc::{Allocator, FallibleAdapter}; +pub use crate::falloc::{Allocator, Fallible}; #[unstable(feature = "allocator_api", issue = "32838")] #[cfg(not(no_global_oom_handling))] -pub use crate::falloc::{HandleAllocError, InfallibleAdapter}; +pub use crate::falloc::{FallibleAdapter, Fatal, HandleAllocError, InfallibleAdapter}; #[stable(feature = "alloc_module", since = "1.28.0")] #[doc(inline)] #[allow(deprecated)] @@ -331,25 +329,9 @@ unsafe impl Allocator for Global { } #[cfg(not(no_global_oom_handling))] - type Result = T - where - E: HandleAllocError; - - #[cfg(not(no_global_oom_handling))] - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError, - { - result.unwrap_or_else(|e| e.handle_alloc_error()) - } - - 
#[cfg(no_global_oom_handling)] - type Result = Result; - + type ErrorHandling = Fatal; #[cfg(no_global_oom_handling)] - fn map_result(result: Result) -> Self::Result { - result - } + type ErrorHandling = Fallible; } /// The allocator for unique pointers. diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index c150748b26bba..9b1c8a1b40d2f 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -170,7 +170,9 @@ use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw}; #[cfg(not(no_global_oom_handling))] use crate::borrow::Cow; use crate::collections::TryReserveError; -use crate::falloc::{AllocError, Allocator, Global, Layout}; +#[cfg(not(no_global_oom_handling))] +use crate::falloc::Fatal; +use crate::falloc::{AllocError, AllocResult, Allocator, ErrorHandling, Global, Layout}; use crate::raw_vec::RawVec; #[cfg(not(no_global_oom_handling))] use crate::str::from_boxed_utf8_unchecked; @@ -626,7 +628,8 @@ impl Box<[T]> { #[must_use] pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit]> { unsafe { - ::map_result(RawVec::with_capacity_in(len, Global)).into_box(len) + ::ErrorHandling::map_result(RawVec::with_capacity_in(len, Global)) + .into_box(len) } } @@ -653,8 +656,10 @@ impl Box<[T]> { #[must_use] pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit]> { unsafe { - ::map_result(RawVec::with_capacity_zeroed_in(len, Global)) - .into_box(len) + ::ErrorHandling::map_result(RawVec::with_capacity_zeroed_in( + len, Global, + )) + .into_box(len) } } @@ -741,8 +746,12 @@ impl Box<[T], A> { pub fn new_uninit_slice_in( len: usize, alloc: A, - ) -> A::Result], A>, TryReserveError> { - unsafe { A::map_result(RawVec::with_capacity_in(len, alloc).map(|r| r.into_box(len))) } + ) -> AllocResult], A>, TryReserveError> { + unsafe { + A::ErrorHandling::map_result( + RawVec::with_capacity_in(len, alloc).map(|r| r.into_box(len)), + ) + } } /// Constructs a new boxed slice with uninitialized contents in the provided allocator, @@ -771,9 
+780,11 @@ impl Box<[T], A> { pub fn new_zeroed_slice_in( len: usize, alloc: A, - ) -> A::Result], A>, TryReserveError> { + ) -> AllocResult], A>, TryReserveError> { unsafe { - A::map_result(RawVec::with_capacity_zeroed_in(len, alloc).map(|r| r.into_box(len))) + A::ErrorHandling::map_result( + RawVec::with_capacity_zeroed_in(len, alloc).map(|r| r.into_box(len)), + ) } } } @@ -1474,7 +1485,8 @@ impl BoxFromSlice for Box<[T]> { #[inline] fn from_slice(slice: &[T]) -> Self { let len = slice.len(); - let buf = ::map_result(RawVec::with_capacity_in(len, Global)); + let buf = + ::ErrorHandling::map_result(RawVec::with_capacity_in(len, Global)); unsafe { ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len); buf.into_box(slice.len()).assume_init() @@ -2018,12 +2030,7 @@ impl FromIterator for Box<[I]> { #[stable(feature = "box_slice_clone", since = "1.3.0")] impl Clone for Box<[T], A> where - // Would like to see something like this work eventually, - // using `feature(non_lifetime_binders)` (#108185), but - // for now we'll have to enumerate each case that's needed. 
- // A: for Allocator = X>, - A: Allocator, TryReserveError> = Vec>, - A: Allocator = Self>, + A: Allocator, { fn clone(&self) -> Self { let alloc = Box::allocator(self).clone(); diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 3152c5fde7d01..40b502a1d63fb 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -24,7 +24,7 @@ use core::mem; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; -use crate::falloc::{Allocator, Global}; +use crate::falloc::{AllocResult, Allocator, ErrorHandling, Fatal, Global}; use crate::raw_vec::RawVec; use crate::vec::Vec; @@ -108,7 +108,7 @@ pub struct VecDeque< #[stable(feature = "rust1", since = "1.0.0")] impl Clone for VecDeque where - A: Allocator = Self> + Clone, + A: Allocator + Clone, { fn clone(&self) -> Self { let mut deq = Self::with_capacity_in(self.len(), self.allocator().clone()); @@ -597,8 +597,8 @@ impl VecDeque { pub fn with_capacity_in( capacity: usize, alloc: A, - ) -> A::Result, TryReserveError> { - A::map_result(Self::try_with_capacity_in(capacity, alloc)) + ) -> AllocResult, TryReserveError> { + A::ErrorHandling::map_result(Self::try_with_capacity_in(capacity, alloc)) } /// Creates a `VecDeque` from a raw allocation, when the initialized @@ -761,8 +761,8 @@ impl VecDeque { /// /// [`reserve`]: VecDeque::reserve #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve_exact(&mut self, additional: usize) -> A::Result<(), TryReserveError> { - A::map_result(self.try_reserve_exact(additional)) + pub fn reserve_exact(&mut self, additional: usize) -> AllocResult { + A::ErrorHandling::map_result(self.try_reserve_exact(additional)) } /// Reserves capacity for at least `additional` more elements to be inserted in the given @@ -782,8 +782,8 @@ impl VecDeque { /// assert!(buf.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn 
reserve(&mut self, additional: usize) -> A::Result<(), TryReserveError> { - A::map_result(self.try_reserve(additional)) + pub fn reserve(&mut self, additional: usize) -> AllocResult { + A::ErrorHandling::map_result(self.try_reserve(additional)) } /// Tries to reserve the minimum capacity for at least `additional` more elements to @@ -928,8 +928,8 @@ impl VecDeque { /// assert!(buf.capacity() >= 4); /// ``` #[stable(feature = "shrink_to", since = "1.56.0")] - pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<(), TryReserveError> { - A::map_result((|| { + pub fn shrink_to(&mut self, min_capacity: usize) -> AllocResult { + A::ErrorHandling::map_result((|| { // Substitute for try block let target_cap = min_capacity.max(self.len); @@ -1625,8 +1625,8 @@ impl VecDeque { /// assert_eq!(d.front(), Some(&2)); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn push_front(&mut self, value: T) -> A::Result<(), TryReserveError> { - A::map_result((|| { + pub fn push_front(&mut self, value: T) -> AllocResult { + A::ErrorHandling::map_result((|| { // Substitute for try block if self.is_full() { self.grow()?; @@ -1656,8 +1656,8 @@ impl VecDeque { /// assert_eq!(3, *buf.back().unwrap()); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn push_back(&mut self, value: T) -> A::Result<(), TryReserveError> { - A::map_result((|| { + pub fn push_back(&mut self, value: T) -> AllocResult { + A::ErrorHandling::map_result((|| { // Substsitute for try block if self.is_full() { self.grow()?; @@ -1770,8 +1770,8 @@ impl VecDeque { /// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']); /// ``` #[stable(feature = "deque_extras_15", since = "1.5.0")] - pub fn insert(&mut self, index: usize, value: T) -> A::Result<(), TryReserveError> { - A::map_result((|| { + pub fn insert(&mut self, index: usize, value: T) -> AllocResult { + A::ErrorHandling::map_result((|| { // Substitute for try block assert!(index <= self.len(), "index out of bounds"); if self.is_full() { @@ -1877,11 
+1877,11 @@ impl VecDeque { #[inline] #[must_use = "use `.truncate()` if you don't need the other half"] #[stable(feature = "split_off", since = "1.4.0")] - pub fn split_off(&mut self, at: usize) -> A::Result + pub fn split_off(&mut self, at: usize) -> AllocResult where A: Clone, { - A::map_result((|| { + A::ErrorHandling::map_result((|| { let len = self.len; assert!(at <= len, "`at` out of bounds"); diff --git a/library/alloc/src/falloc.rs b/library/alloc/src/falloc.rs index df609ca370a8d..c9a02ca3dc267 100644 --- a/library/alloc/src/falloc.rs +++ b/library/alloc/src/falloc.rs @@ -320,6 +320,21 @@ pub unsafe trait Allocator { self } + /// The mode of error handling for types using this allocator. + /// + /// [`Infallible`] means that any allocation failures should be handled + /// globally, often by panicking or aborting. Functions performing + /// allocation will simply return the value or nothing. + /// + /// [`Fallible`] means that any allocation failures should be handled + /// at the point of use. Functions performing allocation will return + /// `Result`. + type ErrorHandling: ErrorHandling; +} + +// FIXME: this trait should be sealed +#[unstable(feature = "allocator_api", issue = "32838")] +pub trait ErrorHandling { /// Result type returned by functions that are conditionally fallible. /// /// - "Infallible" allocators set `type Result = T` @@ -357,6 +372,62 @@ pub unsafe trait Allocator { fn map_result(result: Result) -> Self::Result; } +#[derive(Debug)] +#[unstable(feature = "allocator_api", issue = "32838")] +/// Error handling mode to use when the user of the type wants to handle +/// allocation failures at the point of use. Functions performing +/// allocation will return `Result`. 
+pub struct Fallible; + +#[unstable(feature = "allocator_api", issue = "32838")] +impl ErrorHandling for Fallible { + #[cfg(not(no_global_oom_handling))] + type Result = Result + where + E: HandleAllocError; + + #[cfg(not(no_global_oom_handling))] + fn map_result(result: Result) -> Self::Result + where + E: HandleAllocError, + { + result + } + + #[cfg(no_global_oom_handling)] + type Result = Result; + + #[cfg(no_global_oom_handling)] + fn map_result(result: Result) -> Self::Result { + result + } +} + +#[derive(Debug)] +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +/// Error handling mode to use when the user of the type wants to ignore +/// allocation failures, treating them as a fatal error. Functions +/// performing allocation will return values directly. +pub struct Fatal; + +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +impl ErrorHandling for Fatal { + #[cfg(not(no_global_oom_handling))] + type Result = T + where + E: HandleAllocError; + + #[cfg(not(no_global_oom_handling))] + fn map_result(result: Result) -> Self::Result + where + E: HandleAllocError, + { + result.unwrap_or_else(|e| e.handle_alloc_error()) + } +} + #[unstable(feature = "allocator_api", issue = "32838")] unsafe impl Allocator for &A where @@ -411,31 +482,11 @@ where unsafe { (**self).shrink(ptr, old_layout, new_layout) } } - #[cfg(not(no_global_oom_handling))] - type Result = A::Result - where - E: HandleAllocError; - - #[cfg(not(no_global_oom_handling))] - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError, - { - A::map_result(result) - } - - #[cfg(no_global_oom_handling)] - type Result = A::Result; - - #[cfg(no_global_oom_handling)] - fn map_result(result: Result) -> Self::Result { - A::map_result(result) - } + type ErrorHandling = A::ErrorHandling; } -use crate::collections::TryReserveError; #[cfg(not(no_global_oom_handling))] -use crate::collections::TryReserveErrorKind; 
+use crate::collections::{TryReserveError, TryReserveErrorKind}; // One central function responsible for reporting capacity overflows. This'll // ensure that the code generation related to these panics is minimal as there's @@ -470,23 +521,11 @@ impl HandleAllocError for TryReserveError { #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] #[derive(Debug)] -pub struct InfallibleAdapter(A); - -#[cfg(not(no_global_oom_handling))] -impl InfallibleAdapter { - /// Unwrap the adapter, returning the original allocator. - #[unstable(feature = "allocator_api", issue = "32838")] - pub fn into_inner(self) -> A { - self.0 - } -} +pub struct InfallibleAdapter>(pub A); #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] -unsafe impl Allocator for InfallibleAdapter -where - A: Allocator = Result<(), TryReserveError>>, -{ +unsafe impl> Allocator for InfallibleAdapter { fn allocate(&self, layout: Layout) -> Result, AllocError> { self.0.allocate(layout) } @@ -533,48 +572,19 @@ where self } - type Result = T - where - E: HandleAllocError; - - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError, - { - result.unwrap_or_else(|e| e.handle_alloc_error()) - } -} - -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -impl From for InfallibleAdapter -where - A: Allocator = Result<(), TryReserveError>>, -{ - fn from(value: A) -> Self { - InfallibleAdapter(value) - } + type ErrorHandling = Fatal; } /// Wrapper around an existing allocator allowing one to /// use an infallible allocator as a fallible one. +#[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] #[derive(Debug)] -pub struct FallibleAdapter(A); - -impl FallibleAdapter { - /// Unwrap the adapter, returning the original allocator. 
- #[unstable(feature = "allocator_api", issue = "32838")] - pub fn into_inner(self) -> A { - self.0 - } -} +pub struct FallibleAdapter>(pub A); #[unstable(feature = "allocator_api", issue = "32838")] -unsafe impl Allocator for FallibleAdapter -where - A: Allocator = ()>, -{ +#[cfg(not(no_global_oom_handling))] +unsafe impl> Allocator for FallibleAdapter { fn allocate(&self, layout: Layout) -> Result, AllocError> { self.0.allocate(layout) } @@ -621,34 +631,8 @@ where self } - #[cfg(not(no_global_oom_handling))] - type Result = Result - where - E: HandleAllocError; - - #[cfg(not(no_global_oom_handling))] - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError, - { - result - } - - #[cfg(no_global_oom_handling)] - type Result = Result; - - #[cfg(no_global_oom_handling)] - fn map_result(result: Result) -> Self::Result { - result - } + type ErrorHandling = Fallible; } -#[unstable(feature = "allocator_api", issue = "32838")] -impl From for FallibleAdapter -where - A: Allocator = ()>, -{ - fn from(value: A) -> Self { - FallibleAdapter(value) - } -} +pub(crate) type AllocResult = + <::ErrorHandling as ErrorHandling>::Result; diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index db325aeb50217..2fa1d0d99760b 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -30,6 +30,8 @@ use crate::borrow::ToOwned; use crate::boxed::Box; #[cfg(not(no_global_oom_handling))] use crate::collections::TryReserveError; +#[cfg(not(no_global_oom_handling))] +use crate::falloc::{AllocResult, ErrorHandling}; use crate::vec::Vec; #[cfg(test)] @@ -443,12 +445,12 @@ impl [T] { #[rustc_allow_incoherent_impl] #[inline] #[unstable(feature = "allocator_api", issue = "32838")] - pub fn to_vec_in(&self, alloc: A) -> A::Result, TryReserveError> + pub fn to_vec_in(&self, alloc: A) -> AllocResult, TryReserveError> where T: Clone, { // N.B., see the `hack` module in this file for more details. 
- A::map_result(hack::to_vec(self, alloc)) + A::ErrorHandling::map_result(hack::to_vec(self, alloc)) } /// Converts `self` into a vector without clones or allocation. diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index bf5c9e9434680..02ae94536d8e6 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -1,12 +1,10 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; -#[cfg(not(no_global_oom_handling))] -use super::Vec; use crate::alloc::{Allocator, Global}; #[cfg(not(no_global_oom_handling))] -use crate::collections::TryReserveError; -#[cfg(not(no_global_oom_handling))] use crate::collections::VecDeque; +#[cfg(not(no_global_oom_handling))] +use crate::falloc::Fatal; use crate::raw_vec::RawVec; use core::array; use core::fmt; @@ -389,7 +387,7 @@ where #[stable(feature = "vec_into_iter_clone", since = "1.8.0")] impl Clone for IntoIter where - A: Allocator, TryReserveError> = Vec>, + A: Allocator, { #[cfg(not(test))] fn clone(&self) -> Self { diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 6dd0f785c15e9..666513908a156 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -67,7 +67,9 @@ use core::slice::{self, SliceIndex}; use crate::borrow::{Cow, ToOwned}; use crate::boxed::Box; use crate::collections::{TryReserveError, TryReserveErrorKind}; -use crate::falloc::{Allocator, Global}; +#[cfg(not(no_global_oom_handling))] +use crate::falloc::Fatal; +use crate::falloc::{AllocResult, Allocator, ErrorHandling, Global}; use crate::raw_vec::RawVec; #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] @@ -132,8 +134,10 @@ use self::spec_from_iter::SpecFromIter; #[cfg(not(no_global_oom_handling))] mod spec_from_iter; +#[cfg(not(no_global_oom_handling))] use self::spec_extend::SpecExtend; +#[cfg(not(no_global_oom_handling))] mod spec_extend; /// A contiguous growable array type, written as `Vec`, short for 
'vector'. @@ -670,8 +674,8 @@ impl Vec { /// ``` #[inline] #[unstable(feature = "allocator_api", issue = "32838")] - pub fn with_capacity_in(capacity: usize, alloc: A) -> A::Result { - A::map_result(Self::try_with_capacity_in(capacity, alloc)) + pub fn with_capacity_in(capacity: usize, alloc: A) -> AllocResult { + A::ErrorHandling::map_result(Self::try_with_capacity_in(capacity, alloc)) } /// Creates a `Vec` directly from a pointer, a capacity, a length, @@ -905,8 +909,8 @@ impl Vec { /// assert!(vec.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve(&mut self, additional: usize) -> A::Result<(), TryReserveError> { - A::map_result(self.try_reserve(additional)) + pub fn reserve(&mut self, additional: usize) -> AllocResult { + A::ErrorHandling::map_result(self.try_reserve(additional)) } /// Reserves the minimum capacity for at least `additional` more elements to @@ -934,8 +938,8 @@ impl Vec { /// assert!(vec.capacity() >= 11); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn reserve_exact(&mut self, additional: usize) -> A::Result<(), TryReserveError> { - A::map_result(self.try_reserve_exact(additional)) + pub fn reserve_exact(&mut self, additional: usize) -> AllocResult { + A::ErrorHandling::map_result(self.try_reserve_exact(additional)) } /// Tries to reserve capacity for at least `additional` more elements to be inserted @@ -1044,8 +1048,8 @@ impl Vec { /// assert!(vec.capacity() >= 3); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn shrink_to_fit(&mut self) -> A::Result<(), TryReserveError> { - A::map_result(self.try_shrink_to_fit()) + pub fn shrink_to_fit(&mut self) -> AllocResult { + A::ErrorHandling::map_result(self.try_shrink_to_fit()) } /// Shrinks the capacity of the vector with a lower bound. 
@@ -1067,8 +1071,8 @@ impl Vec { /// assert!(vec.capacity() >= 3); /// ``` #[stable(feature = "shrink_to", since = "1.56.0")] - pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<(), TryReserveError> { - A::map_result(if self.capacity() > min_capacity { + pub fn shrink_to(&mut self, min_capacity: usize) -> AllocResult { + A::ErrorHandling::map_result(if self.capacity() > min_capacity { self.buf.shrink_to(cmp::max(self.len, min_capacity)) } else { Ok(()) @@ -1101,8 +1105,8 @@ impl Vec { /// assert_eq!(slice.into_vec().capacity(), 3); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn into_boxed_slice(mut self) -> A::Result, TryReserveError> { - A::map_result((|| { + pub fn into_boxed_slice(mut self) -> AllocResult, TryReserveError> { + A::ErrorHandling::map_result((|| { // Substitute for try block self.try_shrink_to_fit()?; unsafe { @@ -1439,8 +1443,8 @@ impl Vec { /// assert_eq!(vec, [1, 4, 2, 3, 5]); /// ``` #[stable(feature = "rust1", since = "1.0.0")] - pub fn insert(&mut self, index: usize, element: T) -> A::Result<(), TryReserveError> { - A::map_result((|| { + pub fn insert(&mut self, index: usize, element: T) -> AllocResult { + A::ErrorHandling::map_result((|| { // Substitute for try block #[cold] #[inline(never)] @@ -1832,8 +1836,8 @@ impl Vec { /// ``` #[inline] #[stable(feature = "rust1", since = "1.0.0")] - pub fn push(&mut self, value: T) -> A::Result<(), TryReserveError> { - A::map_result((|| { + pub fn push(&mut self, value: T) -> AllocResult { + A::ErrorHandling::map_result((|| { // Substitute for try block // This will panic or abort if we would allocate > isize::MAX bytes // or if the length increment would overflow for zero-sized types. 
@@ -1940,8 +1944,8 @@ impl Vec { /// ``` #[inline] #[stable(feature = "append", since = "1.4.0")] - pub fn append(&mut self, other: &mut Self) -> A::Result<(), TryReserveError> { - A::map_result((|| { + pub fn append(&mut self, other: &mut Self) -> AllocResult { + A::ErrorHandling::map_result((|| { // Substitute for try block unsafe { self.append_elements(other.as_slice() as _)?; @@ -2108,11 +2112,11 @@ impl Vec { #[inline] #[must_use = "use `.truncate()` if you don't need the other half"] #[stable(feature = "split_off", since = "1.4.0")] - pub fn split_off(&mut self, at: usize) -> A::Result + pub fn split_off(&mut self, at: usize) -> AllocResult where A: Clone, { - A::map_result((|| { + A::ErrorHandling::map_result((|| { // Substitute for try block #[cold] #[inline(never)] @@ -2173,11 +2177,11 @@ impl Vec { /// assert_eq!(vec, [2, 4, 8, 16]); /// ``` #[stable(feature = "vec_resize_with", since = "1.33.0")] - pub fn resize_with(&mut self, new_len: usize, f: F) -> A::Result<(), TryReserveError> + pub fn resize_with(&mut self, new_len: usize, f: F) -> AllocResult where F: FnMut() -> T, { - A::map_result((|| { + A::ErrorHandling::map_result((|| { // Substitute for try block let len = self.len(); if new_len > len { @@ -2377,8 +2381,8 @@ impl Vec { /// assert_eq!(vec, [1, 2]); /// ``` #[stable(feature = "vec_resize", since = "1.5.0")] - pub fn resize(&mut self, new_len: usize, value: T) -> A::Result<(), TryReserveError> { - A::map_result((|| { + pub fn resize(&mut self, new_len: usize, value: T) -> AllocResult { + A::ErrorHandling::map_result((|| { // Substitute for try block let len = self.len(); @@ -2413,8 +2417,8 @@ impl Vec { /// [`extend`]: Vec::extend #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_extend_from_slice", since = "1.6.0")] - pub fn extend_from_slice(&mut self, other: &[T]) -> A::Result<(), TryReserveError> { - A::map_result(self.spec_extend(other.iter())) + pub fn extend_from_slice(&mut self, other: &[T]) -> AllocResult { + 
A::ErrorHandling::map_result(self.spec_extend(other.iter())) } /// Copies elements from `src` range to the end of the vector. @@ -2440,11 +2444,11 @@ impl Vec { /// ``` #[cfg(not(no_global_oom_handling))] #[stable(feature = "vec_extend_from_within", since = "1.53.0")] - pub fn extend_from_within(&mut self, src: R) -> A::Result<(), TryReserveError> + pub fn extend_from_within(&mut self, src: R) -> AllocResult where R: RangeBounds, { - A::map_result((|| { + A::ErrorHandling::map_result((|| { // Substitute for try block let range = slice::range(src, ..self.len()); self.try_reserve(range.len())?; @@ -2567,11 +2571,10 @@ impl Vec { #[doc(hidden)] #[stable(feature = "rust1", since = "1.0.0")] -pub fn from_elem( - elem: T, - n: usize, -) -> ::Result, TryReserveError> { - ::map_result(::from_elem(elem, n, Global)) +pub fn from_elem(elem: T, n: usize) -> AllocResult, TryReserveError> { + ::ErrorHandling::map_result(::from_elem( + elem, n, Global, + )) } #[doc(hidden)] @@ -2580,8 +2583,8 @@ pub fn from_elem_in( elem: T, n: usize, alloc: A, -) -> A::Result, TryReserveError> { - A::map_result(::from_elem(elem, n, alloc)) +) -> AllocResult, TryReserveError> { + A::ErrorHandling::map_result(::from_elem(elem, n, alloc)) } trait ExtendFromWithinSpec { @@ -2665,7 +2668,7 @@ impl ops::DerefMut for Vec { #[stable(feature = "rust1", since = "1.0.0")] impl Clone for Vec where - A: Allocator = Self>, + A: Allocator, { #[cfg(not(test))] fn clone(&self) -> Self { @@ -2807,11 +2810,15 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec { } } +#[cfg(not(no_global_oom_handling))] #[stable(feature = "rust1", since = "1.0.0")] -impl = ()>> Extend for Vec { +impl> Extend for Vec { #[inline] fn extend>(&mut self, iter: I) { - A::map_result(>::spec_extend(self, iter.into_iter())) + A::ErrorHandling::map_result(>::spec_extend( + self, + iter.into_iter(), + )) } #[inline] @@ -2828,6 +2835,7 @@ impl = ()>> Extend for Vec impl Vec { // leaf method to which various SpecFrom/SpecExtend 
implementations delegate when // they have no further optimizations to apply + #[cfg(not(no_global_oom_handling))] fn extend_desugared>( &mut self, mut iterator: I, @@ -2935,7 +2943,7 @@ impl Vec { where R: RangeBounds, I: IntoIterator, - A: Allocator = ()>, + A: Allocator, { Splice { drain: self.drain(range), replace_with: replace_with.into_iter() } } @@ -3007,11 +3015,10 @@ impl Vec { /// /// [`copy_from_slice`]: slice::copy_from_slice #[stable(feature = "extend_ref", since = "1.2.0")] -impl<'a, T: Copy + 'a, A: Allocator = ()> + 'a> Extend<&'a T> - for Vec -{ +#[cfg(not(no_global_oom_handling))] +impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec { fn extend>(&mut self, iter: I) { - A::map_result(self.spec_extend(iter.into_iter())) + A::ErrorHandling::map_result(self.spec_extend(iter.into_iter())) } #[inline] @@ -3208,11 +3215,11 @@ impl From> for Vec { } // note: test pulls in std, which causes errors here -#[cfg(not(test))] +#[cfg(all(not(test), not(no_global_oom_handling)))] #[stable(feature = "box_from_vec", since = "1.20.0")] impl From> for Box<[T], A> where - A: Allocator = Self>, + A: Allocator, { /// Convert a vector into a boxed slice. 
/// diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs index e9e37e27eeb15..6b7f4ebad57dc 100644 --- a/library/alloc/src/vec/spec_from_iter.rs +++ b/library/alloc/src/vec/spec_from_iter.rs @@ -1,7 +1,7 @@ use core::mem::ManuallyDrop; use core::ptr; -use crate::falloc::{Allocator, Global}; +use crate::falloc::{Allocator, ErrorHandling, Global}; use super::{IntoIter, SpecExtend, SpecFromIterNested, Vec}; @@ -60,7 +60,7 @@ impl SpecFromIter> for Vec { let mut vec = Vec::new(); // must delegate to spec_extend() since extend() itself delegates // to spec_from for empty Vecs - let () = ::map_result(vec.spec_extend(iterator)); + let () = ::ErrorHandling::map_result(vec.spec_extend(iterator)); vec } } diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs index 3c61fcf66c47b..e580dd5ee5192 100644 --- a/library/alloc/src/vec/spec_from_iter_nested.rs +++ b/library/alloc/src/vec/spec_from_iter_nested.rs @@ -2,7 +2,7 @@ use core::cmp; use core::iter::TrustedLen; use core::ptr; -use crate::falloc::{Allocator, Global}; +use crate::falloc::{Allocator, ErrorHandling, Global}; use crate::raw_vec::RawVec; use super::{SpecExtend, Vec}; @@ -41,10 +41,9 @@ where }; // must delegate to spec_extend() since extend() itself delegates // to spec_from for empty Vecs - let () = ::map_result( as SpecExtend>::spec_extend( - &mut vector, - iterator, - )); + let () = ::ErrorHandling::map_result( + as SpecExtend>::spec_extend(&mut vector, iterator), + ); vector } } @@ -63,7 +62,7 @@ where _ => panic!("capacity overflow"), }; // reuse extend specialization for TrustedLen - let () = ::map_result(vector.spec_extend(iterator)); + let () = ::ErrorHandling::map_result(vector.spec_extend(iterator)); vector } } diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs index 24b0a4bcba1e9..26cd4390f58b6 100644 --- a/library/alloc/src/vec/splice.rs +++ b/library/alloc/src/vec/splice.rs @@ 
-1,5 +1,4 @@ -use crate::collections::TryReserveError; -use crate::falloc::{Allocator, Global}; +use crate::falloc::{Allocator, ErrorHandling, Fatal, Global}; use core::ptr; use core::slice; @@ -22,14 +21,14 @@ use super::{Drain, Vec}; pub struct Splice< 'a, I: Iterator + 'a, - #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = ()> + 'a = Global, + #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global, > { pub(super) drain: Drain<'a, I::Item, A>, pub(super) replace_with: I, } #[stable(feature = "vec_splice", since = "1.21.0")] -impl = ()>> Iterator for Splice<'_, I, A> { +impl> Iterator for Splice<'_, I, A> { type Item = I::Item; fn next(&mut self) -> Option { @@ -42,22 +41,17 @@ impl = ()>> Iterator for S } #[stable(feature = "vec_splice", since = "1.21.0")] -impl = ()>> DoubleEndedIterator - for Splice<'_, I, A> -{ +impl> DoubleEndedIterator for Splice<'_, I, A> { fn next_back(&mut self) -> Option { self.drain.next_back() } } #[stable(feature = "vec_splice", since = "1.21.0")] -impl = ()>> ExactSizeIterator - for Splice<'_, I, A> -{ -} +impl> ExactSizeIterator for Splice<'_, I, A> {} #[stable(feature = "vec_splice", since = "1.21.0")] -impl = ()>> Drop for Splice<'_, I, A> { +impl> Drop for Splice<'_, I, A> { fn drop(&mut self) { self.drain.by_ref().for_each(drop); // At this point draining is done and the only remaining tasks are splicing @@ -104,7 +98,7 @@ impl = ()>> Drop for Splic } /// Private helper methods for `Splice::drop` -impl = ()>> Drain<'_, T, A> { +impl> Drain<'_, T, A> { /// The range from `self.vec.len` to `self.tail_start` contains elements /// that have been moved out. /// Fill that range as much as possible with new elements from the `replace_with` iterator. 
@@ -132,7 +126,7 @@ impl = ()>> Drain<'_, T, A> { unsafe fn move_tail(&mut self, additional: usize) { let vec = unsafe { self.vec.as_mut() }; let len = self.tail_start + self.tail_len; - let () = A::map_result(vec.buf.reserve(len, additional)); + let () = A::ErrorHandling::map_result(vec.buf.reserve(len, additional)); let new_tail_start = self.tail_start + additional; unsafe { diff --git a/library/std/src/alloc.rs b/library/std/src/alloc.rs index c9d07f927ca16..5c9c5ecac7f52 100644 --- a/library/std/src/alloc.rs +++ b/library/std/src/alloc.rs @@ -285,16 +285,7 @@ unsafe impl Allocator for System { } } - type Result = T - where - E: HandleAllocError; - - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError, - { - result.unwrap_or_else(|e| e.handle_alloc_error()) - } + type ErrorHandling = Fatal; } static HOOK: AtomicPtr<()> = AtomicPtr::new(ptr::null_mut()); From 0e723c3cc7a1aee7d6105dee9ebc1305157df2dd Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Mon, 5 Jun 2023 16:56:55 -0600 Subject: [PATCH 10/15] fix tests --- library/alloc/tests/vec.rs | 13 ++----------- tests/ui/allocator/object-safe.rs | 4 ++-- tests/ui/box/large-allocator-ice.rs | 3 ++- tests/ui/box/leak-alloc.rs | 4 +++- tests/ui/box/leak-alloc.stderr | 2 +- .../debuginfo/debuginfo-box-with-large-allocator.rs | 3 ++- tests/ui/error-codes/E0401.stderr | 2 +- tests/ui/error-codes/e0119/conflict-with-std.stderr | 2 +- tests/ui/hygiene/panic-location.run.stderr | 2 +- 9 files changed, 15 insertions(+), 20 deletions(-) diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs index 116a89b278482..7ad6c8f107599 100644 --- a/library/alloc/tests/vec.rs +++ b/library/alloc/tests/vec.rs @@ -2,7 +2,7 @@ use core::assert_eq; use core::iter::IntoIterator; use core::num::NonZeroUsize; use core::ptr::NonNull; -use std::alloc::{Allocator, Layout, System, FallibleAdapter, IntoLayout, handle_alloc_error}; +use std::alloc::{Allocator, Layout, System, Fatal}; use 
std::assert_matches::assert_matches; use std::borrow::Cow; use std::cell::Cell; @@ -1096,16 +1096,7 @@ fn test_into_iter_drop_allocator() { unsafe { System.deallocate(ptr, layout) } } - type Result = T - where - E: IntoLayout; - - fn map_result(result: Result) -> Self::Result - where - E: IntoLayout - { - result.unwrap_or_else(|e| handle_alloc_error(e.into_layout())) - } + type ErrorHandling = Fatal; } let mut drop_count = 0; diff --git a/tests/ui/allocator/object-safe.rs b/tests/ui/allocator/object-safe.rs index fae7ab7fe3319..6bee8fc5d98f9 100644 --- a/tests/ui/allocator/object-safe.rs +++ b/tests/ui/allocator/object-safe.rs @@ -4,9 +4,9 @@ #![feature(allocator_api)] -use std::alloc::{Allocator, System}; +use std::alloc::{Allocator, System, Fatal}; -fn ensure_object_safe(_: &dyn Allocator) {} +fn ensure_object_safe(_: &dyn Allocator) {} fn main() { ensure_object_safe(&System); diff --git a/tests/ui/box/large-allocator-ice.rs b/tests/ui/box/large-allocator-ice.rs index b3a882ff089b0..3e45d877d5a06 100644 --- a/tests/ui/box/large-allocator-ice.rs +++ b/tests/ui/box/large-allocator-ice.rs @@ -2,7 +2,7 @@ #![feature(allocator_api)] #![allow(unused_must_use)] -use std::alloc::Allocator; +use std::alloc::{Allocator, Fatal}; struct BigAllocator([usize; 2]); @@ -16,6 +16,7 @@ unsafe impl Allocator for BigAllocator { unsafe fn deallocate(&self, _: std::ptr::NonNull, _: std::alloc::Layout) { todo!() } + type ErrorHandling = Fatal; } fn main() { diff --git a/tests/ui/box/leak-alloc.rs b/tests/ui/box/leak-alloc.rs index 3f0f39f448b91..c3fe1ff60362b 100644 --- a/tests/ui/box/leak-alloc.rs +++ b/tests/ui/box/leak-alloc.rs @@ -1,6 +1,6 @@ #![feature(allocator_api)] -use std::alloc::{AllocError, Allocator, Layout, System}; +use std::alloc::{AllocError, Allocator, Layout, System, Fatal}; use std::ptr::NonNull; use std::boxed::Box; @@ -15,6 +15,8 @@ unsafe impl Allocator for Alloc { unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { System.deallocate(ptr, layout) } + + 
type ErrorHandling = Fatal; } fn use_value(_: u32) {} diff --git a/tests/ui/box/leak-alloc.stderr b/tests/ui/box/leak-alloc.stderr index 5140b58934a5c..2f1e0d78bd3a8 100644 --- a/tests/ui/box/leak-alloc.stderr +++ b/tests/ui/box/leak-alloc.stderr @@ -1,5 +1,5 @@ error[E0505]: cannot move out of `alloc` because it is borrowed - --> $DIR/leak-alloc.rs:26:10 + --> $DIR/leak-alloc.rs:28:10 | LL | let alloc = Alloc {}; | ----- binding `alloc` declared here diff --git a/tests/ui/debuginfo/debuginfo-box-with-large-allocator.rs b/tests/ui/debuginfo/debuginfo-box-with-large-allocator.rs index 761539227a79c..3ec87a48301f4 100644 --- a/tests/ui/debuginfo/debuginfo-box-with-large-allocator.rs +++ b/tests/ui/debuginfo/debuginfo-box-with-large-allocator.rs @@ -4,7 +4,7 @@ #![feature(allocator_api)] -use std::alloc::{AllocError, Allocator, Layout}; +use std::alloc::{AllocError, Allocator, Layout, Fatal}; use std::ptr::NonNull; struct ZST; @@ -16,6 +16,7 @@ unsafe impl Allocator for &ZST { unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { todo!() } + type ErrorHandling = Fatal; } fn main() { diff --git a/tests/ui/error-codes/E0401.stderr b/tests/ui/error-codes/E0401.stderr index 2e65ef27a5879..fa4b91cacef72 100644 --- a/tests/ui/error-codes/E0401.stderr +++ b/tests/ui/error-codes/E0401.stderr @@ -53,7 +53,7 @@ LL | bfnr(x); - impl Fn for &F where A: Tuple, F: Fn, F: ?Sized; - impl Fn for Box - where Args: Tuple, F: Fn, A: alloc::falloc::Allocator, F: ?Sized; + where Args: Tuple, F: Fn, A: Allocator, F: ?Sized; note: required by a bound in `bfnr` --> $DIR/E0401.rs:4:30 | diff --git a/tests/ui/error-codes/e0119/conflict-with-std.stderr b/tests/ui/error-codes/e0119/conflict-with-std.stderr index 9634a9e3282f9..ef888a1c2871e 100644 --- a/tests/ui/error-codes/e0119/conflict-with-std.stderr +++ b/tests/ui/error-codes/e0119/conflict-with-std.stderr @@ -6,7 +6,7 @@ LL | impl AsRef for Box { | = note: conflicting implementation in crate `alloc`: - impl AsRef for Box - where A: 
alloc::falloc::Allocator, T: ?Sized; + where A: Allocator, T: ?Sized; error[E0119]: conflicting implementations of trait `From` for type `S` --> $DIR/conflict-with-std.rs:12:1 diff --git a/tests/ui/hygiene/panic-location.run.stderr b/tests/ui/hygiene/panic-location.run.stderr index d1271ffb9a135..926af22bc44c0 100644 --- a/tests/ui/hygiene/panic-location.run.stderr +++ b/tests/ui/hygiene/panic-location.run.stderr @@ -1,2 +1,2 @@ -thread 'main' panicked at 'capacity overflow', library/alloc/src/falloc.rs:94:5 +thread 'main' panicked at 'capacity overflow', library/alloc/src/falloc.rs:518:5 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace From 6689a37165684c09f718edca074974e43836d3a5 Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Mon, 5 Jun 2023 21:56:21 -0600 Subject: [PATCH 11/15] move to core to fix tests --- library/alloc/src/alloc.rs | 33 +- library/alloc/src/boxed.rs | 6 +- .../alloc/src/collections/vec_deque/mod.rs | 2 +- library/alloc/src/falloc.rs | 638 ------------------ library/alloc/src/lib.rs | 4 - library/alloc/src/raw_vec/tests.rs | 3 +- library/alloc/src/slice.rs | 6 +- library/alloc/src/vec/into_iter.rs | 12 +- library/alloc/src/vec/mod.rs | 12 +- library/alloc/src/vec/spec_from_elem.rs | 2 +- library/alloc/src/vec/spec_from_iter.rs | 2 +- .../alloc/src/vec/spec_from_iter_nested.rs | 2 +- library/alloc/src/vec/splice.rs | 2 +- library/alloc/tests/boxed.rs | 4 +- library/core/src/alloc/mod.rs | 242 +++++++ tests/ui/hygiene/panic-location.run.stderr | 2 +- 16 files changed, 300 insertions(+), 672 deletions(-) delete mode 100644 library/alloc/src/falloc.rs diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs index fd6b0d56d02ab..bff52575c195a 100644 --- a/library/alloc/src/alloc.rs +++ b/library/alloc/src/alloc.rs @@ -10,15 +10,9 @@ use core::ptr::Unique; #[cfg(not(test))] use core::ptr::{self, NonNull}; -#[unstable(feature = "allocator_api", issue = "32838")] -pub use crate::falloc::{Allocator, 
Fallible}; -#[unstable(feature = "allocator_api", issue = "32838")] -#[cfg(not(no_global_oom_handling))] -pub use crate::falloc::{FallibleAdapter, Fatal, HandleAllocError, InfallibleAdapter}; #[stable(feature = "alloc_module", since = "1.28.0")] #[doc(inline)] -#[allow(deprecated)] -pub use core::alloc::{AllocError, GlobalAlloc, Layout, LayoutErr, LayoutError}; +pub use core::alloc::*; #[cfg(test)] mod tests; @@ -454,3 +448,28 @@ impl WriteCloneIntoRaw for T { unsafe { target.copy_from_nonoverlapping(self, 1) }; } } + +#[cfg(not(no_global_oom_handling))] +use crate::collections::{TryReserveError, TryReserveErrorKind}; + +// One central function responsible for reporting capacity overflows. This'll +// ensure that the code generation related to these panics is minimal as there's +// only one location which panics rather than a bunch throughout the module. +#[cfg(not(no_global_oom_handling))] +pub(crate) fn capacity_overflow() -> ! { + panic!("capacity overflow"); +} + +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +impl HandleAllocError for TryReserveError { + fn handle_alloc_error(self) -> ! { + match self.kind() { + TryReserveErrorKind::CapacityOverflow => capacity_overflow(), + TryReserveErrorKind::AllocError { layout, .. 
} => handle_alloc_error(layout), + } + } +} + +pub(crate) type AllocResult = + <::ErrorHandling as ErrorHandling>::Result; diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs index 9b1c8a1b40d2f..12b4e85e37512 100644 --- a/library/alloc/src/boxed.rs +++ b/library/alloc/src/boxed.rs @@ -165,14 +165,14 @@ use core::pin::Pin; use core::ptr::{self, Unique}; use core::task::{Context, Poll}; +#[cfg(not(no_global_oom_handling))] +use crate::alloc::Fatal; #[cfg(not(no_global_oom_handling))] use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw}; +use crate::alloc::{AllocError, AllocResult, Allocator, ErrorHandling, Global, Layout}; #[cfg(not(no_global_oom_handling))] use crate::borrow::Cow; use crate::collections::TryReserveError; -#[cfg(not(no_global_oom_handling))] -use crate::falloc::Fatal; -use crate::falloc::{AllocError, AllocResult, Allocator, ErrorHandling, Global, Layout}; use crate::raw_vec::RawVec; #[cfg(not(no_global_oom_handling))] use crate::str::from_boxed_utf8_unchecked; diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs index 40b502a1d63fb..72cacb966a93a 100644 --- a/library/alloc/src/collections/vec_deque/mod.rs +++ b/library/alloc/src/collections/vec_deque/mod.rs @@ -22,9 +22,9 @@ use core::slice; #[allow(unused_imports)] use core::mem; +use crate::alloc::{AllocResult, Allocator, ErrorHandling, Fatal, Global}; use crate::collections::TryReserveError; use crate::collections::TryReserveErrorKind; -use crate::falloc::{AllocResult, Allocator, ErrorHandling, Fatal, Global}; use crate::raw_vec::RawVec; use crate::vec::Vec; diff --git a/library/alloc/src/falloc.rs b/library/alloc/src/falloc.rs deleted file mode 100644 index c9a02ca3dc267..0000000000000 --- a/library/alloc/src/falloc.rs +++ /dev/null @@ -1,638 +0,0 @@ -use core::error::Error; -use core::ptr::{self, NonNull}; - -#[cfg(not(no_global_oom_handling))] -pub use crate::alloc::handle_alloc_error; -pub use 
crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError}; - -/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of -/// data described via [`Layout`][]. -/// -/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because having -/// an allocator like `MyAlloc([u8; N])` cannot be moved, without updating the pointers to the -/// allocated memory. -/// -/// Unlike [`GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an underlying -/// allocator does not support this (like jemalloc) or return a null pointer (such as -/// `libc::malloc`), this must be caught by the implementation. -/// -/// ### Currently allocated memory -/// -/// Some of the methods require that a memory block be *currently allocated* via an allocator. This -/// means that: -/// -/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or -/// [`shrink`], and -/// -/// * the memory block has not been subsequently deallocated, where blocks are either deallocated -/// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or -/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer -/// remains valid. -/// -/// [`allocate`]: crate::falloc::Allocator::allocate -/// [`grow`]: crate::falloc::Allocator::grow -/// [`shrink`]: crate::falloc::Allocator::shrink -/// [`deallocate`]: crate::falloc::Allocator::deallocate -/// -/// ### Memory fitting -/// -/// Some of the methods require that a layout *fit* a memory block. 
What it means for a layout to -/// "fit" a memory block means (or equivalently, for a memory block to "fit" a layout) is that the -/// following conditions must hold: -/// -/// * The block must be allocated with the same alignment as [`layout.align()`], and -/// -/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where: -/// - `min` is the size of the layout most recently used to allocate the block, and -/// - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`]. -/// -/// [`layout.align()`]: Layout::align -/// [`layout.size()`]: Layout::size -/// -/// # Safety -/// -/// * Memory blocks returned from an allocator must point to valid memory and retain their validity -/// until the instance and all of its copies and clones are dropped, -/// -/// * copying, cloning, or moving the allocator must not invalidate memory blocks returned from this -/// allocator. A copied or cloned allocator must behave like the same allocator, and -/// -/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other -/// method of the allocator. -/// -/// [*currently allocated*]: #currently-allocated-memory -#[unstable(feature = "allocator_api", issue = "32838")] -pub unsafe trait Allocator { - /// Attempts to allocate a block of memory. - /// - /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`. - /// - /// The returned block may have a larger size than specified by `layout.size()`, and may or may - /// not have its contents initialized. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet - /// allocator's size or alignment constraints. - /// - /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or - /// aborting, but this is not a strict requirement. 
(Specifically: it is *legal* to implement - /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) - /// - /// Clients wishing to abort computation in response to an allocation error are encouraged to - /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. - /// - /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html - fn allocate(&self, layout: Layout) -> Result, AllocError>; - - /// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized. - /// - /// # Errors - /// - /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet - /// allocator's size or alignment constraints. - /// - /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or - /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement - /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) - /// - /// Clients wishing to abort computation in response to an allocation error are encouraged to - /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. - /// - /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html - fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { - let ptr = self.allocate(layout)?; - // SAFETY: `alloc` returns a valid memory block - unsafe { ptr.as_non_null_ptr().as_ptr().write_bytes(0, ptr.len()) } - Ok(ptr) - } - - /// Deallocates the memory referenced by `ptr`. - /// - /// # Safety - /// - /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and - /// * `layout` must [*fit*] that block of memory. - /// - /// [*currently allocated*]: #currently-allocated-memory - /// [*fit*]: #memory-fitting - unsafe fn deallocate(&self, ptr: NonNull, layout: Layout); - - /// Attempts to extend the memory block. 
- /// - /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated - /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish - /// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout. - /// - /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been - /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the - /// allocation was grown in-place. The newly returned pointer is the only valid pointer - /// for accessing this memory now. - /// - /// If this method returns `Err`, then ownership of the memory block has not been transferred to - /// this allocator, and the contents of the memory block are unaltered. - /// - /// # Safety - /// - /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. - /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). - /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. - /// - /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. - /// - /// [*currently allocated*]: #currently-allocated-memory - /// [*fit*]: #memory-fitting - /// - /// # Errors - /// - /// Returns `Err` if the new layout does not meet the allocator's size and alignment - /// constraints of the allocator, or if growing otherwise fails. - /// - /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or - /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement - /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) - /// - /// Clients wishing to abort computation in response to an allocation error are encouraged to - /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. 
- /// - /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html - unsafe fn grow( - &self, - ptr: NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - debug_assert!( - new_layout.size() >= old_layout.size(), - "`new_layout.size()` must be greater than or equal to `old_layout.size()`" - ); - - let new_ptr = self.allocate(new_layout)?; - - // SAFETY: because `new_layout.size()` must be greater than or equal to - // `old_layout.size()`, both the old and new memory allocation are valid for reads and - // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet - // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is - // safe. The safety contract for `dealloc` must be upheld by the caller. - unsafe { - ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size()); - self.deallocate(ptr, old_layout); - } - - Ok(new_ptr) - } - - /// Behaves like `grow`, but also ensures that the new contents are set to zero before being - /// returned. - /// - /// The memory block will contain the following contents after a successful call to - /// `grow_zeroed`: - /// * Bytes `0..old_layout.size()` are preserved from the original allocation. - /// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on - /// the allocator implementation. `old_size` refers to the size of the memory block prior - /// to the `grow_zeroed` call, which may be larger than the size that was originally - /// requested when it was allocated. - /// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory - /// block returned by the `grow_zeroed` call. - /// - /// # Safety - /// - /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. - /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). 
- /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`. - /// - /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. - /// - /// [*currently allocated*]: #currently-allocated-memory - /// [*fit*]: #memory-fitting - /// - /// # Errors - /// - /// Returns `Err` if the new layout does not meet the allocator's size and alignment - /// constraints of the allocator, or if growing otherwise fails. - /// - /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or - /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement - /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) - /// - /// Clients wishing to abort computation in response to an allocation error are encouraged to - /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. - /// - /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html - unsafe fn grow_zeroed( - &self, - ptr: NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - debug_assert!( - new_layout.size() >= old_layout.size(), - "`new_layout.size()` must be greater than or equal to `old_layout.size()`" - ); - - let new_ptr = self.allocate_zeroed(new_layout)?; - - // SAFETY: because `new_layout.size()` must be greater than or equal to - // `old_layout.size()`, both the old and new memory allocation are valid for reads and - // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet - // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is - // safe. The safety contract for `dealloc` must be upheld by the caller. - unsafe { - ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size()); - self.deallocate(ptr, old_layout); - } - - Ok(new_ptr) - } - - /// Attempts to shrink the memory block. 
- /// - /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated - /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish - /// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout. - /// - /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been - /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the - /// allocation was shrunk in-place. The newly returned pointer is the only valid pointer - /// for accessing this memory now. - /// - /// If this method returns `Err`, then ownership of the memory block has not been transferred to - /// this allocator, and the contents of the memory block are unaltered. - /// - /// # Safety - /// - /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator. - /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.). - /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`. - /// - /// Note that `new_layout.align()` need not be the same as `old_layout.align()`. - /// - /// [*currently allocated*]: #currently-allocated-memory - /// [*fit*]: #memory-fitting - /// - /// # Errors - /// - /// Returns `Err` if the new layout does not meet the allocator's size and alignment - /// constraints of the allocator, or if shrinking otherwise fails. - /// - /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or - /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement - /// this trait atop an underlying native allocation library that aborts on memory exhaustion.) - /// - /// Clients wishing to abort computation in response to an allocation error are encouraged to - /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar. 
- /// - /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html - unsafe fn shrink( - &self, - ptr: NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - debug_assert!( - new_layout.size() <= old_layout.size(), - "`new_layout.size()` must be smaller than or equal to `old_layout.size()`" - ); - - let new_ptr = self.allocate(new_layout)?; - - // SAFETY: because `new_layout.size()` must be lower than or equal to - // `old_layout.size()`, both the old and new memory allocation are valid for reads and - // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet - // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is - // safe. The safety contract for `dealloc` must be upheld by the caller. - unsafe { - ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_layout.size()); - self.deallocate(ptr, old_layout); - } - - Ok(new_ptr) - } - - /// Creates a "by reference" adapter for this instance of `Allocator`. - /// - /// The returned adapter also implements `Allocator` and will simply borrow this. - #[inline(always)] - fn by_ref(&self) -> &Self - where - Self: Sized, - { - self - } - - /// The mode of error handling for types using this allocator. - /// - /// [`Infallible`] means that any allocation failures should be handled - /// globally, often by panicking or aborting. Functions performing - /// allocation will simply return the value or nothing. - /// - /// [`Fallible`] means that any allocation failures should be handled - /// at the point of use. Functions performing allocation will return - /// `Result`. - type ErrorHandling: ErrorHandling; -} - -// FIXME: this trait should be sealed -#[unstable(feature = "allocator_api", issue = "32838")] -pub trait ErrorHandling { - /// Result type returned by functions that are conditionally fallible. 
- /// - /// - "Infallible" allocators set `type Result = T` - /// - "Fallible" allocators set `type Result = Result` - #[cfg(not(no_global_oom_handling))] - #[must_use] // Doesn't actually work - type Result - where - E: HandleAllocError; - - /// Function to map allocation results into `Self::Result`. - /// - /// - For "Infallible" allocators, this should call [`handle_alloc_error`] - /// - For "Fallible" allocators, this is just the identity function - #[cfg(not(no_global_oom_handling))] - #[must_use] - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError; - - /// Result type returned by functions that are conditionally fallible. - /// - /// - "Infallible" allocators set `type Result = T` - /// - "Fallible" allocators set `type Result = Result` - #[cfg(no_global_oom_handling)] - #[must_use] // Doesn't actually work - type Result; - - /// Function to map allocation results into `Self::Result`. - /// - /// - For "Infallible" allocators, this should call [`handle_alloc_error`] - /// - For "Fallible" allocators, this is just the identity function - #[cfg(no_global_oom_handling)] - #[must_use] - fn map_result(result: Result) -> Self::Result; -} - -#[derive(Debug)] -#[unstable(feature = "allocator_api", issue = "32838")] -/// Error handling mode to use when the user of the type wants to handle -/// allocation failures at the point of use. Functions performing -/// allocation will return `Result`. 
-pub struct Fallible; - -#[unstable(feature = "allocator_api", issue = "32838")] -impl ErrorHandling for Fallible { - #[cfg(not(no_global_oom_handling))] - type Result = Result - where - E: HandleAllocError; - - #[cfg(not(no_global_oom_handling))] - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError, - { - result - } - - #[cfg(no_global_oom_handling)] - type Result = Result; - - #[cfg(no_global_oom_handling)] - fn map_result(result: Result) -> Self::Result { - result - } -} - -#[derive(Debug)] -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -/// Error handling mode to use when the user of the type wants to ignore -/// allocation failures, treating them as a fatal error. Functions -/// performing allocation will return values directly. -pub struct Fatal; - -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -impl ErrorHandling for Fatal { - #[cfg(not(no_global_oom_handling))] - type Result = T - where - E: HandleAllocError; - - #[cfg(not(no_global_oom_handling))] - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError, - { - result.unwrap_or_else(|e| e.handle_alloc_error()) - } -} - -#[unstable(feature = "allocator_api", issue = "32838")] -unsafe impl Allocator for &A -where - A: Allocator + ?Sized, -{ - #[inline] - fn allocate(&self, layout: Layout) -> Result, AllocError> { - (**self).allocate(layout) - } - - #[inline] - fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { - (**self).allocate_zeroed(layout) - } - - #[inline] - unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - // SAFETY: the safety contract must be upheld by the caller - unsafe { (**self).deallocate(ptr, layout) } - } - - #[inline] - unsafe fn grow( - &self, - ptr: NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - // SAFETY: the safety contract must be upheld by the caller - unsafe { (**self).grow(ptr, 
old_layout, new_layout) } - } - - #[inline] - unsafe fn grow_zeroed( - &self, - ptr: NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - // SAFETY: the safety contract must be upheld by the caller - unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) } - } - - #[inline] - unsafe fn shrink( - &self, - ptr: NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - // SAFETY: the safety contract must be upheld by the caller - unsafe { (**self).shrink(ptr, old_layout, new_layout) } - } - - type ErrorHandling = A::ErrorHandling; -} - -#[cfg(not(no_global_oom_handling))] -use crate::collections::{TryReserveError, TryReserveErrorKind}; - -// One central function responsible for reporting capacity overflows. This'll -// ensure that the code generation related to these panics is minimal as there's -// only one location which panics rather than a bunch throughout the module. -#[cfg(not(no_global_oom_handling))] -pub(crate) fn capacity_overflow() -> ! { - panic!("capacity overflow"); -} - -/// Trait for handling alloc errors for allocators which -/// panic or abort instead of returning errors. -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -pub trait HandleAllocError { - /// Globally handle this allocation error, using [`handle_alloc_error`] - fn handle_alloc_error(self) -> !; -} - -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -impl HandleAllocError for TryReserveError { - fn handle_alloc_error(self) -> ! { - match self.kind() { - TryReserveErrorKind::CapacityOverflow => capacity_overflow(), - TryReserveErrorKind::AllocError { layout, .. } => handle_alloc_error(layout), - } - } -} - -/// Wrapper around an existing allocator allowing one to -/// use a fallible allocator as an infallible one. 
-#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -#[derive(Debug)] -pub struct InfallibleAdapter>(pub A); - -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -unsafe impl> Allocator for InfallibleAdapter { - fn allocate(&self, layout: Layout) -> Result, AllocError> { - self.0.allocate(layout) - } - - unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { - unsafe { self.0.deallocate(ptr, layout) } - } - - fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { - self.0.allocate_zeroed(layout) - } - - unsafe fn grow( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - unsafe { self.0.grow(ptr, old_layout, new_layout) } - } - - unsafe fn grow_zeroed( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } - } - - unsafe fn shrink( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - unsafe { self.0.shrink(ptr, old_layout, new_layout) } - } - - fn by_ref(&self) -> &Self - where - Self: Sized, - { - self - } - - type ErrorHandling = Fatal; -} - -/// Wrapper around an existing allocator allowing one to -/// use an infallible allocator as a fallible one. 
-#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -#[derive(Debug)] -pub struct FallibleAdapter>(pub A); - -#[unstable(feature = "allocator_api", issue = "32838")] -#[cfg(not(no_global_oom_handling))] -unsafe impl> Allocator for FallibleAdapter { - fn allocate(&self, layout: Layout) -> Result, AllocError> { - self.0.allocate(layout) - } - - unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { - unsafe { self.0.deallocate(ptr, layout) } - } - - fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { - self.0.allocate_zeroed(layout) - } - - unsafe fn grow( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - unsafe { self.0.grow(ptr, old_layout, new_layout) } - } - - unsafe fn grow_zeroed( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } - } - - unsafe fn shrink( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - unsafe { self.0.shrink(ptr, old_layout, new_layout) } - } - - fn by_ref(&self) -> &Self - where - Self: Sized, - { - self - } - - type ErrorHandling = Fallible; -} - -pub(crate) type AllocResult = - <::ErrorHandling as ErrorHandling>::Result; diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 2774aa250928f..59fa91c1066dc 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -226,10 +226,6 @@ mod raw_vec; pub mod alloc; -// Fallible allocation experiment - -mod falloc; - // Primitive types using the heaps above // Need to conditionally define the mod from `boxed.rs` to avoid diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs index bc78afc123fea..b444df014582a 100644 --- a/library/alloc/src/raw_vec/tests.rs +++ b/library/alloc/src/raw_vec/tests.rs @@ -3,7 +3,7 @@ use std::cell::Cell; 
#[test] fn allocator_param() -> Result<(), TryReserveError> { - use crate::alloc::AllocError; + use crate::alloc::{AllocError, Fatal}; // Writing a test of integration between third-party // allocators and `RawVec` is a little tricky because the `RawVec` @@ -37,6 +37,7 @@ fn allocator_param() -> Result<(), TryReserveError> { unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { unsafe { Global.deallocate(ptr, layout) } } + type ErrorHandling = Fatal; } let a = BoundedAlloc { fuel: Cell::new(500) }; diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs index 2fa1d0d99760b..38c4e8ab1d4ae 100644 --- a/library/alloc/src/slice.rs +++ b/library/alloc/src/slice.rs @@ -26,12 +26,12 @@ use crate::alloc::Allocator; #[cfg(not(no_global_oom_handling))] use crate::alloc::{self, Global}; #[cfg(not(no_global_oom_handling))] +use crate::alloc::{AllocResult, ErrorHandling}; +#[cfg(not(no_global_oom_handling))] use crate::borrow::ToOwned; use crate::boxed::Box; #[cfg(not(no_global_oom_handling))] use crate::collections::TryReserveError; -#[cfg(not(no_global_oom_handling))] -use crate::falloc::{AllocResult, ErrorHandling}; use crate::vec::Vec; #[cfg(test)] @@ -837,7 +837,7 @@ impl ToOwned for [T] { #[cfg(test)] fn to_owned(&self) -> Vec { - hack::to_vec(self, Global) + ::ErrorHandling::map_result(hack::to_vec(self, Global)) } fn clone_into(&self, target: &mut Vec) { diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs index 02ae94536d8e6..5a0cfaaf1845b 100644 --- a/library/alloc/src/vec/into_iter.rs +++ b/library/alloc/src/vec/into_iter.rs @@ -1,10 +1,12 @@ #[cfg(not(no_global_oom_handling))] use super::AsVecIntoIter; +#[cfg(test)] +use crate::alloc::ErrorHandling; +#[cfg(not(no_global_oom_handling))] +use crate::alloc::Fatal; use crate::alloc::{Allocator, Global}; #[cfg(not(no_global_oom_handling))] use crate::collections::VecDeque; -#[cfg(not(no_global_oom_handling))] -use crate::falloc::Fatal; use crate::raw_vec::RawVec; use 
core::array; use core::fmt; @@ -395,7 +397,11 @@ where } #[cfg(test)] fn clone(&self) -> Self { - crate::slice::to_vec(self.as_slice(), self.alloc.deref().clone()).into_iter() + A::ErrorHandling::map_result(crate::slice::to_vec( + self.as_slice(), + self.alloc.deref().clone(), + )) + .into_iter() } } diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 666513908a156..0597b51c6d951 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -64,12 +64,12 @@ use core::ops::{self, Index, IndexMut, Range, RangeBounds}; use core::ptr::{self, NonNull}; use core::slice::{self, SliceIndex}; +#[cfg(not(no_global_oom_handling))] +use crate::alloc::Fatal; +use crate::alloc::{AllocResult, Allocator, ErrorHandling, Global}; use crate::borrow::{Cow, ToOwned}; use crate::boxed::Box; use crate::collections::{TryReserveError, TryReserveErrorKind}; -#[cfg(not(no_global_oom_handling))] -use crate::falloc::Fatal; -use crate::falloc::{AllocResult, Allocator, ErrorHandling, Global}; use crate::raw_vec::RawVec; #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")] @@ -2683,7 +2683,7 @@ where #[cfg(test)] fn clone(&self) -> Self { let alloc = self.allocator().clone(); - crate::slice::to_vec(&**self, alloc) + ::ErrorHandling::map_result(crate::slice::to_vec(&**self, alloc)) } fn clone_from(&mut self, other: &Self) { @@ -3127,7 +3127,7 @@ impl From<&[T]> for Vec { } #[cfg(test)] fn from(s: &[T]) -> Vec { - crate::slice::to_vec(s, Global) + ::ErrorHandling::map_result(crate::slice::to_vec(s, Global)) } } @@ -3147,7 +3147,7 @@ impl From<&mut [T]> for Vec { } #[cfg(test)] fn from(s: &mut [T]) -> Vec { - crate::slice::to_vec(s, Global) + ::ErrorHandling::map_result(crate::slice::to_vec(s, Global)) } } diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs index e059bb292eef4..096d4882942f5 100644 --- a/library/alloc/src/vec/spec_from_elem.rs +++ 
b/library/alloc/src/vec/spec_from_elem.rs @@ -1,7 +1,7 @@ use core::ptr; +use crate::alloc::Allocator; use crate::collections::TryReserveError; -use crate::falloc::Allocator; use super::{IsZero, Vec}; diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs index 6b7f4ebad57dc..458190220e88f 100644 --- a/library/alloc/src/vec/spec_from_iter.rs +++ b/library/alloc/src/vec/spec_from_iter.rs @@ -1,7 +1,7 @@ use core::mem::ManuallyDrop; use core::ptr; -use crate::falloc::{Allocator, ErrorHandling, Global}; +use crate::alloc::{Allocator, ErrorHandling, Global}; use super::{IntoIter, SpecExtend, SpecFromIterNested, Vec}; diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs index e580dd5ee5192..3ee29a14379cd 100644 --- a/library/alloc/src/vec/spec_from_iter_nested.rs +++ b/library/alloc/src/vec/spec_from_iter_nested.rs @@ -2,7 +2,7 @@ use core::cmp; use core::iter::TrustedLen; use core::ptr; -use crate::falloc::{Allocator, ErrorHandling, Global}; +use crate::alloc::{Allocator, ErrorHandling, Global}; use crate::raw_vec::RawVec; use super::{SpecExtend, Vec}; diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs index 26cd4390f58b6..308bd992d24d8 100644 --- a/library/alloc/src/vec/splice.rs +++ b/library/alloc/src/vec/splice.rs @@ -1,4 +1,4 @@ -use crate::falloc::{Allocator, ErrorHandling, Fatal, Global}; +use crate::alloc::{Allocator, ErrorHandling, Fatal, Global}; use core::ptr; use core::slice; diff --git a/library/alloc/tests/boxed.rs b/library/alloc/tests/boxed.rs index 4cacee0414d7d..19565c3491dd0 100644 --- a/library/alloc/tests/boxed.rs +++ b/library/alloc/tests/boxed.rs @@ -1,4 +1,4 @@ -use core::alloc::{AllocError, Allocator, Layout}; +use core::alloc::{AllocError, Allocator, Layout, Fatal}; use core::cell::Cell; use core::mem::MaybeUninit; use core::ptr::NonNull; @@ -178,4 +178,6 @@ unsafe impl Allocator for ConstAllocator { { self } + + type 
ErrorHandling = Fatal; } diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs index 78091c0172955..0a42e1a7213d3 100644 --- a/library/core/src/alloc/mod.rs +++ b/library/core/src/alloc/mod.rs @@ -363,6 +363,120 @@ pub unsafe trait Allocator { { self } + + /// The mode of error handling for types using this allocator. + /// + /// [`Infallible`] means that any allocation failures should be handled + /// globally, often by panicking or aborting. Functions performing + /// allocation will simply return the value or nothing. + /// + /// [`Fallible`] means that any allocation failures should be handled + /// at the point of use. Functions performing allocation will return + /// `Result`. + type ErrorHandling: ErrorHandling; +} + +/// Trait for handling alloc errors for allocators which +/// panic or abort instead of returning errors. +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +pub trait HandleAllocError { + /// Globally handle this allocation error, using [`handle_alloc_error`] + fn handle_alloc_error(self) -> !; +} + +// FIXME: this trait should be sealed +#[unstable(feature = "allocator_api", issue = "32838")] +pub trait ErrorHandling { + /// Result type returned by functions that are conditionally fallible. + /// + /// - "Infallible" allocators set `type Result = T` + /// - "Fallible" allocators set `type Result = Result` + #[cfg(not(no_global_oom_handling))] + type Result + where + E: HandleAllocError; + + /// Function to map allocation results into `Self::Result`. + /// + /// - For "Infallible" allocators, this should call [`handle_alloc_error`] + /// - For "Fallible" allocators, this is just the identity function + #[cfg(not(no_global_oom_handling))] + #[must_use] + fn map_result(result: Result) -> Self::Result + where + E: HandleAllocError; + + /// Result type returned by functions that are conditionally fallible. 
+ /// + /// - "Infallible" allocators set `type Result = T` + /// - "Fallible" allocators set `type Result = Result` + #[cfg(no_global_oom_handling)] + type Result; + + /// Function to map allocation results into `Self::Result`. + /// + /// - For "Infallible" allocators, this should call [`handle_alloc_error`] + /// - For "Fallible" allocators, this is just the identity function + #[cfg(no_global_oom_handling)] + #[must_use] + fn map_result(result: Result) -> Self::Result; +} + +/// Error handling mode to use when the user of the type wants to handle +/// allocation failures at the point of use. Functions performing +/// allocation will return `Result`. +#[derive(Debug)] +#[unstable(feature = "allocator_api", issue = "32838")] +pub struct Fallible; + +#[unstable(feature = "allocator_api", issue = "32838")] +impl ErrorHandling for Fallible { + #[cfg(not(no_global_oom_handling))] + type Result = Result + where + E: HandleAllocError; + + #[cfg(not(no_global_oom_handling))] + fn map_result(result: Result) -> Self::Result + where + E: HandleAllocError, + { + result + } + + #[cfg(no_global_oom_handling)] + type Result = Result; + + #[cfg(no_global_oom_handling)] + fn map_result(result: Result) -> Self::Result { + result + } +} + +/// Error handling mode to use when the user of the type wants to ignore +/// allocation failures, treating them as a fatal error. Functions +/// performing allocation will return values directly. 
+#[derive(Debug)] +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +pub struct Fatal; + +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +impl ErrorHandling for Fatal { + #[cfg(not(no_global_oom_handling))] + type Result = T + where + E: HandleAllocError; + + #[cfg(not(no_global_oom_handling))] + fn map_result(result: Result) -> Self::Result + where + E: HandleAllocError, + { + result.unwrap_or_else(|e| e.handle_alloc_error()) + } } #[unstable(feature = "allocator_api", issue = "32838")] @@ -418,4 +532,132 @@ where // SAFETY: the safety contract must be upheld by the caller unsafe { (**self).shrink(ptr, old_layout, new_layout) } } + + type ErrorHandling = A::ErrorHandling; +} + +/// Wrapper around an existing allocator allowing one to +/// use a fallible allocator as an infallible one. +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +#[derive(Debug)] +pub struct FatalAdapter>(pub A); + +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +unsafe impl> Allocator for FatalAdapter { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate(layout) + } + + fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate_zeroed(layout) + } + + unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.deallocate(ptr, layout) } + } + + unsafe fn grow( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.grow(ptr, old_layout, new_layout) } + } + + unsafe fn grow_zeroed( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the 
caller + unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } + } + + unsafe fn shrink( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.shrink(ptr, old_layout, new_layout) } + } + + fn by_ref(&self) -> &Self + where + Self: Sized, + { + self + } + + type ErrorHandling = Fatal; +} + +/// Wrapper around an existing allocator allowing one to +/// use an infallible allocator as a fallible one. +#[cfg(not(no_global_oom_handling))] +#[unstable(feature = "allocator_api", issue = "32838")] +#[derive(Debug)] +pub struct FallibleAdapter>(pub A); + +#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(not(no_global_oom_handling))] +unsafe impl> Allocator for FallibleAdapter { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate(layout) + } + + fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate_zeroed(layout) + } + + unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.deallocate(ptr, layout) } + } + + unsafe fn grow( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.grow(ptr, old_layout, new_layout) } + } + + unsafe fn grow_zeroed( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } + } + + unsafe fn shrink( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.shrink(ptr, old_layout, new_layout) } + } + + fn 
by_ref(&self) -> &Self + where + Self: Sized, + { + self + } + + type ErrorHandling = Fallible; } diff --git a/tests/ui/hygiene/panic-location.run.stderr b/tests/ui/hygiene/panic-location.run.stderr index 926af22bc44c0..f29de8d621dee 100644 --- a/tests/ui/hygiene/panic-location.run.stderr +++ b/tests/ui/hygiene/panic-location.run.stderr @@ -1,2 +1,2 @@ -thread 'main' panicked at 'capacity overflow', library/alloc/src/falloc.rs:518:5 +thread 'main' panicked at 'capacity overflow', library/alloc/src/alloc.rs:460:5 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace From 3f359d61790b192956d413ed0e260771f62ce3b7 Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Tue, 6 Jun 2023 21:57:41 -0600 Subject: [PATCH 12/15] fix miri test and docs --- library/core/src/alloc/mod.rs | 6 +++--- src/tools/miri/tests/pass/box-custom-alloc.rs | 6 +++++- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs index 0a42e1a7213d3..f3595e52e474a 100644 --- a/library/core/src/alloc/mod.rs +++ b/library/core/src/alloc/mod.rs @@ -366,7 +366,7 @@ pub unsafe trait Allocator { /// The mode of error handling for types using this allocator. /// - /// [`Infallible`] means that any allocation failures should be handled + /// [`Fatal`] means that any allocation failures should be handled /// globally, often by panicking or aborting. Functions performing /// allocation will simply return the value or nothing. /// @@ -381,7 +381,7 @@ pub unsafe trait Allocator { #[cfg(not(no_global_oom_handling))] #[unstable(feature = "allocator_api", issue = "32838")] pub trait HandleAllocError { - /// Globally handle this allocation error, using [`handle_alloc_error`] + /// Globally handle this allocation error fn handle_alloc_error(self) -> !; } @@ -399,7 +399,7 @@ pub trait ErrorHandling { /// Function to map allocation results into `Self::Result`. 
/// - /// - For "Infallible" allocators, this should call [`handle_alloc_error`] + /// - For "Infallible" allocators, this should call [`HandleAllocError::handle_alloc_error`] /// - For "Fallible" allocators, this is just the identity function #[cfg(not(no_global_oom_handling))] #[must_use] diff --git a/src/tools/miri/tests/pass/box-custom-alloc.rs b/src/tools/miri/tests/pass/box-custom-alloc.rs index 155e3d74ab9c1..65a2c0dfa347d 100644 --- a/src/tools/miri/tests/pass/box-custom-alloc.rs +++ b/src/tools/miri/tests/pass/box-custom-alloc.rs @@ -4,7 +4,7 @@ #![feature(allocator_api, trait_upcasting)] use std::alloc::Layout; -use std::alloc::{AllocError, Allocator}; +use std::alloc::{AllocError, Allocator, Fatal}; use std::cell::Cell; use std::mem::MaybeUninit; use std::ptr::{self, NonNull}; @@ -28,6 +28,8 @@ unsafe impl<'shared, 'a: 'shared> Allocator for &'shared OnceAlloc<'a> { } unsafe fn deallocate(&self, _ptr: NonNull, _layout: Layout) {} + + type ErrorHandling = Fatal; } trait MyTrait { @@ -68,6 +70,8 @@ unsafe impl<'shared, 'a: 'shared> Allocator for OnceAllocRef<'shared, 'a> { unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { self.0.deallocate(ptr, layout) } + + type ErrorHandling = Fatal; } /// `Box` is an `Aggregate`. From 5fb1362fa7128f6854656b643c114f4debc5008c Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Wed, 7 Jun 2023 19:16:54 -0600 Subject: [PATCH 13/15] fix no_global_oom_handling --- library/alloc/src/vec/mod.rs | 1 - 1 file changed, 1 deletion(-) diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs index 0597b51c6d951..33b45a060394a 100644 --- a/library/alloc/src/vec/mod.rs +++ b/library/alloc/src/vec/mod.rs @@ -2510,7 +2510,6 @@ impl Vec<[T; N], A> { } impl Vec { - #[cfg(not(no_global_oom_handling))] /// Extend the vector by `n` clones of value. 
fn extend_with(&mut self, n: usize, value: T) -> Result<(), TryReserveError> { self.try_reserve(n)?; From 5cfd1d6224985dbf145df4439422763c8b3e1fcb Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Wed, 7 Jun 2023 22:02:59 -0600 Subject: [PATCH 14/15] move fatal alloc error handling to alloc --- library/alloc/src/alloc.rs | 190 +++++++++++++++++++ library/alloc/src/lib.rs | 2 + library/alloc/tests/boxed.rs | 2 +- library/core/src/alloc/mod.rs | 206 +-------------------- tests/ui/hygiene/panic-location.run.stderr | 2 +- 5 files changed, 204 insertions(+), 198 deletions(-) diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs index bff52575c195a..ca988d8772c35 100644 --- a/library/alloc/src/alloc.rs +++ b/library/alloc/src/alloc.rs @@ -449,6 +449,196 @@ impl WriteCloneIntoRaw for T { } } +#[cfg(all(not(no_global_oom_handling), not(test)))] +use core::error::Error; + +/// Trait for handling alloc errors for allocators which +/// panic or abort instead of returning errors. +#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +#[rustc_specialization_trait] +pub trait HandleAllocError: Error { + /// Globally handle this allocation error + fn handle_alloc_error(self) -> !; +} + +/// Error handling mode to use when the user of the type wants to ignore +/// allocation failures, treating them as a fatal error. Functions +/// performing allocation will return values directly. 
+#[derive(Debug)] +#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +pub struct Fatal; + +#[unstable(feature = "alloc_internals", issue = "none")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +impl error_handling_sealed::Sealed for Fatal {} + +#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +impl ErrorHandling for Fatal { + type Result = T; + + fn map_result(result: Result) -> Self::Result { + /// Hack around lack of `cfg(no_global_oom_handling)` in core. + /// + /// Using post-monomorphization errors and specialization, + /// we can enforce that any error used with `Fatal` implements + /// `HandleAllocError`, without requiring that all errors used + /// with fallible allocation implement it. This also allows + /// for `HandleAllocError` to live with the rest of the + /// global allocation handling in the `alloc` crate. + trait HandleAllocErrorInternal { + fn handle_alloc_error_internal(self) -> !; + } + impl HandleAllocErrorInternal for E { + default fn handle_alloc_error_internal(self) -> ! { + const { + panic!( + "user must implement `HandleAllocError` for any error type used with the `Fatal` kind of `ErrorHandling`" + ) + } + } + } + impl HandleAllocErrorInternal for E { + fn handle_alloc_error_internal(self) -> ! { + self.handle_alloc_error() + } + } + + result.unwrap_or_else(|e| e.handle_alloc_error_internal()) + } +} + +/// Wrapper around an existing allocator allowing one to +/// use a fallible allocator as an infallible one. 
+#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +#[derive(Debug)] +pub struct FatalAdapter>(pub A); + +#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +unsafe impl> Allocator for FatalAdapter { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate(layout) + } + + fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate_zeroed(layout) + } + + unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.deallocate(ptr, layout) } + } + + unsafe fn grow( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.grow(ptr, old_layout, new_layout) } + } + + unsafe fn grow_zeroed( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } + } + + unsafe fn shrink( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.shrink(ptr, old_layout, new_layout) } + } + + fn by_ref(&self) -> &Self + where + Self: Sized, + { + self + } + + type ErrorHandling = Fatal; +} + +/// Wrapper around an existing allocator allowing one to +/// use an infallible allocator as a fallible one. 
+#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +#[derive(Debug)] +pub struct FallibleAdapter>(pub A); + +#[unstable(feature = "allocator_api", issue = "32838")] +#[cfg(all(not(no_global_oom_handling), not(test)))] +unsafe impl> Allocator for FallibleAdapter { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate(layout) + } + + fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { + self.0.allocate_zeroed(layout) + } + + unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.deallocate(ptr, layout) } + } + + unsafe fn grow( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.grow(ptr, old_layout, new_layout) } + } + + unsafe fn grow_zeroed( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } + } + + unsafe fn shrink( + &self, + ptr: core::ptr::NonNull, + old_layout: Layout, + new_layout: Layout, + ) -> Result, AllocError> { + // SAFETY: the safety contract must be upheld by the caller + unsafe { self.0.shrink(ptr, old_layout, new_layout) } + } + + fn by_ref(&self) -> &Self + where + Self: Sized, + { + self + } + + type ErrorHandling = Fallible; +} + +#[cfg(test)] +pub use std::alloc::{FallibleAdapter, Fatal, FatalAdapter, HandleAllocError}; + #[cfg(not(no_global_oom_handling))] use crate::collections::{TryReserveError, TryReserveErrorKind}; diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs index 59fa91c1066dc..27e779838c24d 100644 --- a/library/alloc/src/lib.rs +++ b/library/alloc/src/lib.rs @@ -95,6 +95,7 @@ 
#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))] #![cfg_attr(test, feature(is_sorted))] #![cfg_attr(test, feature(new_uninit))] +#![feature(alloc_internals)] #![feature(alloc_layout_extra)] #![feature(allocator_api)] #![feature(array_chunks)] @@ -115,6 +116,7 @@ #![feature(const_pin)] #![feature(const_refs_to_cell)] #![feature(const_size_of_val)] +#![feature(const_type_name)] #![feature(const_waker)] #![feature(core_intrinsics)] #![feature(core_panic)] diff --git a/library/alloc/tests/boxed.rs b/library/alloc/tests/boxed.rs index 19565c3491dd0..47356d0020382 100644 --- a/library/alloc/tests/boxed.rs +++ b/library/alloc/tests/boxed.rs @@ -1,4 +1,4 @@ -use core::alloc::{AllocError, Allocator, Layout, Fatal}; +use alloc::alloc::{AllocError, Allocator, Layout, Fatal}; use core::cell::Cell; use core::mem::MaybeUninit; use core::ptr::NonNull; diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs index f3595e52e474a..6f4c36e27b013 100644 --- a/library/core/src/alloc/mod.rs +++ b/library/core/src/alloc/mod.rs @@ -376,49 +376,26 @@ pub unsafe trait Allocator { type ErrorHandling: ErrorHandling; } -/// Trait for handling alloc errors for allocators which -/// panic or abort instead of returning errors. -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -pub trait HandleAllocError { - /// Globally handle this allocation error - fn handle_alloc_error(self) -> !; +#[unstable(feature = "alloc_internals", issue = "none")] +#[doc(hidden)] +pub mod error_handling_sealed { + pub trait Sealed {} } +use error_handling_sealed::Sealed; // FIXME: this trait should be sealed #[unstable(feature = "allocator_api", issue = "32838")] -pub trait ErrorHandling { +pub trait ErrorHandling: Sealed { /// Result type returned by functions that are conditionally fallible. 
/// /// - "Infallible" allocators set `type Result = T` /// - "Fallible" allocators set `type Result = Result` - #[cfg(not(no_global_oom_handling))] - type Result - where - E: HandleAllocError; - - /// Function to map allocation results into `Self::Result`. - /// - /// - For "Infallible" allocators, this should call [`HandleAllocError::handle_alloc_error`] - /// - For "Fallible" allocators, this is just the identity function - #[cfg(not(no_global_oom_handling))] - #[must_use] - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError; - - /// Result type returned by functions that are conditionally fallible. - /// - /// - "Infallible" allocators set `type Result = T` - /// - "Fallible" allocators set `type Result = Result` - #[cfg(no_global_oom_handling)] type Result; /// Function to map allocation results into `Self::Result`. /// - /// - For "Infallible" allocators, this should call [`handle_alloc_error`] + /// - For "Infallible" allocators, this should call [`HandleAllocError::handle_alloc_error`] /// - For "Fallible" allocators, this is just the identity function - #[cfg(no_global_oom_handling)] #[must_use] fn map_result(result: Result) -> Self::Result; } @@ -431,54 +408,17 @@ pub trait ErrorHandling { pub struct Fallible; #[unstable(feature = "allocator_api", issue = "32838")] -impl ErrorHandling for Fallible { - #[cfg(not(no_global_oom_handling))] - type Result = Result - where - E: HandleAllocError; - - #[cfg(not(no_global_oom_handling))] - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError, - { - result - } +impl Sealed for Fallible {} - #[cfg(no_global_oom_handling)] +#[unstable(feature = "allocator_api", issue = "32838")] +impl ErrorHandling for Fallible { type Result = Result; - #[cfg(no_global_oom_handling)] fn map_result(result: Result) -> Self::Result { result } } -/// Error handling mode to use when the user of the type wants to ignore -/// allocation failures, treating them as a fatal error. 
Functions -/// performing allocation will return values directly. -#[derive(Debug)] -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -pub struct Fatal; - -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -impl ErrorHandling for Fatal { - #[cfg(not(no_global_oom_handling))] - type Result = T - where - E: HandleAllocError; - - #[cfg(not(no_global_oom_handling))] - fn map_result(result: Result) -> Self::Result - where - E: HandleAllocError, - { - result.unwrap_or_else(|e| e.handle_alloc_error()) - } -} - #[unstable(feature = "allocator_api", issue = "32838")] unsafe impl Allocator for &A where @@ -535,129 +475,3 @@ where type ErrorHandling = A::ErrorHandling; } - -/// Wrapper around an existing allocator allowing one to -/// use a fallible allocator as an infallible one. -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -#[derive(Debug)] -pub struct FatalAdapter>(pub A); - -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -unsafe impl> Allocator for FatalAdapter { - fn allocate(&self, layout: Layout) -> Result, AllocError> { - self.0.allocate(layout) - } - - fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { - self.0.allocate_zeroed(layout) - } - - unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { - // SAFETY: the safety contract must be upheld by the caller - unsafe { self.0.deallocate(ptr, layout) } - } - - unsafe fn grow( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - // SAFETY: the safety contract must be upheld by the caller - unsafe { self.0.grow(ptr, old_layout, new_layout) } - } - - unsafe fn grow_zeroed( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - // SAFETY: the safety contract must be upheld by the caller - unsafe { 
self.0.grow_zeroed(ptr, old_layout, new_layout) } - } - - unsafe fn shrink( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - // SAFETY: the safety contract must be upheld by the caller - unsafe { self.0.shrink(ptr, old_layout, new_layout) } - } - - fn by_ref(&self) -> &Self - where - Self: Sized, - { - self - } - - type ErrorHandling = Fatal; -} - -/// Wrapper around an existing allocator allowing one to -/// use an infallible allocator as a fallible one. -#[cfg(not(no_global_oom_handling))] -#[unstable(feature = "allocator_api", issue = "32838")] -#[derive(Debug)] -pub struct FallibleAdapter>(pub A); - -#[unstable(feature = "allocator_api", issue = "32838")] -#[cfg(not(no_global_oom_handling))] -unsafe impl> Allocator for FallibleAdapter { - fn allocate(&self, layout: Layout) -> Result, AllocError> { - self.0.allocate(layout) - } - - fn allocate_zeroed(&self, layout: Layout) -> Result, AllocError> { - self.0.allocate_zeroed(layout) - } - - unsafe fn deallocate(&self, ptr: core::ptr::NonNull, layout: Layout) { - // SAFETY: the safety contract must be upheld by the caller - unsafe { self.0.deallocate(ptr, layout) } - } - - unsafe fn grow( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - // SAFETY: the safety contract must be upheld by the caller - unsafe { self.0.grow(ptr, old_layout, new_layout) } - } - - unsafe fn grow_zeroed( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - // SAFETY: the safety contract must be upheld by the caller - unsafe { self.0.grow_zeroed(ptr, old_layout, new_layout) } - } - - unsafe fn shrink( - &self, - ptr: core::ptr::NonNull, - old_layout: Layout, - new_layout: Layout, - ) -> Result, AllocError> { - // SAFETY: the safety contract must be upheld by the caller - unsafe { self.0.shrink(ptr, old_layout, new_layout) } - } - - fn by_ref(&self) -> &Self - 
where - Self: Sized, - { - self - } - - type ErrorHandling = Fallible; -} diff --git a/tests/ui/hygiene/panic-location.run.stderr b/tests/ui/hygiene/panic-location.run.stderr index f29de8d621dee..65180c8047143 100644 --- a/tests/ui/hygiene/panic-location.run.stderr +++ b/tests/ui/hygiene/panic-location.run.stderr @@ -1,2 +1,2 @@ -thread 'main' panicked at 'capacity overflow', library/alloc/src/alloc.rs:460:5 +thread 'main' panicked at 'capacity overflow', library/alloc/src/alloc.rs:650:5 note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace From 0357918ab5df566f5c933b5296f7fa11910c63df Mon Sep 17 00:00:00 2001 From: Peter Jaszkowiak Date: Thu, 8 Jun 2023 08:41:16 -0600 Subject: [PATCH 15/15] fix docs --- library/core/src/alloc/mod.rs | 22 +++++++++++++++------- 1 file changed, 15 insertions(+), 7 deletions(-) diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs index 6f4c36e27b013..6a57c2b0251dc 100644 --- a/library/core/src/alloc/mod.rs +++ b/library/core/src/alloc/mod.rs @@ -366,11 +366,11 @@ pub unsafe trait Allocator { /// The mode of error handling for types using this allocator. /// - /// [`Fatal`] means that any allocation failures should be handled + /// `Fatal` means that any allocation failures should be handled /// globally, often by panicking or aborting. Functions performing /// allocation will simply return the value or nothing. /// - /// [`Fallible`] means that any allocation failures should be handled + /// `Fallible` means that any allocation failures should be handled /// at the point of use. Functions performing allocation will return /// `Result`. type ErrorHandling: ErrorHandling; @@ -383,19 +383,27 @@ pub mod error_handling_sealed { } use error_handling_sealed::Sealed; -// FIXME: this trait should be sealed +/// The mode of error handling for types using an allocator. +/// +/// `Fatal` means that any allocation failures should be handled +/// globally, often by panicking or aborting. 
Functions performing
+/// allocation will simply return the value or nothing.
+///
+/// `Fallible` means that any allocation failures should be handled
+/// at the point of use. Functions performing allocation will return
+/// `Result<T, E>`.
 #[unstable(feature = "allocator_api", issue = "32838")]
 pub trait ErrorHandling: Sealed {
     /// Result type returned by functions that are conditionally fallible.
     ///
-    /// - "Infallible" allocators set `type Result<T, E> = T`
-    /// - "Fallible" allocators set `type Result<T, E> = Result<T, E>`
+    /// - `Fatal` allocators set `type Result<T, E> = T`
+    /// - `Fallible` allocators set `type Result<T, E> = Result<T, E>`
     type Result<T, E>;

     /// Function to map allocation results into `Self::Result`.
     ///
-    /// - For "Infallible" allocators, this should call [`HandleAllocError::handle_alloc_error`]
-    /// - For "Fallible" allocators, this is just the identity function
+    /// - For `Fatal` allocators, this should unwrap the result
+    /// - For `Fallible` allocators, this is just the identity function
     #[must_use]
     fn map_result<T, E>(result: Result<T, E>) -> Self::Result<T, E>;
 }