From 16af5c80982cb74131f1a045cf38181c7e949487 Mon Sep 17 00:00:00 2001
From: Peter Jaszkowiak
Date: Thu, 25 May 2023 15:00:47 -0600
Subject: [PATCH 01/15] fallible allocator experiment
---
library/alloc/src/boxed.rs | 44 +-
.../alloc/src/collections/vec_deque/mod.rs | 374 ++++++++---------
library/alloc/src/falloc.rs | 95 +++++
library/alloc/src/lib.rs | 4 +
library/alloc/src/raw_vec.rs | 162 ++------
library/alloc/src/raw_vec/tests.rs | 60 +--
library/alloc/src/vec/mod.rs | 380 ++++++++++--------
library/alloc/src/vec/spec_extend.rs | 19 +-
library/alloc/src/vec/spec_from_elem.rs | 66 ++-
library/alloc/src/vec/spec_from_iter.rs | 6 +-
.../alloc/src/vec/spec_from_iter_nested.rs | 8 +-
library/alloc/src/vec/splice.rs | 20 +-
12 files changed, 663 insertions(+), 575 deletions(-)
create mode 100644 library/alloc/src/falloc.rs
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index 1768687e8cd02..32e9d44487d9e 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -167,9 +167,9 @@ use core::task::{Context, Poll};
#[cfg(not(no_global_oom_handling))]
use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw};
-use crate::alloc::{AllocError, Allocator, Global, Layout};
#[cfg(not(no_global_oom_handling))]
use crate::borrow::Cow;
+use crate::falloc::{AllocError, Allocator, Global, Layout};
use crate::raw_vec::RawVec;
#[cfg(not(no_global_oom_handling))]
use crate::str::from_boxed_utf8_unchecked;
@@ -624,7 +624,9 @@ impl Box<[T]> {
#[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
- unsafe { RawVec::with_capacity(len).into_box(len) }
+ unsafe {
+ <Global as Allocator>::map_result(RawVec::with_capacity_in(len, Global)).into_box(len)
+ }
}
/// Constructs a new boxed slice with uninitialized contents, with the memory
@@ -649,7 +651,10 @@ impl Box<[T]> {
#[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
- unsafe { RawVec::with_capacity_zeroed(len).into_box(len) }
+ unsafe {
+ <Global as Allocator>::map_result(RawVec::with_capacity_zeroed_in(len, Global))
+ .into_box(len)
+ }
}
/// Constructs a new boxed slice with uninitialized contents. Returns an error if
@@ -675,14 +680,7 @@ impl Box<[T]> {
#[unstable(feature = "allocator_api", issue = "32838")]
#[inline]
pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
- unsafe {
- let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
- Ok(l) => l,
- Err(_) => return Err(AllocError),
- };
- let ptr = Global.allocate(layout)?;
- Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
- }
+ unsafe { Ok(RawVec::with_capacity_in(len, Global).map_err(|_| AllocError)?.into_box(len)) }
}
/// Constructs a new boxed slice with uninitialized contents, with the memory
@@ -708,12 +706,7 @@ impl Box<[T]> {
#[inline]
pub fn try_new_zeroed_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
unsafe {
- let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
- Ok(l) => l,
- Err(_) => return Err(AllocError),
- };
- let ptr = Global.allocate_zeroed(layout)?;
- Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
+ Ok(RawVec::with_capacity_zeroed_in(len, Global).map_err(|_| AllocError)?.into_box(len))
}
}
}
@@ -741,12 +734,11 @@ impl Box<[T], A> {
///
/// assert_eq!(*values, [1, 2, 3])
/// ```
- #[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
- pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
- unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) }
+ pub fn new_uninit_slice_in(len: usize, alloc: A) -> A::Result<Box<[mem::MaybeUninit<T>], A>> {
+ unsafe { A::map_result(RawVec::with_capacity_in(len, alloc).map(|r| r.into_box(len))) }
}
/// Constructs a new boxed slice with uninitialized contents in the provided allocator,
@@ -769,12 +761,13 @@ impl Box<[T], A> {
/// ```
///
/// [zeroed]: mem::MaybeUninit::zeroed
- #[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
- pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
- unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) }
+ pub fn new_zeroed_slice_in(len: usize, alloc: A) -> A::Result<Box<[mem::MaybeUninit<T>], A>> {
+ unsafe {
+ A::map_result(RawVec::with_capacity_zeroed_in(len, alloc).map(|r| r.into_box(len)))
+ }
}
}
@@ -1474,7 +1467,7 @@ impl BoxFromSlice for Box<[T]> {
#[inline]
fn from_slice(slice: &[T]) -> Self {
let len = slice.len();
- let buf = RawVec::with_capacity(len);
+ let buf = <Global as Allocator>::map_result(RawVec::with_capacity_in(len, Global));
unsafe {
ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
buf.into_box(slice.len()).assume_init()
@@ -2014,9 +2007,8 @@ impl FromIterator for Box<[I]> {
}
}
-#[cfg(not(no_global_oom_handling))]
#[stable(feature = "box_slice_clone", since = "1.3.0")]
-impl<T: Clone, A: Allocator + Clone> Clone for Box<[T], A> {
+impl<T: Clone, A: Allocator<Result<Self> = Self> + Clone> Clone for Box<[T], A> {
fn clone(&self) -> Self {
let alloc = Box::allocator(self).clone();
self.to_vec_in(alloc).into_boxed_slice()
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 896da37f94c02..c7deff635cd73 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -22,9 +22,9 @@ use core::slice;
#[allow(unused_imports)]
use core::mem;
-use crate::alloc::{Allocator, Global};
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind;
+use crate::falloc::{Allocator, Global};
use crate::raw_vec::RawVec;
use crate::vec::Vec;
@@ -106,7 +106,10 @@ pub struct VecDeque<
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Clone, A: Allocator + Clone> Clone for VecDeque<T, A> {
+impl<T: Clone, A> Clone for VecDeque<T, A>
+where
+ A: Allocator<Result<Self> = Self> + Clone,
+{
fn clone(&self) -> Self {
let mut deq = Self::with_capacity_in(self.len(), self.allocator().clone());
deq.extend(self.iter().cloned());
@@ -577,6 +580,10 @@ impl VecDeque {
VecDeque { head: 0, len: 0, buf: RawVec::new_in(alloc) }
}
+ fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
+ Ok(VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc)? })
+ }
+
/// Creates an empty deque with space for at least `capacity` elements.
///
/// # Examples
@@ -587,8 +594,8 @@ impl VecDeque {
/// let deque: VecDeque<u32> = VecDeque::with_capacity(10);
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- pub fn with_capacity_in(capacity: usize, alloc: A) -> VecDeque<T, A> {
- VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc) }
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> A::Result<VecDeque<T, A>> {
+ A::map_result(Self::try_with_capacity_in(capacity, alloc))
}
/// Creates a `VecDeque` from a raw allocation, when the initialized
@@ -751,16 +758,8 @@ impl VecDeque {
///
/// [`reserve`]: VecDeque::reserve
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve_exact(&mut self, additional: usize) {
- let new_cap = self.len.checked_add(additional).expect("capacity overflow");
- let old_cap = self.capacity();
-
- if new_cap > old_cap {
- self.buf.reserve_exact(self.len, additional);
- unsafe {
- self.handle_capacity_increase(old_cap);
- }
- }
+ pub fn reserve_exact(&mut self, additional: usize) -> A::Result<()> {
+ A::map_result(self.try_reserve_exact(additional))
}
/// Reserves capacity for at least `additional` more elements to be inserted in the given
@@ -780,18 +779,8 @@ impl VecDeque {
/// assert!(buf.capacity() >= 11);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve(&mut self, additional: usize) {
- let new_cap = self.len.checked_add(additional).expect("capacity overflow");
- let old_cap = self.capacity();
-
- if new_cap > old_cap {
- // we don't need to reserve_exact(), as the size doesn't have
- // to be a power of 2.
- self.buf.reserve(self.len, additional);
- unsafe {
- self.handle_capacity_increase(old_cap);
- }
- }
+ pub fn reserve(&mut self, additional: usize) -> A::Result<()> {
+ A::map_result(self.try_reserve(additional))
}
/// Tries to reserve the minimum capacity for at least `additional` more elements to
@@ -838,7 +827,7 @@ impl VecDeque {
let old_cap = self.capacity();
if new_cap > old_cap {
- self.buf.try_reserve_exact(self.len, additional)?;
+ self.buf.reserve_exact(self.len, additional)?;
unsafe {
self.handle_capacity_increase(old_cap);
}
@@ -886,7 +875,7 @@ impl VecDeque {
let old_cap = self.capacity();
if new_cap > old_cap {
- self.buf.try_reserve(self.len, additional)?;
+ self.buf.reserve(self.len, additional)?;
unsafe {
self.handle_capacity_increase(old_cap);
}
@@ -936,80 +925,85 @@ impl VecDeque {
/// assert!(buf.capacity() >= 4);
/// ```
#[stable(feature = "shrink_to", since = "1.56.0")]
- pub fn shrink_to(&mut self, min_capacity: usize) {
- let target_cap = min_capacity.max(self.len);
-
- // never shrink ZSTs
- if T::IS_ZST || self.capacity() <= target_cap {
- return;
- }
+ pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<()> {
+ A::map_result((|| {
+ // Substitute for try block
+ let target_cap = min_capacity.max(self.len);
+
+ // never shrink ZSTs
+ if T::IS_ZST || self.capacity() <= target_cap {
+ return Ok(());
+ }
- // There are three cases of interest:
- // All elements are out of desired bounds
- // Elements are contiguous, and tail is out of desired bounds
- // Elements are discontiguous
- //
- // At all other times, element positions are unaffected.
+ // There are three cases of interest:
+ // All elements are out of desired bounds
+ // Elements are contiguous, and tail is out of desired bounds
+ // Elements are discontiguous
+ //
+ // At all other times, element positions are unaffected.
- // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
- // overflow.
- let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
+ // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
+ // overflow.
+ let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
- if self.len == 0 {
- self.head = 0;
- } else if self.head >= target_cap && tail_outside {
- // Head and tail are both out of bounds, so copy all of them to the front.
- //
- // H := head
- // L := last element
- // H L
- // [. . . . . . . . o o o o o o o . ]
- // H L
- // [o o o o o o o . ]
- unsafe {
- // nonoverlapping because `self.head >= target_cap >= self.len`.
- self.copy_nonoverlapping(self.head, 0, self.len);
- }
- self.head = 0;
- } else if self.head < target_cap && tail_outside {
- // Head is in bounds, tail is out of bounds.
- // Copy the overflowing part to the beginning of the
- // buffer. This won't overlap because `target_cap >= self.len`.
- //
- // H := head
- // L := last element
- // H L
- // [. . . o o o o o o o . . . . . . ]
- // L H
- // [o o . o o o o o ]
- let len = self.head + self.len - target_cap;
- unsafe {
- self.copy_nonoverlapping(target_cap, 0, len);
- }
- } else if !self.is_contiguous() {
- // The head slice is at least partially out of bounds, tail is in bounds.
- // Copy the head backwards so it lines up with the target capacity.
- // This won't overlap because `target_cap >= self.len`.
- //
- // H := head
- // L := last element
- // L H
- // [o o o o o . . . . . . . . . o o ]
- // L H
- // [o o o o o . o o ]
- let head_len = self.capacity() - self.head;
- let new_head = target_cap - head_len;
- unsafe {
- // can't use `copy_nonoverlapping()` here because the new and old
- // regions for the head might overlap.
- self.copy(self.head, new_head, head_len);
+ if self.len == 0 {
+ self.head = 0;
+ } else if self.head >= target_cap && tail_outside {
+ // Head and tail are both out of bounds, so copy all of them to the front.
+ //
+ // H := head
+ // L := last element
+ // H L
+ // [. . . . . . . . o o o o o o o . ]
+ // H L
+ // [o o o o o o o . ]
+ unsafe {
+ // nonoverlapping because `self.head >= target_cap >= self.len`.
+ self.copy_nonoverlapping(self.head, 0, self.len);
+ }
+ self.head = 0;
+ } else if self.head < target_cap && tail_outside {
+ // Head is in bounds, tail is out of bounds.
+ // Copy the overflowing part to the beginning of the
+ // buffer. This won't overlap because `target_cap >= self.len`.
+ //
+ // H := head
+ // L := last element
+ // H L
+ // [. . . o o o o o o o . . . . . . ]
+ // L H
+ // [o o . o o o o o ]
+ let len = self.head + self.len - target_cap;
+ unsafe {
+ self.copy_nonoverlapping(target_cap, 0, len);
+ }
+ } else if !self.is_contiguous() {
+ // The head slice is at least partially out of bounds, tail is in bounds.
+ // Copy the head backwards so it lines up with the target capacity.
+ // This won't overlap because `target_cap >= self.len`.
+ //
+ // H := head
+ // L := last element
+ // L H
+ // [o o o o o . . . . . . . . . o o ]
+ // L H
+ // [o o o o o . o o ]
+ let head_len = self.capacity() - self.head;
+ let new_head = target_cap - head_len;
+ unsafe {
+ // can't use `copy_nonoverlapping()` here because the new and old
+ // regions for the head might overlap.
+ self.copy(self.head, new_head, head_len);
+ }
+ self.head = new_head;
}
- self.head = new_head;
- }
- self.buf.shrink_to_fit(target_cap);
+ self.buf.shrink_to(target_cap)?;
- debug_assert!(self.head < self.capacity() || self.capacity() == 0);
- debug_assert!(self.len <= self.capacity());
+ debug_assert!(self.head < self.capacity() || self.capacity() == 0);
+ debug_assert!(self.len <= self.capacity());
+
+ Ok(())
+ })())
}
/// Shortens the deque, keeping the first `len` elements and dropping
@@ -1628,17 +1622,22 @@ impl VecDeque {
/// assert_eq!(d.front(), Some(&2));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn push_front(&mut self, value: T) {
- if self.is_full() {
- self.grow();
- }
+ pub fn push_front(&mut self, value: T) -> A::Result<()> {
+ A::map_result((|| {
+ // Substitute for try block
+ if self.is_full() {
+ self.grow()?;
+ }
- self.head = self.wrap_sub(self.head, 1);
- self.len += 1;
+ self.head = self.wrap_sub(self.head, 1);
+ self.len += 1;
- unsafe {
- self.buffer_write(self.head, value);
- }
+ unsafe {
+ self.buffer_write(self.head, value);
+ }
+
+ Ok(())
+ })())
}
/// Appends an element to the back of the deque.
@@ -1654,13 +1653,18 @@ impl VecDeque {
/// assert_eq!(3, *buf.back().unwrap());
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn push_back(&mut self, value: T) {
- if self.is_full() {
- self.grow();
- }
+ pub fn push_back(&mut self, value: T) -> A::Result<()> {
+ A::map_result((|| {
+ // Substitute for try block
+ if self.is_full() {
+ self.grow()?;
+ }
- unsafe { self.buffer_write(self.to_physical_idx(self.len), value) }
- self.len += 1;
+ unsafe { self.buffer_write(self.to_physical_idx(self.len), value) }
+ self.len += 1;
+
+ Ok(())
+ })())
}
#[inline]
@@ -1763,32 +1767,37 @@ impl VecDeque {
/// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']);
/// ```
#[stable(feature = "deque_extras_15", since = "1.5.0")]
- pub fn insert(&mut self, index: usize, value: T) {
- assert!(index <= self.len(), "index out of bounds");
- if self.is_full() {
- self.grow();
- }
-
- let k = self.len - index;
- if k < index {
- // `index + 1` can't overflow, because if index was usize::MAX, then either the
- // assert would've failed, or the deque would've tried to grow past usize::MAX
- // and panicked.
- unsafe {
- // see `remove()` for explanation why this wrap_copy() call is safe.
- self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k);
- self.buffer_write(self.to_physical_idx(index), value);
- self.len += 1;
+ pub fn insert(&mut self, index: usize, value: T) -> A::Result<()> {
+ A::map_result((|| {
+ // Substitute for try block
+ assert!(index <= self.len(), "index out of bounds");
+ if self.is_full() {
+ self.grow()?;
}
- } else {
- let old_head = self.head;
- self.head = self.wrap_sub(self.head, 1);
- unsafe {
- self.wrap_copy(old_head, self.head, index);
- self.buffer_write(self.to_physical_idx(index), value);
- self.len += 1;
+
+ let k = self.len - index;
+ if k < index {
+ // `index + 1` can't overflow, because if index was usize::MAX, then either the
+ // assert would've failed, or the deque would've tried to grow past usize::MAX
+ // and panicked.
+ unsafe {
+ // see `remove()` for explanation why this wrap_copy() call is safe.
+ self.wrap_copy(self.to_physical_idx(index), self.to_physical_idx(index + 1), k);
+ self.buffer_write(self.to_physical_idx(index), value);
+ self.len += 1;
+ }
+ } else {
+ let old_head = self.head;
+ self.head = self.wrap_sub(self.head, 1);
+ unsafe {
+ self.wrap_copy(old_head, self.head, index);
+ self.buffer_write(self.to_physical_idx(index), value);
+ self.len += 1;
+ }
}
- }
+
+ Ok(())
+ })())
}
/// Removes and returns the element at `index` from the deque.
@@ -1865,51 +1874,57 @@ impl VecDeque {
#[inline]
#[must_use = "use `.truncate()` if you don't need the other half"]
#[stable(feature = "split_off", since = "1.4.0")]
- pub fn split_off(&mut self, at: usize) -> Self
+ pub fn split_off(&mut self, at: usize) -> A::Result<Self>
where
A: Clone,
{
- let len = self.len;
- assert!(at <= len, "`at` out of bounds");
-
- let other_len = len - at;
- let mut other = VecDeque::with_capacity_in(other_len, self.allocator().clone());
-
- unsafe {
- let (first_half, second_half) = self.as_slices();
-
- let first_len = first_half.len();
- let second_len = second_half.len();
- if at < first_len {
- // `at` lies in the first half.
- let amount_in_first = first_len - at;
+ A::map_result((|| {
+ let len = self.len;
+ assert!(at <= len, "`at` out of bounds");
- ptr::copy_nonoverlapping(first_half.as_ptr().add(at), other.ptr(), amount_in_first);
+ let other_len = len - at;
+ let mut other = VecDeque::try_with_capacity_in(other_len, self.allocator().clone())?;
- // just take all of the second half.
- ptr::copy_nonoverlapping(
- second_half.as_ptr(),
- other.ptr().add(amount_in_first),
- second_len,
- );
- } else {
- // `at` lies in the second half, need to factor in the elements we skipped
- // in the first half.
- let offset = at - first_len;
- let amount_in_second = second_len - offset;
- ptr::copy_nonoverlapping(
- second_half.as_ptr().add(offset),
- other.ptr(),
- amount_in_second,
- );
+ unsafe {
+ let (first_half, second_half) = self.as_slices();
+
+ let first_len = first_half.len();
+ let second_len = second_half.len();
+ if at < first_len {
+ // `at` lies in the first half.
+ let amount_in_first = first_len - at;
+
+ ptr::copy_nonoverlapping(
+ first_half.as_ptr().add(at),
+ other.ptr(),
+ amount_in_first,
+ );
+
+ // just take all of the second half.
+ ptr::copy_nonoverlapping(
+ second_half.as_ptr(),
+ other.ptr().add(amount_in_first),
+ second_len,
+ );
+ } else {
+ // `at` lies in the second half, need to factor in the elements we skipped
+ // in the first half.
+ let offset = at - first_len;
+ let amount_in_second = second_len - offset;
+ ptr::copy_nonoverlapping(
+ second_half.as_ptr().add(offset),
+ other.ptr(),
+ amount_in_second,
+ );
+ }
}
- }
- // Cleanup where the ends of the buffers are
- self.len = at;
- other.len = other_len;
+ // Cleanup where the ends of the buffers are
+ self.len = at;
+ other.len = other_len;
- other
+ Ok(other)
+ })())
}
/// Moves all the elements of `other` into `self`, leaving `other` empty.
@@ -2053,16 +2068,17 @@ impl VecDeque {
// be called in cold paths.
// This may panic or abort
#[inline(never)]
- fn grow(&mut self) {
+ fn grow(&mut self) -> Result<(), TryReserveError> {
// Extend or possibly remove this assertion when valid use-cases for growing the
// buffer without it being full emerge
debug_assert!(self.is_full());
let old_cap = self.capacity();
- self.buf.reserve_for_push(old_cap);
- unsafe {
- self.handle_capacity_increase(old_cap);
- }
- debug_assert!(!self.is_full());
+ self.buf.reserve_for_push(old_cap).map(|_| {
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ debug_assert!(!self.is_full());
+ })
}
/// Modifies the deque in-place so that `len()` is equal to `new_len`,
diff --git a/library/alloc/src/falloc.rs b/library/alloc/src/falloc.rs
new file mode 100644
index 0000000000000..04428d807cf00
--- /dev/null
+++ b/library/alloc/src/falloc.rs
@@ -0,0 +1,95 @@
+pub use crate::alloc::{AllocError, Global, GlobalAlloc, Layout, LayoutError};
+
+/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of
+/// data described via [`Layout`][].
+///
+/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because having
+/// an allocator like `MyAlloc([u8; N])` cannot be moved, without updating the pointers to the
+/// allocated memory.
+///
+/// Unlike [`GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an underlying
+/// allocator does not support this (like jemalloc) or return a null pointer (such as
+/// `libc::malloc`), this must be caught by the implementation.
+///
+/// ### Currently allocated memory
+///
+/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
+/// means that:
+///
+/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or
+/// [`shrink`], and
+///
+/// * the memory block has not been subsequently deallocated, where blocks are either deallocated
+/// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or
+/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer
+/// remains valid.
+///
+/// [`allocate`]: Allocator::allocate
+/// [`grow`]: Allocator::grow
+/// [`shrink`]: Allocator::shrink
+/// [`deallocate`]: Allocator::deallocate
+///
+/// ### Memory fitting
+///
+/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to
+/// "fit" a memory block means (or equivalently, for a memory block to "fit" a layout) is that the
+/// following conditions must hold:
+///
+/// * The block must be allocated with the same alignment as [`layout.align()`], and
+///
+/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
+/// - `min` is the size of the layout most recently used to allocate the block, and
+/// - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`].
+///
+/// [`layout.align()`]: Layout::align
+/// [`layout.size()`]: Layout::size
+///
+/// # Safety
+///
+/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
+/// until the instance and all of its copies and clones are dropped,
+///
+/// * copying, cloning, or moving the allocator must not invalidate memory blocks returned from this
+/// allocator. A copied or cloned allocator must behave like the same allocator, and
+///
+/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
+/// method of the allocator.
+///
+/// [*currently allocated*]: #currently-allocated-memory
+#[unstable(feature = "allocator_api", issue = "32838")]
+pub unsafe trait Allocator: crate::alloc::Allocator {
+ #[must_use] // Doesn't actually work
+ type Result<T>;
+
+ #[must_use]
+ fn map_result<T>(result: Result<T, TryReserveError>) -> Self::Result<T>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::handle_alloc_error;
+#[cfg(not(no_global_oom_handling))]
+use crate::collections::{TryReserveError, TryReserveErrorKind};
+
+// One central function responsible for reporting capacity overflows. This'll
+// ensure that the code generation related to these panics is minimal as there's
+// only one location which panics rather than a bunch throughout the module.
+#[cfg(not(no_global_oom_handling))]
+pub(crate) fn capacity_overflow() -> ! {
+ panic!("capacity overflow");
+}
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[cfg(not(no_global_oom_handling))]
+unsafe impl<X: crate::alloc::Allocator> Allocator for X {
+ type Result<T> = T;
+
+ fn map_result<T>(result: Result<T, TryReserveError>) -> Self::Result<T> {
+ match result {
+ Err(error) => match error.kind() {
+ TryReserveErrorKind::CapacityOverflow => capacity_overflow(),
+ TryReserveErrorKind::AllocError { layout, .. } => handle_alloc_error(layout),
+ },
+ Ok(x) => x,
+ }
+ }
+}
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 59fa91c1066dc..2774aa250928f 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -226,6 +226,10 @@ mod raw_vec;
pub mod alloc;
+// Fallible allocation experiment
+
+mod falloc;
+
// Primitive types using the heaps above
// Need to conditionally define the mod from `boxed.rs` to avoid
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index dfd30d99cf041..841c3151b5479 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -7,17 +7,14 @@ use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ptr::{self, NonNull, Unique};
use core::slice;
-#[cfg(not(no_global_oom_handling))]
-use crate::alloc::handle_alloc_error;
-use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;
+use crate::falloc::{Allocator, Global, Layout};
#[cfg(test)]
mod tests;
-#[cfg(not(no_global_oom_handling))]
enum AllocInit {
/// The contents of the new memory are uninitialized.
Uninitialized,
@@ -71,34 +68,6 @@ impl RawVec {
pub const fn new() -> Self {
Self::new_in(Global)
}
-
- /// Creates a `RawVec` (on the system heap) with exactly the
- /// capacity and alignment requirements for a `[T; capacity]`. This is
- /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
- /// zero-sized. Note that if `T` is zero-sized this means you will
- /// *not* get a `RawVec` with the requested capacity.
- ///
- /// # Panics
- ///
- /// Panics if the requested capacity exceeds `isize::MAX` bytes.
- ///
- /// # Aborts
- ///
- /// Aborts on OOM.
- #[cfg(not(any(no_global_oom_handling, test)))]
- #[must_use]
- #[inline]
- pub fn with_capacity(capacity: usize) -> Self {
- Self::with_capacity_in(capacity, Global)
- }
-
- /// Like `with_capacity`, but guarantees the buffer is zeroed.
- #[cfg(not(any(no_global_oom_handling, test)))]
- #[must_use]
- #[inline]
- pub fn with_capacity_zeroed(capacity: usize) -> Self {
- Self::with_capacity_zeroed_in(capacity, Global)
- }
}
impl<T, A: Allocator> RawVec<T, A> {
@@ -122,19 +91,19 @@ impl RawVec {
Self { ptr: Unique::dangling(), cap: 0, alloc }
}
- /// Like `with_capacity`, but parameterized over the choice of
- /// allocator for the returned `RawVec`.
- #[cfg(not(no_global_oom_handling))]
+ /// Creates a `RawVec` (with the given allocator) with exactly the
+ /// capacity and alignment requirements for a `[T; capacity]`. This is
+ /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
+ /// zero-sized. Note that if `T` is zero-sized this means you will
+ /// *not* get a `RawVec` with the requested capacity.
#[inline]
- pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
}
- /// Like `with_capacity_zeroed`, but parameterized over the choice
- /// of allocator for the returned `RawVec`.
- #[cfg(not(no_global_oom_handling))]
+ /// Like `with_capacity_in`, but guarantees the buffer is zeroed.
#[inline]
- pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
+ pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
}
@@ -164,39 +133,33 @@ impl RawVec {
}
}
- #[cfg(not(no_global_oom_handling))]
- fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
+ fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> {
// Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
if T::IS_ZST || capacity == 0 {
- Self::new_in(alloc)
+ Ok(Self::new_in(alloc))
} else {
// We avoid `unwrap_or_else` here because it bloats the amount of
// LLVM IR generated.
let layout = match Layout::array::<T>(capacity) {
Ok(layout) => layout,
- Err(_) => capacity_overflow(),
+ Err(_) => Err(CapacityOverflow)?,
};
- match alloc_guard(layout.size()) {
- Ok(_) => {}
- Err(_) => capacity_overflow(),
- }
+ alloc_guard(layout.size())?;
let result = match init {
AllocInit::Uninitialized => alloc.allocate(layout),
AllocInit::Zeroed => alloc.allocate_zeroed(layout),
};
- let ptr = match result {
- Ok(ptr) => ptr,
- Err(_) => handle_alloc_error(layout),
- };
+
+ let ptr = result.map_err(|_| AllocError { layout, non_exhaustive: () })?;
// Allocators currently return a `NonNull<[u8]>` whose length
// matches the size requested. If that ever changes, the capacity
// here should change to `ptr.len() / mem::size_of::<T>()`.
- Self {
+ Ok(Self {
ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
cap: capacity,
alloc,
- }
+ })
}
}
@@ -265,50 +228,20 @@ impl RawVec {
/// code *you* write that relies on the behavior of this function may break.
///
/// This is ideal for implementing a bulk-push operation like `extend`.
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Aborts
- ///
- /// Aborts on OOM.
- #[cfg(not(no_global_oom_handling))]
#[inline]
- pub fn reserve(&mut self, len: usize, additional: usize) {
- // Callers expect this function to be very cheap when there is already sufficient capacity.
- // Therefore, we move all the resizing and error-handling logic from grow_amortized and
- // handle_reserve behind a call, while making sure that this function is likely to be
- // inlined as just a comparison and a call if the comparison fails.
- #[cold]
- fn do_reserve_and_handle<T, A: Allocator>(
- slf: &mut RawVec<T, A>,
- len: usize,
- additional: usize,
- ) {
- handle_reserve(slf.grow_amortized(len, additional));
- }
-
+ pub fn reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
- do_reserve_and_handle(self, len, additional);
+ self.grow_amortized(len, additional)
+ } else {
+ Ok(())
}
}
/// A specialized version of `reserve()` used only by the hot and
/// oft-instantiated `Vec::push()`, which does its own capacity check.
- #[cfg(not(no_global_oom_handling))]
#[inline(never)]
- pub fn reserve_for_push(&mut self, len: usize) {
- handle_reserve(self.grow_amortized(len, 1));
- }
-
- /// The same as `reserve`, but returns on errors instead of panicking or aborting.
- pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
- if self.needs_to_grow(len, additional) {
- self.grow_amortized(len, additional)
- } else {
- Ok(())
- }
+ pub fn reserve_for_push(&mut self, len: usize) -> Result<(), TryReserveError> {
+ self.grow_amortized(len, 1)
}
/// Ensures that the buffer contains at least enough space to hold `len +
@@ -320,25 +253,7 @@ impl RawVec {
/// If `len` exceeds `self.capacity()`, this may fail to actually allocate
/// the requested space. This is not really unsafe, but the unsafe code
/// *you* write that relies on the behavior of this function may break.
- ///
- /// # Panics
- ///
- /// Panics if the new capacity exceeds `isize::MAX` bytes.
- ///
- /// # Aborts
- ///
- /// Aborts on OOM.
- #[cfg(not(no_global_oom_handling))]
- pub fn reserve_exact(&mut self, len: usize, additional: usize) {
- handle_reserve(self.try_reserve_exact(len, additional));
- }
-
- /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
- pub fn try_reserve_exact(
- &mut self,
- len: usize,
- additional: usize,
- ) -> Result<(), TryReserveError> {
+ pub fn reserve_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
}
@@ -348,13 +263,8 @@ impl RawVec {
/// # Panics
///
/// Panics if the given amount is *larger* than the current capacity.
- ///
- /// # Aborts
- ///
- /// Aborts on OOM.
- #[cfg(not(no_global_oom_handling))]
- pub fn shrink_to_fit(&mut self, cap: usize) {
- handle_reserve(self.shrink(cap));
+ pub fn shrink_to(&mut self, cap: usize) -> Result<(), TryReserveError> {
+ self.shrink(cap)
}
}
@@ -425,7 +335,6 @@ impl<T, A: Allocator> RawVec<T, A> {
Ok(())
}
- #[cfg(not(no_global_oom_handling))]
fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
@@ -487,17 +396,6 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
}
}
-// Central function for reserve error handling.
-#[cfg(not(no_global_oom_handling))]
-#[inline]
-fn handle_reserve(result: Result<(), TryReserveError>) {
- match result.map_err(|e| e.kind()) {
- Err(CapacityOverflow) => capacity_overflow(),
- Err(AllocError { layout, .. }) => handle_alloc_error(layout),
- Ok(()) => { /* yay */ }
- }
-}
-
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
@@ -515,11 +413,3 @@ fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
Ok(())
}
}
-
-// One central function responsible for reporting capacity overflows. This'll
-// ensure that the code generation related to these panics is minimal as there's
-// only one location which panics rather than a bunch throughout the module.
-#[cfg(not(no_global_oom_handling))]
-fn capacity_overflow() -> ! {
- panic!("capacity overflow");
-}
diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs
index ff322f0da97c6..4b87b65eed57d 100644
--- a/library/alloc/src/raw_vec/tests.rs
+++ b/library/alloc/src/raw_vec/tests.rs
@@ -2,7 +2,7 @@ use super::*;
use std::cell::Cell;
#[test]
-fn allocator_param() {
+fn allocator_param() -> Result<(), TryReserveError> {
use crate::alloc::AllocError;
// Writing a test of integration between third-party
@@ -20,7 +20,7 @@ fn allocator_param() {
struct BoundedAlloc {
 fuel: Cell<usize>,
}
- unsafe impl Allocator for BoundedAlloc {
+ unsafe impl core::alloc::Allocator for BoundedAlloc {
 fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
let size = layout.size();
if size > self.fuel.get() {
@@ -40,42 +40,46 @@ fn allocator_param() {
}
let a = BoundedAlloc { fuel: Cell::new(500) };
- let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
+ let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a)?;
assert_eq!(v.alloc.fuel.get(), 450);
- v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
+ v.reserve(50, 150)?; // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
assert_eq!(v.alloc.fuel.get(), 250);
+
+ Ok(())
}
#[test]
-fn reserve_does_not_overallocate() {
+fn reserve_does_not_overallocate() -> Result<(), TryReserveError> {
{
 let mut v: RawVec<u32> = RawVec::new();
// First, `reserve` allocates like `reserve_exact`.
- v.reserve(0, 9);
+ v.reserve(0, 9)?;
assert_eq!(9, v.capacity());
}
{
 let mut v: RawVec<u32> = RawVec::new();
- v.reserve(0, 7);
+ v.reserve(0, 7)?;
assert_eq!(7, v.capacity());
// 97 is more than double of 7, so `reserve` should work
// like `reserve_exact`.
- v.reserve(7, 90);
+ v.reserve(7, 90)?;
assert_eq!(97, v.capacity());
}
{
 let mut v: RawVec<u32> = RawVec::new();
- v.reserve(0, 12);
+ v.reserve(0, 12)?;
assert_eq!(12, v.capacity());
- v.reserve(12, 3);
+ v.reserve(12, 3)?;
// 3 is less than half of 12, so `reserve` must grow
// exponentially. At the time of writing this test grow
// factor is 2, so new capacity is 24, however, grow factor
// of 1.5 is OK too. Hence `>= 18` in assert.
assert!(v.capacity() >= 12 + 12 / 2);
}
+
+ Ok(())
}
struct ZST;
@@ -88,7 +92,7 @@ fn zst_sanity<T>(v: &RawVec<T>) {
}
#[test]
-fn zst() {
+fn zst() -> Result<(), TryReserveError> {
let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into());
 assert_eq!(std::mem::size_of::<ZST>(), 0);
@@ -98,19 +102,19 @@ fn zst() {
 let v: RawVec<ZST> = RawVec::new();
zst_sanity(&v);
- let v: RawVec<ZST> = RawVec::with_capacity_in(100, Global);
+ let v: RawVec<ZST> = RawVec::with_capacity_in(100, Global)?;
 zst_sanity(&v);
- let v: RawVec<ZST> = RawVec::with_capacity_in(100, Global);
+ let v: RawVec<ZST> = RawVec::with_capacity_in(100, Global)?;
 zst_sanity(&v);
- let v: RawVec<ZST> = RawVec::allocate_in(0, AllocInit::Uninitialized, Global);
+ let v: RawVec<ZST> = RawVec::allocate_in(0, AllocInit::Uninitialized, Global)?;
 zst_sanity(&v);
- let v: RawVec<ZST> = RawVec::allocate_in(100, AllocInit::Uninitialized, Global);
+ let v: RawVec<ZST> = RawVec::allocate_in(100, AllocInit::Uninitialized, Global)?;
 zst_sanity(&v);
- let mut v: RawVec<ZST> = RawVec::allocate_in(usize::MAX, AllocInit::Uninitialized, Global);
+ let mut v: RawVec<ZST> = RawVec::allocate_in(usize::MAX, AllocInit::Uninitialized, Global)?;
zst_sanity(&v);
// Check all these operations work as expected with zero-sized elements.
@@ -119,20 +123,20 @@ fn zst() {
assert!(v.needs_to_grow(101, usize::MAX - 100));
zst_sanity(&v);
- v.reserve(100, usize::MAX - 100);
+ v.reserve(100, usize::MAX - 100)?;
//v.reserve(101, usize::MAX - 100); // panics, in `zst_reserve_panic` below
zst_sanity(&v);
- v.reserve_exact(100, usize::MAX - 100);
+ v.reserve_exact(100, usize::MAX - 100)?;
//v.reserve_exact(101, usize::MAX - 100); // panics, in `zst_reserve_exact_panic` below
zst_sanity(&v);
- assert_eq!(v.try_reserve(100, usize::MAX - 100), Ok(()));
- assert_eq!(v.try_reserve(101, usize::MAX - 100), cap_err);
+ assert_eq!(v.reserve(100, usize::MAX - 100), Ok(()));
+ assert_eq!(v.reserve(101, usize::MAX - 100), cap_err);
zst_sanity(&v);
- assert_eq!(v.try_reserve_exact(100, usize::MAX - 100), Ok(()));
- assert_eq!(v.try_reserve_exact(101, usize::MAX - 100), cap_err);
+ assert_eq!(v.reserve_exact(100, usize::MAX - 100), Ok(()));
+ assert_eq!(v.reserve_exact(101, usize::MAX - 100), cap_err);
zst_sanity(&v);
assert_eq!(v.grow_amortized(100, usize::MAX - 100), cap_err);
@@ -142,22 +146,26 @@ fn zst() {
assert_eq!(v.grow_exact(100, usize::MAX - 100), cap_err);
assert_eq!(v.grow_exact(101, usize::MAX - 100), cap_err);
zst_sanity(&v);
+
+ Ok(())
}
#[test]
-#[should_panic(expected = "capacity overflow")]
fn zst_reserve_panic() {
+ let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into());
+
 let mut v: RawVec<ZST> = RawVec::new();
zst_sanity(&v);
- v.reserve(101, usize::MAX - 100);
+ assert_eq!(v.reserve(101, usize::MAX - 100), cap_err);
}
#[test]
-#[should_panic(expected = "capacity overflow")]
fn zst_reserve_exact_panic() {
+ let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into());
+
 let mut v: RawVec<ZST> = RawVec::new();
zst_sanity(&v);
- v.reserve_exact(101, usize::MAX - 100);
+ assert_eq!(v.reserve_exact(101, usize::MAX - 100), cap_err);
}
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index d89cdff8e366c..190b9d867cf08 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -65,10 +65,10 @@ use core::ops::{self, Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice::{self, SliceIndex};
-use crate::alloc::{Allocator, Global};
use crate::borrow::{Cow, ToOwned};
use crate::boxed::Box;
-use crate::collections::TryReserveError;
+use crate::collections::{TryReserveError, TryReserveErrorKind};
+use crate::falloc::{Allocator, Global};
use crate::raw_vec::RawVec;
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
@@ -98,7 +98,6 @@ pub use self::into_iter::IntoIter;
mod into_iter;
-#[cfg(not(no_global_oom_handling))]
use self::is_zero::IsZero;
mod is_zero;
@@ -108,10 +107,8 @@ mod in_place_collect;
mod partial_eq;
-#[cfg(not(no_global_oom_handling))]
use self::spec_from_elem::SpecFromElem;
-#[cfg(not(no_global_oom_handling))]
mod spec_from_elem;
#[cfg(not(no_global_oom_handling))]
@@ -138,10 +135,8 @@ use self::spec_from_iter::SpecFromIter;
#[cfg(not(no_global_oom_handling))]
mod spec_from_iter;
-#[cfg(not(no_global_oom_handling))]
use self::spec_extend::SpecExtend;
-#[cfg(not(no_global_oom_handling))]
mod spec_extend;
/// A contiguous growable array type, written as `Vec`, short for 'vector'.
@@ -610,6 +605,17 @@ impl Vec {
Vec { buf: RawVec::new_in(alloc), len: 0 }
}
+ pub(crate) fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
+ Ok(Vec { buf: RawVec::with_capacity_in(capacity, alloc)?, len: 0 })
+ }
+
+ pub(crate) fn try_with_capacity_zeroed_in(
+ capacity: usize,
+ alloc: A,
+ ) -> Result<Self, TryReserveError> {
+ Ok(Vec { buf: RawVec::with_capacity_zeroed_in(capacity, alloc)?, len: 0 })
+ }
+
/// Constructs a new, empty `Vec` with at least the specified capacity
/// with the provided allocator.
///
@@ -665,11 +671,10 @@ impl Vec {
/// let vec_units = Vec::<(), System>::with_capacity_in(10, System);
/// assert_eq!(vec_units.capacity(), usize::MAX);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[inline]
#[unstable(feature = "allocator_api", issue = "32838")]
- pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
- Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> A::Result {
+ A::map_result(Self::try_with_capacity_in(capacity, alloc))
}
/// Creates a `Vec` directly from a pointer, a capacity, a length,
@@ -902,10 +907,9 @@ impl Vec {
/// vec.reserve(10);
/// assert!(vec.capacity() >= 11);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve(&mut self, additional: usize) {
- self.buf.reserve(self.len, additional);
+ pub fn reserve(&mut self, additional: usize) -> A::Result<()> {
+ A::map_result(self.try_reserve(additional))
}
/// Reserves the minimum capacity for at least `additional` more elements to
@@ -932,10 +936,9 @@ impl Vec {
/// vec.reserve_exact(10);
/// assert!(vec.capacity() >= 11);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve_exact(&mut self, additional: usize) {
- self.buf.reserve_exact(self.len, additional);
+ pub fn reserve_exact(&mut self, additional: usize) -> A::Result<()> {
+ A::map_result(self.try_reserve_exact(additional))
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
@@ -972,7 +975,7 @@ impl Vec {
/// ```
#[stable(feature = "try_reserve", since = "1.57.0")]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.buf.try_reserve(self.len, additional)
+ self.buf.reserve(self.len, additional)
}
/// Tries to reserve the minimum capacity for at least `additional`
@@ -1015,7 +1018,18 @@ impl Vec {
/// ```
#[stable(feature = "try_reserve", since = "1.57.0")]
pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.buf.try_reserve_exact(self.len, additional)
+ self.buf.reserve_exact(self.len, additional)
+ }
+
+ fn try_shrink_to_fit(&mut self) -> Result<(), TryReserveError> {
+ // The capacity is never less than the length, and there's nothing to do when
+ // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit`
+ // by only calling it with a greater capacity.
+ if self.capacity() > self.len {
+ self.buf.shrink_to(self.len)?;
+ }
+
+ Ok(())
}
/// Shrinks the capacity of the vector as much as possible.
@@ -1032,15 +1046,9 @@ impl Vec {
/// vec.shrink_to_fit();
/// assert!(vec.capacity() >= 3);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn shrink_to_fit(&mut self) {
- // The capacity is never less than the length, and there's nothing to do when
- // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit`
- // by only calling it with a greater capacity.
- if self.capacity() > self.len {
- self.buf.shrink_to_fit(self.len);
- }
+ pub fn shrink_to_fit(&mut self) -> A::Result<()> {
+ A::map_result(self.try_shrink_to_fit())
}
/// Shrinks the capacity of the vector with a lower bound.
@@ -1061,12 +1069,13 @@ impl Vec {
/// vec.shrink_to(0);
/// assert!(vec.capacity() >= 3);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "shrink_to", since = "1.56.0")]
- pub fn shrink_to(&mut self, min_capacity: usize) {
- if self.capacity() > min_capacity {
- self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
- }
+ pub fn shrink_to(&mut self, min_capacity: usize) -> A::Result<()> {
+ A::map_result(if self.capacity() > min_capacity {
+ self.buf.shrink_to(cmp::max(self.len, min_capacity))
+ } else {
+ Ok(())
+ })
}
/// Converts the vector into [`Box<[T]>`][owned slice].
@@ -1094,16 +1103,18 @@ impl Vec {
/// let slice = vec.into_boxed_slice();
/// assert_eq!(slice.into_vec().capacity(), 3);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn into_boxed_slice(mut self) -> Box<[T], A> {
- unsafe {
- self.shrink_to_fit();
- let me = ManuallyDrop::new(self);
- let buf = ptr::read(&me.buf);
- let len = me.len();
- buf.into_box(len).assume_init()
- }
+ pub fn into_boxed_slice(mut self) -> A::Result<Box<[T], A>> {
+ A::map_result((|| {
+ // Substitute for try block
+ self.try_shrink_to_fit()?;
+ unsafe {
+ let me = ManuallyDrop::new(self);
+ let buf = ptr::read(&me.buf);
+ let len = me.len();
+ Ok(buf.into_box(len).assume_init())
+ }
+ })())
}
/// Shortens the vector, keeping the first `len` elements and dropping
@@ -1430,42 +1441,46 @@ impl Vec {
/// vec.insert(4, 5);
/// assert_eq!(vec, [1, 4, 2, 3, 5]);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn insert(&mut self, index: usize, element: T) {
- #[cold]
- #[inline(never)]
- fn assert_failed(index: usize, len: usize) -> ! {
- panic!("insertion index (is {index}) should be <= len (is {len})");
- }
+ pub fn insert(&mut self, index: usize, element: T) -> A::Result<()> {
+ A::map_result((|| {
+ // Substitute for try block
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("insertion index (is {index}) should be <= len (is {len})");
+ }
- let len = self.len();
+ let len = self.len();
- // space for the new element
- if len == self.buf.capacity() {
- self.reserve(1);
- }
+ // space for the new element
+ if len == self.buf.capacity() {
+ self.buf.reserve_for_push(len)?
+ }
- unsafe {
- // infallible
- // The spot to put the new value
- {
- let p = self.as_mut_ptr().add(index);
- if index < len {
- // Shift everything over to make space. (Duplicating the
- // `index`th element into two consecutive places.)
- ptr::copy(p, p.add(1), len - index);
- } else if index == len {
- // No elements need shifting.
- } else {
- assert_failed(index, len);
+ unsafe {
+ // infallible
+ // The spot to put the new value
+ {
+ let p = self.as_mut_ptr().add(index);
+ if index < len {
+ // Shift everything over to make space. (Duplicating the
+ // `index`th element into two consecutive places.)
+ ptr::copy(p, p.add(1), len - index);
+ } else if index == len {
+ // No elements need shifting.
+ } else {
+ assert_failed(index, len);
+ }
+ // Write it in, overwriting the first copy of the `index`th
+ // element.
+ ptr::write(p, element);
}
- // Write it in, overwriting the first copy of the `index`th
- // element.
- ptr::write(p, element);
+ self.set_len(len + 1);
}
- self.set_len(len + 1);
- }
+
+ Ok(())
+ })())
}
/// Removes and returns the element at position `index` within the vector,
@@ -1818,20 +1833,24 @@ impl Vec {
/// vec.push(3);
/// assert_eq!(vec, [1, 2, 3]);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn push(&mut self, value: T) {
- // This will panic or abort if we would allocate > isize::MAX bytes
- // or if the length increment would overflow for zero-sized types.
- if self.len == self.buf.capacity() {
- self.buf.reserve_for_push(self.len);
- }
- unsafe {
- let end = self.as_mut_ptr().add(self.len);
- ptr::write(end, value);
- self.len += 1;
- }
+ pub fn push(&mut self, value: T) -> A::Result<()> {
+ A::map_result((|| {
+ // Substitute for try block
+ // This will panic or abort if we would allocate > isize::MAX bytes
+ // or if the length increment would overflow for zero-sized types.
+ if self.len == self.buf.capacity() {
+ self.buf.reserve_for_push(self.len)?
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+
+ Ok(())
+ })())
}
/// Appends an element if there is sufficient spare capacity, otherwise an error is returned
@@ -1922,25 +1941,30 @@ impl Vec {
/// assert_eq!(vec, [1, 2, 3, 4, 5, 6]);
/// assert_eq!(vec2, []);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[inline]
#[stable(feature = "append", since = "1.4.0")]
- pub fn append(&mut self, other: &mut Self) {
- unsafe {
- self.append_elements(other.as_slice() as _);
- other.set_len(0);
- }
+ pub fn append(&mut self, other: &mut Self) -> A::Result<()> {
+ A::map_result((|| {
+ // Substitute for try block
+ unsafe {
+ self.append_elements(other.as_slice() as _)?;
+ other.set_len(0);
+ }
+
+ Ok(())
+ })())
}
/// Appends elements to `self` from other buffer.
- #[cfg(not(no_global_oom_handling))]
#[inline]
- unsafe fn append_elements(&mut self, other: *const [T]) {
+ unsafe fn append_elements(&mut self, other: *const [T]) -> Result<(), TryReserveError> {
let count = unsafe { (*other).len() };
- self.reserve(count);
+ self.try_reserve(count)?;
let len = self.len();
unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
self.len += count;
+
+ Ok(())
}
/// Removes the specified range from the vector in bulk, returning all
@@ -2084,43 +2108,45 @@ impl Vec {
/// assert_eq!(vec, [1]);
/// assert_eq!(vec2, [2, 3]);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[inline]
#[must_use = "use `.truncate()` if you don't need the other half"]
#[stable(feature = "split_off", since = "1.4.0")]
- pub fn split_off(&mut self, at: usize) -> Self
+ pub fn split_off(&mut self, at: usize) -> A::Result<Self>
where
A: Clone,
{
- #[cold]
- #[inline(never)]
- fn assert_failed(at: usize, len: usize) -> ! {
- panic!("`at` split index (is {at}) should be <= len (is {len})");
- }
+ A::map_result((|| {
+ // Substitute for try block
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(at: usize, len: usize) -> ! {
+ panic!("`at` split index (is {at}) should be <= len (is {len})");
+ }
- if at > self.len() {
- assert_failed(at, self.len());
- }
+ if at > self.len() {
+ assert_failed(at, self.len());
+ }
- if at == 0 {
- // the new vector can take over the original buffer and avoid the copy
- return mem::replace(
- self,
- Vec::with_capacity_in(self.capacity(), self.allocator().clone()),
- );
- }
+ if at == 0 {
+ // the new vector can take over the original buffer and avoid the copy
+ return Ok(mem::replace(
+ self,
+ Vec::try_with_capacity_in(self.capacity(), self.allocator().clone())?,
+ ));
+ }
- let other_len = self.len - at;
- let mut other = Vec::with_capacity_in(other_len, self.allocator().clone());
+ let other_len = self.len - at;
+ let mut other = Vec::try_with_capacity_in(other_len, self.allocator().clone())?;
- // Unsafely `set_len` and copy items to `other`.
- unsafe {
- self.set_len(at);
- other.set_len(other_len);
+ // Unsafely `set_len` and copy items to `other`.
+ unsafe {
+ self.set_len(at);
+ other.set_len(other_len);
- ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
- }
- other
+ ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
+ }
+ Ok(other)
+ })())
}
/// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
@@ -2149,18 +2175,22 @@ impl Vec {
/// vec.resize_with(4, || { p *= 2; p });
/// assert_eq!(vec, [2, 4, 8, 16]);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_resize_with", since = "1.33.0")]
- pub fn resize_with<F>(&mut self, new_len: usize, f: F)
+ pub fn resize_with<F>(&mut self, new_len: usize, f: F) -> A::Result<()>
where
F: FnMut() -> T,
{
- let len = self.len();
- if new_len > len {
- self.extend_trusted(iter::repeat_with(f).take(new_len - len));
- } else {
- self.truncate(new_len);
- }
+ A::map_result((|| {
+ // Substitute for try block
+ let len = self.len();
+ if new_len > len {
+ self.extend_trusted(iter::repeat_with(f).take(new_len - len))?;
+ } else {
+ self.truncate(new_len);
+ }
+
+ Ok(())
+ })())
}
/// Consumes and leaks the `Vec`, returning a mutable reference to the contents,
@@ -2349,16 +2379,20 @@ impl Vec {
/// vec.resize(2, 0);
/// assert_eq!(vec, [1, 2]);
/// ```
- #[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_resize", since = "1.5.0")]
- pub fn resize(&mut self, new_len: usize, value: T) {
- let len = self.len();
+ pub fn resize(&mut self, new_len: usize, value: T) -> A::Result<()> {
+ A::map_result((|| {
+ // Substitute for try block
+ let len = self.len();
- if new_len > len {
- self.extend_with(new_len - len, value)
- } else {
- self.truncate(new_len);
- }
+ if new_len > len {
+ self.extend_with(new_len - len, value)?;
+ } else {
+ self.truncate(new_len);
+ }
+
+ Ok(())
+ })())
}
/// Clones and appends all elements in a slice to the `Vec`.
@@ -2382,8 +2416,8 @@ impl Vec {
/// [`extend`]: Vec::extend
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
- pub fn extend_from_slice(&mut self, other: &[T]) {
- self.spec_extend(other.iter())
+ pub fn extend_from_slice(&mut self, other: &[T]) -> A::Result<()> {
+ A::map_result(self.spec_extend(other.iter()))
}
/// Copies elements from `src` range to the end of the vector.
@@ -2409,18 +2443,23 @@ impl Vec {
/// ```
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_extend_from_within", since = "1.53.0")]
- pub fn extend_from_within<R>(&mut self, src: R)
+ pub fn extend_from_within<R>(&mut self, src: R) -> A::Result<()>
where
 R: RangeBounds<usize>,
{
- let range = slice::range(src, ..self.len());
- self.reserve(range.len());
+ A::map_result((|| {
+ // Substitute for try block
+ let range = slice::range(src, ..self.len());
+ self.try_reserve(range.len())?;
- // SAFETY:
- // - `slice::range` guarantees that the given range is valid for indexing self
- unsafe {
- self.spec_extend_from_within(range);
- }
+ // SAFETY:
+ // - `slice::range` guarantees that the given range is valid for indexing self
+ unsafe {
+ self.spec_extend_from_within(range);
+ }
+
+ Ok(())
+ })())
}
}
@@ -2472,8 +2511,8 @@ impl Vec<[T; N], A> {
impl Vec {
#[cfg(not(no_global_oom_handling))]
/// Extend the vector by `n` clones of value.
- fn extend_with(&mut self, n: usize, value: T) {
- self.reserve(n);
+ fn extend_with(&mut self, n: usize, value: T) -> Result<(), TryReserveError> {
+ self.try_reserve(n)?;
unsafe {
let mut ptr = self.as_mut_ptr().add(self.len());
@@ -2498,6 +2537,8 @@ impl Vec {
// len set by scope guard
}
+
+ Ok(())
}
}
@@ -2528,17 +2569,15 @@ impl Vec {
////////////////////////////////////////////////////////////////////////////////
#[doc(hidden)]
-#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
-pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
- <T as SpecFromElem>::from_elem(elem, n, Global)
+pub fn from_elem<T: Clone>(elem: T, n: usize) -> <Global as Allocator>::Result<Vec<T>> {
+ <Global as Allocator>::map_result(<T as SpecFromElem>::from_elem(elem, n, Global))
}
#[doc(hidden)]
-#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
-pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
- <T as SpecFromElem>::from_elem(elem, n, alloc)
+pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> A::Result<Vec<T, A>> {
+ A::map_result(<T as SpecFromElem>::from_elem(elem, n, alloc))
}
trait ExtendFromWithinSpec {
@@ -2761,12 +2800,11 @@ impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec {
}
}
-#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
-impl Extend for Vec {
+impl<T, A: Allocator<Result<()> = ()>> Extend<T> for Vec<T, A> {
#[inline]
 fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
+ A::map_result(<Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter()))
}
#[inline]
@@ -2783,8 +2821,10 @@ impl Extend for Vec {
impl Vec {
// leaf method to which various SpecFrom/SpecExtend implementations delegate when
// they have no further optimizations to apply
- #[cfg(not(no_global_oom_handling))]
- fn extend_desugared>(&mut self, mut iterator: I) {
+ fn extend_desugared>(
+ &mut self,
+ mut iterator: I,
+ ) -> Result<(), TryReserveError> {
// This is the case for a general iterator.
//
// This function should be the moral equivalent of:
@@ -2796,7 +2836,7 @@ impl Vec {
let len = self.len();
if len == self.capacity() {
let (lower, _) = iterator.size_hint();
- self.reserve(lower.saturating_add(1));
+ self.try_reserve(lower.saturating_add(1))?;
}
unsafe {
ptr::write(self.as_mut_ptr().add(len), element);
@@ -2806,12 +2846,16 @@ impl Vec {
self.set_len(len + 1);
}
}
+
+ Ok(())
}
// specific extend for `TrustedLen` iterators, called both by the specializations
// and internal places where resolving specialization makes compilation slower
- #[cfg(not(no_global_oom_handling))]
- fn extend_trusted(&mut self, iterator: impl iter::TrustedLen- ) {
+ fn extend_trusted(
+ &mut self,
+ iterator: impl iter::TrustedLen
- ,
+ ) -> Result<(), TryReserveError> {
let (low, high) = iterator.size_hint();
if let Some(additional) = high {
debug_assert_eq!(
@@ -2820,7 +2864,7 @@ impl Vec {
"TrustedLen iterator's size hint is not exact: {:?}",
(low, high)
);
- self.reserve(additional);
+ self.try_reserve(additional)?;
unsafe {
let ptr = self.as_mut_ptr();
let mut local_len = SetLenOnDrop::new(&mut self.len);
@@ -2832,13 +2876,15 @@ impl Vec {
local_len.increment_len(1);
});
}
+
+ Ok(())
} else {
// Per TrustedLen contract a `None` upper bound means that the iterator length
// truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
// Since the other branch already panics eagerly (via `reserve()`) we do the same here.
// This avoids additional codegen for a fallback code path which would eventually
// panic anyway.
- panic!("capacity overflow");
+ Err(TryReserveErrorKind::CapacityOverflow.into())
}
}
@@ -2882,6 +2928,7 @@ impl Vec {
where
R: RangeBounds,
I: IntoIterator
- ,
+ A: Allocator = ()>,
{
Splice { drain: self.drain(range), replace_with: replace_with.into_iter() }
}
@@ -2952,11 +2999,10 @@ impl Vec {
/// append the entire slice at once.
///
/// [`copy_from_slice`]: slice::copy_from_slice
-#[cfg(not(no_global_oom_handling))]
#[stable(feature = "extend_ref", since = "1.2.0")]
-impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec {
+impl<'a, T: Copy + 'a, A: Allocator<Result<()> = ()> + 'a> Extend<&'a T> for Vec<T, A> {
fn extend>(&mut self, iter: I) {
- self.spec_extend(iter.into_iter())
+ A::map_result(self.spec_extend(iter.into_iter()))
}
#[inline]
@@ -3153,10 +3199,12 @@ impl From> for Vec {
}
// note: test pulls in std, which causes errors here
-#[cfg(not(no_global_oom_handling))]
#[cfg(not(test))]
#[stable(feature = "box_from_vec", since = "1.20.0")]
-impl From> for Box<[T], A> {
+impl From> for Box<[T], A>
+where
+ A: Allocator = Self>,
+{
/// Convert a vector into a boxed slice.
///
/// If `v` has excess capacity, its items will be moved into a
diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
index 56065ce565bfc..6b976ba6df12b 100644
--- a/library/alloc/src/vec/spec_extend.rs
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -1,4 +1,5 @@
use crate::alloc::Allocator;
+use crate::collections::TryReserveError;
use core::iter::TrustedLen;
use core::slice::{self};
@@ -6,14 +7,14 @@ use super::{IntoIter, Vec};
// Specialization trait used for Vec::extend
 pub(super) trait SpecExtend<T, I> {
- fn spec_extend(&mut self, iter: I);
+ fn spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>;
}
impl SpecExtend for Vec
where
I: Iterator
- ,
{
- default fn spec_extend(&mut self, iter: I) {
+ default fn spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> {
self.extend_desugared(iter)
}
}
@@ -22,17 +23,19 @@ impl SpecExtend for Vec
where
I: TrustedLen
- ,
{
- default fn spec_extend(&mut self, iterator: I) {
+ default fn spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
self.extend_trusted(iterator)
}
}
impl SpecExtend> for Vec {
- fn spec_extend(&mut self, mut iterator: IntoIter) {
+ fn spec_extend(&mut self, mut iterator: IntoIter) -> Result<(), TryReserveError> {
unsafe {
- self.append_elements(iterator.as_slice() as _);
+ self.append_elements(iterator.as_slice() as _)?;
}
iterator.forget_remaining_elements();
+
+ Ok(())
}
}
@@ -41,7 +44,7 @@ where
I: Iterator
- ,
T: Clone,
{
- default fn spec_extend(&mut self, iterator: I) {
+ default fn spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
self.spec_extend(iterator.cloned())
}
}
@@ -50,8 +53,8 @@ impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec
where
T: Copy,
{
- fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> {
let slice = iterator.as_slice();
- unsafe { self.append_elements(slice) };
+ unsafe { self.append_elements(slice) }
}
}
diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs
index da43d17bf3624..e059bb292eef4 100644
--- a/library/alloc/src/vec/spec_from_elem.rs
+++ b/library/alloc/src/vec/spec_from_elem.rs
@@ -1,61 +1,87 @@
use core::ptr;
-use crate::alloc::Allocator;
-use crate::raw_vec::RawVec;
+use crate::collections::TryReserveError;
+use crate::falloc::Allocator;
use super::{IsZero, Vec};
// Specialization trait used for Vec::from_elem
pub(super) trait SpecFromElem: Sized {
- fn from_elem(elem: Self, n: usize, alloc: A) -> Vec;
+ fn from_elem(
+ elem: Self,
+ n: usize,
+ alloc: A,
+ ) -> Result, TryReserveError>;
}
impl SpecFromElem for T {
- default fn from_elem(elem: Self, n: usize, alloc: A) -> Vec {
- let mut v = Vec::with_capacity_in(n, alloc);
- v.extend_with(n, elem);
- v
+ default fn from_elem(
+ elem: Self,
+ n: usize,
+ alloc: A,
+ ) -> Result