|
//! Iterator over Multiboot2 structures. Technically, the process is the same
//! for iterating Multiboot2 header tags and Multiboot2 boot information tags.
|
| 4 | +use crate::{increase_to_alignment, BytesRef, Header, ALIGNMENT}; |
| 5 | +use core::marker::PhantomData; |
| 6 | + |
/// Iterates over the tag bytes of a structure while guaranteeing all necessary
/// memory rules of Multiboot2 and Rustc/Miri. It emits elements of type
/// [`BytesRef`].
///
/// This only operates on bytes rather than tags, as I didn't find a solution
/// to make the iterator `Clone`able and also make it work with DSTs. However,
/// the necessary glue-code for users is minimal.
#[derive(Clone, Debug)]
pub struct TagBytesIter<'a, H: Header> {
    /// Absolute offset to next tag and updated in each iteration.
    next_tag_offset: usize,
    /// The underlying byte slice covering all tags. `new()` asserts that it
    /// starts at an `ALIGNMENT`-aligned address.
    buffer: &'a [u8],
    // Ensure that all instances are bound to a specific `Header`.
    // Otherwise, UB can happen.
    _t: PhantomData<H>,
}
| 23 | + |
| 24 | +impl<'a, H: Header> TagBytesIter<'a, H> { |
| 25 | + /// Creates a new iterator. |
| 26 | + #[must_use] |
| 27 | + pub fn new(mem: &'a [u8]) -> Self { |
| 28 | + // Assert alignment. |
| 29 | + assert_eq!(mem.as_ptr().align_offset(ALIGNMENT), 0); |
| 30 | + |
| 31 | + TagBytesIter { |
| 32 | + next_tag_offset: 0, |
| 33 | + buffer: mem, |
| 34 | + _t: PhantomData, |
| 35 | + } |
| 36 | + } |
| 37 | +} |
| 38 | + |
| 39 | +impl<'a, H: Header> Iterator for TagBytesIter<'a, H> { |
| 40 | + type Item = BytesRef<'a, H>; |
| 41 | + |
| 42 | + fn next(&mut self) -> Option<Self::Item> { |
| 43 | + if self.next_tag_offset == self.buffer.len() { |
| 44 | + return None; |
| 45 | + } |
| 46 | + assert!(self.next_tag_offset < self.buffer.len()); |
| 47 | + |
| 48 | + let ptr = unsafe { self.buffer.as_ptr().add(self.next_tag_offset) }.cast::<H>(); |
| 49 | + let tag_hdr = unsafe { &*ptr }; |
| 50 | + |
| 51 | + // Get relevant byte portion for the next tag. This includes padding |
| 52 | + // bytes to fulfill Rust memory guarantees. Otherwise, Miri complains. |
| 53 | + // See <https://doc.rust-lang.org/reference/type-layout.html>. |
| 54 | + let bytes = { |
| 55 | + let from = self.next_tag_offset; |
| 56 | + let len = size_of::<H>() + tag_hdr.payload_len(); |
| 57 | + let to = from + len; |
| 58 | + |
| 59 | + // The size of (the allocation for) a value is always a multiple of |
| 60 | + // its alignment. |
| 61 | + // https://doc.rust-lang.org/reference/type-layout.html |
| 62 | + let to = increase_to_alignment(to); |
| 63 | + |
| 64 | + // Update ptr for next iteration. |
| 65 | + self.next_tag_offset += to - from; |
| 66 | + |
| 67 | + &self.buffer[from..to] |
| 68 | + }; |
| 69 | + |
| 70 | + // unwrap: We should not come to this point at all, if there are memory |
| 71 | + // issues. |
| 72 | + let bytes = BytesRef::try_from(bytes).unwrap(); |
| 73 | + Some(bytes) |
| 74 | + } |
| 75 | +} |
0 commit comments