
Perf improvements to collections::BitSet. #25230


Merged
2 commits merged on May 18, 2015
157 changes: 83 additions & 74 deletions src/libcollections/bit.rs
@@ -1452,7 +1452,7 @@ impl BitSet {
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn iter(&self) -> bit_set::Iter {
SetIter {set: self, next_idx: 0}
SetIter(BlockIter::from_blocks(self.bit_vec.blocks()))
}

/// Iterator over each u32 stored in `self` union `other`.
@@ -1477,13 +1477,11 @@ impl BitSet {
pub fn union<'a>(&'a self, other: &'a BitSet) -> Union<'a> {
fn or(w1: u32, w2: u32) -> u32 { w1 | w2 }

Union(TwoBitPositions {
set: self,
other: other,
Union(BlockIter::from_blocks(TwoBitPositions {
set: self.bit_vec.blocks(),
other: other.bit_vec.blocks(),
merge: or,
current_word: 0,
next_idx: 0
})
}))
}

/// Iterator over each usize stored in `self` intersect `other`.
@@ -1508,13 +1506,12 @@ impl BitSet {
pub fn intersection<'a>(&'a self, other: &'a BitSet) -> Intersection<'a> {
fn bitand(w1: u32, w2: u32) -> u32 { w1 & w2 }
let min = cmp::min(self.bit_vec.len(), other.bit_vec.len());
Intersection(TwoBitPositions {
set: self,
other: other,

Intersection(BlockIter::from_blocks(TwoBitPositions {
set: self.bit_vec.blocks(),
other: other.bit_vec.blocks(),
merge: bitand,
current_word: 0,
next_idx: 0
}.take(min))
}).take(min))
}

/// Iterator over each usize stored in the `self` setminus `other`.
Expand Down Expand Up @@ -1546,13 +1543,11 @@ impl BitSet {
pub fn difference<'a>(&'a self, other: &'a BitSet) -> Difference<'a> {
fn diff(w1: u32, w2: u32) -> u32 { w1 & !w2 }

Difference(TwoBitPositions {
set: self,
other: other,
Difference(BlockIter::from_blocks(TwoBitPositions {
set: self.bit_vec.blocks(),
other: other.bit_vec.blocks(),
merge: diff,
current_word: 0,
next_idx: 0
})
}))
}

/// Iterator over each u32 stored in the symmetric difference of `self` and `other`.
@@ -1578,13 +1573,11 @@ impl BitSet {
pub fn symmetric_difference<'a>(&'a self, other: &'a BitSet) -> SymmetricDifference<'a> {
fn bitxor(w1: u32, w2: u32) -> u32 { w1 ^ w2 }

SymmetricDifference(TwoBitPositions {
set: self,
other: other,
SymmetricDifference(BlockIter::from_blocks(TwoBitPositions {
set: self.bit_vec.blocks(),
other: other.bit_vec.blocks(),
merge: bitxor,
current_word: 0,
next_idx: 0
})
}))
}
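
All four set-operation constructors now follow the same shape: wrap the two sets' raw 32-bit block iterators in a `TwoBitPositions` with the appropriate merge function, then feed the merged blocks through `BlockIter` to recover bit indices. A hypothetical usage sketch of the public iterators (this assumes the then-unstable `collections::BitSet` API documented in this file, so it only builds on a 2015-era nightly with the right feature gate):

```rust
use std::collections::BitSet;

fn main() {
    let mut a = BitSet::new();
    let mut b = BitSet::new();
    for &i in &[1, 3, 5, 40] { a.insert(i); }
    for &i in &[3, 4, 40] { b.insert(i); }

    // Each iterator now walks whole u32 blocks and only touches set bits,
    // yielding bit indices in ascending order.
    let union: Vec<usize> = a.union(&b).collect();        // [1, 3, 4, 5, 40]
    let inter: Vec<usize> = a.intersection(&b).collect(); // [3, 40]
    let diff:  Vec<usize> = a.difference(&b).collect();   // [1, 5]
    println!("{:?} {:?} {:?}", union, inter, diff);
}
```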

/// Unions in-place with the specified other bit vector.
@@ -1808,98 +1801,114 @@ impl hash::Hash for BitSet {
}
}

/// An iterator for `BitSet`.
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SetIter<'a> {
set: &'a BitSet,
next_idx: usize
struct BlockIter<T> where T: Iterator<Item=u32> {
head: u32,
head_offset: usize,
tail: T,
}

impl<'a, T> BlockIter<T> where T: Iterator<Item=u32> {
fn from_blocks(mut blocks: T) -> BlockIter<T> {
let h = blocks.next().unwrap_or(0);
BlockIter {tail: blocks, head: h, head_offset: 0}
}
}

/// An iterator combining two `BitSet` iterators.
#[derive(Clone)]
struct TwoBitPositions<'a> {
set: &'a BitSet,
other: &'a BitSet,
set: Blocks<'a>,
other: Blocks<'a>,
merge: fn(u32, u32) -> u32,
current_word: u32,
next_idx: usize
}

/// An iterator for `BitSet`.
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SetIter<'a>(BlockIter<Blocks<'a>>);
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Union<'a>(TwoBitPositions<'a>);
pub struct Union<'a>(BlockIter<TwoBitPositions<'a>>);
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Intersection<'a>(Take<TwoBitPositions<'a>>);
pub struct Intersection<'a>(Take<BlockIter<TwoBitPositions<'a>>>);
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct Difference<'a>(TwoBitPositions<'a>);
pub struct Difference<'a>(BlockIter<TwoBitPositions<'a>>);
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct SymmetricDifference<'a>(TwoBitPositions<'a>);
pub struct SymmetricDifference<'a>(BlockIter<TwoBitPositions<'a>>);

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for SetIter<'a> {
impl<'a, T> Iterator for BlockIter<T> where T: Iterator<Item=u32> {
type Item = usize;

fn next(&mut self) -> Option<usize> {
while self.next_idx < self.set.bit_vec.len() {
let idx = self.next_idx;
self.next_idx += 1;

if self.set.contains(&idx) {
return Some(idx);
while self.head == 0 {
match self.tail.next() {
Some(w) => self.head = w,
None => return None
}
self.head_offset += u32::BITS;
}

return None;
// from the current block, isolate the
Member (huonw):

It might be more efficient to use http://doc.rust-lang.org/nightly/std/primitive.u32.html#method.trailing_zeros to get this index. (At least, I'd hope that LLVM lowers that instruction to the most efficient thing for the given platform.)

Author:

@huonw Ahh, you're probably right in the general case, because the particular opcode (BSF) is more widely supported, whereas POPCNT needs to be explicitly enabled.

It would also remove the additional bit-twiddling.

Member (huonw):

In fact, I would assume that using the direct method (which actually calls an LLVM intrinsic) would always be at least as fast as this method: if it isn't, then it's an LLVM bug, since LLVM can implement the intrinsic via this method on platforms where this is the fastest.

Author:

I'm not sure why it would be an LLVM bug; llvm.cttz will generally result in a BSF instruction on most hardware. That's fine, except that BSF is comparatively very slow, several multiples slower than POPCNT (the instruction behind llvm.ctpop) on the same hardware.

However, BSF is more widely supported, and where POPCNT isn't available (or isn't enabled) the resulting assembly is an additional ~12 instructions on 64-bit hardware (I don't know about 32-bit).

A good review of this algorithm (and the various ways of implementing it) can be found here. The fastest popcnt-based variant is consistently faster (~10-25%), although in the Rust implementation the difference is less pronounced because constant factors are more dominant.

As I said, std isn't normally compiled with the relevant CPU feature enabled, so most of my points don't apply in practice; it makes more sense to use your suggestion, which I've made here.

// LSB and subtract 1, producing k:
// a block with a number of set bits
// equal to the index of the LSB
let k = (self.head & (!self.head + 1)) - 1;
// update block, removing the LSB
self.head &= self.head - 1;
// return offset + (index of LSB)
Some(self.head_offset + (u32::count_ones(k) as usize))
}

#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
(0, Some(self.set.bit_vec.len() - self.next_idx))
match self.tail.size_hint() {
(_, Some(h)) => (0, Some(1 + h * (u32::BITS as usize))),
_ => (0, None)
}
}
}
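
For reference, here is a standalone sketch (in current Rust, outside the patch) of the bit-index extraction that `BlockIter::next` performs, alongside the `trailing_zeros` variant suggested in the review thread above. `bit_indices` is a hypothetical helper, not part of this PR:

```rust
// Extract the indices of all set bits in one u32 block, offset by the
// block's starting bit position.
fn bit_indices(mut block: u32, offset: usize) -> Vec<usize> {
    let mut out = Vec::new();
    while block != 0 {
        // Variant used in this patch: isolate the lowest set bit, subtract 1,
        // and popcount the result to get its index within the block.
        let k = (block & block.wrapping_neg()) - 1;
        let idx = k.count_ones() as usize;

        // Equivalent variant suggested in review: count trailing zeros directly.
        debug_assert_eq!(idx, block.trailing_zeros() as usize);

        out.push(offset + idx);
        block &= block - 1; // clear the lowest set bit and continue
    }
    out
}

fn main() {
    // 0b101_0010 has bits 1, 4, and 6 set.
    assert_eq!(bit_indices(0b101_0010, 0), vec![1, 4, 6]);
    // A second block starts at offset 32.
    assert_eq!(bit_indices(0b1, 32), vec![32]);
    println!("ok");
}
```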

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for TwoBitPositions<'a> {
type Item = usize;

fn next(&mut self) -> Option<usize> {
while self.next_idx < self.set.bit_vec.len() ||
self.next_idx < self.other.bit_vec.len() {
let bit_idx = self.next_idx % u32::BITS;
if bit_idx == 0 {
let s_bit_vec = &self.set.bit_vec;
let o_bit_vec = &self.other.bit_vec;
// Merging the two words is a bit of an awkward dance since
// one BitVec might be longer than the other
let word_idx = self.next_idx / u32::BITS;
let w1 = if word_idx < s_bit_vec.storage.len() {
s_bit_vec.storage[word_idx]
} else { 0 };
let w2 = if word_idx < o_bit_vec.storage.len() {
o_bit_vec.storage[word_idx]
} else { 0 };
self.current_word = (self.merge)(w1, w2);
}

self.next_idx += 1;
if self.current_word & (1 << bit_idx) != 0 {
return Some(self.next_idx - 1);
}
type Item = u32;

fn next(&mut self) -> Option<u32> {
match (self.set.next(), self.other.next()) {
(Some(a), Some(b)) => Some((self.merge)(a, b)),
(Some(a), None) => Some((self.merge)(a, 0)),
(None, Some(b)) => Some((self.merge)(0, b)),
_ => return None
}
Contributor:

💖

return None;
}

#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
let cap = cmp::max(self.set.bit_vec.len(), self.other.bit_vec.len());
(0, Some(cap - self.next_idx))
let (a, au) = self.set.size_hint();
let (b, bu) = self.other.size_hint();

let upper = match (au, bu) {
(Some(au), Some(bu)) => Some(cmp::max(au, bu)),
_ => None
};

(cmp::max(a, b), upper)
}
}
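
The merge itself is now purely per-block: the two `Blocks` iterators are stepped in lockstep and the shorter one is padded with zero blocks, which is what the `(Some, None)` and `(None, Some)` arms above do. A standalone sketch of the same idea over plain `u32` slices, in modern Rust, with a hypothetical `merge_blocks` helper standing in for `TwoBitPositions`:

```rust
fn merge_blocks<'a>(
    a: &'a [u32],
    b: &'a [u32],
    merge: fn(u32, u32) -> u32,
) -> impl Iterator<Item = u32> + 'a {
    let len = std::cmp::max(a.len(), b.len());
    // Pad whichever side runs out with zero blocks, mirroring the
    // (Some, None) / (None, Some) arms of TwoBitPositions::next.
    (0..len).map(move |i| {
        let w1 = a.get(i).copied().unwrap_or(0);
        let w2 = b.get(i).copied().unwrap_or(0);
        merge(w1, w2)
    })
}

fn main() {
    fn or(w1: u32, w2: u32) -> u32 { w1 | w2 }
    let a = [0b0011, 0b1000];
    let b = [0b0101]; // shorter: treated as having a trailing zero block
    let merged: Vec<u32> = merge_blocks(&a, &b, or).collect();
    assert_eq!(merged, vec![0b0111, 0b1000]);
    println!("{:?}", merged);
}
```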

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for SetIter<'a> {
type Item = usize;

#[inline] fn next(&mut self) -> Option<usize> { self.0.next() }
#[inline] fn size_hint(&self) -> (usize, Option<usize>) { self.0.size_hint() }
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<'a> Iterator for Union<'a> {
type Item = usize;