Commit 68ebd49

refactored
1 parent d6ecdde commit 68ebd49

1 file changed: 74 additions, 79 deletions

src/libstd/collections/hash/map.rs (+74 -79)

@@ -635,7 +635,9 @@ impl<K, V, S, H> HashMap<K, V, S>
 
         if self.table.capacity() < min_cap {
             let new_capacity = max(min_cap.next_power_of_two(), INITIAL_CAPACITY);
+            let old_size = self.table.size();
             self.resize(new_capacity);
+            assert_eq!(self.table.size(), old_size);
         }
     }
 
@@ -647,7 +649,6 @@ impl<K, V, S, H> HashMap<K, V, S>
         assert!(self.table.size() <= new_capacity);
         assert!(new_capacity.is_power_of_two() || new_capacity == 0);
 
-        let old_size = self.table.size();
         let old_capacity = self.table.capacity();
 
         if self.table.capacity() == 0 || self.table.size() == 0 {
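
For reference, the capacity rule used in the `reserve` hunk above and the power-of-two invariant asserted at the top of `resize` can be sketched in isolation. This is a minimal illustration in current Rust (`usize` where the diff uses `uint`); the `INITIAL_CAPACITY = 32` value is a placeholder chosen for the example, not necessarily the map's real constant.

use std::cmp::max;

const INITIAL_CAPACITY: usize = 32; // placeholder value, for illustration only

// Round the requested minimum up to a power of two, floored at the initial
// capacity, matching the shape of `reserve`'s computation above.
fn target_capacity(min_cap: usize) -> usize {
    max(min_cap.next_power_of_two(), INITIAL_CAPACITY)
}

fn main() {
    assert_eq!(target_capacity(0), 32);  // floored at the initial capacity
    assert_eq!(target_capacity(33), 64); // rounded up to the next power of two
    assert_eq!(target_capacity(64), 64); // exact powers of two are kept
    // Every result satisfies the invariant asserted above in `resize`.
    assert!(target_capacity(100).is_power_of_two());
}
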
@@ -658,91 +659,85 @@ impl<K, V, S, H> HashMap<K, V, S>
         // Grow the table.
         let is_inplace = self.table.grow_inplace(new_capacity);

-        let mut old_table = if is_inplace {
+        let mut destination = if is_inplace {
+            // Resizing in-place.
             None
         } else {
-            Some(replace(&mut self.table, RawTable::new(new_capacity)))
+            // Borrow self.table in both branches to satisfy the checker.
+            Some(RawTable::new(new_capacity))
         };

-        {
-            let (source, mut destination) = if let Some(ref mut old_table) = old_table {
-                // Borrow self.table in both branches to satisfy the checker.
-                (old_table, Some(&mut self.table))
-            } else {
-                // Resizing in-place.
-                (&mut self.table, None)
-            };
+        // Iterate over `old_capacity` buckets, which constitute half of
+        // the table which was resized in-place, or the entire
+        // `old_table`.
+        let mut bucket = Bucket::at_index(&mut self.table, 0).unwrap().iter_to(old_capacity);

-            // Iterate over `old_capacity` buckets, which constitute half of
-            // the table which was resized in-place, or the entire
-            // `old_table`.
-            let mut bucket = Bucket::at_index(source, 0).unwrap().iter_to(old_capacity);
-
-            // "So a few of the first shall be last: for many be called,
-            // but few chosen."
-            //
-            // We'll most likely encounter a few buckets at the beginning that
-            // have their initial buckets near the end of the table. They were
-            // placed at the beginning as the probe wrapped around the table
-            // during insertion. We must skip forward to a bucket that won't
-            // get reinserted too early and won't unfairly steal others spot.
-            // This eliminates the need for robin hood.
-            loop {
-                bucket = match bucket.peek() {
-                    Full(full) => {
-                        if full.displacement() == 0 {
-                            // This bucket occupies its ideal spot.
-                            // It indicates the start of another "cluster".
-                            bucket = full.into_bucket();
-                            break;
-                        }
-                        // Leaving this bucket in the last cluster for later.
-                        full.into_bucket()
-                    }
-                    Empty(b) => {
-                        // Encountered a hole between clusters.
-                        b.into_bucket()
+        // "So a few of the first shall be last: for many be called,
+        // but few chosen."
+        //
+        // We'll most likely encounter a few buckets at the beginning that
+        // have their initial buckets near the end of the table. They were
+        // placed at the beginning as the probe wrapped around the table
+        // during insertion. We must skip forward to a bucket that won't
+        // get reinserted too early and won't unfairly steal others spot.
+        // This eliminates the need for robin hood.
+        loop {
+            bucket = match bucket.peek() {
+                Full(full) => {
+                    if full.displacement() == 0 {
+                        // This bucket occupies its ideal spot.
+                        // It indicates the start of another "cluster".
+                        bucket = full.into_bucket();
+                        break;
                     }
-                };
-                bucket.next();
-            }
+                    // Leaving this bucket in the last cluster for later.
+                    full.into_bucket()
+                }
+                Empty(b) => {
+                    // Encountered a hole between clusters.
+                    b.into_bucket()
+                }
+            };
+            bucket.next();
+        }

-            // This is how the buckets might be laid out in memory:
-            // ($ marks an initialized bucket)
-            // ________________
-            // |$$$_$$$$$$_$$$$$|
-            //
-            // But we've skipped the entire initial cluster of buckets
-            // and will continue iteration in this order:
-            // ________________
-            // |$$$$$$_$$$$$
-            // ^ wrap around once end is reached
-            // ________________
-            // $$$_____________|
-            // ^ exit once table.size == 0
-            let idx_end = bucket.index() + old_capacity;
-
-            while bucket.index() != idx_end {
-                bucket = match bucket.peek() {
-                    Full(bucket) => {
-                        let h = *bucket.read().0;
-                        let (b, k, v) = bucket.take();
-
-                        if let Some(ref mut dest) = destination {
-                            insert_hashed_ordered(&mut **dest, h, k, v);
-                            b.into_bucket()
-                        } else {
-                            // Resizing in-place.
-                            insert_hashed_ordered(b.into_bucket(), h, k, v)
-                        }
+        // This is how the buckets might be laid out in memory:
+        // ($ marks an initialized bucket)
+        // ________________
+        // |$$$_$$$$$$_$$$$$|
+        //
+        // But we've skipped the entire initial cluster of buckets
+        // and will continue iteration in this order:
+        // ________________
+        // |$$$$$$_$$$$$
+        // ^ wrap around once end is reached
+        // ________________
+        // $$$_____________|
+        // ^ exit once table.size == 0
+        let idx_end = bucket.index() + old_capacity;
+
+        while bucket.index() != idx_end {
+            bucket = match bucket.peek() {
+                Full(bucket) => {
+                    let h = *bucket.read().0;
+                    let (b, k, v) = bucket.take();
+
+                    if let Some(ref mut dest) = destination {
+                        insert_hashed_ordered(dest, h, k, v);
+                        b.into_bucket()
+                    } else {
+                        // Resizing in-place.
+                        insert_hashed_ordered(b.into_bucket(), h, k, v)
                     }
-                    Empty(b) => b.into_bucket()
-                };
-                bucket.next(); // wraps at old_capacity
-            }
-        };
+                }
+                Empty(b) => b.into_bucket()
+            };
+            bucket.next(); // wraps at old_capacity
+        }

-        assert_eq!(self.table.size(), old_size);
+        if let Some(dest) = destination {
+            replace(bucket.into_table(), dest);
+        }
     }

     /// Shrinks the capacity of the map as much as possible. It will drop
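
As a rough illustration of the cluster-skipping scan described in the comments of the hunk above, here is a small standalone sketch. It models the table as a plain Vec of optional hashes; `displacement` and `start_of_reinsertion` are hypothetical helper names invented for the example, not the `Bucket`/`RawTable` API from the diff.

/// Distance between the slot a hash ideally maps to and the slot it occupies,
/// wrapping around the table.
fn displacement(ideal: usize, actual: usize, cap: usize) -> usize {
    (actual + cap - ideal) % cap
}

/// Index of the first slot that sits at its ideal index. Starting reinsertion
/// there means a cluster that wrapped past the end of the table is revisited
/// only after the iteration itself wraps around, as in the diagram above.
fn start_of_reinsertion(table: &[Option<u64>]) -> usize {
    let cap = table.len();
    (0..cap)
        .find(|&i| match table[i] {
            Some(h) => displacement(h as usize % cap, i, cap) == 0,
            None => false, // a hole between clusters: keep scanning
        })
        .expect("a non-empty table has at least one slot at its ideal index")
}

fn main() {
    // Capacity 8. Slots 7, 0 and 1 hold hashes whose ideal index is 7 (the
    // cluster wrapped around the end); slots 3 and 4 form a second cluster.
    // ($ marks a full slot):  |$$_$$__$|
    let table: Vec<Option<u64>> = vec![
        Some(15), Some(23), None, Some(3), Some(11), None, None, Some(7),
    ];

    let start = start_of_reinsertion(&table);
    assert_eq!(start, 3); // the wrapped-around entries at slots 0 and 1 are skipped

    // Visiting `capacity` slots from `start`, wrapping at the end, reinserts
    // the cluster head at slot 7 before the entries it displaced at 0 and 1.
    let order: Vec<usize> = (0..table.len()).map(|k| (start + k) % table.len()).collect();
    assert_eq!(order, [3usize, 4, 5, 6, 7, 0, 1, 2]);
}

In the refactored hunk, the same walk then feeds each taken entry either into `destination` (a freshly allocated table that replaces `self.table` at the end) or back into the same table when the resize happened in place.
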
@@ -1884,8 +1879,8 @@ mod test_map {
     #[test]
     fn test_empty_entry() {
         let mut m: HashMap<int, bool> = HashMap::new();
-        assert!(m.entry(&0).is_err());
-        assert!(*m.entry(&0).get().unwrap_or_else(|e| e.insert(true)));
+        assert!(m.entry(0).is_err());
+        assert!(*m.entry(0).get().unwrap_or_else(|e| e.insert(true)));
         assert_eq!(m.len(), 1);
     }
