
Commit eda6780

copy_nonoverlapping_memory for hashes, round_up_to_next
1 parent 407572c commit eda6780


src/libstd/collections/hash/table.rs

Lines changed: 60 additions & 15 deletions
@@ -450,9 +450,7 @@ impl<K, V> RawTable<K, V> {
             };
         }
 
-        let elem_size = size_of::<(K, V)>() + size_of::<u64>();
-        let size = capacity.checked_mul(elem_size).expect("capacity overflow");
-        let hashes = allocate(size, align::<K, V>());
+        let hashes = allocate(checked_size_generic::<K, V>(capacity), align::<K, V>());
         if hashes.is_null() { ::alloc::oom() }
 
         RawTable {
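The size passed to allocate is now computed by checked_size_generic (added further down in this diff), which keeps the overflow check that the removed lines performed inline. As a rough illustration of that pattern only, here is a small modern-Rust sketch; the function name checked_alloc_size and the concrete key/value types are placeholders, not names from the commit:

use std::mem::size_of;

// Hypothetical stand-in for the table's per-entry footprint:
// one (K, V) pair plus one 64-bit hash slot.
fn checked_alloc_size<K, V>(capacity: usize) -> usize {
    let elem_size = size_of::<(K, V)>() + size_of::<u64>();
    // Panic on overflow instead of silently wrapping and under-allocating.
    capacity.checked_mul(elem_size).expect("capacity overflow")
}

fn main() {
    // 1024 (u64, u64) entries: 1024 * (16 + 8) = 24576 bytes.
    assert_eq!(checked_alloc_size::<u64, u64>(1024), 24576);
}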
@@ -483,25 +481,39 @@ impl<K, V> RawTable<K, V> {
     }
 
     pub fn grow_inplace(&mut self, capacity: uint) -> bool {
-        if self.middle.0.is_null() || capacity < self.capacity {
+        assert!(capacity.is_power_of_two());
+        assert!(capacity >= self.capacity);
+
+        if self.middle.0.is_null() {
             return false;
         }
 
-        let size = self.capacity * (size_of::<u64>() + size_of::<(K, V)>());
-        let new_size = (size_of::<u64>() + size_of::<(K, V)>()).checked_mul(capacity)
-                                                               .expect("capacity overflow");
+        let new_size = checked_size_generic::<K, V>(capacity);
+
         unsafe {
             let ptr = self.middle.0.offset(-(self.capacity as isize)) as *mut u8;
             let is_inplace = reallocate_inplace(ptr,
-                                                size,
+                                                size_generic::<K, V>(self.capacity),
                                                 new_size,
                                                 align::<K, V>()) >= new_size;
 
             if is_inplace {
-                let hashes = self.middle.0.offset((capacity - self.capacity) as isize) as *mut Option<SafeHash>;
-                copy_memory(hashes, self.middle.0 as *const Option<SafeHash>, self.capacity);
+                let cap_diff = (capacity - self.capacity) as isize;
+                let hashes = self.middle.0.offset(cap_diff) as *mut Option<SafeHash>;
+                // Copy the array of hashes. Maybe it's already in cache.
+                if size_of::<(K, V)>() >= size_of::<Option<SafeHash>>() {
+                    // The regions of memory occupied by old and new hash arrays are disjoint.
+                    // before: [KVKVKVKV|h h h h ]
+                    // after:  [KVKVKVKV|KVKVKVKV|h h h h h h h h ]
+                    copy_nonoverlapping_memory(hashes, self.middle.0 as *const _, self.capacity);
+                } else {
+                    // before: [KVKVKVKV|h h |h h ]
+                    // after:  [KVKVKVKV|KVKVKVKV|h h h h h h h h ]
+                    copy_memory(hashes, self.middle.0 as *const _, self.capacity);
+                }
                 zero_memory(hashes.offset(self.capacity as int), capacity - self.capacity);
-                self.middle = Unique(self.middle.0.offset((capacity - self.capacity) as isize));
+
+                self.middle = Unique(self.middle.0.offset(cap_diff));
                 self.capacity = capacity;
             }
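The key change here: the hash array is copied from its old position (right after the old pair array) to its new position (right after the enlarged pair array), and when a (K, V) pair is at least as large as a hash slot the two regions cannot overlap, so the cheaper non-overlapping copy is safe. A minimal modern-Rust sketch of that byte-range argument, with made-up sizes standing in for size_of::<(K, V)>() and size_of::<Option<SafeHash>>() and ignoring the alignment padding the real code adds:

/// True if the old and new hash arrays occupy disjoint byte ranges when a
/// table of `old_cap` entries grows in place to `new_cap` entries.
/// Offsets are measured from the start of the pair array.
fn hash_arrays_disjoint(pair_size: usize, hash_size: usize,
                        old_cap: usize, new_cap: usize) -> bool {
    let old_hashes_start = old_cap * pair_size;
    let old_hashes_end = old_hashes_start + old_cap * hash_size;
    let new_hashes_start = new_cap * pair_size;
    // Disjoint iff the new array begins at or past the end of the old one.
    new_hashes_start >= old_hashes_end
}

fn main() {
    // Doubling 4 -> 8 entries with pairs at least as large as a hash slot:
    // a copy_nonoverlapping-style copy is fine.
    assert!(hash_arrays_disjoint(16, 8, 4, 8));
    // Pairs smaller than a hash slot: the ranges overlap, so an
    // overlap-tolerant copy (like copy_memory) is still required.
    assert!(!hash_arrays_disjoint(4, 8, 4, 8));
}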

@@ -536,6 +548,42 @@ impl<K, V> RawTable<K, V> {
     }
 }
 
+/// Rounds up to a multiple of a power of two. Returns the closest multiple
+/// of `target_alignment` that is higher or equal to `unrounded`.
+///
+/// # Panics
+///
+/// Panics if `target_alignment` is not a power of two.
+fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
+    assert!(target_alignment.is_power_of_two());
+    (unrounded + target_alignment - 1) & !(target_alignment - 1)
+}
+
+#[test]
+fn test_rounding() {
+    assert_eq!(round_up_to_next(0, 4), 0);
+    assert_eq!(round_up_to_next(1, 4), 4);
+    assert_eq!(round_up_to_next(2, 4), 4);
+    assert_eq!(round_up_to_next(3, 4), 4);
+    assert_eq!(round_up_to_next(4, 4), 4);
+    assert_eq!(round_up_to_next(5, 4), 8);
+}
+
+#[inline]
+fn size_generic<K, V>(capacity: usize) -> usize {
+    let hash_align = min_align_of::<Option<SafeHash>>();
+    round_up_to_next(size_of::<(K, V)>() * capacity, hash_align) + size_of::<SafeHash>() * capacity
+}
+
+fn checked_size_generic<K, V>(capacity: usize) -> usize {
+    let size = size_generic::<K, V>(capacity);
+    let elem_size = size_of::<(K, V)>() + size_of::<SafeHash>();
+    assert!(size >= capacity.checked_mul(elem_size).expect("capacity overflow"),
+            "capacity overflow");
+    size
+}
+
+#[inline]
 fn align<K, V>() -> usize {
     cmp::max(mem::min_align_of::<(K, V)>(), mem::min_align_of::<u64>())
 }
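As a rough illustration of what size_generic computes, here is a modern-Rust sketch of the same arithmetic. It uses align_of in place of the pre-1.0 min_align_of and a plain u64 hash slot in place of Option<SafeHash>, so the concrete numbers are assumptions rather than the commit's exact layout:

use std::mem::{align_of, size_of};

/// Round `unrounded` up to the next multiple of `target_alignment`
/// (which must be a power of two).
fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
    assert!(target_alignment.is_power_of_two());
    (unrounded + target_alignment - 1) & !(target_alignment - 1)
}

/// One allocation holding `capacity` (K, V) pairs followed by `capacity`
/// hash slots, with the hash array placed on a `HashSlot` alignment boundary.
fn size_generic<K, V, HashSlot>(capacity: usize) -> usize {
    let pairs = size_of::<(K, V)>() * capacity;
    round_up_to_next(pairs, align_of::<HashSlot>()) + size_of::<HashSlot>() * capacity
}

fn main() {
    // (u8, u8) pairs: 3 entries use 6 bytes, rounded up to 8 so the u64
    // hash array that follows starts on its alignment; then 3 * 8 hash bytes.
    assert_eq!(size_generic::<u8, u8, u64>(3), 8 + 24);
}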
@@ -579,7 +627,6 @@ impl<K, V, M: Clone> Clone for RawFullBuckets<K, V, M> {
             raw: self.raw,
             hashes_end: self.hashes_end,
             table: self.table.clone(),
-            // marker: marker::ContravariantLifetime,
         }
     }
 }
@@ -693,11 +740,9 @@ impl<K, V> Drop for RawTable<K, V> {
             }
         }
 
-        let size = self.capacity * (size_of::<u64>() + size_of::<(K, V)>());
-
         unsafe {
             let ptr = self.middle.0.offset(-(self.capacity as isize)) as *mut u8;
-            deallocate(ptr, size, align::<K, V>());
+            deallocate(ptr, size_generic::<K, V>(self.capacity), align::<K, V>());
         }
     }
 }
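The Drop change matters because the size handed to deallocate must describe the same block that was allocated; routing both sides through size_generic keeps them from drifting apart. A minimal modern-Rust sketch of the same invariant using std::alloc, with a placeholder size and alignment rather than the table's real layout:

use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

fn main() {
    // The layout used to allocate must be repeated verbatim when freeing;
    // computing it from a single helper avoids mismatched alloc/dealloc sizes.
    let layout = Layout::from_size_align(256, 8).expect("bad layout");
    unsafe {
        let ptr = alloc(layout);
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        // ... use the block ...
        dealloc(ptr, layout);
    }
}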
