@@ -450,9 +450,7 @@ impl<K, V> RawTable<K, V> {
450
450
} ;
451
451
}
452
452
453
- let elem_size = size_of :: < ( K , V ) > ( ) + size_of :: < u64 > ( ) ;
454
- let size = capacity. checked_mul ( elem_size) . expect ( "capacity overflow" ) ;
455
- let hashes = allocate ( size, align :: < K , V > ( ) ) ;
453
+ let hashes = allocate ( checked_size_generic :: < K , V > ( capacity) , align :: < K , V > ( ) ) ;
456
454
if hashes. is_null ( ) { :: alloc:: oom ( ) }
457
455
458
456
RawTable {
@@ -483,25 +481,39 @@ impl<K, V> RawTable<K, V> {
483
481
}
484
482
485
483
pub fn grow_inplace ( & mut self , capacity : uint ) -> bool {
486
- if self . middle . 0 . is_null ( ) || capacity < self . capacity {
484
+ assert ! ( capacity. is_power_of_two( ) ) ;
485
+ assert ! ( capacity >= self . capacity) ;
486
+
487
+ if self . middle . 0 . is_null ( ) {
487
488
return false ;
488
489
}
489
490
490
- let size = self . capacity * ( size_of :: < u64 > ( ) + size_of :: < ( K , V ) > ( ) ) ;
491
- let new_size = ( size_of :: < u64 > ( ) + size_of :: < ( K , V ) > ( ) ) . checked_mul ( capacity)
492
- . expect ( "capacity overflow" ) ;
491
+ let new_size = checked_size_generic :: < K , V > ( capacity) ;
492
+
493
493
unsafe {
494
494
let ptr = self . middle . 0 . offset ( -( self . capacity as isize ) ) as * mut u8 ;
495
495
let is_inplace = reallocate_inplace ( ptr,
496
- size ,
496
+ size_generic :: < K , V > ( self . capacity ) ,
497
497
new_size,
498
498
align :: < K , V > ( ) ) >= new_size;
499
499
500
500
if is_inplace {
501
- let hashes = self . middle . 0 . offset ( ( capacity - self . capacity ) as isize ) as * mut Option < SafeHash > ;
502
- copy_memory ( hashes, self . middle . 0 as * const Option < SafeHash > , self . capacity ) ;
501
+ let cap_diff = ( capacity - self . capacity ) as isize ;
502
+ let hashes = self . middle . 0 . offset ( cap_diff) as * mut Option < SafeHash > ;
503
+ // Copy the array of hashes. Maybe it's already in cache.
504
+ if size_of :: < ( K , V ) > ( ) >= size_of :: < Option < SafeHash > > ( ) {
505
+ // The regions of memory occupied by old and new hash arrays are disjoint.
506
+ // before: [KVKVKVKV|h h h h ]
507
+ // after: [KVKVKVKV|KVKVKVKV|h h h h h h h h ]
508
+ copy_nonoverlapping_memory ( hashes, self . middle . 0 as * const _ , self . capacity ) ;
509
+ } else {
510
+ // before: [KVKVKVKV|h h |h h ]
511
+ // after: [KVKVKVKV|KVKVKVKV|h h h h h h h h ]
512
+ copy_memory ( hashes, self . middle . 0 as * const _ , self . capacity ) ;
513
+ }
503
514
zero_memory ( hashes. offset ( self . capacity as int ) , capacity - self . capacity ) ;
504
- self . middle = Unique ( self . middle . 0 . offset ( ( capacity - self . capacity ) as isize ) ) ;
515
+
516
+ self . middle = Unique ( self . middle . 0 . offset ( cap_diff) ) ;
505
517
self . capacity = capacity;
506
518
}
507
519
@@ -536,6 +548,42 @@ impl<K, V> RawTable<K, V> {
536
548
}
537
549
}
538
550
551
/// Rounds up to a multiple of a power of two. Returns the closest multiple
/// of `target_alignment` that is higher or equal to `unrounded`.
///
/// # Panics
///
/// Panics if `target_alignment` is not a power of two.
fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
    assert!(target_alignment.is_power_of_two());
    // Adding `target_alignment - 1` pushes any non-multiple past the next
    // boundary; masking off the low bits then snaps down onto that boundary.
    // NOTE(review): `unrounded + target_alignment - 1` can wrap for inputs
    // near usize::MAX; callers here pass sizes already bounded by
    // `checked_size_generic`.
    (unrounded + target_alignment - 1) & !(target_alignment - 1)
}
561
+
562
#[test]
fn test_rounding() {
    // Each case pairs an input with its expected rounding up to a
    // multiple of 4.
    let cases = [(0, 0), (1, 4), (2, 4), (3, 4), (4, 4), (5, 8)];
    for &(unrounded, rounded) in cases.iter() {
        assert_eq!(round_up_to_next(unrounded, 4), rounded);
    }
}
571
+
572
+ #[ inline]
573
+ fn size_generic < K , V > ( capacity : usize ) -> usize {
574
+ let hash_align = min_align_of :: < Option < SafeHash > > ( ) ;
575
+ round_up_to_next ( size_of :: < ( K , V ) > ( ) * capacity, hash_align) + size_of :: < SafeHash > ( ) * capacity
576
+ }
577
+
578
+ fn checked_size_generic < K , V > ( capacity : usize ) -> usize {
579
+ let size = size_generic :: < K , V > ( capacity) ;
580
+ let elem_size = size_of :: < ( K , V ) > ( ) + size_of :: < SafeHash > ( ) ;
581
+ assert ! ( size >= capacity. checked_mul( elem_size) . expect( "capacity overflow" ) ,
582
+ "capacity overflow" ) ;
583
+ size
584
+ }
585
+
586
/// Alignment required for the table's single allocation: the stricter of
/// the (K, V) pair alignment and the alignment of the u64 hashes stored
/// alongside them.
#[inline]
fn align<K, V>() -> usize {
    let pair_align = mem::min_align_of::<(K, V)>();
    let hash_align = mem::min_align_of::<u64>();
    cmp::max(pair_align, hash_align)
}
@@ -579,7 +627,6 @@ impl<K, V, M: Clone> Clone for RawFullBuckets<K, V, M> {
579
627
raw : self . raw ,
580
628
hashes_end : self . hashes_end ,
581
629
table : self . table . clone ( ) ,
582
- // marker: marker::ContravariantLifetime,
583
630
}
584
631
}
585
632
}
@@ -693,11 +740,9 @@ impl<K, V> Drop for RawTable<K, V> {
693
740
}
694
741
}
695
742
696
- let size = self . capacity * ( size_of :: < u64 > ( ) + size_of :: < ( K , V ) > ( ) ) ;
697
-
698
743
unsafe {
699
744
let ptr = self . middle . 0 . offset ( -( self . capacity as isize ) ) as * mut u8 ;
700
- deallocate ( ptr, size , align :: < K , V > ( ) ) ;
745
+ deallocate ( ptr, size_generic :: < K , V > ( self . capacity ) , align :: < K , V > ( ) ) ;
701
746
}
702
747
}
703
748
}
0 commit comments