@@ -275,20 +275,20 @@ impl MemoryCellClocks {
     /// not used previously as atomic memory.
     fn load_acquire(
         &mut self,
-        clocks: &mut ThreadClockSet,
+        thread_clocks: &mut ThreadClockSet,
         index: VectorIdx,
     ) -> Result<(), DataRace> {
-        self.atomic_read_detect(clocks, index)?;
+        self.atomic_read_detect(thread_clocks, index)?;
         if let Some(atomic) = self.atomic() {
-            clocks.clock.join(&atomic.sync_vector);
+            thread_clocks.clock.join(&atomic.sync_vector);
         }
         Ok(())
     }

     /// Checks if the memory cell access is ordered with all prior atomic reads and writes
-    fn race_free_with_atomic(&self, clocks: &ThreadClockSet) -> bool {
+    fn race_free_with_atomic(&self, thread_clocks: &ThreadClockSet) -> bool {
         if let Some(atomic) = self.atomic() {
-            atomic.read_vector <= clocks.clock && atomic.write_vector <= clocks.clock
+            atomic.read_vector <= thread_clocks.clock && atomic.write_vector <= thread_clocks.clock
         } else {
             true
         }
@@ -299,81 +299,97 @@ impl MemoryCellClocks {
     /// not used previously as atomic memory.
     fn load_relaxed(
         &mut self,
-        clocks: &mut ThreadClockSet,
+        thread_clocks: &mut ThreadClockSet,
         index: VectorIdx,
     ) -> Result<(), DataRace> {
-        self.atomic_read_detect(clocks, index)?;
+        self.atomic_read_detect(thread_clocks, index)?;
         if let Some(atomic) = self.atomic() {
-            clocks.fence_acquire.join(&atomic.sync_vector);
+            thread_clocks.fence_acquire.join(&atomic.sync_vector);
         }
         Ok(())
     }

     /// Update the memory cell data-race tracking for atomic
     /// store release semantics.
-    fn store_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
-        self.atomic_write_detect(clocks, index)?;
+    fn store_release(
+        &mut self,
+        thread_clocks: &ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
+        self.atomic_write_detect(thread_clocks, index)?;
         let atomic = self.atomic_mut();
-        atomic.sync_vector.clone_from(&clocks.clock);
+        atomic.sync_vector.clone_from(&thread_clocks.clock);
         Ok(())
     }

     /// Update the memory cell data-race tracking for atomic
     /// store relaxed semantics.
-    fn store_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
-        self.atomic_write_detect(clocks, index)?;
+    fn store_relaxed(
+        &mut self,
+        thread_clocks: &ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
+        self.atomic_write_detect(thread_clocks, index)?;

         // The handling of release sequences was changed in C++20 and so
         // the code here is different to the paper since now all relaxed
         // stores block release sequences. The exception for same-thread
         // relaxed stores has been removed.
         let atomic = self.atomic_mut();
-        atomic.sync_vector.clone_from(&clocks.fence_release);
+        atomic.sync_vector.clone_from(&thread_clocks.fence_release);
         Ok(())
     }

     /// Update the memory cell data-race tracking for atomic
     /// store release semantics for RMW operations.
-    fn rmw_release(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
-        self.atomic_write_detect(clocks, index)?;
+    fn rmw_release(
+        &mut self,
+        thread_clocks: &ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
+        self.atomic_write_detect(thread_clocks, index)?;
         let atomic = self.atomic_mut();
-        atomic.sync_vector.join(&clocks.clock);
+        atomic.sync_vector.join(&thread_clocks.clock);
         Ok(())
     }

     /// Update the memory cell data-race tracking for atomic
     /// store relaxed semantics for RMW operations.
-    fn rmw_relaxed(&mut self, clocks: &ThreadClockSet, index: VectorIdx) -> Result<(), DataRace> {
-        self.atomic_write_detect(clocks, index)?;
+    fn rmw_relaxed(
+        &mut self,
+        thread_clocks: &ThreadClockSet,
+        index: VectorIdx,
+    ) -> Result<(), DataRace> {
+        self.atomic_write_detect(thread_clocks, index)?;
         let atomic = self.atomic_mut();
-        atomic.sync_vector.join(&clocks.fence_release);
+        atomic.sync_vector.join(&thread_clocks.fence_release);
         Ok(())
     }

     /// Detect data-races with an atomic read, caused by a non-atomic write that does
     /// not happen-before the atomic-read.
     fn atomic_read_detect(
         &mut self,
-        clocks: &ThreadClockSet,
+        thread_clocks: &ThreadClockSet,
         index: VectorIdx,
     ) -> Result<(), DataRace> {
-        log::trace!("Atomic read with vectors: {:#?} :: {:#?}", self, clocks);
+        log::trace!("Atomic read with vectors: {:#?} :: {:#?}", self, thread_clocks);
         let atomic = self.atomic_mut();
-        atomic.read_vector.set_at_index(&clocks.clock, index);
-        if self.write <= clocks.clock[self.write_index] { Ok(()) } else { Err(DataRace) }
+        atomic.read_vector.set_at_index(&thread_clocks.clock, index);
+        if self.write <= thread_clocks.clock[self.write_index] { Ok(()) } else { Err(DataRace) }
     }

     /// Detect data-races with an atomic write, either with a non-atomic read or with
     /// a non-atomic write.
     fn atomic_write_detect(
         &mut self,
-        clocks: &ThreadClockSet,
+        thread_clocks: &ThreadClockSet,
         index: VectorIdx,
     ) -> Result<(), DataRace> {
-        log::trace!("Atomic write with vectors: {:#?} :: {:#?}", self, clocks);
+        log::trace!("Atomic write with vectors: {:#?} :: {:#?}", self, thread_clocks);
         let atomic = self.atomic_mut();
-        atomic.write_vector.set_at_index(&clocks.clock, index);
-        if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
+        atomic.write_vector.set_at_index(&thread_clocks.clock, index);
+        if self.write <= thread_clocks.clock[self.write_index] && self.read <= thread_clocks.clock {
             Ok(())
         } else {
             Err(DataRace)
@@ -384,21 +400,21 @@ impl MemoryCellClocks {
     /// returns true if a data-race is detected.
     fn read_race_detect(
         &mut self,
-        clocks: &mut ThreadClockSet,
+        thread_clocks: &mut ThreadClockSet,
         index: VectorIdx,
         current_span: Span,
     ) -> Result<(), DataRace> {
-        log::trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, clocks);
+        log::trace!("Unsynchronized read with vectors: {:#?} :: {:#?}", self, thread_clocks);
         if !current_span.is_dummy() {
-            clocks.clock[index].span = current_span;
+            thread_clocks.clock[index].span = current_span;
         }
-        if self.write <= clocks.clock[self.write_index] {
+        if self.write <= thread_clocks.clock[self.write_index] {
             let race_free = if let Some(atomic) = self.atomic() {
-                atomic.write_vector <= clocks.clock
+                atomic.write_vector <= thread_clocks.clock
             } else {
                 true
             };
-            self.read.set_at_index(&clocks.clock, index);
+            self.read.set_at_index(&thread_clocks.clock, index);
             if race_free { Ok(()) } else { Err(DataRace) }
         } else {
             Err(DataRace)
@@ -409,22 +425,23 @@ impl MemoryCellClocks {
     /// returns true if a data-race is detected.
     fn write_race_detect(
         &mut self,
-        clocks: &mut ThreadClockSet,
+        thread_clocks: &mut ThreadClockSet,
         index: VectorIdx,
         write_type: WriteType,
         current_span: Span,
     ) -> Result<(), DataRace> {
-        log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, clocks);
+        log::trace!("Unsynchronized write with vectors: {:#?} :: {:#?}", self, thread_clocks);
         if !current_span.is_dummy() {
-            clocks.clock[index].span = current_span;
+            thread_clocks.clock[index].span = current_span;
         }
-        if self.write <= clocks.clock[self.write_index] && self.read <= clocks.clock {
+        if self.write <= thread_clocks.clock[self.write_index] && self.read <= thread_clocks.clock {
             let race_free = if let Some(atomic) = self.atomic() {
-                atomic.write_vector <= clocks.clock && atomic.read_vector <= clocks.clock
+                atomic.write_vector <= thread_clocks.clock
+                    && atomic.read_vector <= thread_clocks.clock
             } else {
                 true
             };
-            self.write = clocks.clock[index];
+            self.write = thread_clocks.clock[index];
             self.write_index = index;
             self.write_type = write_type;
             if race_free {
@@ -764,24 +781,24 @@ impl VClockAlloc {
     fn report_data_race<'tcx>(
         global: &GlobalState,
         thread_mgr: &ThreadManager<'_, '_>,
-        range: &MemoryCellClocks,
+        mem_clocks: &MemoryCellClocks,
        action: &str,
         is_atomic: bool,
         ptr_dbg: Pointer<AllocId>,
     ) -> InterpResult<'tcx> {
         let (current_index, current_clocks) = global.current_thread_state(thread_mgr);
         let write_clock;
-        let (other_action, other_thread, other_clock) = if range.write
-            > current_clocks.clock[range.write_index]
+        let (other_action, other_thread, other_clock) = if mem_clocks.write
+            > current_clocks.clock[mem_clocks.write_index]
         {
             // Convert the write action into the vector clock it
             // represents for diagnostic purposes.
-            write_clock = VClock::new_with_index(range.write_index, range.write);
-            (range.write_type.get_descriptor(), range.write_index, &write_clock)
-        } else if let Some(idx) = Self::find_gt_index(&range.read, &current_clocks.clock) {
-            ("Read", idx, &range.read)
+            write_clock = VClock::new_with_index(mem_clocks.write_index, mem_clocks.write);
+            (mem_clocks.write_type.get_descriptor(), mem_clocks.write_index, &write_clock)
+        } else if let Some(idx) = Self::find_gt_index(&mem_clocks.read, &current_clocks.clock) {
+            ("Read", idx, &mem_clocks.read)
         } else if !is_atomic {
-            if let Some(atomic) = range.atomic() {
+            if let Some(atomic) = mem_clocks.atomic() {
                 if let Some(idx) = Self::find_gt_index(&atomic.write_vector, &current_clocks.clock)
                 {
                     ("Atomic Store", idx, &atomic.write_vector)
@@ -832,10 +849,10 @@ impl VClockAlloc {
         thread_mgr: &ThreadManager<'_, '_>,
     ) -> bool {
         if global.race_detecting() {
-            let (_, clocks) = global.current_thread_state(thread_mgr);
+            let (_, thread_clocks) = global.current_thread_state(thread_mgr);
             let alloc_ranges = self.alloc_ranges.borrow();
-            for (_, range) in alloc_ranges.iter(range.start, range.size) {
-                if !range.race_free_with_atomic(&clocks) {
+            for (_, mem_clocks) in alloc_ranges.iter(range.start, range.size) {
+                if !mem_clocks.race_free_with_atomic(&thread_clocks) {
                     return false;
                 }
             }
@@ -857,16 +874,18 @@ impl VClockAlloc {
         let current_span = machine.current_span();
         let global = machine.data_race.as_ref().unwrap();
         if global.race_detecting() {
-            let (index, mut clocks) = global.current_thread_state_mut(&machine.threads);
+            let (index, mut thread_clocks) = global.current_thread_state_mut(&machine.threads);
             let mut alloc_ranges = self.alloc_ranges.borrow_mut();
-            for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) {
-                if let Err(DataRace) = range.read_race_detect(&mut clocks, index, current_span) {
-                    drop(clocks);
+            for (offset, mem_clocks) in alloc_ranges.iter_mut(range.start, range.size) {
+                if let Err(DataRace) =
+                    mem_clocks.read_race_detect(&mut thread_clocks, index, current_span)
+                {
+                    drop(thread_clocks);
                     // Report data-race.
                     return Self::report_data_race(
                         global,
                         &machine.threads,
-                        range,
+                        mem_clocks,
                         "Read",
                         false,
                         Pointer::new(alloc_id, offset),
@@ -890,17 +909,22 @@ impl VClockAlloc {
         let current_span = machine.current_span();
         let global = machine.data_race.as_mut().unwrap();
         if global.race_detecting() {
-            let (index, mut clocks) = global.current_thread_state_mut(&machine.threads);
-            for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) {
-                if let Err(DataRace) =
-                    range.write_race_detect(&mut clocks, index, write_type, current_span)
-                {
-                    drop(clocks);
+            let (index, mut thread_clocks) = global.current_thread_state_mut(&machine.threads);
+            for (offset, mem_clocks) in
+                self.alloc_ranges.get_mut().iter_mut(range.start, range.size)
+            {
+                if let Err(DataRace) = mem_clocks.write_race_detect(
+                    &mut thread_clocks,
+                    index,
+                    write_type,
+                    current_span,
+                ) {
+                    drop(thread_clocks);
                     // Report data-race
                     return Self::report_data_race(
                         global,
                         &machine.threads,
-                        range,
+                        mem_clocks,
                         write_type.get_descriptor(),
                         false,
                         Pointer::new(alloc_id, offset),
@@ -1125,16 +1149,17 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {
             data_race.maybe_perform_sync_operation(
                 &this.machine.threads,
                 current_span,
-                |index, mut clocks| {
-                    for (offset, range) in
+                |index, mut thread_clocks| {
+                    for (offset, mem_clocks) in
                         alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                     {
-                        if let Err(DataRace) = op(range, &mut clocks, index, atomic) {
-                            mem::drop(clocks);
+                        if let Err(DataRace) = op(mem_clocks, &mut thread_clocks, index, atomic)
+                        {
+                            mem::drop(thread_clocks);
                             return VClockAlloc::report_data_race(
                                 data_race,
                                 &this.machine.threads,
-                                range,
+                                mem_clocks,
                                 description,
                                 true,
                                 Pointer::new(alloc_id, offset),
@@ -1150,13 +1175,14 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriInterpCxExt<'mir, 'tcx> {

             // Log changes to atomic memory.
             if log::log_enabled!(log::Level::Trace) {
-                for (_offset, range) in alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
+                for (_offset, mem_clocks) in
+                    alloc_meta.alloc_ranges.borrow().iter(base_offset, size)
                 {
                     log::trace!(
                         "Updated atomic memory({:?}, size={}) to {:#?}",
                         place.ptr,
                         size.bytes(),
-                        range.atomic_ops
+                        mem_clocks.atomic_ops
                     );
                 }
             }
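
The renames above are behavior-preserving, so the vector-clock discipline these functions implement is easiest to see in isolation: a release store publishes the writing thread's clock into the cell's `sync_vector`, an acquire load joins that vector into the reading thread's clock, and a race check asks whether the last write is ordered before (`<=`) the current thread's clock. A minimal standalone sketch of that mechanism follows; the types are hypothetical simplifications, not Miri's actual `VClock`/`ThreadClockSet` API.

// Toy vector clocks illustrating the release/acquire joins and the
// happens-before checks performed by `store_release`, `load_acquire`,
// and `read_race_detect` in the diff above. All types here are
// simplified stand-ins, not Miri's real data structures.

#[derive(Clone)]
struct VClock(Vec<u32>);

impl VClock {
    fn new(n_threads: usize) -> Self {
        VClock(vec![0; n_threads])
    }
    /// Pointwise maximum (the `join` applied on acquire loads).
    fn join(&mut self, other: &VClock) {
        for (a, b) in self.0.iter_mut().zip(&other.0) {
            *a = (*a).max(*b);
        }
    }
    /// Partial order: `self` happens-before-or-equals `other`.
    fn le(&self, other: &VClock) -> bool {
        self.0.iter().zip(&other.0).all(|(a, b)| a <= b)
    }
}

fn main() {
    let mut writer = VClock::new(2); // thread 0's clock
    let mut reader = VClock::new(2); // thread 1's clock

    writer.0[0] += 1; // thread 0 performs a non-atomic write first

    // store_release: the cell's sync_vector receives the writer's clock.
    let sync_vector = writer.clone();

    // Before any acquire load, the non-atomic write is unordered with
    // a read on thread 1: the writer's clock is not <= the reader's.
    assert!(!writer.le(&reader), "race: write not ordered before read");

    // load_acquire: join the published clock into the reader's clock,
    // establishing happens-before with the writer's earlier accesses.
    reader.join(&sync_vector);
    assert!(writer.le(&reader), "no race after release/acquire pairing");
}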