@@ -345,18 +345,6 @@ impl<D: Deps> DepGraphData<D> {
         task: fn(Ctxt, A) -> R,
         hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
     ) -> (R, DepNodeIndex) {
-        // If the following assertion triggers, it can have two reasons:
-        // 1. Something is wrong with DepNode creation, either here or
-        //    in `DepGraph::try_mark_green()`.
-        // 2. Two distinct query keys get mapped to the same `DepNode`
-        //    (see for example #48923).
-        assert!(
-            !self.dep_node_exists(&key),
-            "forcing query with already existing `DepNode`\n\
-             - query-key: {arg:?}\n\
-             - dep-node: {key:?}"
-        );
-
         let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
         let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
             (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
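The assertion removed above was the last user of `dep_node_exists` on the forcing path; what remains is the plain dispatch between eval-always and tracked tasks. The following standalone sketch illustrates the shape of that dispatch; all names here are simplified stand-ins, not rustc's real types.

```rust
// A minimal sketch of the dispatch that remains once the assertion is gone:
// eval-always tasks record no edges, every other task runs with an edge
// collector. `TaskDepsRef` and `with_deps` are illustrative stand-ins.
enum TaskDepsRef<'a> {
    EvalAlways,                // no dependencies are recorded
    Allow(&'a mut Vec<usize>), // stand-in for the real edge collector
}

fn with_deps<R>(deps: TaskDepsRef<'_>, task: impl FnOnce() -> R) -> R {
    // rustc stashes `deps` in a thread-local so reads performed inside
    // `task` can push edges; this sketch only runs the task.
    let _ = deps;
    task()
}

fn main() {
    let is_eval_always = false;
    let mut edges = Vec::new();
    let result = if is_eval_always {
        with_deps(TaskDepsRef::EvalAlways, || 2 + 2)
    } else {
        with_deps(TaskDepsRef::Allow(&mut edges), || 2 + 2)
    };
    assert_eq!(result, 4);
}
```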
@@ -443,7 +431,31 @@ impl<D: Deps> DepGraphData<D> {
                     hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                 };

-                self.current.intern_new_node(target_dep_node, task_deps, Fingerprint::ZERO)
+                // The DepNodes generated by the process above are not unique. 2 queries could
+                // have exactly the same dependencies. However, deserialization does not handle
+                // duplicated nodes, so we do the deduplication here directly.
+                //
+                // As anonymous nodes are a small quantity compared to the full dep-graph, the
+                // memory impact of this `anon_node_to_index` map remains tolerable, and helps
+                // us avoid useless growth of the graph with almost-equivalent nodes.
+                match self
+                    .current
+                    .anon_node_to_index
+                    .get_shard_by_value(&target_dep_node)
+                    .lock()
+                    .entry(target_dep_node)
+                {
+                    Entry::Occupied(entry) => *entry.get(),
+                    Entry::Vacant(entry) => {
+                        let dep_node_index = self.current.intern_new_node(
+                            target_dep_node,
+                            task_deps,
+                            Fingerprint::ZERO,
+                        );
+                        entry.insert(dep_node_index);
+                        dep_node_index
+                    }
+                }
             }
         };
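This hunk is the core of the change: instead of unconditionally interning a fresh node, the anonymous path now does a get-or-intern through a sharded map. Below is a self-contained sketch of that pattern built on std types rather than rustc's `Sharded<FxHashMap<..>>`; the shard count, key type, and index type are assumptions chosen for illustration.

```rust
use std::collections::HashMap;
use std::collections::hash_map::{DefaultHasher, Entry};
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

// Stand-in for `Sharded<FxHashMap<DepNode, DepNodeIndex>>`.
struct ShardedMap<const N: usize> {
    shards: [Mutex<HashMap<u64, u32>>; N],
}

impl<const N: usize> ShardedMap<N> {
    fn new() -> Self {
        Self { shards: std::array::from_fn(|_| Mutex::new(HashMap::new())) }
    }

    /// Mirrors the `get_shard_by_value(..).lock().entry(..)` dance: pick the
    /// shard by the key's hash, then intern only if the key is new.
    fn get_or_intern(&self, node: u64, intern: impl FnOnce() -> u32) -> u32 {
        let mut h = DefaultHasher::new();
        node.hash(&mut h);
        let shard = &self.shards[h.finish() as usize % N];
        match shard.lock().unwrap().entry(node) {
            Entry::Occupied(e) => *e.get(),
            Entry::Vacant(e) => {
                let index = intern(); // called while holding the shard lock
                e.insert(index);
                index
            }
        }
    }
}

fn main() {
    let map: ShardedMap<4> = ShardedMap::new();
    let mut next = 0u32;
    let mut fresh = || { let i = next; next += 1; i };
    let a = map.get_or_intern(42, &mut fresh);
    let b = map.get_or_intern(42, &mut fresh);
    assert_eq!(a, b); // the duplicate anonymous node was deduplicated
}
```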
@@ -607,20 +619,6 @@ impl<D: Deps> DepGraph<D> {
 }

 impl<D: Deps> DepGraphData<D> {
-    #[inline]
-    fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
-        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
-            self.current.prev_index_to_index.lock()[prev_index]
-        } else {
-            self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
-        }
-    }
-
-    #[inline]
-    fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
-        self.dep_node_index_of_opt(dep_node).is_some()
-    }
-
     fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
         if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
             self.colors.get(prev_index)
@@ -653,11 +651,6 @@ impl<D: Deps> DepGraphData<D> {
 }

 impl<D: Deps> DepGraph<D> {
-    #[inline]
-    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
-        self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
-    }
-
     /// Checks whether a previous work product exists for `v` and, if
     /// so, return the path that leads to it. Used to skip doing work.
     pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
@@ -838,10 +831,7 @@ impl<D: Deps> DepGraphData<D> {
         let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

         #[cfg(not(parallel_compiler))]
-        {
-            debug_assert!(!self.dep_node_exists(dep_node));
-            debug_assert!(self.colors.get(prev_dep_node_index).is_none());
-        }
+        debug_assert!(self.colors.get(prev_dep_node_index).is_none());

         // We never try to mark eval_always nodes as green
         debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
@@ -1038,24 +1028,24 @@ rustc_index::newtype_index! {
 /// largest in the compiler.
 ///
 /// For this reason, we avoid storing `DepNode`s more than once as map
-/// keys. The `new_node_to_index` map only contains nodes not in the previous
+/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
 /// graph, and we map nodes in the previous graph to indices via a two-step
 /// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
 /// and the `prev_index_to_index` vector (which is more compact and faster than
 /// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
 ///
-/// This struct uses three locks internally. The `data`, `new_node_to_index`,
+/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
 /// and `prev_index_to_index` fields are locked separately. Operations that take
 /// a `DepNodeIndex` typically just access the `data` field.
 ///
 /// We only need to manipulate at most two locks simultaneously:
-/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
-/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
+/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
+/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
 /// first, and `data` second.
 pub(super) struct CurrentDepGraph<D: Deps> {
     encoder: GraphEncoder<D>,
-    new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
     prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
+    anon_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,

     /// This is used to verify that fingerprints do not change between the creation of a node
     /// and its recomputation.
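The updated doc comment keeps the existing lock-ordering discipline and only renames the map. A toy illustration of that discipline follows, with plain `Mutex`es standing in for rustc's `Sharded` and `Lock`: any thread that needs both locks takes the node-to-index lock first and `data` second, so there is a single global order and no lock-order-inversion deadlock.

```rust
use std::sync::Mutex;

// Toy stand-in for `CurrentDepGraph`'s locking discipline, not rustc code:
// the node-to-index map and the per-node data are guarded by separate locks.
struct Graph {
    anon_node_to_index: Mutex<Vec<(u64, u32)>>, // stand-in for the sharded map
    data: Mutex<Vec<u64>>,                      // stand-in for per-node data
}

impl Graph {
    fn intern(&self, node: u64) -> u32 {
        // Documented order: acquire the map lock first...
        let mut map = self.anon_node_to_index.lock().unwrap();
        // ...and `data` second. Every path that needs both locks uses this
        // same order, which is what makes the ordering deadlock-free.
        let mut data = self.data.lock().unwrap();
        let index = data.len() as u32;
        data.push(node);
        map.push((node, index));
        index
    }
}

fn main() {
    let g = Graph { anon_node_to_index: Mutex::new(Vec::new()), data: Mutex::new(Vec::new()) };
    assert_eq!(g.intern(7), 0);
    assert_eq!(g.intern(8), 1);
}
```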
@@ -1123,7 +1113,7 @@ impl<D: Deps> CurrentDepGraph<D> {
                 profiler,
                 previous,
             ),
-            new_node_to_index: Sharded::new(|| {
+            anon_node_to_index: Sharded::new(|| {
                 FxHashMap::with_capacity_and_hasher(
                     new_node_count_estimate / sharded::shards(),
                     Default::default(),
@@ -1158,14 +1148,7 @@ impl<D: Deps> CurrentDepGraph<D> {
         edges: EdgesVec,
         current_fingerprint: Fingerprint,
     ) -> DepNodeIndex {
-        let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
-            Entry::Occupied(entry) => *entry.get(),
-            Entry::Vacant(entry) => {
-                let dep_node_index = self.encoder.send(key, current_fingerprint, edges);
-                entry.insert(dep_node_index);
-                dep_node_index
-            }
-        };
+        let dep_node_index = self.encoder.send(key, current_fingerprint, edges);

         #[cfg(debug_assertions)]
         self.record_edge(dep_node_index, key, current_fingerprint);
@@ -1235,8 +1218,6 @@ impl<D: Deps> CurrentDepGraph<D> {
         prev_graph: &SerializedDepGraph,
         prev_index: SerializedDepNodeIndex,
     ) -> DepNodeIndex {
-        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
-
         let mut prev_index_to_index = self.prev_index_to_index.lock();

         match prev_index_to_index[prev_index] {
@@ -1254,19 +1235,6 @@ impl<D: Deps> CurrentDepGraph<D> {
             }
         }
     }
-
-    #[inline]
-    fn debug_assert_not_in_new_nodes(
-        &self,
-        prev_graph: &SerializedDepGraph,
-        prev_index: SerializedDepNodeIndex,
-    ) {
-        let node = &prev_graph.index_to_node(prev_index);
-        debug_assert!(
-            !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
-            "node from previous graph present in new node collection"
-        );
-    }
 }

 #[derive(Debug, Clone, Copy)]
@@ -1388,7 +1356,7 @@ fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex)

     if dep_node.is_none() {
         // Try to find it among the new nodes
-        for shard in data.current.new_node_to_index.lock_shards() {
+        for shard in data.current.anon_node_to_index.lock_shards() {
             if let Some((node, _)) = shard.iter().find(|(_, index)| **index == dep_node_index) {
                 dep_node = Some(*node);
                 break;
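With `new_node_to_index` gone, this error path can only recover a `DepNode` for a given index by scanning the anonymous-node shards. A small self-contained sketch of that reverse lookup, using `Mutex<HashMap<..>>` shards as stand-ins for rustc's `Sharded<FxHashMap<DepNode, DepNodeIndex>>`; the linear scan is acceptable because this code only runs on the panic path.

```rust
use std::collections::HashMap;
use std::sync::Mutex;

// Sketch of the reverse lookup: given a node index, walk every shard and
// return the key of the entry whose value matches.
fn find_node_for_index(shards: &[Mutex<HashMap<u64, u32>>], needle: u32) -> Option<u64> {
    for shard in shards {
        let shard = shard.lock().unwrap();
        if let Some((node, _)) = shard.iter().find(|(_, index)| **index == needle) {
            return Some(*node);
        }
    }
    None
}

fn main() {
    let shards = [
        Mutex::new(HashMap::from([(10u64, 0u32)])),
        Mutex::new(HashMap::from([(11, 1)])),
    ];
    assert_eq!(find_node_for_index(&shards, 1), Some(11));
    assert_eq!(find_node_for_index(&shards, 5), None);
}
```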