Skip to content

Commit 052fdf8

Browse files
committed
Only use the new node hashmap for anonymous nodes.
1 parent 563203f commit 052fdf8

File tree

2 files changed

+34
-72
lines changed
  • compiler

2 files changed

+34
-72
lines changed

compiler/rustc_codegen_ssa/src/base.rs

Lines changed: 0 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1056,12 +1056,6 @@ pub fn determine_cgu_reuse<'tcx>(tcx: TyCtxt<'tcx>, cgu: &CodegenUnit<'tcx>) ->
10561056
// know that later). If we are not doing LTO, there is only one optimized
10571057
// version of each module, so we re-use that.
10581058
let dep_node = cgu.codegen_dep_node(tcx);
1059-
assert!(
1060-
!tcx.dep_graph.dep_node_exists(&dep_node),
1061-
"CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
1062-
cgu.name()
1063-
);
1064-
10651059
if tcx.try_mark_green(&dep_node) {
10661060
// We can re-use either the pre- or the post-thinlto state. If no LTO is
10671061
// being performed then we can use post-LTO artifacts, otherwise we must

compiler/rustc_query_system/src/dep_graph/graph.rs

Lines changed: 34 additions & 66 deletions
Original file line numberDiff line numberDiff line change
@@ -345,18 +345,6 @@ impl<D: Deps> DepGraphData<D> {
345345
task: fn(Ctxt, A) -> R,
346346
hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
347347
) -> (R, DepNodeIndex) {
348-
// If the following assertion triggers, it can have two reasons:
349-
// 1. Something is wrong with DepNode creation, either here or
350-
// in `DepGraph::try_mark_green()`.
351-
// 2. Two distinct query keys get mapped to the same `DepNode`
352-
// (see for example #48923).
353-
assert!(
354-
!self.dep_node_exists(&key),
355-
"forcing query with already existing `DepNode`\n\
356-
- query-key: {arg:?}\n\
357-
- dep-node: {key:?}"
358-
);
359-
360348
let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
361349
let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
362350
(with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
@@ -443,7 +431,31 @@ impl<D: Deps> DepGraphData<D> {
443431
hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
444432
};
445433

446-
self.current.intern_new_node(target_dep_node, task_deps, Fingerprint::ZERO)
434+
// The DepNodes generated by the process above are not unique. 2 queries could
435+
// have exactly the same dependencies. However, deserialization does not handle
436+
// duplicated nodes, so we do the deduplication here directly.
437+
//
438+
// As anonymous nodes are a small quantity compared to the full dep-graph, the
439+
// memory impact of this `anon_node_to_index` map remains tolerable, and helps
440+
// us avoid useless growth of the graph with almost-equivalent nodes.
441+
match self
442+
.current
443+
.anon_node_to_index
444+
.get_shard_by_value(&target_dep_node)
445+
.lock()
446+
.entry(target_dep_node)
447+
{
448+
Entry::Occupied(entry) => *entry.get(),
449+
Entry::Vacant(entry) => {
450+
let dep_node_index = self.current.intern_new_node(
451+
target_dep_node,
452+
task_deps,
453+
Fingerprint::ZERO,
454+
);
455+
entry.insert(dep_node_index);
456+
dep_node_index
457+
}
458+
}
447459
}
448460
};
449461

@@ -607,20 +619,6 @@ impl<D: Deps> DepGraph<D> {
607619
}
608620

609621
impl<D: Deps> DepGraphData<D> {
610-
#[inline]
611-
fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
612-
if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
613-
self.current.prev_index_to_index.lock()[prev_index]
614-
} else {
615-
self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
616-
}
617-
}
618-
619-
#[inline]
620-
fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
621-
self.dep_node_index_of_opt(dep_node).is_some()
622-
}
623-
624622
fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
625623
if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
626624
self.colors.get(prev_index)
@@ -653,11 +651,6 @@ impl<D: Deps> DepGraphData<D> {
653651
}
654652

655653
impl<D: Deps> DepGraph<D> {
656-
#[inline]
657-
pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
658-
self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
659-
}
660-
661654
/// Checks whether a previous work product exists for `v` and, if
662655
/// so, return the path that leads to it. Used to skip doing work.
663656
pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
@@ -838,10 +831,7 @@ impl<D: Deps> DepGraphData<D> {
838831
let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
839832

840833
#[cfg(not(parallel_compiler))]
841-
{
842-
debug_assert!(!self.dep_node_exists(dep_node));
843-
debug_assert!(self.colors.get(prev_dep_node_index).is_none());
844-
}
834+
debug_assert!(self.colors.get(prev_dep_node_index).is_none());
845835

846836
// We never try to mark eval_always nodes as green
847837
debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
@@ -1038,24 +1028,24 @@ rustc_index::newtype_index! {
10381028
/// largest in the compiler.
10391029
///
10401030
/// For this reason, we avoid storing `DepNode`s more than once as map
1041-
/// keys. The `new_node_to_index` map only contains nodes not in the previous
1031+
/// keys. The `anon_node_to_index` map only contains nodes of anonymous queries not in the previous
10421032
/// graph, and we map nodes in the previous graph to indices via a two-step
10431033
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
10441034
/// and the `prev_index_to_index` vector (which is more compact and faster than
10451035
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
10461036
///
1047-
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
1037+
/// This struct uses three locks internally. The `data`, `anon_node_to_index`,
10481038
/// and `prev_index_to_index` fields are locked separately. Operations that take
10491039
/// a `DepNodeIndex` typically just access the `data` field.
10501040
///
10511041
/// We only need to manipulate at most two locks simultaneously:
1052-
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
1053-
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
1042+
/// `anon_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
1043+
/// manipulating both, we acquire `anon_node_to_index` or `prev_index_to_index`
10541044
/// first, and `data` second.
10551045
pub(super) struct CurrentDepGraph<D: Deps> {
10561046
encoder: GraphEncoder<D>,
1057-
new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
10581047
prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
1048+
anon_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
10591049

10601050
/// This is used to verify that fingerprints do not change between the creation of a node
10611051
/// and its recomputation.
@@ -1123,7 +1113,7 @@ impl<D: Deps> CurrentDepGraph<D> {
11231113
profiler,
11241114
previous,
11251115
),
1126-
new_node_to_index: Sharded::new(|| {
1116+
anon_node_to_index: Sharded::new(|| {
11271117
FxHashMap::with_capacity_and_hasher(
11281118
new_node_count_estimate / sharded::shards(),
11291119
Default::default(),
@@ -1158,14 +1148,7 @@ impl<D: Deps> CurrentDepGraph<D> {
11581148
edges: EdgesVec,
11591149
current_fingerprint: Fingerprint,
11601150
) -> DepNodeIndex {
1161-
let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
1162-
Entry::Occupied(entry) => *entry.get(),
1163-
Entry::Vacant(entry) => {
1164-
let dep_node_index = self.encoder.send(key, current_fingerprint, edges);
1165-
entry.insert(dep_node_index);
1166-
dep_node_index
1167-
}
1168-
};
1151+
let dep_node_index = self.encoder.send(key, current_fingerprint, edges);
11691152

11701153
#[cfg(debug_assertions)]
11711154
self.record_edge(dep_node_index, key, current_fingerprint);
@@ -1235,8 +1218,6 @@ impl<D: Deps> CurrentDepGraph<D> {
12351218
prev_graph: &SerializedDepGraph,
12361219
prev_index: SerializedDepNodeIndex,
12371220
) -> DepNodeIndex {
1238-
self.debug_assert_not_in_new_nodes(prev_graph, prev_index);
1239-
12401221
let mut prev_index_to_index = self.prev_index_to_index.lock();
12411222

12421223
match prev_index_to_index[prev_index] {
@@ -1254,19 +1235,6 @@ impl<D: Deps> CurrentDepGraph<D> {
12541235
}
12551236
}
12561237
}
1257-
1258-
#[inline]
1259-
fn debug_assert_not_in_new_nodes(
1260-
&self,
1261-
prev_graph: &SerializedDepGraph,
1262-
prev_index: SerializedDepNodeIndex,
1263-
) {
1264-
let node = &prev_graph.index_to_node(prev_index);
1265-
debug_assert!(
1266-
!self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
1267-
"node from previous graph present in new node collection"
1268-
);
1269-
}
12701238
}
12711239

12721240
#[derive(Debug, Clone, Copy)]
@@ -1388,7 +1356,7 @@ fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepN
13881356

13891357
if dep_node.is_none() {
13901358
// Try to find it among the new nodes
1391-
for shard in data.current.new_node_to_index.lock_shards() {
1359+
for shard in data.current.anon_node_to_index.lock_shards() {
13921360
if let Some((node, _)) = shard.iter().find(|(_, index)| **index == dep_node_index) {
13931361
dep_node = Some(*node);
13941362
break;

0 commit comments

Comments (0)