Skip to content

Commit fc04774

Browse files
committed
use Sharded in caches
1 parent fb6218d commit fc04774

File tree

3 files changed

+24
-95
lines changed

3 files changed

+24
-95
lines changed

compiler/rustc_data_structures/src/sharded.rs

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -9,15 +9,11 @@ use std::mem;
99
#[cfg_attr(parallel_compiler, repr(align(64)))]
1010
struct CacheAligned<T>(T);
1111

12-
#[cfg(parallel_compiler)]
1312
// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
1413
// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
1514
// may also affect the ideal number of shards.
1615
const SHARD_BITS: usize = 5;
1716

18-
#[cfg(not(parallel_compiler))]
19-
const SHARD_BITS: usize = 0;
20-
2117
pub const SHARDS: usize = 1 << SHARD_BITS;
2218

2319
/// An array of cache-line aligned inner locked structures with convenience methods.

compiler/rustc_query_system/src/query/caches.rs

Lines changed: 10 additions & 47 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@ use crate::dep_graph::DepNodeIndex;
22

33
use rustc_data_structures::fx::FxHashMap;
44
use rustc_data_structures::sharded;
5-
#[cfg(parallel_compiler)]
65
use rustc_data_structures::sharded::Sharded;
76
use rustc_data_structures::sync::Lock;
87
use rustc_index::vec::{Idx, IndexVec};
@@ -37,10 +36,7 @@ impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelecto
3736
}
3837

3938
pub struct DefaultCache<K, V> {
40-
#[cfg(parallel_compiler)]
4139
cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
42-
#[cfg(not(parallel_compiler))]
43-
cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
4440
}
4541

4642
impl<K, V> Default for DefaultCache<K, V> {
@@ -60,40 +56,26 @@ where
6056
#[inline(always)]
6157
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
6258
let key_hash = sharded::make_hash(key);
63-
#[cfg(parallel_compiler)]
6459
let lock = self.cache.get_shard_by_hash(key_hash).lock();
65-
#[cfg(not(parallel_compiler))]
66-
let lock = self.cache.lock();
60+
6761
let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
6862

6963
if let Some((_, value)) = result { Some(*value) } else { None }
7064
}
7165

7266
#[inline]
7367
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
74-
#[cfg(parallel_compiler)]
7568
let mut lock = self.cache.get_shard_by_value(&key).lock();
76-
#[cfg(not(parallel_compiler))]
77-
let mut lock = self.cache.lock();
69+
7870
// We may be overwriting another value. This is all right, since the dep-graph
7971
// will check that the fingerprint matches.
8072
lock.insert(key, (value, index));
8173
}
8274

8375
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
84-
#[cfg(parallel_compiler)]
85-
{
86-
let shards = self.cache.lock_shards();
87-
for shard in shards.iter() {
88-
for (k, v) in shard.iter() {
89-
f(k, &v.0, v.1);
90-
}
91-
}
92-
}
93-
#[cfg(not(parallel_compiler))]
94-
{
95-
let map = self.cache.lock();
96-
for (k, v) in map.iter() {
76+
let shards = self.cache.lock_shards();
77+
for shard in shards.iter() {
78+
for (k, v) in shard.iter() {
9779
f(k, &v.0, v.1);
9880
}
9981
}
@@ -151,10 +133,7 @@ impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
151133
}
152134

153135
pub struct VecCache<K: Idx, V> {
154-
#[cfg(parallel_compiler)]
155136
cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
156-
#[cfg(not(parallel_compiler))]
157-
cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
158137
}
159138

160139
impl<K: Idx, V> Default for VecCache<K, V> {
@@ -173,38 +152,22 @@ where
173152

174153
#[inline(always)]
175154
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
176-
#[cfg(parallel_compiler)]
177155
let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
178-
#[cfg(not(parallel_compiler))]
179-
let lock = self.cache.lock();
156+
180157
if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
181158
}
182159

183160
#[inline]
184161
fn complete(&self, key: K, value: V, index: DepNodeIndex) {
185-
#[cfg(parallel_compiler)]
186162
let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
187-
#[cfg(not(parallel_compiler))]
188-
let mut lock = self.cache.lock();
163+
189164
lock.insert(key, (value, index));
190165
}
191166

192167
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
193-
#[cfg(parallel_compiler)]
194-
{
195-
let shards = self.cache.lock_shards();
196-
for shard in shards.iter() {
197-
for (k, v) in shard.iter_enumerated() {
198-
if let Some(v) = v {
199-
f(&k, &v.0, v.1);
200-
}
201-
}
202-
}
203-
}
204-
#[cfg(not(parallel_compiler))]
205-
{
206-
let map = self.cache.lock();
207-
for (k, v) in map.iter_enumerated() {
168+
let shards = self.cache.lock_shards();
169+
for shard in shards.iter() {
170+
for (k, v) in shard.iter_enumerated() {
208171
if let Some(v) = v {
209172
f(&k, &v.0, v.1);
210173
}

compiler/rustc_query_system/src/query/plumbing.rs

Lines changed: 14 additions & 44 deletions
Original file line numberDiff line numberDiff line change
@@ -18,7 +18,10 @@ use rustc_data_structures::stack::ensure_sufficient_stack;
1818
use rustc_data_structures::sync::Lock;
1919
#[cfg(parallel_compiler)]
2020
use rustc_data_structures::{cold_path, sharded::Sharded};
21-
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
21+
use rustc_data_structures::profiling::TimingGuard;
22+
use rustc_data_structures::sharded::Sharded;
23+
use rustc_data_structures::stack::ensure_sufficient_stack;
24+
use rustc_data_structures::sync::{Lock, LockGuard};
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
2225
use rustc_span::{Span, DUMMY_SP};
2326
use std::cell::Cell;
2427
use std::collections::hash_map::Entry;
@@ -30,10 +33,7 @@ use thin_vec::ThinVec;
3033
use super::QueryConfig;
3134

3235
pub struct QueryState<K, D: DepKind> {
33-
#[cfg(parallel_compiler)]
3436
active: Sharded<FxHashMap<K, QueryResult<D>>>,
35-
#[cfg(not(parallel_compiler))]
36-
active: Lock<FxHashMap<K, QueryResult<D>>>,
3737
}
3838

3939
/// Indicates the state of a query for a given key in a query map.
@@ -52,15 +52,8 @@ where
5252
D: DepKind,
5353
{
5454
pub fn all_inactive(&self) -> bool {
55-
#[cfg(parallel_compiler)]
56-
{
57-
let shards = self.active.lock_shards();
58-
shards.iter().all(|shard| shard.is_empty())
59-
}
60-
#[cfg(not(parallel_compiler))]
61-
{
62-
self.active.lock().is_empty()
63-
}
55+
let shards = self.active.lock_shards();
56+
shards.iter().all(|shard| shard.is_empty())
6457
}
6558

6659
pub fn try_collect_active_jobs<Qcx: Copy>(
@@ -69,27 +62,11 @@ where
6962
make_query: fn(Qcx, K) -> QueryStackFrame<D>,
7063
jobs: &mut QueryMap<D>,
7164
) -> Option<()> {
72-
#[cfg(parallel_compiler)]
73-
{
74-
// We use try_lock_shards here since we are called from the
75-
// deadlock handler, and this shouldn't be locked.
76-
let shards = self.active.try_lock_shards()?;
77-
for shard in shards.iter() {
78-
for (k, v) in shard.iter() {
79-
if let QueryResult::Started(ref job) = *v {
80-
let query = make_query(qcx, *k);
81-
jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
82-
}
83-
}
84-
}
85-
}
86-
#[cfg(not(parallel_compiler))]
87-
{
88-
// We use try_lock here since we are called from the
89-
// deadlock handler, and this shouldn't be locked.
90-
// (FIXME: Is this relevant for non-parallel compilers? It doesn't
91-
// really hurt much.)
92-
for (k, v) in self.active.try_lock()?.iter() {
65+
// We use try_lock_shards here since we are called from the
66+
// deadlock handler, and this shouldn't be locked.
67+
let shards = self.active.try_lock_shards()?;
68+
for shard in shards.iter() {
69+
for (k, v) in shard.iter() {
9370
if let QueryResult::Started(ref job) = *v {
9471
let query = make_query(qcx, *k);
9572
jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
@@ -183,10 +160,8 @@ where
183160
cache.complete(key, result, dep_node_index);
184161

185162
let job = {
186-
#[cfg(parallel_compiler)]
187163
let mut lock = state.active.get_shard_by_value(&key).lock();
188-
#[cfg(not(parallel_compiler))]
189-
let mut lock = state.active.lock();
164+
190165
match lock.remove(&key).unwrap() {
191166
QueryResult::Started(job) => job,
192167
QueryResult::Poisoned => panic!(),
@@ -208,10 +183,8 @@ where
208183
// Poison the query so jobs waiting on it panic.
209184
let state = self.state;
210185
let job = {
211-
#[cfg(parallel_compiler)]
212186
let mut shard = state.active.get_shard_by_value(&self.key).lock();
213-
#[cfg(not(parallel_compiler))]
214-
let mut shard = state.active.lock();
187+
215188
let job = match shard.remove(&self.key).unwrap() {
216189
QueryResult::Started(job) => job,
217190
QueryResult::Poisoned => panic!(),
@@ -324,11 +297,8 @@ where
324297
Qcx: QueryContext,
325298
{
326299
let state = query.query_state(qcx);
327-
#[cfg(parallel_compiler)]
328300
let mut state_lock = state.active.get_shard_by_value(&key).lock();
329-
#[cfg(not(parallel_compiler))]
330-
let mut state_lock = state.active.lock();
331-
301+
let mut state_lock = state.active.get_shard_by_value(&key).lock();
332302
// For the parallel compiler we need to check both the query cache and query state structures
333303
// while holding the state lock to ensure that 1) the query has not yet completed and 2) the
334304
// query is not still executing. Without checking the query cache here, we can end up

0 commit comments

Comments (0)