rustc_query_system/dep_graph/graph.rs

use std::assert_matches::assert_matches;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{QueryInvocationId, SelfProfilerRef};
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_data_structures::unord::UnordMap;
use rustc_index::IndexVec;
use rustc_macros::{Decodable, Encodable};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use tracing::{debug, instrument};
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects};

#[derive(Clone)]
pub struct DepGraph<D: Deps> {
    data: Option<Arc<DepGraphData<D>>>,

    /// This field is used for assigning DepNodeIndices when running in
    /// non-incremental mode. Even in non-incremental mode we make sure that
    /// each task has a `DepNodeIndex` that uniquely identifies it. This unique
    /// ID is used for self-profiling.
    virtual_dep_node_index: Arc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

// We store a large collection of these in `prev_index_to_index` during
// non-full incremental builds, and want to ensure that the element size
// doesn't inadvertently increase.
rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);

impl DepNodeIndex {
    const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

pub struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    #[inline]
    fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

pub(crate) struct DepGraphData<D: Deps> {
    /// The new encoding of the dependency graph, optimized for red/green
    /// tracking. The `current` field is the dependency graph of only the
    /// current compilation session: We don't merge the previous dep-graph into
    /// the current one anymore, but we do reference shared data to save space.
    current: CurrentDepGraph<D>,

    /// The dep-graph from the previous compilation session. It contains all
    /// nodes and edges as well as all fingerprints of nodes that have them.
    previous: Arc<SerializedDepGraph>,

    colors: DepNodeColorMap,

    processed_side_effects: Lock<FxHashSet<DepNodeIndex>>,

    /// When we load, there may be `.o` files, cached MIR, or other such
    /// things available to us. If we find that they are not dirty, we
    /// load the path to the file storing those work-products here into
    /// this map. We can later look for and extract that data.
    previous_work_products: WorkProductMap,

    dep_node_debug: Lock<FxHashMap<DepNode, String>>,

    /// Used by incremental compilation tests to assert that
    /// a particular query result was decoded from disk
    /// (not just marked green)
    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}

pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}
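
// A minimal sketch of how `hash_result` is typically driven (illustrative;
// `cx` and `result` stand for an in-scope `DepContext` and query result,
// mirroring the call in `hash_result_and_intern_node` below):
//
//     let fingerprint =
//         cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, &result));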

impl<D: Deps> DepGraph<D> {
    pub fn new(
        profiler: &SelfProfilerRef,
        prev_graph: Arc<SerializedDepGraph>,
        prev_work_products: WorkProductMap,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<D> {
        let prev_graph_node_count = prev_graph.node_count();

        let current = CurrentDepGraph::new(
            profiler,
            prev_graph_node_count,
            encoder,
            record_graph,
            record_stats,
            Arc::clone(&prev_graph),
        );

        let colors = DepNodeColorMap::new(prev_graph_node_count);

        // Instantiate a dependency-less node only once for anonymous queries.
        let _green_node_index = current.intern_new_node(
            DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

        // Instantiate a dependency-less red node only once for anonymous queries.
        let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
            &prev_graph,
            DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
            EdgesVec::new(),
            None,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        match red_node_prev_index_and_color {
            None => {
                // This is expected when we have no previous compilation session.
                assert!(prev_graph_node_count == 0);
            }
            Some((prev_red_node_index, DepNodeColor::Red)) => {
                assert_eq!(prev_red_node_index.as_usize(), red_node_index.as_usize());
                colors.insert(prev_red_node_index, DepNodeColor::Red);
            }
            Some((_, DepNodeColor::Green(_))) => {
                // There must be a logic error somewhere if we hit this branch.
                panic!("DepNodeIndex::FOREVER_RED_NODE evaluated to DepNodeColor::Green")
            }
        }

        DepGraph {
            data: Some(Arc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                processed_side_effects: Default::default(),
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<D> {
        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub(crate) fn data(&self) -> Option<&DepGraphData<D>> {
        self.data.as_deref()
    }

    /// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(data) = &self.data {
            data.current.encoder.with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            D::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Ignore, op)
    }

    /// Used to wrap the deserialization of a query result from disk.
    /// This method enforces that no new `DepNode`s are created during
    /// query result deserialization.
    ///
    /// Enforcing this makes the query dep graph simpler - all nodes
    /// must be created during the query execution, and should be
    /// created from inside the 'body' of a query (the implementation
    /// provided by a particular compiler crate).
    ///
    /// Consider the case of three queries `A`, `B`, and `C`, where
    /// `A` invokes `B` and `B` invokes `C`:
    ///
    /// `A -> B -> C`
    ///
    /// Suppose that decoding the result of query `B` required re-computing
    /// the query `C`. If we did not create a fresh `TaskDeps` when
    /// decoding `B`, we would still be using the `TaskDeps` for query `A`
    /// (if we needed to re-execute `A`). This would cause us to create
    /// a new edge `A -> C`. If this edge did not previously
    /// exist in the `DepGraph`, then we could end up with a different
    /// `DepGraph` at the end of compilation, even if there were no
    /// meaningful changes to the overall program (e.g. a newline was added).
    /// In addition, this edge might cause a subsequent compilation run
    /// to try to force `C` before marking other necessary nodes green. If
    /// `C` did not exist in the new compilation session, then we could
    /// get an ICE. Normally, we would have tried (and failed) to mark
    /// some other query green (e.g. `item_children`) which was used
    /// to obtain `C`, which would prevent us from ever trying to force
    /// a nonexistent `C`.
    ///
    /// It might be possible to enforce that all `DepNode`s read during
    /// deserialization already exist in the previous `DepGraph`. In
    /// the above example, we would invoke `C` during the deserialization
    /// of `B`. Since we correctly create a new `TaskDeps` from the decoding
    /// of `B`, this would result in an edge `B -> C`. If that edge already
    /// existed (with the same `DepPathHash`es), then it should be correct
    /// to allow the invocation of the query to proceed during deserialization
    /// of a query result. We would merely assert that the dep-graph fragment
    /// that would have been added by invoking `C` while decoding `B`
    /// is equivalent to the dep-graph fragment that we already instantiated for `B`
    /// (at the point where we successfully marked `B` as green).
    ///
    /// However, this would require additional complexity
    /// in the query infrastructure, and is not currently needed by the
    /// decoding of any query results. Should the need arise in the future,
    /// we should consider extending the query system with this functionality.
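    ///
    /// A minimal usage sketch (illustrative; `decode_result` and `decoder`
    /// are hypothetical stand-ins for a deserialization closure, mirroring
    /// how `try_mark_previous_green` wraps `emit_side_effects` below):
    ///
    /// ```ignore (illustrative)
    /// let result = dep_graph.with_query_deserialization(|| decode_result(&mut decoder));
    /// ```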
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Forbid, op)
    }

    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(key, cx, arg, task, hash_result),
            None => (task(cx, arg), self.next_virtual_depnode_index()),
        }
    }

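    /// Executes `op` as an "anonymous" task (see `DepGraphData::with_anon_task_inner`
    /// for the semantics) and, unlike the inner method, also records a read of the
    /// resulting node on behalf of the current task. A minimal usage sketch
    /// (illustrative; `qcx` and `compute` are hypothetical stand-ins for a real
    /// query context and computation):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     dep_graph.with_anon_task(qcx, dep_kind, || compute(qcx));
    /// ```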
    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => {
                let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
                self.read_index(index);
                (result, index)
            }
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Starts a new dep-graph task. Dep-graph tasks are specified
    /// using a free function (`task`) and **not** a closure -- this
    /// is intentional because we want to exercise tight control over
    /// what state they have access to. In particular, we want to
    /// prevent implicit 'leaks' of tracked state into the task (which
    /// could then be read without generating correct edges in the
    /// dep-graph -- see the [rustc dev guide] for more details on
    /// the dep-graph). To this end, the task function gets exactly two
    /// pieces of state: the context `cx` and an argument `arg`. Both
    /// of these bits of state must be of some type that implements
    /// `DepGraphSafe` and hence does not leak.
    ///
    /// The choice of two arguments is not fundamental. One argument
    /// would work just as well, since multiple values can be
    /// collected using tuples. However, using two arguments works out
    /// to be quite convenient, since it is common to need a context
    /// (`cx`) and some argument (e.g., a `DefId` identifying what
    /// item to process).
    ///
    /// For cases where you need some other number of arguments:
    ///
    /// - If you only need one argument, just use `()` for the `arg`
    ///   parameter.
    /// - If you need 3+ arguments, use a tuple for the
    ///   `arg` parameter.
    ///
    /// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/queries/incremental-compilation.html
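    ///
    /// A minimal call-site sketch (illustrative; `dep_node`, `tcx`, `def_id`,
    /// and `compute` are hypothetical stand-ins, while `hash_result` is the
    /// helper defined in this module; a non-capturing closure coerces to the
    /// required `fn` pointer):
    ///
    /// ```ignore (illustrative)
    /// let (result, dep_node_index) =
    ///     data.with_task(dep_node, tcx, def_id, |tcx, def_id| compute(tcx, def_id), Some(hash_result));
    /// ```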
    #[inline(always)]
    pub(crate) fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        // If the following assertion triggers, it can have two reasons:
        // 1. Something is wrong with DepNode creation, either here or
        //    in `DepGraph::try_mark_green()`.
        // 2. Two distinct query keys get mapped to the same `DepNode`
        //    (see for example #48923).
        assert!(
            !self.dep_node_exists(&key),
            "forcing query with already existing `DepNode`\n\
                 - query-key: {arg:?}\n\
                 - dep-node: {key:?}"
        );

        let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
        let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
        } else {
            let task_deps = Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: EdgesVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            });
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dcx = cx.dep_context();
        let dep_node_index =
            self.hash_result_and_intern_node(dcx, key, edges, &result, hash_result);

        (result, dep_node_index)
    }

    /// Executes something within an "anonymous" task, that is, a task the
    /// `DepNode` of which is determined by the list of inputs it read from.
    ///
    /// NOTE: this does not actually count as a read of the DepNode here.
    /// Using the result of this task without reading the DepNode will result
    /// in untracked dependencies which may lead to ICEs as nodes are
    /// incorrectly marked green.
    ///
    /// FIXME: This could perhaps return a `WithDepNode` to ensure that the
    /// user of this function actually performs the read; we'll have to see
    /// how to make that work with `anon` in `execute_job_incr`, though.
    pub(crate) fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        let task_deps = Lock::new(TaskDeps::default());
        let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let task_deps = task_deps.reads;

        let dep_node_index = match task_deps.len() {
            0 => {
                // Because the dep-node ID of anon nodes is computed from the set of its
                // dependencies, we already know what the ID of this dependency-less node is
                // going to be (i.e. equal to the precomputed
                // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
                // a `StableHasher` and sending the node through interning.
                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
            }
            1 => {
                // When there is only one dependency, don't bother creating a node.
                task_deps[0]
            }
            _ => {
                // The dep node indices are hashed here instead of hashing the dep nodes of the
                // dependencies. These indices may refer to different nodes per session, but this
                // isn't a problem here because we ensure that the final dep node hash is
                // per-session only by combining it with the per-session random number
                // `anon_id_seed`. This hash only needs to map the dependencies to a single value
                // on a per-session basis.
                let mut hasher = StableHasher::new();
                task_deps.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    // Fingerprint::combine() is faster than sending Fingerprint
                    // through the StableHasher (at least as long as StableHasher
                    // is so slow).
                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

                self.current.intern_new_node(target_dep_node, task_deps, Fingerprint::ZERO)
            }
        };

        (result, dep_node_index)
    }

    /// Hashes the query result and interns the new `DepNode`, together with the
    /// dependencies recorded up to now.
    fn hash_result_and_intern_node<Ctxt: DepContext<Deps = D>, R>(
        &self,
        cx: &Ctxt,
        node: DepNode,
        edges: EdgesVec,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        let hashing_timer = cx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result.map(|hash_result| {
            cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
        });

        // Intern the new `DepNode` with the dependencies up-to-now.
        let (dep_node_index, prev_and_color) =
            self.current.intern_node(&self.previous, node, edges, current_fingerprint);

        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some((prev_index, color)) = prev_and_color {
            debug_assert!(
                self.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor insertion for {node:?}",
            );

            self.colors.insert(prev_index, color);
        }

        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
                        // We don't need to record dependencies of eval_always
                        // queries. They are re-evaluated unconditionally anyway.
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        // Reading is forbidden in this context. ICE with a useful error message.
                        panic_on_forbidden_read(data, dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
                }

                // As long as we only have a low number of reads we can avoid doing a hash
                // insert and potentially allocating/reallocating the hashmap.
                let new_read = if task_deps.reads.len() < EdgesVec::INLINE_CAPACITY {
                    task_deps.reads.iter().all(|other| *other != dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == EdgesVec::INLINE_CAPACITY {
                        // Fill `read_set` with what we have so far so we can use the hashset
                        // next time
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                if forbidden_edge.test(&src, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
                }
            })
        }
    }
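
    // A minimal sketch of the two-tier dedup strategy used above (illustrative,
    // with plain `Vec`/`HashSet` standing in for `EdgesVec`/`FxHashSet`; the
    // real inline capacity lives in `EdgesVec::INLINE_CAPACITY`):
    //
    //     let new_read = if reads.len() < INLINE_CAPACITY {
    //         // For short lists, a linear scan beats hashing.
    //         reads.iter().all(|other| *other != index)
    //     } else {
    //         // Once the list spills, `read_set` answers membership queries.
    //         read_set.insert(index)
    //     };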

    /// Create a node when we force-feed a value into the query cache.
    /// This is used to remove cycles during type-checking const generic parameters.
    ///
    /// As usual in the query system, we consider that the current state of the calling
    /// query depends only on the list of dependencies recorded up to now. As a consequence,
    /// the value that this query gives us can only depend on those dependencies too.
    /// Therefore, it is sound to use the current dependency set for the created node.
    ///
    /// During replay, the order of the nodes in the dependency graph is relevant:
    /// an unchanged replay will mark the caller query before trying to mark this one,
    /// and if there is a change to report, the caller query will be re-executed before this one.
    ///
    /// FIXME: If the code is changed enough for this node to be marked before requiring the
    /// caller's node, we suppose that those changes will be enough to mark this node red and
    /// force a recomputation using the "normal" way.
    pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R: Debug>(
        &self,
        node: DepNode,
        cx: Ctxt,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            // The caller query has more dependencies than the node we are creating. We may
            // encounter a case where this created node is marked as green, but the caller query is
            // subsequently marked as red or recomputed. In this case, we will end up feeding a
            // value to an existing node.
            //
            // For sanity, we still check that the loaded stable hash and the new one match.
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.current.prev_index_to_index.lock()[prev_index];
                if let Some(dep_node_index) = dep_node_index {
                    crate::query::incremental_verify_ich(
                        cx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        |value| format!("{value:?}"),
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = EdgesVec::new();
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            data.hash_result_and_intern_node(&cx, node, edges, result, hash_result)
        } else {
            // Incremental compilation is turned off. We just execute the task
            // without tracking. We still provide a dep-node index that uniquely
            // identifies the task so that we have a cheap way of referring to
            // the query for self-profiling.
            self.next_virtual_depnode_index()
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    #[inline]
    fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.current.prev_index_to_index.lock()[prev_index]
        } else {
            self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
        }
    }

    #[inline]
    fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
        self.dep_node_index_of_opt(dep_node).is_some()
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
            // This is a node that did not exist in the previous compilation session.
            None
        }
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    #[inline]
    pub(crate) fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        self.colors.get(prev_index).is_some_and(|c| c.is_green())
    }

    #[inline]
    pub(crate) fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.fingerprint_by_index(prev_index)
    }

    #[inline]
    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
        self.previous.index_to_node(prev_index)
    }

    pub(crate) fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }
}

impl<D: Deps> DepGraph<D> {
    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
        self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
    }

    /// Checks whether a previous work product exists for `v` and, if
    /// so, returns the path that leads to it. Used to skip doing work.
    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    /// Access the map of work-products created during the cached run. Only
    /// used during saving of the dep-graph.
    pub fn previous_work_products(&self) -> &WorkProductMap {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    #[cfg(debug_assertions)]
    #[inline(always)]
    pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            return data.node_color(dep_node);
        }

        None
    }

    pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
    }
}

impl<D: Deps> DepGraphData<D> {
    /// Try to mark a node index for the node `dep_node`.
    ///
    /// A node will have an index when it has already been marked green, or when we can mark
    /// it green. This function will mark the current task as a reader of the specified node
    /// when a node index can be found for that node.
    pub(crate) fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        // Return None if the dep node didn't exist in the previous session
        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        match self.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                // This DepNode and the corresponding query invocation existed
                // in the previous compilation session too, so we can try to
                // mark it as green by recursively marking all of its
                // dependencies green.
                self.try_mark_previous_green(qcx, prev_index, dep_node, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        parent_dep_node_index: SerializedDepNodeIndex,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<()> {
        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
        let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                // This dependency has been marked as green before, we are
                // still fine and can continue with checking the other
                // dependencies.
                debug!("dependency {dep_dep_node:?} was immediately green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                // We found a dependency the value of which has changed
                // compared to the previous compilation session. We cannot
                // mark the DepNode as green and also don't need to bother
                // with checking any of the other dependencies.
                debug!("dependency {dep_dep_node:?} was immediately red");
                return None;
            }
            None => {}
        }

        // We don't know the state of this dependency. If it isn't
        // an eval_always node, let's try to mark it green recursively.
        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node, frame);

            if node_index.is_some() {
                debug!("managed to MARK dependency {dep_dep_node:?} as green");
                return Some(());
            }
        }

        // We failed to mark it green, so we try to force the query.
        debug!("trying to force dependency {dep_dep_node:?}");
        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, frame) {
            // The DepNode could not be forced.
            debug!("dependency {dep_dep_node:?} could not be forced");
            return None;
        }

        let dep_dep_node_color = self.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!("dependency {dep_dep_node:?} was red after forcing");
                return None;
            }
            None => {}
        }

        if let None = qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        // If the query we just forced has resulted in
        // some kind of compilation error, we cannot rely on
        // the dep-node color having been properly updated.
        // This means that the query system has reached an
        // invalid state. We let the compiler continue (by
        // returning `None`) so it can emit error messages
        // and wind down, but rely on the fact that this
        // invalid state will not be persisted to the
        // incremental compilation cache because of
        // compilation errors being present.
        debug!("dependency {dep_dep_node:?} resulted in compilation error");
        return None;
    }

    /// Try to mark a dep-node which existed in the previous compilation session as green.
    #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
    fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<DepNodeIndex> {
        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

        // We never try to mark eval_always nodes as green
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);

        for dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, dep_dep_node_index, Some(&frame))?;
        }

        // If we got here without hitting a `return` that means that all
        // dependencies of this DepNode could be marked as green. Therefore we
        // can also mark this DepNode as green.

        // There may be multiple threads trying to mark the same dep node green concurrently.

        // We allocate an entry for the node in the current dependency graph and
        // add all the appropriate edges imported from the previous graph.
        let dep_node_index =
            self.current.promote_node_and_deps_to_current(&self.previous, prev_dep_node_index);

        // ... emitting any stored diagnostic ...

        // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
        // Maybe store a list on disk and encode this fact in the DepNodeState
        let side_effects = qcx.load_side_effects(prev_dep_node_index);

        if side_effects.maybe_any() {
            qcx.dep_context().dep_graph().with_query_deserialization(|| {
                self.emit_side_effects(qcx, dep_node_index, side_effects)
            });
        }

        // ... and finally storing a "Green" entry in the color map.
        // Multiple threads can all write the same color here
        self.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("successfully marked {dep_node:?} as green");
        Some(dep_node_index)
    }

    /// Atomically emits some loaded diagnostics.
    /// This may be called concurrently on multiple threads for the same dep node.
    #[cold]
    #[inline(never)]
    fn emit_side_effects<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node_index: DepNodeIndex,
        side_effects: QuerySideEffects,
    ) {
        let mut processed = self.processed_side_effects.lock();

        if processed.insert(dep_node_index) {
            // We were the first to insert the node in the set so this thread
            // must process side effects

            // Promote the previous diagnostics to the current session.
            qcx.store_side_effects(dep_node_index, side_effects.clone());

            let dcx = qcx.dep_context().sess().dcx();

            for diagnostic in side_effects.diagnostics {
                dcx.emit_diagnostic(diagnostic);
            }
        }
    }
}

impl<D: Deps> DepGraph<D> {
    /// Returns true if the given node has been marked as red during the
    /// current compilation session. Used in various assertions.
    pub fn is_red(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), Some(DepNodeColor::Red))
    }

    /// Returns true if the given node has been marked as green during the
    /// current compilation session. Used in various assertions.
    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        self.node_color(dep_node).is_some_and(|c| c.is_green())
    }

    /// This method loads all on-disk cacheable query results into memory, so
    /// they can be written out to the new cache file again. Most query results
    /// will already be in memory, but in the case where we marked something as
    /// green but then did not need the value, that value will never have been
    /// loaded from disk.
    ///
    /// This method will only load queries that will end up in the disk cache.
    /// Other queries will not be executed.
    pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // We can skip red nodes because a node can only be marked
                    // as red if the query result was recomputed and thus is
                    // already in memory.
                }
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.print_incremental_info(
                data.current.total_read_count.load(Ordering::Relaxed),
                data.current.total_duplicate_read_count.load(Ordering::Relaxed),
            )
        }
    }

    pub fn finish_encoding(&self) -> FileEncodeResult {
        if let Some(data) = &self.data { data.current.encoder.finish() } else { Ok(0) }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        debug_assert!(self.data.is_none());
        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

/// A "work product" is an intermediate result that we save into the
/// incremental directory for later re-use. The primary examples are
/// the object files that we save for each partition at code
/// generation time.
///
/// Each work product is associated with a dep-node, representing the
/// process that produced the work-product. If that dep-node is found
/// to be dirty when we load up, then we will delete the work-product
/// at load time. If the work-product is found to be clean, then we
/// will keep a record in the `previous_work_products` list.
///
/// In addition, work products have an associated hash. This hash is
/// an extra hash that can be used to decide if the work-product from
/// a previous compilation can be re-used (in addition to the dirty
/// edges check).
///
/// As the primary example, consider the object files we generate for
/// each partition. In the first run, we create partitions based on
/// the symbols that need to be compiled. For each partition P, we
/// hash the symbols in P and create a `WorkProduct` record associated
/// with `DepNode::CodegenUnit(P)`; the hash is computed over the set
/// of symbols in P.
///
/// The next time we compile, if the `DepNode::CodegenUnit(P)` is
/// judged to be clean (which means none of the things we read to
/// generate the partition were found to be dirty), it will be loaded
/// into previous work products. We will then regenerate the set of
/// symbols in the partition P and hash them (note that new symbols
/// may be added -- for example, new monomorphizations -- even if
/// nothing in P changed!). We will compare that hash against the
/// previous hash. If it matches up, we can reuse the object file.
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
    /// Saved files associated with this CGU. In each key/value pair, the value is the path to the
    /// saved file and the key is some identifier for the type of file being saved.
    ///
    /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
    /// the object file's path, and "dwo" to the dwarf object file's path.
    pub saved_files: UnordMap<String, String>,
}

pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;

// Index type for `DepNodeData`'s edges.
rustc_index::newtype_index! {
    struct EdgeIndex {}
}

/// `CurrentDepGraph` stores the dependency graph for the current session. It
/// will be populated as we run queries or tasks. We never remove nodes from the
/// graph: they are only added.
///
/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes
/// in memory. This is important, because these graph structures are some of the
/// largest in the compiler.
///
/// For this reason, we avoid storing `DepNode`s more than once as map
/// keys. The `new_node_to_index` map only contains nodes not in the previous
/// graph, and we map nodes in the previous graph to indices via a two-step
/// mapping. `SerializedDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`,
/// and the `prev_index_to_index` vector (which is more compact and faster than
/// using a map) maps from `SerializedDepNodeIndex` to `DepNodeIndex`.
///
/// This struct uses three locks internally. The `data`, `new_node_to_index`,
/// and `prev_index_to_index` fields are locked separately. Operations that take
/// a `DepNodeIndex` typically just access the `data` field.
///
/// We only need to manipulate at most two locks simultaneously:
/// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When
/// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index`
/// first, and `data` second.
pub(super) struct CurrentDepGraph<D: Deps> {
    encoder: GraphEncoder<D>,
    new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    /// This is used to verify that fingerprints do not change between the creation of a node
    /// and its recomputation.
    #[cfg(debug_assertions)]
    fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,

    /// Used to trap when a specific edge is added to the graph.
    /// This is used for debug purposes and is only active with `debug_assertions`.
    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter>,

    /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of
    /// their edges. This has the beneficial side-effect that multiple anonymous
    /// nodes can be coalesced into one without changing the semantics of the
    /// dependency graph. However, the merging of nodes can lead to a subtle
    /// problem during red-green marking: The color of an anonymous node from
    /// the current session might "shadow" the color of the node with the same
    /// ID from the previous session. In order to side-step this problem, we make
    /// sure that anonymous `NodeId`s allocated in different sessions don't overlap.
    /// This is implemented by mixing a session-key into the ID fingerprint of
    /// each anon node. The session-key is just a random number generated when
    /// the `DepGraph` is created.
    anon_id_seed: Fingerprint,

    /// These are simple counters that are for profiling and
    /// debugging and only active with `debug_assertions`.
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}

impl<D: Deps> CurrentDepGraph<D> {
    fn new(
        profiler: &SelfProfilerRef,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_nanos();
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        CurrentDepGraph {
            encoder: GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
                profiler,
                previous,
            ),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::shards(),
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
        assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
    }

    /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
    /// Assumes that this is a node that has no equivalent in the previous dep-graph.
    #[inline(always)]
    fn intern_new_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let dep_node_index = self.encoder.send(key, current_fingerprint, edges);
                entry.insert(dep_node_index);
                dep_node_index
            }
        };

        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, current_fingerprint);

        dep_node_index
    }

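    /// Interns a node that may have an equivalent in the previous dep-graph.
    /// If it does, this also determines the node's color as a side product:
    /// green if the new fingerprint matches the previous one, red otherwise
    /// (including the `no_hash` case, where no fingerprint is available).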
    fn intern_node(
        &self,
        prev_graph: &SerializedDepGraph,
        key: DepNode,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            let get_dep_node_index = |fingerprint| {
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index = self.encoder.send(key, fingerprint, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, fingerprint);

                dep_node_index
            };

            // Determine the color and index of the new `DepNode`.
            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                    // This is a green node: it existed in the previous compilation,
                    // its query was re-executed, and it has the same result as before.
                    let dep_node_index = get_dep_node_index(fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    // This is a red node: it existed in the previous compilation, its query
                    // was re-executed, but it has a different result from before.
                    let dep_node_index = get_dep_node_index(fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                // This is a red node, effectively: it existed in the previous compilation
                // session, its query was re-executed, but it doesn't compute a result hash
                // (i.e. it represents a `no_hash` query), so we have no way of determining
                // whether or not the result was the same as before.
                let dep_node_index = get_dep_node_index(Fingerprint::ZERO);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This is a new node: it didn't exist in the previous compilation session.
            let dep_node_index = self.intern_new_node(key, edges, fingerprint);

            (dep_node_index, None)
        }
    }

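    /// Allocates a `DepNodeIndex` for a node imported verbatim from the
    /// previous graph, sending it to the encoder (via `send_promoted`) at
    /// most once; repeated promotions return the already-allocated index.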
    fn promote_node_and_deps_to_current(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let dep_node_index = self.encoder.send_promoted(prev_index, &*prev_index_to_index);
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(
                    dep_node_index,
                    prev_graph.index_to_node(prev_index),
                    prev_graph.fingerprint_by_index(prev_index),
                );
                dep_node_index
            }
        }
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
}

#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a> {
    /// New dependencies can be added to the
    /// `TaskDeps`. This is used when executing a 'normal' query
    /// (no `eval_always` modifier)
    Allow(&'a Lock<TaskDeps>),
    /// This is used when executing an `eval_always` query. We don't
    /// need to track dependencies for a query that's always
    /// re-executed -- but we need to know that this is an `eval_always`
    /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
    /// when directly feeding other queries.
    EvalAlways,
    /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
    Ignore,
    /// Any attempt to add new dependencies will cause a panic.
    /// This is used when decoding a query result from disk,
    /// to ensure that the decoding process doesn't itself
    /// require the execution of any queries.
    Forbid,
}
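
// How the variants map onto the entry points in this file (a summary of the
// code above): `with_task` installs `Allow` for normal queries and
// `EvalAlways` for `eval_always` ones, `with_ignore` installs `Ignore`, and
// `with_query_deserialization` installs `Forbid`.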

#[derive(Debug)]
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode>,
}

impl Default for TaskDeps {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

// A data structure that stores Option<DepNodeColor> values as a contiguous
// array, using one u32 per entry.
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    #[inline]
    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}
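
// A minimal round-trip sketch of the packing scheme above (illustrative):
// `None` is stored as 0, `Red` as 1, and `Green(i)` as `i + 2`, so e.g.
//
//     let map = DepNodeColorMap::new(8);
//     let green = DepNodeColor::Green(DepNodeIndex::from_u32(5));
//     map.insert(SerializedDepNodeIndex::from_u32(3), green); // stored as 7
//     assert!(map.get(SerializedDepNodeIndex::from_u32(3)).is_some_and(|c| c.is_green()));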

#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
    let data = graph.data.as_ref().unwrap();

    eprintln!("there was a panic while trying to force a dep node");
    eprintln!("try_mark_green dep node stack:");

    let mut i = 0;
    let mut current = frame;
    while let Some(frame) = current {
        let node = data.previous.index_to_node(frame.index);
        eprintln!("#{i} {node:?}");
        current = frame.parent;
        i += 1;
    }

    eprintln!("end of try_mark_green dep node stack");
}

#[cold]
#[inline(never)]
fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
    // We have to do an expensive reverse-lookup of the DepNode that
    // corresponds to `dep_node_index`, but that's OK since we are about
    // to ICE anyway.
    let mut dep_node = None;

    // First try to find the dep node among those that already existed in the
    // previous session
    for (prev_index, index) in data.current.prev_index_to_index.lock().iter_enumerated() {
        if index == &Some(dep_node_index) {
            dep_node = Some(data.previous.index_to_node(prev_index));
            break;
        }
    }

    if dep_node.is_none() {
        // Try to find it among the new nodes
        for shard in data.current.new_node_to_index.lock_shards() {
            if let Some((node, _)) = shard.iter().find(|(_, index)| **index == dep_node_index) {
                dep_node = Some(*node);
                break;
            }
        }
    }

    let dep_node = dep_node.map_or_else(
        || format!("with index {:?}", dep_node_index),
        |dep_node| format!("`{:?}`", dep_node),
    );

    panic!(
        "Error: trying to record dependency on DepNode {dep_node} in a \
         context that does not allow it (e.g. during query deserialization). \
         The most common case of recording a dependency on a DepNode `foo` is \
         when the corresponding query `foo` is invoked. Invoking queries is not \
         allowed as part of loading something from the incremental on-disk cache. \
         See <https://github.com/rust-lang/rust/pull/91919>."
    )
}