use std::assert_matches::assert_matches;
use std::collections::hash_map::Entry;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};

use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::profiling::{QueryInvocationId, SelfProfilerRef};
use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::sync::{AtomicU64, Lock};
use rustc_data_structures::unord::UnordMap;
use rustc_index::IndexVec;
use rustc_macros::{Decodable, Encodable};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use tracing::{debug, instrument};
#[cfg(debug_assertions)]
use {super::debug::EdgeFilter, std::env};

use super::query::DepGraphQuery;
use super::serialized::{GraphEncoder, SerializedDepGraph, SerializedDepNodeIndex};
use super::{DepContext, DepKind, DepNode, Deps, HasDepContext, WorkProductId};
use crate::dep_graph::edges::EdgesVec;
use crate::ich::StableHashingContext;
use crate::query::{QueryContext, QuerySideEffects};

#[derive(Clone)]
pub struct DepGraph<D: Deps> {
    data: Option<Arc<DepGraphData<D>>>,

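    /// Used to assign unique `DepNodeIndex`es even when the graph is
    /// disabled (non-incremental mode); see `next_virtual_depnode_index`.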
    virtual_dep_node_index: Arc<AtomicU32>,
}

rustc_index::newtype_index! {
    pub struct DepNodeIndex {}
}

rustc_data_structures::static_assert_size!(Option<DepNodeIndex>, 4);

impl DepNodeIndex {
    const SINGLETON_DEPENDENCYLESS_ANON_NODE: DepNodeIndex = DepNodeIndex::ZERO;
    pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1);
}

impl From<DepNodeIndex> for QueryInvocationId {
    #[inline(always)]
    fn from(dep_node_index: DepNodeIndex) -> Self {
        QueryInvocationId(dep_node_index.as_u32())
    }
}

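/// A frame in the chain of nodes currently being marked green; used to print
/// a readable trace when forcing a dependency panics
/// (see `print_markframe_trace`).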
pub struct MarkFrame<'a> {
    index: SerializedDepNodeIndex,
    parent: Option<&'a MarkFrame<'a>>,
}

enum DepNodeColor {
    Red,
    Green(DepNodeIndex),
}

impl DepNodeColor {
    #[inline]
    fn is_green(self) -> bool {
        match self {
            DepNodeColor::Red => false,
            DepNodeColor::Green(_) => true,
        }
    }
}

pub(crate) struct DepGraphData<D: Deps> {
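    /// The dep-graph being built for the current session.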
    current: CurrentDepGraph<D>,

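    /// The dep-graph from the previous compilation session, loaded from disk.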
    previous: Arc<SerializedDepGraph>,

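    /// The colors (red/green) assigned so far to nodes of the previous session.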
    colors: DepNodeColorMap,

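    /// Nodes whose recorded side effects have already been re-emitted this
    /// session (see `emit_side_effects`).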
    processed_side_effects: Lock<FxHashSet<DepNodeIndex>>,

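    /// Work products surviving from the previous session.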
    previous_work_products: WorkProductMap,

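    /// Human-readable descriptions of dep-nodes, used for debugging only.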
    dep_node_debug: Lock<FxHashMap<DepNode, String>>,

    debug_loaded_from_disk: Lock<FxHashSet<DepNode>>,
}

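/// Hashes a query result with the given `StableHashingContext`, producing the
/// fingerprint that is stored for the node in the dep-graph.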
pub fn hash_result<R>(hcx: &mut StableHashingContext<'_>, result: &R) -> Fingerprint
where
    R: for<'a> HashStable<StableHashingContext<'a>>,
{
    let mut stable_hasher = StableHasher::new();
    result.hash_stable(hcx, &mut stable_hasher);
    stable_hasher.finish()
}

impl<D: Deps> DepGraph<D> {
    pub fn new(
        profiler: &SelfProfilerRef,
        prev_graph: Arc<SerializedDepGraph>,
        prev_work_products: WorkProductMap,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
    ) -> DepGraph<D> {
        let prev_graph_node_count = prev_graph.node_count();

        let current = CurrentDepGraph::new(
            profiler,
            prev_graph_node_count,
            encoder,
            record_graph,
            record_stats,
            Arc::clone(&prev_graph),
        );

        let colors = DepNodeColorMap::new(prev_graph_node_count);

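        // Intern the shared, dependency-less anonymous node up front; it must
        // receive the first index (`SINGLETON_DEPENDENCYLESS_ANON_NODE`).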
        let _green_node_index = current.intern_new_node(
            DepNode { kind: D::DEP_KIND_NULL, hash: current.anon_id_seed.into() },
            EdgesVec::new(),
            Fingerprint::ZERO,
        );
        assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);

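        // Intern the "forever red" node, which must land at index 1
        // (`FOREVER_RED_NODE`).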
        let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
            &prev_graph,
            DepNode { kind: D::DEP_KIND_RED, hash: Fingerprint::ZERO.into() },
            EdgesVec::new(),
            None,
        );
        assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
        match red_node_prev_index_and_color {
            None => {
                // The previous graph was empty, so there is no node to recolor.
                assert!(prev_graph_node_count == 0);
            }
            Some((prev_red_node_index, DepNodeColor::Red)) => {
                assert_eq!(prev_red_node_index.as_usize(), red_node_index.as_usize());
                colors.insert(prev_red_node_index, DepNodeColor::Red);
            }
            Some((_, DepNodeColor::Green(_))) => {
                panic!("DepNodeIndex::FOREVER_RED_NODE evaluated to DepNodeColor::Green")
            }
        }

        DepGraph {
            data: Some(Arc::new(DepGraphData {
                previous_work_products: prev_work_products,
                dep_node_debug: Default::default(),
                current,
                processed_side_effects: Default::default(),
                previous: prev_graph,
                colors,
                debug_loaded_from_disk: Default::default(),
            })),
            virtual_dep_node_index: Arc::new(AtomicU32::new(0)),
        }
    }

    pub fn new_disabled() -> DepGraph<D> {
        DepGraph { data: None, virtual_dep_node_index: Arc::new(AtomicU32::new(0)) }
    }

    #[inline]
    pub(crate) fn data(&self) -> Option<&DepGraphData<D>> {
        self.data.as_deref()
    }

    #[inline]
    pub fn is_fully_enabled(&self) -> bool {
        self.data.is_some()
    }

    pub fn with_query(&self, f: impl Fn(&DepGraphQuery)) {
        if let Some(data) = &self.data {
            data.current.encoder.with_query(f)
        }
    }

    pub fn assert_ignored(&self) {
        if let Some(..) = self.data {
            D::read_deps(|task_deps| {
                assert_matches!(
                    task_deps,
                    TaskDepsRef::Ignore,
                    "expected no task dependency tracking"
                );
            })
        }
    }

    pub fn with_ignore<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Ignore, op)
    }

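    /// Executes `op` in a context where recording new dependencies is
    /// forbidden, as is the case while a query result is being deserialized
    /// from the on-disk cache. Any attempted dependency read inside `op`
    /// panics (see `panic_on_forbidden_read`).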
    pub fn with_query_deserialization<OP, R>(&self, op: OP) -> R
    where
        OP: FnOnce() -> R,
    {
        D::with_deps(TaskDepsRef::Forbid, op)
    }

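    /// Runs `task(cx, arg)` as a dep-graph task keyed by `key`, recording
    /// every dependency read during execution as an edge of the new node.
    /// When the graph is disabled, the task is executed directly and a
    /// virtual index is returned.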
    #[inline(always)]
    pub fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        match self.data() {
            Some(data) => data.with_task(key, cx, arg, task, hash_result),
            None => (task(cx, arg), self.next_virtual_depnode_index()),
        }
    }

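    /// Executes `op`, recording its reads under an anonymous node whose
    /// identity is derived from the set of dependencies rather than from a
    /// query key, then registers a read of the resulting node.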
    pub fn with_anon_task<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        match self.data() {
            Some(data) => {
                let (result, index) = data.with_anon_task_inner(cx, dep_kind, op);
                self.read_index(index);
                (result, index)
            }
            None => (op(), self.next_virtual_depnode_index()),
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    #[inline(always)]
    pub(crate) fn with_task<Ctxt: HasDepContext<Deps = D>, A: Debug, R>(
        &self,
        key: DepNode,
        cx: Ctxt,
        arg: A,
        task: fn(Ctxt, A) -> R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> (R, DepNodeIndex) {
        assert!(
            !self.dep_node_exists(&key),
            "forcing query with already existing `DepNode`\n\
             - query-key: {arg:?}\n\
             - dep-node: {key:?}"
        );

        let with_deps = |task_deps| D::with_deps(task_deps, || task(cx, arg));
        let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
            (with_deps(TaskDepsRef::EvalAlways), EdgesVec::new())
        } else {
            let task_deps = Lock::new(TaskDeps {
                #[cfg(debug_assertions)]
                node: Some(key),
                reads: EdgesVec::new(),
                read_set: Default::default(),
                phantom_data: PhantomData,
            });
            (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
        };

        let dcx = cx.dep_context();
        let dep_node_index =
            self.hash_result_and_intern_node(dcx, key, edges, &result, hash_result);

        (result, dep_node_index)
    }

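    /// Executes `op` as an anonymous task. The `DepNode` is computed by
    /// hashing the recorded dependencies, so two anonymous tasks with the
    /// same reads share a node. Unlike `DepGraph::with_anon_task`, this does
    /// not register a read of the resulting node in the caller's context.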
    pub(crate) fn with_anon_task_inner<Tcx: DepContext<Deps = D>, OP, R>(
        &self,
        cx: Tcx,
        dep_kind: DepKind,
        op: OP,
    ) -> (R, DepNodeIndex)
    where
        OP: FnOnce() -> R,
    {
        debug_assert!(!cx.is_eval_always(dep_kind));

        let task_deps = Lock::new(TaskDeps::default());
        let result = D::with_deps(TaskDepsRef::Allow(&task_deps), op);
        let task_deps = task_deps.into_inner();
        let task_deps = task_deps.reads;

        let dep_node_index = match task_deps.len() {
            0 => {
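                // An anonymous node without dependencies can never be
                // invalidated, so all such tasks share one pre-interned,
                // dependency-less node.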
                DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
            }
            1 => {
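                // With exactly one dependency, the anonymous node is
                // invalidated exactly when that dependency is, so reuse its
                // index directly.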
                task_deps[0]
            }
            _ => {
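                // Hash the dependency list, mixed with the per-session
                // `anon_id_seed`, to form the key for this anonymous node.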
                let mut hasher = StableHasher::new();
                task_deps.hash(&mut hasher);

                let target_dep_node = DepNode {
                    kind: dep_kind,
                    hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
                };

                self.current.intern_new_node(target_dep_node, task_deps, Fingerprint::ZERO)
            }
        };

        (result, dep_node_index)
    }

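    /// Hashes the result (when a `hash_result` function is supplied), interns
    /// the node with its edges, and records the node's color relative to the
    /// previous session, if it existed there.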
    fn hash_result_and_intern_node<Ctxt: DepContext<Deps = D>, R>(
        &self,
        cx: &Ctxt,
        node: DepNode,
        edges: EdgesVec,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        let hashing_timer = cx.profiler().incr_result_hashing();
        let current_fingerprint = hash_result.map(|hash_result| {
            cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
        });

        let (dep_node_index, prev_and_color) =
            self.current.intern_node(&self.previous, node, edges, current_fingerprint);

        hashing_timer.finish_with_query_invocation_id(dep_node_index.into());

        if let Some((prev_index, color)) = prev_and_color {
            debug_assert!(
                self.colors.get(prev_index).is_none(),
                "DepGraph::with_task() - Duplicate DepNodeColor insertion for {node:?}",
            );

            self.colors.insert(prev_index, color);
        }

        dep_node_index
    }
}

impl<D: Deps> DepGraph<D> {
    #[inline]
    pub fn read_index(&self, dep_node_index: DepNodeIndex) {
        if let Some(ref data) = self.data {
            D::read_deps(|task_deps| {
                let mut task_deps = match task_deps {
                    TaskDepsRef::Allow(deps) => deps.lock(),
                    TaskDepsRef::EvalAlways => {
                        return;
                    }
                    TaskDepsRef::Ignore => return,
                    TaskDepsRef::Forbid => {
                        panic_on_forbidden_read(data, dep_node_index)
                    }
                };
                let task_deps = &mut *task_deps;

                if cfg!(debug_assertions) {
                    data.current.total_read_count.fetch_add(1, Ordering::Relaxed);
                }

                // Has this dependency been seen before? Scan the inline list
                // while it is small; use the hash set once it has spilled.
                let new_read = if task_deps.reads.len() < EdgesVec::INLINE_CAPACITY {
                    task_deps.reads.iter().all(|other| *other != dep_node_index)
                } else {
                    task_deps.read_set.insert(dep_node_index)
                };
                if new_read {
                    task_deps.reads.push(dep_node_index);
                    if task_deps.reads.len() == EdgesVec::INLINE_CAPACITY {
                        // The inline list just filled up: seed the hash set
                        // with its contents so later lookups stay fast.
                        task_deps.read_set.extend(task_deps.reads.iter().copied());
                    }

                    #[cfg(debug_assertions)]
                    {
                        if let Some(target) = task_deps.node {
                            if let Some(ref forbidden_edge) = data.current.forbidden_edge {
                                let src = forbidden_edge.index_to_node.lock()[&dep_node_index];
                                if forbidden_edge.test(&src, &target) {
                                    panic!("forbidden edge {:?} -> {:?} created", src, target)
                                }
                            }
                        }
                    }
                } else if cfg!(debug_assertions) {
                    data.current.total_duplicate_read_count.fetch_add(1, Ordering::Relaxed);
                }
            })
        }
    }

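    /// Interns `node` with the dependencies currently recorded in the
    /// ambient context, without running a task, and feeds it `result`. If
    /// the node already exists in the current session, the fed result is
    /// instead verified against the cached fingerprint.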
    pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R: Debug>(
        &self,
        node: DepNode,
        cx: Ctxt,
        result: &R,
        hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
    ) -> DepNodeIndex {
        if let Some(data) = self.data.as_ref() {
            if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
                let dep_node_index = data.current.prev_index_to_index.lock()[prev_index];
                if let Some(dep_node_index) = dep_node_index {
                    crate::query::incremental_verify_ich(
                        cx,
                        data,
                        result,
                        prev_index,
                        hash_result,
                        |value| format!("{value:?}"),
                    );

                    #[cfg(debug_assertions)]
                    if hash_result.is_some() {
                        data.current.record_edge(
                            dep_node_index,
                            node,
                            data.prev_fingerprint_of(prev_index),
                        );
                    }

                    return dep_node_index;
                }
            }

            let mut edges = EdgesVec::new();
            D::read_deps(|task_deps| match task_deps {
                TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
                TaskDepsRef::EvalAlways => {
                    edges.push(DepNodeIndex::FOREVER_RED_NODE);
                }
                TaskDepsRef::Ignore => {}
                TaskDepsRef::Forbid => {
                    panic!("Cannot summarize when dependencies are not recorded.")
                }
            });

            data.hash_result_and_intern_node(&cx, node, edges, result, hash_result)
        } else {
            self.next_virtual_depnode_index()
        }
    }
}

impl<D: Deps> DepGraphData<D> {
    #[inline]
    fn dep_node_index_of_opt(&self, dep_node: &DepNode) -> Option<DepNodeIndex> {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.current.prev_index_to_index.lock()[prev_index]
        } else {
            self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied()
        }
    }

    #[inline]
    fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
        self.dep_node_index_of_opt(dep_node).is_some()
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
            self.colors.get(prev_index)
        } else {
            None
        }
    }

    #[inline]
    pub(crate) fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
        self.colors.get(prev_index).is_some_and(|c| c.is_green())
    }

    #[inline]
    pub(crate) fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
        self.previous.fingerprint_by_index(prev_index)
    }

    #[inline]
    pub(crate) fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode {
        self.previous.index_to_node(prev_index)
    }

    pub(crate) fn mark_debug_loaded_from_disk(&self, dep_node: DepNode) {
        self.debug_loaded_from_disk.lock().insert(dep_node);
    }
}

impl<D: Deps> DepGraph<D> {
    #[inline]
    pub fn dep_node_exists(&self, dep_node: &DepNode) -> bool {
        self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
    }

    pub fn previous_work_product(&self, v: &WorkProductId) -> Option<WorkProduct> {
        self.data.as_ref().and_then(|data| data.previous_work_products.get(v).cloned())
    }

    pub fn previous_work_products(&self) -> &WorkProductMap {
        &self.data.as_ref().unwrap().previous_work_products
    }

    pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode) -> bool {
        self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
    }

    #[cfg(debug_assertions)]
    #[inline(always)]
    pub(crate) fn register_dep_node_debug_str<F>(&self, dep_node: DepNode, debug_str_gen: F)
    where
        F: FnOnce() -> String,
    {
        let dep_node_debug = &self.data.as_ref().unwrap().dep_node_debug;

        if dep_node_debug.borrow().contains_key(&dep_node) {
            return;
        }
        let debug_str = self.with_ignore(debug_str_gen);
        dep_node_debug.borrow_mut().insert(dep_node, debug_str);
    }

    pub fn dep_node_debug_str(&self, dep_node: DepNode) -> Option<String> {
        self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned()
    }

    fn node_color(&self, dep_node: &DepNode) -> Option<DepNodeColor> {
        if let Some(ref data) = self.data {
            return data.node_color(dep_node);
        }

        None
    }

    pub fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
    }
}

impl<D: Deps> DepGraphData<D> {
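    /// Attempts to mark `dep_node` green, returning its previous-session and
    /// current-session indices on success. Returns `None` if the node did not
    /// exist in the previous session, is red, or could not be marked green.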
    pub(crate) fn try_mark_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node: &DepNode,
    ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        let prev_index = self.previous.node_to_index_opt(dep_node)?;

        match self.colors.get(prev_index) {
            Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
            Some(DepNodeColor::Red) => None,
            None => {
                self.try_mark_previous_green(qcx, prev_index, dep_node, None)
                    .map(|dep_node_index| (prev_index, dep_node_index))
            }
        }
    }

    #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
    fn try_mark_parent_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        parent_dep_node_index: SerializedDepNodeIndex,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<()> {
        let dep_dep_node_color = self.colors.get(parent_dep_node_index);
        let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!("dependency {dep_dep_node:?} was immediately green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!("dependency {dep_dep_node:?} was immediately red");
                return None;
            }
            None => {}
        }

        if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
            debug!(
                "state of dependency {:?} ({}) is unknown, trying to mark it green",
                dep_dep_node, dep_dep_node.hash,
            );

            let node_index =
                self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node, frame);

            if node_index.is_some() {
                debug!("managed to MARK dependency {dep_dep_node:?} as green");
                return Some(());
            }
        }

        debug!("trying to force dependency {dep_dep_node:?}");
        if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, frame) {
            debug!("dependency {dep_dep_node:?} could not be forced");
            return None;
        }

        let dep_dep_node_color = self.colors.get(parent_dep_node_index);

        match dep_dep_node_color {
            Some(DepNodeColor::Green(_)) => {
                debug!("managed to FORCE dependency {dep_dep_node:?} to green");
                return Some(());
            }
            Some(DepNodeColor::Red) => {
                debug!("dependency {dep_dep_node:?} was red after forcing");
                return None;
            }
            None => {}
        }

        if qcx.dep_context().sess().dcx().has_errors_or_delayed_bugs().is_none() {
            panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
        }

        debug!("dependency {dep_dep_node:?} resulted in compilation error");
        None
    }

    #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
    fn try_mark_previous_green<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        prev_dep_node_index: SerializedDepNodeIndex,
        dep_node: &DepNode,
        frame: Option<&MarkFrame<'_>>,
    ) -> Option<DepNodeIndex> {
        let frame = MarkFrame { index: prev_dep_node_index, parent: frame };

        debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));

        debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);

        let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);

        for dep_dep_node_index in prev_deps {
            self.try_mark_parent_green(qcx, dep_dep_node_index, Some(&frame))?;
        }

        let dep_node_index =
            self.current.promote_node_and_deps_to_current(&self.previous, prev_dep_node_index);

        let side_effects = qcx.load_side_effects(prev_dep_node_index);

        if side_effects.maybe_any() {
            qcx.dep_context().dep_graph().with_query_deserialization(|| {
                self.emit_side_effects(qcx, dep_node_index, side_effects)
            });
        }

        self.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));

        debug!("successfully marked {dep_node:?} as green");
        Some(dep_node_index)
    }

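    /// Re-emits the diagnostics recorded as side effects of a node marked
    /// green this session, at most once per node.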
    #[cold]
    #[inline(never)]
    fn emit_side_effects<Qcx: QueryContext<Deps = D>>(
        &self,
        qcx: Qcx,
        dep_node_index: DepNodeIndex,
        side_effects: QuerySideEffects,
    ) {
        let mut processed = self.processed_side_effects.lock();

        if processed.insert(dep_node_index) {
            qcx.store_side_effects(dep_node_index, side_effects.clone());

            let dcx = qcx.dep_context().sess().dcx();

            for diagnostic in side_effects.diagnostics {
                dcx.emit_diagnostic(diagnostic);
            }
        }
    }
}

impl<D: Deps> DepGraph<D> {
    pub fn is_red(&self, dep_node: &DepNode) -> bool {
        matches!(self.node_color(dep_node), Some(DepNodeColor::Red))
    }

    pub fn is_green(&self, dep_node: &DepNode) -> bool {
        self.node_color(dep_node).is_some_and(|c| c.is_green())
    }

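    /// Loads the cached results of all green nodes from the previous on-disk
    /// cache, so that they remain available for the next session's cache.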
    pub fn exec_cache_promotions<Tcx: DepContext>(&self, tcx: Tcx) {
        let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");

        let data = self.data.as_ref().unwrap();
        for prev_index in data.colors.values.indices() {
            match data.colors.get(prev_index) {
                Some(DepNodeColor::Green(_)) => {
                    let dep_node = data.previous.index_to_node(prev_index);
                    tcx.try_load_from_on_disk_cache(dep_node);
                }
                None | Some(DepNodeColor::Red) => {
                    // Red and unmarked nodes will be re-executed if needed,
                    // so there is nothing to promote for them.
                }
            }
        }
    }

    pub fn print_incremental_info(&self) {
        if let Some(data) = &self.data {
            data.current.encoder.print_incremental_info(
                data.current.total_read_count.load(Ordering::Relaxed),
                data.current.total_duplicate_read_count.load(Ordering::Relaxed),
            )
        }
    }

    pub fn finish_encoding(&self) -> FileEncodeResult {
        if let Some(data) = &self.data { data.current.encoder.finish() } else { Ok(0) }
    }

    pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
        debug_assert!(self.data.is_none());
        let index = self.virtual_dep_node_index.fetch_add(1, Ordering::Relaxed);
        DepNodeIndex::from_u32(index)
    }
}

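/// A work product is a side effect of compilation that survives across
/// sessions, such as an object file produced for a codegen unit.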
#[derive(Clone, Debug, Encodable, Decodable)]
pub struct WorkProduct {
    pub cgu_name: String,
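
    /// Saved files associated with this work product: each key identifies
    /// the kind of file and the value is the saved file's name.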
    pub saved_files: UnordMap<String, String>,
}

pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;

rustc_index::newtype_index! {
    struct EdgeIndex {}
}

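/// The dep-graph being built for the current session. Nodes are streamed to
/// `encoder` as they are interned; `new_node_to_index` maps newly created
/// nodes to their indices, while `prev_index_to_index` tracks which
/// previous-session nodes have been promoted or re-interned.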
pub(super) struct CurrentDepGraph<D: Deps> {
    encoder: GraphEncoder<D>,
    new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>,
    prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,

    #[cfg(debug_assertions)]
    fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,

    #[cfg(debug_assertions)]
    forbidden_edge: Option<EdgeFilter>,

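    /// A pseudo-random seed, derived from the time at session start, that is
    /// mixed into the hash of anonymous nodes (see `with_anon_task_inner`).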
    anon_id_seed: Fingerprint,

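    /// Simple read counters for profiling and debugging; only incremented
    /// under `debug_assertions`.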
    total_read_count: AtomicU64,
    total_duplicate_read_count: AtomicU64,
}

impl<D: Deps> CurrentDepGraph<D> {
    fn new(
        profiler: &SelfProfilerRef,
        prev_graph_node_count: usize,
        encoder: FileEncoder,
        record_graph: bool,
        record_stats: bool,
        previous: Arc<SerializedDepGraph>,
    ) -> Self {
        use std::time::{SystemTime, UNIX_EPOCH};

        let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap();
        let nanos = duration.as_nanos();
        let mut stable_hasher = StableHasher::new();
        nanos.hash(&mut stable_hasher);
        let anon_id_seed = stable_hasher.finish();

        #[cfg(debug_assertions)]
        let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") {
            Ok(s) => match EdgeFilter::new(&s) {
                Ok(f) => Some(f),
                Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err),
            },
            Err(_) => None,
        };

        // Pre-allocate for roughly 2% node growth over the previous session,
        // plus some constant headroom.
        let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200;

        CurrentDepGraph {
            encoder: GraphEncoder::new(
                encoder,
                prev_graph_node_count,
                record_graph,
                record_stats,
                profiler,
                previous,
            ),
            new_node_to_index: Sharded::new(|| {
                FxHashMap::with_capacity_and_hasher(
                    new_node_count_estimate / sharded::shards(),
                    Default::default(),
                )
            }),
            prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)),
            anon_id_seed,
            #[cfg(debug_assertions)]
            forbidden_edge,
            #[cfg(debug_assertions)]
            fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
            total_read_count: AtomicU64::new(0),
            total_duplicate_read_count: AtomicU64::new(0),
        }
    }

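    /// Records the fingerprint assigned to `dep_node_index`, asserting that
    /// re-interning a node never changes its fingerprint, and registers the
    /// node for forbidden-edge checking when `RUST_FORBID_DEP_GRAPH_EDGE`
    /// is set.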
    #[cfg(debug_assertions)]
    fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) {
        if let Some(forbidden_edge) = &self.forbidden_edge {
            forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
        }
        let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
        assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
    }
    #[inline(always)]
    fn intern_new_node(
        &self,
        key: DepNode,
        edges: EdgesVec,
        current_fingerprint: Fingerprint,
    ) -> DepNodeIndex {
        let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) {
            Entry::Occupied(entry) => *entry.get(),
            Entry::Vacant(entry) => {
                let dep_node_index = self.encoder.send(key, current_fingerprint, edges);
                entry.insert(dep_node_index);
                dep_node_index
            }
        };

        #[cfg(debug_assertions)]
        self.record_edge(dep_node_index, key, current_fingerprint);

        dep_node_index
    }

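    /// Interns a node that may also exist in the previous graph. When it
    /// does, the node's color is determined by comparing `fingerprint`
    /// against the previous session's fingerprint: equal means green,
    /// different or absent means red. Returns the new index together with
    /// the previous index and computed color, if any.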
    fn intern_node(
        &self,
        prev_graph: &SerializedDepGraph,
        key: DepNode,
        edges: EdgesVec,
        fingerprint: Option<Fingerprint>,
    ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) {
        if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
            let get_dep_node_index = |fingerprint| {
                let mut prev_index_to_index = self.prev_index_to_index.lock();

                let dep_node_index = match prev_index_to_index[prev_index] {
                    Some(dep_node_index) => dep_node_index,
                    None => {
                        let dep_node_index = self.encoder.send(key, fingerprint, edges);
                        prev_index_to_index[prev_index] = Some(dep_node_index);
                        dep_node_index
                    }
                };

                #[cfg(debug_assertions)]
                self.record_edge(dep_node_index, key, fingerprint);

                dep_node_index
            };

            if let Some(fingerprint) = fingerprint {
                if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
                    // The fingerprint is unchanged: the node is green.
                    let dep_node_index = get_dep_node_index(fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
                } else {
                    // The fingerprint changed: the node is red.
                    let dep_node_index = get_dep_node_index(fingerprint);
                    (dep_node_index, Some((prev_index, DepNodeColor::Red)))
                }
            } else {
                // Without a fingerprint the node cannot be proven unchanged,
                // so it is conservatively marked red.
                let dep_node_index = get_dep_node_index(Fingerprint::ZERO);
                (dep_node_index, Some((prev_index, DepNodeColor::Red)))
            }
        } else {
            let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO);

            // This node did not exist in the previous session.
            let dep_node_index = self.intern_new_node(key, edges, fingerprint);

            (dep_node_index, None)
        }
    }

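    /// Promotes a node from the previous graph, together with its cached
    /// edges, into the current graph, reusing its already-interned
    /// dependencies rather than re-recording them.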
    fn promote_node_and_deps_to_current(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) -> DepNodeIndex {
        self.debug_assert_not_in_new_nodes(prev_graph, prev_index);

        let mut prev_index_to_index = self.prev_index_to_index.lock();

        match prev_index_to_index[prev_index] {
            Some(dep_node_index) => dep_node_index,
            None => {
                let dep_node_index = self.encoder.send_promoted(prev_index, &*prev_index_to_index);
                prev_index_to_index[prev_index] = Some(dep_node_index);
                #[cfg(debug_assertions)]
                self.record_edge(
                    dep_node_index,
                    prev_graph.index_to_node(prev_index),
                    prev_graph.fingerprint_by_index(prev_index),
                );
                dep_node_index
            }
        }
    }

    #[inline]
    fn debug_assert_not_in_new_nodes(
        &self,
        prev_graph: &SerializedDepGraph,
        prev_index: SerializedDepNodeIndex,
    ) {
        let node = &prev_graph.index_to_node(prev_index);
        debug_assert!(
            !self.new_node_to_index.lock_shard_by_value(node).contains_key(node),
            "node from previous graph present in new node collection"
        );
    }
}

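/// Controls how dependency reads behave in the ambient context: `Allow`
/// records them into the given `TaskDeps`, `EvalAlways` and `Ignore` discard
/// them, and `Forbid` panics on any read (used while deserializing query
/// results; see `panic_on_forbidden_read`).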
#[derive(Debug, Clone, Copy)]
pub enum TaskDepsRef<'a> {
    Allow(&'a Lock<TaskDeps>),
    EvalAlways,
    Ignore,
    Forbid,
}

#[derive(Debug)]
pub struct TaskDeps {
    #[cfg(debug_assertions)]
    node: Option<DepNode>,
    reads: EdgesVec,
    read_set: FxHashSet<DepNodeIndex>,
    phantom_data: PhantomData<DepNode>,
}

impl Default for TaskDeps {
    fn default() -> Self {
        Self {
            #[cfg(debug_assertions)]
            node: None,
            reads: EdgesVec::new(),
            read_set: FxHashSet::default(),
            phantom_data: PhantomData,
        }
    }
}

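/// A concurrent map from previous-session node indices to their color,
/// packed as one `AtomicU32` per node: `0` is unknown, `1` is red, and
/// `n + 2` is green with current-session index `n`.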
struct DepNodeColorMap {
    values: IndexVec<SerializedDepNodeIndex, AtomicU32>,
}

const COMPRESSED_NONE: u32 = 0;
const COMPRESSED_RED: u32 = 1;
const COMPRESSED_FIRST_GREEN: u32 = 2;

impl DepNodeColorMap {
    fn new(size: usize) -> DepNodeColorMap {
        DepNodeColorMap { values: (0..size).map(|_| AtomicU32::new(COMPRESSED_NONE)).collect() }
    }

    #[inline]
    fn get(&self, index: SerializedDepNodeIndex) -> Option<DepNodeColor> {
        match self.values[index].load(Ordering::Acquire) {
            COMPRESSED_NONE => None,
            COMPRESSED_RED => Some(DepNodeColor::Red),
            value => {
                Some(DepNodeColor::Green(DepNodeIndex::from_u32(value - COMPRESSED_FIRST_GREEN)))
            }
        }
    }

    #[inline]
    fn insert(&self, index: SerializedDepNodeIndex, color: DepNodeColor) {
        self.values[index].store(
            match color {
                DepNodeColor::Red => COMPRESSED_RED,
                DepNodeColor::Green(index) => index.as_u32() + COMPRESSED_FIRST_GREEN,
            },
            Ordering::Release,
        )
    }
}

#[inline(never)]
#[cold]
pub(crate) fn print_markframe_trace<D: Deps>(graph: &DepGraph<D>, frame: Option<&MarkFrame<'_>>) {
    let data = graph.data.as_ref().unwrap();

    eprintln!("there was a panic while trying to force a dep node");
    eprintln!("try_mark_green dep node stack:");

    let mut i = 0;
    let mut current = frame;
    while let Some(frame) = current {
        let node = data.previous.index_to_node(frame.index);
        eprintln!("#{i} {node:?}");
        current = frame.parent;
        i += 1;
    }

    eprintln!("end of try_mark_green dep node stack");
}

#[cold]
#[inline(never)]
fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepNodeIndex) -> ! {
    let mut dep_node = None;

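    // The index does not identify its node directly, so search for it: first
    // among promoted previous-session nodes, then among newly interned ones.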
    for (prev_index, index) in data.current.prev_index_to_index.lock().iter_enumerated() {
        if index == &Some(dep_node_index) {
            dep_node = Some(data.previous.index_to_node(prev_index));
            break;
        }
    }

    if dep_node.is_none() {
        for shard in data.current.new_node_to_index.lock_shards() {
            if let Some((node, _)) = shard.iter().find(|(_, index)| **index == dep_node_index) {
                dep_node = Some(*node);
                break;
            }
        }
    }

    let dep_node = dep_node.map_or_else(
        || format!("with index {:?}", dep_node_index),
        |dep_node| format!("`{:?}`", dep_node),
    );

    panic!(
        "Error: trying to record dependency on DepNode {dep_node} in a \
         context that does not allow it (e.g. during query deserialization). \
         The most common case of recording a dependency on a DepNode `foo` is \
         when the corresponding query `foo` is invoked. Invoking queries is not \
         allowed as part of loading something from the incremental on-disk cache. \
         See <https://github.com/rust-lang/rust/pull/91919>."
    )
}