rustc_abi/lib.rs

// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", doc(rust_logo))]
#![cfg_attr(feature = "nightly", feature(assert_matches))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(rustdoc_internals))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
#![warn(unreachable_pub)]
// tidy-alphabetical-end

/*! ABI handling for rustc

## What is an "ABI"?

Literally, "application binary interface", which means it is everything about how code interacts,
at the machine level, with other code. This means it technically covers all of the following:
- object binary format, e.g. relocations or offset tables
- in-memory layout of types
- procedure calling conventions

When we discuss "ABI" in the context of rustc, we are usually discussing calling conventions.
To describe those, `rustc_abi` also covers type layout, as it must for values passed on the stack.
Even though `rustc_abi` is mostly about calling conventions, it is good to remember that the other
usages exist: you will encounter all of them and more if you study target-specific codegen enough!
Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
either or both of the following, sketched after this list:
- `repr(Rust)` types have a mostly-unspecified layout
- `extern "Rust" fn(A) -> R` has an unspecified calling convention
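
A minimal sketch of both points (how this type is laid out and how it is passed may change
from one compiler version to the next):

```rust
struct Example {
    // Default (`repr(Rust)`) layout: field order and offsets are unspecified.
    small: u8,
    big: u32,
}

// `extern "Rust"` is the default; its calling convention is likewise unspecified.
extern "Rust" fn take(x: Example) -> u32 {
    x.big
}
```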

## Crate Goal

ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
It should contain traits and types that other crates then use in their implementation.
For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`,
but `rustc_abi` contains the types for calculating layout and describing register-passing.
This makes it easier to describe things in the same way across targets, codegen backends, and
even other Rust compilers, such as rust-analyzer!

*/

use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Mul, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_index::{Idx, IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_Generic, Encodable_Generic, HashStable_Generic};

mod callconv;
mod layout;
#[cfg(test)]
mod tests;

mod extern_abi;

pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
pub use extern_abi::{ExternAbi, all_names};
#[cfg(feature = "nightly")]
pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
pub use layout::{LayoutCalculator, LayoutCalculatorError};

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
#[cfg(feature = "nightly")]
pub trait HashStableContext {}

#[derive(Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable_Generic, Decodable_Generic, HashStable_Generic))]
pub struct ReprFlags(u8);

bitflags! {
    impl ReprFlags: u8 {
        const IS_C               = 1 << 0;
        const IS_SIMD            = 1 << 1;
        const IS_TRANSPARENT     = 1 << 2;
        // Internal only for now. If true, don't reorder fields.
        // On its own it does not prevent ABI optimizations.
        const IS_LINEAR          = 1 << 3;
        // If true, the type's crate has opted into layout randomization.
        // Other flags can still inhibit reordering and thus randomization.
        // The seed is stored in `ReprOptions.field_shuffle_seed`.
        const RANDOMIZE_LAYOUT   = 1 << 4;
        // Any of these flags being set prevents the field reordering optimization.
        const FIELD_ORDER_UNOPTIMIZABLE   = ReprFlags::IS_C.bits()
                                 | ReprFlags::IS_SIMD.bits()
                                 | ReprFlags::IS_LINEAR.bits();
        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
    }
}

// This is the same as `rustc_data_structures::external_bitflags_debug` but without the
// `rustc_data_structures` dependency, to make it build on stable.
impl std::fmt::Debug for ReprFlags {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        bitflags::parser::to_writer(self, f)
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(feature = "nightly", derive(Encodable_Generic, Decodable_Generic, HashStable_Generic))]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
    /// `Pointer(true)` means `isize`.
    Pointer(bool),
    /// Fixed-sized integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(feature = "nightly", derive(Encodable_Generic, Decodable_Generic, HashStable_Generic))]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// The seed to be used for randomizing a type's layout.
    ///
    /// Note: This could technically be a `u128` which would
    /// be the "most accurate" hash as it'd encompass the item and crate
    /// hash without loss, but it does pay the price of being larger.
    /// Everything's a tradeoff; a 64-bit seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`).
    pub field_shuffle_seed: u64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }

    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
    }

    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
    /// e.g. `repr(C)` or `repr(<int>)`.
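    ///
    /// A sketch of the behavior, using this crate's public API:
    ///
    /// ```
    /// # use rustc_abi::{ReprFlags, ReprOptions};
    /// let mut repr = ReprOptions::default();
    /// assert!(!repr.inhibit_struct_field_reordering());
    /// repr.flags.insert(ReprFlags::IS_C); // as if `#[repr(C)]` had been written
    /// assert!(repr.inhibit_struct_field_reordering());
    /// ```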
    pub fn inhibit_struct_field_reordering(&self) -> bool {
        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate.
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimizations.
    pub fn inhibits_union_abi_opt(&self) -> bool {
        self.c()
    }
}

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAndPrefAlign,
    pub i8_align: AbiAndPrefAlign,
    pub i16_align: AbiAndPrefAlign,
    pub i32_align: AbiAndPrefAlign,
    pub i64_align: AbiAndPrefAlign,
    pub i128_align: AbiAndPrefAlign,
    pub f16_align: AbiAndPrefAlign,
    pub f32_align: AbiAndPrefAlign,
    pub f64_align: AbiAndPrefAlign,
    pub f128_align: AbiAndPrefAlign,
    pub pointer_size: Size,
    pub pointer_align: AbiAndPrefAlign,
    pub aggregate_align: AbiAndPrefAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAndPrefAlign)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default `c_int::BITS`, usually 32).
    /// Note: this isn't in LLVM's data layout string; it is `short_enum`,
    /// so the only valid specs for LLVM are `c_int::BITS` or 8.
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAndPrefAlign::new(align(8)),
            i8_align: AbiAndPrefAlign::new(align(8)),
            i16_align: AbiAndPrefAlign::new(align(16)),
            i32_align: AbiAndPrefAlign::new(align(32)),
            i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
            f16_align: AbiAndPrefAlign::new(align(16)),
            f32_align: AbiAndPrefAlign::new(align(32)),
            f64_align: AbiAndPrefAlign::new(align(64)),
            f128_align: AbiAndPrefAlign::new(align(128)),
            pointer_size: Size::from_bits(64),
            pointer_align: AbiAndPrefAlign::new(align(64)),
            aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
            vector_align: vec![
                (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
                (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
            ],
            instruction_address_space: AddressSpace::DATA,
            c_enum_min_size: Integer::I32,
        }
    }
}

pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
    InvalidBitsSize { err: String },
}

impl TargetDataLayout {
    /// Parses the data layout from an
    /// [LLVM data layout string](https://llvm.org/docs/LangRef.html#data-layout).
    ///
    /// This function doesn't fill `c_enum_min_size`; it will always be `I32`, since it cannot
    /// be determined from the LLVM string.
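    ///
    /// A sketch of usage (the layout string here is a minimal illustrative one, not any real
    /// target's):
    ///
    /// ```
    /// # use rustc_abi::TargetDataLayout;
    /// let Ok(dl) = TargetDataLayout::parse_from_llvm_datalayout_string("e-p:64:64-i64:64")
    /// else {
    ///     panic!("malformed data layout string");
    /// };
    /// assert_eq!(dl.pointer_size.bits(), 64);
    /// ```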
    pub fn parse_from_llvm_datalayout_string<'a>(
        input: &'a str,
    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let parse_size =
            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let parse_align = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s[0], "alignment", cause)?;
            let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
            Ok(AbiAndPrefAlign { abi: align_from_bits(abi)?, pref: align_from_bits(pref)? })
        };

        let mut dl = TargetDataLayout::default();
        let mut i128_align_src = 64;
        for spec in input.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", ref a @ ..] => dl.aggregate_align = parse_align(a, "a")?,
                ["f16", ref a @ ..] => dl.f16_align = parse_align(a, "f16")?,
                ["f32", ref a @ ..] => dl.f32_align = parse_align(a, "f32")?,
                ["f64", ref a @ ..] => dl.f64_align = parse_align(a, "f64")?,
                ["f128", ref a @ ..] => dl.f128_align = parse_align(a, "f128")?,
                // FIXME(erikdesjardins): we should be parsing nonzero address spaces
                // this will require replacing TargetDataLayout::{pointer_size,pointer_align}
                // with e.g. `fn pointer_size_in(AddressSpace)`
                [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                    dl.pointer_size = parse_size(s, p)?;
                    dl.pointer_align = parse_align(a, p)?;
                }
                [s, ref a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        parse_size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = parse_align(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // the largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, ref a @ ..] if s.starts_with('v') => {
                    let v_size = parse_size(&s[1..], "v")?;
                    let a = parse_align(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }
        Ok(dl)
    }

    /// Returns the **exclusive** upper bound on object size in bytes.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits* (not bytes), so on 64-bit
    /// targets we adopt the tighter bound of 2^61 bytes, which is exactly 2^64 bits.
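    ///
    /// For illustration (the `Default` data layout uses 64-bit pointers):
    ///
    /// ```
    /// # use rustc_abi::TargetDataLayout;
    /// assert_eq!(TargetDataLayout::default().obj_size_bound(), 1 << 61);
    /// ```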
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size.bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        use Integer::*;
        match self.pointer_size.bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
        for &(size, align) in &self.vector_align {
            if size == vec_size {
                return align;
            }
        }
        // Default to natural alignment, which is what LLVM does.
        // That is, use the size, rounded up to a power of 2.
        AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
    }
}

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

// used by rust-analyzer
impl HasDataLayout for &TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        (**self).data_layout()
    }
}

/// Endianness of the target, which must match `cfg(target_endian)`.
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{s}""#)),
        }
    }
}

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable_Generic, Decodable_Generic, HashStable_Generic))]
pub struct Size {
    raw: u64,
}

#[cfg(feature = "nightly")]
impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` is implemented as just comparing numerical values and numerical values
    // are not changed by (de-)serialization.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
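    ///
    /// For illustration:
    ///
    /// ```
    /// # use rustc_abi::Size;
    /// assert_eq!(Size::from_bits(12).bytes(), 2); // rounded up to a whole number of bytes
    /// ```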
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        // Avoid potential overflow from `bits + 7`.
        Size { raw: bits / 8 + ((bits % 8) + 7) / 8 }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
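    ///
    /// For illustration:
    ///
    /// ```
    /// # use rustc_abi::Size;
    /// let s = Size::from_bits(8);
    /// assert_eq!(s.sign_extend(0xFF), -1);   // 0xFF is -1 as an 8-bit signed value
    /// assert_eq!(s.sign_extend(0x7F), 127);
    /// ```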
    #[inline]
    pub fn sign_extend(self, value: u128) -> i128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        ((value << shift) as i128) >> shift
    }

    /// Truncates `value` to `self` bits.
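    ///
    /// For illustration:
    ///
    /// ```
    /// # use rustc_abi::Size;
    /// assert_eq!(Size::from_bits(8).truncate(0x1FF), 0xFF); // only the low 8 bits survive
    /// ```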
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1))
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(Encodable_Generic, Decodable_Generic, HashStable_Generic))]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs; don't waste too much space there.
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}

impl AlignFromBytesError {
    pub fn diag_ident(self) -> &'static str {
        match self {
            Self::NotPowerOfTwo(_) => "not_power_of_two",
            Self::TooLarge(_) => "too_large",
        }
    }

    pub fn align(self) -> u64 {
        let (Self::NotPowerOfTwo(align) | Self::TooLarge(align)) = self;
        align
    }
}

impl fmt::Debug for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

impl fmt::Display for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "`{align}` is not a power of 2"),
            AlignFromBytesError::TooLarge(align) => write!(f, "`{align}` is too large"),
        }
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const EIGHT: Align = Align { pow2: 3 };
    // LLVM has a maximum supported alignment of 2^29; we inherit that.
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

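    /// A sketch of accepted and rejected inputs:
    ///
    /// ```
    /// # use rustc_abi::Align;
    /// assert_eq!(Align::from_bytes(16).unwrap().bits(), 128);
    /// assert!(Align::from_bytes(3).is_err()); // not a power of two
    /// ```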
    #[inline]
    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::NotPowerOfTwo(align)
        }

        #[cold]
        const fn too_large(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::TooLarge(align)
        }

        let tz = align.trailing_zeros();
        if align != (1 << tz) {
            return Err(not_power_of_2(align));
        }

        let pow2 = tz as u8;
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        self.bytes() * 8
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    /// Computes the best alignment possible for the given offset
    /// (the largest power of two that the offset is a multiple of).
    ///
    /// N.B., for an offset of `0`, this happens to return `2^64`.
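    ///
    /// For illustration:
    ///
    /// ```
    /// # use rustc_abi::{Align, Size};
    /// // 24 = 8 * 3, so the largest power-of-two divisor is 8.
    /// assert_eq!(Align::max_for_offset(Size::from_bytes(24)).bytes(), 8);
    /// ```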
    #[inline]
    pub fn max_for_offset(offset: Size) -> Align {
        Align { pow2: offset.bytes().trailing_zeros() as u8 }
    }

    /// Lowers the alignment, if necessary, such that the given offset
    /// is aligned to it (the offset is a multiple of the alignment).
    #[inline]
    pub fn restrict_for_offset(self, offset: Size) -> Align {
        self.min(Align::max_for_offset(offset))
    }
}

/// A pair of alignments, ABI-mandated and preferred.
///
/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
/// it is not exposed semantically to programmers, nor can they meaningfully affect it.
/// The only concern for us is that the preferred alignment must not be less than the mandated
/// alignment, so in practice the two values are almost always identical.
///
/// An example of a rare thing actually affected by preferred alignment is the alignment of
/// statics. It is of effectively no consequence for layout in structs and on the stack.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }

    #[inline]
    pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.min(other.abi), pref: self.pref.min(other.pref) }
    }

    #[inline]
    pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: self.abi.max(other.abi), pref: self.pref.max(other.pref) }
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(Encodable_Generic, Decodable_Generic, HashStable_Generic))]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn int_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "i8",
            I16 => "i16",
            I32 => "i32",
            I64 => "i64",
            I128 => "i128",
        }
    }

    pub fn uint_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "u8",
            I16 => "u16",
            I32 => "u32",
            I64 => "u64",
            I128 => "u128",
        }
    }

    #[inline]
    pub fn size(self) -> Size {
        use Integer::*;
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// Gets the `Integer` type from an `IntegerType`.
    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
            IntegerType::Fixed(x, _) => x,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        use Integer::*;
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Returns the largest signed value that can be represented by this `Integer`.
    #[inline]
    pub fn signed_max(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MAX as i128,
            I16 => i16::MAX as i128,
            I32 => i32::MAX as i128,
            I64 => i64::MAX as i128,
            I128 => i128::MAX,
        }
    }

    /// Finds the smallest `Integer` type which can represent the signed value.
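    ///
    /// For illustration:
    ///
    /// ```
    /// # use rustc_abi::Integer;
    /// assert_eq!(Integer::fit_signed(127), Integer::I8);
    /// assert_eq!(Integer::fit_signed(-129), Integer::I16); // one past `i8::MIN`
    /// ```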
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        use Integer::*;
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest `Integer` type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        use Integer::*;
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        use Integer::*;
        let dl = cx.data_layout();

        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
        })
    }

    /// Finds the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        use Integer::*;
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    pub fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}

/// Floating-point types.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}

impl Float {
    pub fn size(self) -> Size {
        use Float::*;

        match self {
            F16 => Size::from_bits(16),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            F128 => Size::from_bits(128),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        use Float::*;
        let dl = cx.data_layout();

        match self {
            F16 => dl.f16_align,
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            F128 => dl.f128_align,
        }
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    Float(Float),
    Pointer(AddressSpace),
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            Float(f) => f.size(),
            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
            // different address spaces can have different sizes
            // (but TargetDataLayout doesn't currently parse that part of the DL string)
            Pointer(_) => dl.pointer_size,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            Float(f) => f.align(dl),
            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
            // different address spaces can have different alignments
            // (but TargetDataLayout doesn't currently parse that part of the DL string)
            Pointer(_) => dl.pointer_align,
        }
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
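///
/// A sketch of the wrap-around containment rule:
///
/// ```
/// # use rustc_abi::WrappingRange;
/// let r = WrappingRange { start: 254, end: 2 };
/// assert!(r.contains(255) && r.contains(0) && r.contains(2));
/// assert!(!r.contains(100));
/// ```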
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `self` with replaced `start`.
    #[inline(always)]
    fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`.
    #[inline(always)]
    fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    #[inline]
    fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }
}

impl fmt::Debug for WrappingRange {
    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
        if self.start > self.end {
            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
        } else {
            write!(fmt, "{}..={}", self.start, self.end)?;
        }
        Ok(())
    }
}

/// Information about one scalar component of a Rust type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Scalar {
    Initialized {
        value: Primitive,

        // FIXME(eddyb) always use the shortest range, e.g., by finding
        // the largest space between two consecutive valid values and
        // taking everything else as the (shortest) valid range.
        valid_range: WrappingRange,
    },
    Union {
        /// Even for unions, we need to use the correct registers for the kind of
        /// values inside the union, so we keep the `Primitive` type around. We
        /// also use it to compute the size of the scalar.
        /// However, unions never have niches and even allow undef,
        /// so there is no `valid_range`.
        value: Primitive,
    },
}

impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        use Integer::*;
        matches!(
            self,
            Scalar::Initialized {
                value: Primitive::Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }

    /// Gets the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    pub fn align(self, cx: &impl HasDataLayout) -> AbiAndPrefAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

    #[inline]
    pub fn to_union(&self) -> Self {
        Self::Union { value: self.primitive() }
    }

    #[inline]
    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }

    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
    /// union.
    #[inline]
    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
        }
    }

    /// Returns `true` if all possible numbers are valid, i.e., `valid_range` covers the whole
    /// layout.
    #[inline]
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this type can be left uninit.
    #[inline]
    pub fn is_uninit_valid(&self) -> bool {
        match *self {
            Scalar::Initialized { .. } => false,
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this is a signed integer scalar.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self.primitive() {
            Primitive::Int(_, signed) => signed,
            _ => false,
        }
    }
}

// NOTE: This enum is generic over the FieldIdx for rust-analyzer usage.
/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum FieldsShape<FieldIdx: Idx> {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector does not go in increasing order.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: IndexVec<FieldIdx, Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: IndexVec<FieldIdx, u32>,
    },
}

impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count, "tried to access field {i} of array with {count} fields");
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => {
                memory_index[FieldIdx::new(i)].try_into().unwrap()
            }
        }
    }

    /// Gets source indices of the fields by increasing offsets.
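    ///
    /// A sketch, using `u32` as the field index type and building the shape by hand with
    /// `IndexVec::from_raw` (layout computation normally constructs this):
    ///
    /// ```
    /// # use rustc_abi::{FieldsShape, Size};
    /// # use rustc_index::IndexVec;
    /// // Source field 0 lives at offset 4, source field 1 at offset 0,
    /// // i.e. the two fields were swapped in memory.
    /// let shape: FieldsShape<u32> = FieldsShape::Arbitrary {
    ///     offsets: IndexVec::from_raw(vec![Size::from_bytes(4), Size::ZERO]),
    ///     memory_index: IndexVec::from_raw(vec![1, 0]),
    /// };
    /// assert_eq!(shape.index_by_increasing_offset().collect::<Vec<_>>(), vec![1, 0]);
    /// ```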
    #[inline]
    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> + '_ {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = IndexVec::new();
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
                    inverse_small[mem_idx as usize] = field_idx.index() as u8;
                }
            } else {
                inverse_big = memory_index.invert_bijective_mapping();
            }
        }

        // Primitives don't really have fields in the way that structs do,
        // but having this return an empty iterator for them is unhelpful
        // since that makes them look kinda like ZSTs, which they're not.
        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };

        (0..pseudofield_count).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i as u32].index()
                }
            }
        })
    }
}

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// The default address space, corresponding to data space.
    pub const DATA: Self = AddressSpace(0);
}

/// The way we represent values to the backend.
///
/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
/// In reality, this implies little about that, but is mostly used to describe the syntactic form
/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
/// how the value will be lowered to the calling convention, in itself.
///
/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
/// and larger values will usually prefer to be represented as memory.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum BackendRepr {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector {
        element: Scalar,
        count: u64,
    },
    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
    Memory {
        /// If true, the size is exact; otherwise it's only a lower bound.
        sized: bool,
    },
}

impl BackendRepr {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            BackendRepr::Uninhabited
            | BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::Vector { .. } => false,
            BackendRepr::Memory { sized } => !sized,
        }
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        !self.is_unsized()
    }

    /// Returns `true` if this is a single signed integer scalar.
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            BackendRepr::Scalar(scal) => scal.is_signed(),
            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
        }
    }

    /// Returns `true` if this is an uninhabited type.
    #[inline]
    pub fn is_uninhabited(&self) -> bool {
        matches!(*self, BackendRepr::Uninhabited)
    }

    /// Returns `true` if this is a scalar type.
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(_))
    }

    /// Returns `true` if this is a bool.
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
    }

    /// Returns the fixed alignment of this ABI, if any is mandated.
    pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
        Some(match *self {
            BackendRepr::Scalar(s) => s.align(cx),
            BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
            BackendRepr::Vector { element, count } => {
                cx.data_layout().vector_align(element.size(cx) * count)
            }
            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
        })
    }

    /// Returns the fixed size of this ABI, if any is mandated.
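    ///
    /// A sketch of the `ScalarPair` case, which may insert padding between the two parts
    /// (`TargetDataLayout::default()` stands in for a real target's data layout here):
    ///
    /// ```
    /// # use rustc_abi::{BackendRepr, Integer, Primitive, Scalar, TargetDataLayout, WrappingRange};
    /// let dl = TargetDataLayout::default();
    /// let scalar = |i: Integer| Scalar::Initialized {
    ///     value: Primitive::Int(i, false),
    ///     valid_range: WrappingRange::full(i.size()),
    /// };
    /// let pair = BackendRepr::ScalarPair(scalar(Integer::I8), scalar(Integer::I32));
    /// // 1 byte for the `u8`, 3 bytes of padding, then 4 bytes for the `u32`.
    /// assert_eq!(pair.inherent_size(&dl).unwrap().bytes(), 8);
    /// ```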
1478    pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
1479        Some(match *self {
1480            BackendRepr::Scalar(s) => {
1481                // No padding in scalars.
1482                s.size(cx)
1483            }
1484            BackendRepr::ScalarPair(s1, s2) => {
1485                // May have some padding between the pair.
1486                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
1487                (field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi)
1488            }
1489            BackendRepr::Vector { element, count } => {
1490                // No padding in vectors, except possibly for trailing padding
1491                // to make the size a multiple of align (e.g. for vectors of size 3).
1492                (element.size(cx) * count).align_to(self.inherent_align(cx)?.abi)
1493            }
1494            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
1495        })
1496    }

    /// Discard validity range information and allow undef.
    pub fn to_union(&self) -> Self {
        match *self {
            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
            BackendRepr::ScalarPair(s1, s2) => {
                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
            }
            BackendRepr::Vector { element, count } => {
                BackendRepr::Vector { element: element.to_union(), count }
            }
            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
                BackendRepr::Memory { sized: true }
            }
        }
    }

    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
        match (self, other) {
            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
            (
                BackendRepr::Vector { element: element_l, count: count_l },
                BackendRepr::Vector { element: element_r, count: count_r },
            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
            }
            // Everything else must be strictly identical.
            _ => self == other,
        }
    }
}
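
// An illustrative note (not from the source): `eq_up_to_validity` treats a
// full-range `u32` scalar and a `NonZeroU32`-style scalar (valid range
// 1..=u32::MAX) as equal, since both have the primitive `Int(I32, false)` and
// only primitives are compared. A `u32` and an `i32` scalar differ, because
// signedness is part of the primitive.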

// NOTE: This enum is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
    /// A type with no valid variants. Must be uninhabited.
    Empty,

    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single {
        /// Always `0` for types that cannot have multiple variants.
        index: VariantIdx,
    },

    /// Enum-likes with more than one variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding<VariantIdx>,
        tag_field: usize,
        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
    },
}
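
// Illustrative examples (not from the source): `struct Point(i32, i32)` gets
// `Variants::Single { index: 0 }`; `enum E { A(u32), B(u32) }` has no niche
// to exploit and gets `Variants::Multiple` with `TagEncoding::Direct`; an
// uninhabited `enum Void {}` gets `Variants::Empty`.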

// NOTE: This enum is generic over the VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum TagEncoding<VariantIdx: Idx> {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant:
    /// Discriminant and variant index coincide.
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field `tag_field` of the enum), which for a variant with
    /// discriminant `d` is set to
    /// `(d - niche_variants.start).wrapping_add(niche_start)`
    /// (this is wrapping arithmetic using the type of the niche field).
    ///
    /// For example, `Option<(usize, &T)>` is represented such that
    /// `None` has a null pointer for the second tuple field, and
    /// `Some` is the identity function (with a non-null reference).
    ///
    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
    /// range cannot be represented; they must be uninhabited.
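    ///
    /// As a small worked example (illustrative, not from the source), consider
    /// `Option<bool>`: `Some` is the untagged variant, `niche_variants` is
    /// `0..=0` (just `None`, whose discriminant is 0), and `niche_start` is 2,
    /// the first byte value invalid for `bool`. The tag for `None` is then
    /// `(0 - 0).wrapping_add(2) == 2`, i.e. `None` is stored as the byte 2.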
    Niche {
        untagged_variant: VariantIdx,
        /// This range *may* contain `untagged_variant`; that is then just a "dead value" and
        /// not used to encode anything.
        niche_variants: RangeInclusive<VariantIdx>,
        /// This is inbounds of the type of the niche field
        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
        niche_start: u128,
    },
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or
        // `v.end` bound. Given an eventual `Option<T>`, we try to maximize the chance for `None`
        // to occupy the niche of zero. This is accomplished by preferring enums with 2 variants
        // (`count == 1`) and always taking the shortest path to niche zero. Having `None` in
        // niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection of
        // bounds is of little interest.
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}
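
// A worked `reserve` example (illustrative, not from the source): reserving one
// value (`count == 1`) from the niche of `bool` (an 8-bit integer with
// `valid_range` 0..=1), as layout computation for `Option<bool>` would.
// Here `max_value == 255` and `available == (0 - 2) & 255 == 254`. Since
// `v.start` (0) is `<= distance_end_zero` (254) but `count` (1) is not
// `<= v.start`, `move_end` runs and returns `(2, valid_range 0..=2)`: the
// reserved niche starts at 2, matching the `TagEncoding::Niche` example above.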

// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape<FieldIdx>,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. coroutines can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<FieldIdx, VariantIdx>,

    /// The `backend_repr` defines how this data will be represented to the codegen backend,
    /// and encodes value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `BackendRepr::ScalarPair`! So, even with a non-`Memory` `backend_repr`, `fields` and
    /// `variants` have to be taken into account to find all fields of this layout.
    pub backend_repr: BackendRepr,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,

    pub align: AbiAndPrefAlign,
    pub size: Size,

    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
    /// requested, even if the requested alignment is equal to the natural alignment.
    pub max_repr_align: Option<Align>,

    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
    /// in some cases.
    pub unadjusted_abi_align: Align,

    /// The randomization seed based on this type's own repr and its fields.
    ///
    /// Since randomization is toggled on a per-crate basis, even crates that do not have
    /// randomization enabled should still calculate a seed, so that downstream uses can use it
    /// to distinguish different types.
    ///
    /// For every `T` and `U` for which we do not guarantee that a `repr(Rust)` `Foo<T>` can be
    /// coerced or transmuted to `Foo<U>`, we aim to create probabilistically distinct seeds so
    /// that `Foo` can choose to reorder its fields based on that information. The current
    /// implementation is a conservative approximation of this goal.
    pub randomization_seed: u64,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if this is an aggregate type (including a ScalarPair!)
    pub fn is_aggregate(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Uninhabited | BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false,
            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
        }
    }

    /// Returns `true` if this is an uninhabited type
    pub fn is_uninhabited(&self) -> bool {
        self.backend_repr.is_uninhabited()
    }

    pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
        let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar);
        let size = scalar.size(cx);
        let align = scalar.align(cx);

        let range = scalar.valid_range(cx);

        // All primitive types for which we don't have subtype coercions should get a distinct seed,
        // so that types wrapping them can use randomization to arrive at distinct layouts.
        //
        // Some type information is already lost at this point, so as an approximation we derive
        // the seed from what remains. For example on 64-bit targets usize and u64 can no longer
        // be distinguished.
        let randomization_seed = size
            .bytes()
            .wrapping_add(
                match scalar.primitive() {
                    Primitive::Int(_, true) => 1,
                    Primitive::Int(_, false) => 2,
                    Primitive::Float(_) => 3,
                    Primitive::Pointer(_) => 4,
                } << 32,
            )
            // distinguishes references from pointers
            .wrapping_add((range.start as u64).rotate_right(16))
            // distinguishes char from u32 and bool from u8
            .wrapping_add((range.end as u64).rotate_right(16));

        LayoutData {
            variants: Variants::Single { index: VariantIdx::new(0) },
            fields: FieldsShape::Primitive,
            backend_repr: BackendRepr::Scalar(scalar),
            largest_niche,
            size,
            align,
            max_repr_align: None,
            unadjusted_abi_align: align.abi,
            randomization_seed,
        }
    }
}

impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
where
    FieldsShape<FieldIdx>: fmt::Debug,
    Variants<FieldIdx, VariantIdx>: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutS>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutData {
            size,
            align,
            backend_repr,
            fields,
            largest_niche,
            variants,
            max_repr_align,
            unadjusted_abi_align,
            ref randomization_seed,
        } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("abi", backend_repr)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("variants", variants)
            .field("max_repr_align", max_repr_align)
            .field("unadjusted_abi_align", unadjusted_abi_align)
            .field("randomization_seed", randomization_seed)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}

/// Encodes extra information we have about a pointer.
/// Note that this information is advisory only, and backends are free to ignore it:
/// if the information is wrong, that can cause UB, but if the information is absent,
/// that must always be okay.
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    /// If this is `None`, then this is a raw pointer, so size and alignment are not guaranteed to
    /// be reliable.
    pub safe: Option<PointerKind>,
    /// If `safe` is `Some`, then the pointer is either null or dereferenceable for this many bytes.
    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire duration
    /// of this function call", i.e. it is UB for the memory that this pointer points to to be freed
    /// while this function is still running.
    /// The size can be zero if the pointer is not dereferenceable.
    pub size: Size,
    /// If `safe` is `Some`, then the pointer is aligned as indicated.
    pub align: Align,
}
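
// An illustrative instance (not from the source): for an `&u32` argument on a
// typical 64-bit target, `PointeeInfo` would be roughly
// `{ safe: Some(PointerKind::SharedRef { frozen: true }), size: 4, align: 4 }`,
// allowing a backend to emit `dereferenceable(4)`-style attributes.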

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        self.backend_repr.is_unsized()
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        self.backend_repr.is_sized()
    }

    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
    pub fn is_1zst(&self) -> bool {
        self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
    }

    /// Returns `true` if the type is a ZST and not unsized.
    ///
    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
    pub fn is_zst(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
                false
            }
            BackendRepr::Uninhabited => self.size.bytes() == 0,
            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
        }
    }
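
    // An illustrative distinction (not from the source): `()` is both a ZST
    // and a 1-ZST, while `[u16; 0]` has size 0 but alignment 2, so `is_zst()`
    // returns true for it and `is_1zst()` returns false.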

    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in the
    /// `Layout`; the `PassMode` needs to be compared as well. Also note that we assume
    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more strict
    /// checks would otherwise be required.
    pub fn eq_abi(&self, other: &Self) -> bool {
        // The one thing that we are not capturing here is that for unsized types, the metadata must
        // also have the same ABI, and moreover that the same metadata leads to the same size. The
        // 2nd point is quite hard to check though.
        self.size == other.size
            && self.is_sized() == other.is_sized()
            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
            && self.align.abi == other.align.abi
            && self.max_repr_align == other.max_repr_align
            && self.unadjusted_abi_align == other.unadjusted_abi_align
    }
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}
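
// An illustrative mapping (not from the source): a plain `struct Point(u8, u8)`
// is laid out as `StructKind::AlwaysSized`; a struct whose last field may be
// unsized, like one ending in a `[u8]` field, uses `StructKind::MaybeUnsized`;
// and each variant of a tagged enum is laid out as `StructKind::Prefixed` with
// the tag's size and alignment, reserving room for the tag up front.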