rustc_abi/
lib.rs

// tidy-alphabetical-start
#![cfg_attr(feature = "nightly", allow(internal_features))]
#![cfg_attr(feature = "nightly", doc(rust_logo))]
#![cfg_attr(feature = "nightly", feature(assert_matches))]
#![cfg_attr(feature = "nightly", feature(rustc_attrs))]
#![cfg_attr(feature = "nightly", feature(rustdoc_internals))]
#![cfg_attr(feature = "nightly", feature(step_trait))]
// tidy-alphabetical-end

/*! ABI handling for rustc

## What is an "ABI"?

Literally, "application binary interface", which means it is everything about how code interacts,
at the machine level, with other code. This means it technically covers all of the following:
- object binary format for e.g. relocations or offset tables
- in-memory layout of types
- procedure calling conventions

When we discuss "ABI" in the context of rustc, we are probably discussing calling conventions.
To describe those, `rustc_abi` also covers type layout, as it must for values passed on the stack.
Despite `rustc_abi` being about calling conventions, it is good to remember these other usages exist.
You will encounter all of them and more if you study target-specific codegen enough!
Even in general conversation, when someone says "the Rust ABI is unstable", it may allude to
either or both of:
- `repr(Rust)` types have a mostly-unspecified layout
- `extern "Rust" fn(A) -> R` has an unspecified calling convention

## Crate Goal

ABI is a foundational concept, so the `rustc_abi` crate serves as an equally foundational crate.
It cannot carry all details relevant to an ABI: those permeate code generation and linkage.
Instead, `rustc_abi` is intended to provide the interface for reasoning about the binary interface.
It should contain traits and types that other crates then use in their implementation.
For example, a platform's `extern "C" fn` calling convention will be implemented in `rustc_target`,
but `rustc_abi` contains the types for calculating layout and describing register-passing.
This makes it easier to describe things in the same way across targets, codegen backends, and
even other Rust compilers, such as rust-analyzer!

*/

use std::fmt;
#[cfg(feature = "nightly")]
use std::iter::Step;
use std::num::{NonZeroUsize, ParseIntError};
use std::ops::{Add, AddAssign, Deref, Mul, RangeFull, RangeInclusive, Sub};
use std::str::FromStr;

use bitflags::bitflags;
#[cfg(feature = "nightly")]
use rustc_data_structures::stable_hasher::StableOrd;
use rustc_hashes::Hash64;
use rustc_index::{Idx, IndexSlice, IndexVec};
#[cfg(feature = "nightly")]
use rustc_macros::{Decodable_NoContext, Encodable_NoContext, HashStable_Generic};

mod callconv;
mod canon_abi;
mod extern_abi;
mod layout;
#[cfg(test)]
mod tests;

pub use callconv::{Heterogeneous, HomogeneousAggregate, Reg, RegKind};
pub use canon_abi::{ArmCall, CanonAbi, InterruptKind, X86Call};
pub use extern_abi::{ExternAbi, all_names};
#[cfg(feature = "nightly")]
pub use layout::{FIRST_VARIANT, FieldIdx, Layout, TyAbiInterface, TyAndLayout, VariantIdx};
pub use layout::{LayoutCalculator, LayoutCalculatorError};

/// Requirements for a `StableHashingContext` to be used in this crate.
/// This is a hack to allow using the `HashStable_Generic` derive macro
/// instead of implementing everything in `rustc_middle`.
#[cfg(feature = "nightly")]
pub trait HashStableContext {}

#[derive(Clone, Copy, PartialEq, Eq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprFlags(u8);

bitflags! {
    impl ReprFlags: u8 {
        const IS_C               = 1 << 0;
        const IS_SIMD            = 1 << 1;
        const IS_TRANSPARENT     = 1 << 2;
        // Internal only for now. If true, don't reorder fields.
        // On its own it does not prevent ABI optimizations.
        const IS_LINEAR          = 1 << 3;
        // If true, the type's crate has opted into layout randomization.
        // Other flags can still inhibit reordering and thus randomization.
        // The seed is stored in `ReprOptions.field_shuffle_seed`.
        const RANDOMIZE_LAYOUT   = 1 << 4;
        // Setting any of these flags prevents the field-reordering optimization.
        const FIELD_ORDER_UNOPTIMIZABLE   = ReprFlags::IS_C.bits()
                                 | ReprFlags::IS_SIMD.bits()
                                 | ReprFlags::IS_LINEAR.bits();
        const ABI_UNOPTIMIZABLE = ReprFlags::IS_C.bits() | ReprFlags::IS_SIMD.bits();
    }
}

// This is the same as `rustc_data_structures::external_bitflags_debug`, but without the
// `rustc_data_structures` dependency, so that it builds on stable.
impl std::fmt::Debug for ReprFlags {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        bitflags::parser::to_writer(self, f)
    }
}

#[derive(Copy, Clone, Debug, Eq, PartialEq)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum IntegerType {
    /// Pointer-sized integer type, i.e. `isize` and `usize`. The field shows signedness, e.g.
    /// `Pointer(true)` means `isize`.
    Pointer(bool),
    /// Fixed-size integer type, e.g. `i8`, `u32`, `i128`. The bool field shows signedness, e.g.
    /// `Fixed(I8, false)` means `u8`.
    Fixed(Integer, bool),
}

impl IntegerType {
    pub fn is_signed(&self) -> bool {
        match self {
            IntegerType::Pointer(b) => *b,
            IntegerType::Fixed(_, b) => *b,
        }
    }
}
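
// A minimal sketch of how these variants map onto Rust's surface integer types
// (`Fixed(I8, false)` models `u8`, `Pointer(true)` models `isize`). This test
// module is illustrative only, not part of the crate's existing test suite.
#[cfg(test)]
mod integer_type_example {
    use super::{Integer, IntegerType};

    #[test]
    fn signedness() {
        assert!(!IntegerType::Fixed(Integer::I8, false).is_signed());
        assert!(IntegerType::Fixed(Integer::I32, true).is_signed());
        assert!(IntegerType::Pointer(true).is_signed());
    }
}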

/// Represents the repr options provided by the user.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Default)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct ReprOptions {
    pub int: Option<IntegerType>,
    pub align: Option<Align>,
    pub pack: Option<Align>,
    pub flags: ReprFlags,
    /// The seed to be used for randomizing a type's layout.
    ///
    /// Note: This could technically be a `u128` which would
    /// be the "most accurate" hash as it'd encompass the item and crate
    /// hash without loss, but it does pay the price of being larger.
    /// Everything's a tradeoff; a 64-bit seed should be sufficient for our
    /// purposes (primarily `-Z randomize-layout`).
    pub field_shuffle_seed: Hash64,
}

impl ReprOptions {
    #[inline]
    pub fn simd(&self) -> bool {
        self.flags.contains(ReprFlags::IS_SIMD)
    }

    #[inline]
    pub fn c(&self) -> bool {
        self.flags.contains(ReprFlags::IS_C)
    }

    #[inline]
    pub fn packed(&self) -> bool {
        self.pack.is_some()
    }

    #[inline]
    pub fn transparent(&self) -> bool {
        self.flags.contains(ReprFlags::IS_TRANSPARENT)
    }

    #[inline]
    pub fn linear(&self) -> bool {
        self.flags.contains(ReprFlags::IS_LINEAR)
    }

    /// Returns the discriminant type, given these `repr` options.
    /// This must only be called on enums!
    pub fn discr_type(&self) -> IntegerType {
        self.int.unwrap_or(IntegerType::Pointer(true))
    }

    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
    /// layout" optimizations, such as representing `Foo<&T>` as a
    /// single pointer.
    pub fn inhibit_enum_layout_opt(&self) -> bool {
        self.c() || self.int.is_some()
    }

    pub fn inhibit_newtype_abi_optimization(&self) -> bool {
        self.flags.intersects(ReprFlags::ABI_UNOPTIMIZABLE)
    }

    /// Returns `true` if this `#[repr()]` guarantees a fixed field order,
    /// e.g. `repr(C)` or `repr(<int>)`.
    pub fn inhibit_struct_field_reordering(&self) -> bool {
        self.flags.intersects(ReprFlags::FIELD_ORDER_UNOPTIMIZABLE) || self.int.is_some()
    }

    /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
    /// was enabled for its declaration crate.
    pub fn can_randomize_type_layout(&self) -> bool {
        !self.inhibit_struct_field_reordering() && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
    }

    /// Returns `true` if this `#[repr()]` should inhibit union ABI optimizations.
    pub fn inhibits_union_abi_opt(&self) -> bool {
        self.c()
    }
}
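
// A minimal sketch of how these queries compose: a hand-built `ReprOptions`
// roughly matching what `#[repr(C)]` lowers to (the real attribute-to-flags
// conversion lives outside this crate). Illustrative only.
#[cfg(test)]
mod repr_options_example {
    use super::{ReprFlags, ReprOptions};

    #[test]
    fn repr_c_inhibits_reordering() {
        let repr = ReprOptions { flags: ReprFlags::IS_C, ..Default::default() };
        assert!(repr.c());
        assert!(repr.inhibit_struct_field_reordering());
        // `repr(C)` fixes the field order, so layout randomization is off
        // even if the crate opted in.
        assert!(!repr.can_randomize_type_layout());
    }
}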

/// The maximum supported number of lanes in a SIMD vector.
///
/// This value is selected based on backend support:
/// * LLVM does not appear to have a vector width limit.
/// * Cranelift stores the base-2 log of the lane count in a 4-bit integer.
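///
/// (Hence the bound below: a 4-bit log2 field encodes exponents up to 15, and
/// `1 << 0xF` is 2^15 = 32768 lanes. This derivation is noted here for exposition.)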
pub const MAX_SIMD_LANES: u64 = 1 << 0xF;

/// How pointers are represented in a given address space.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct PointerSpec {
    /// The size of the bitwise representation of the pointer.
    pointer_size: Size,
    /// The alignment of pointers for this address space.
    pointer_align: AbiAlign,
    /// The size of the value a pointer can be offset by in this address space.
    pointer_offset: Size,
    /// Pointers into this address space contain extra metadata.
    /// FIXME(workingjubilee): Consider adequately reflecting this in the compiler?
    _is_fat: bool,
}

/// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout)
/// for a target, which contains everything needed to compute layouts.
#[derive(Debug, PartialEq, Eq)]
pub struct TargetDataLayout {
    pub endian: Endian,
    pub i1_align: AbiAlign,
    pub i8_align: AbiAlign,
    pub i16_align: AbiAlign,
    pub i32_align: AbiAlign,
    pub i64_align: AbiAlign,
    pub i128_align: AbiAlign,
    pub f16_align: AbiAlign,
    pub f32_align: AbiAlign,
    pub f64_align: AbiAlign,
    pub f128_align: AbiAlign,
    pub aggregate_align: AbiAlign,

    /// Alignments for vector types.
    pub vector_align: Vec<(Size, AbiAlign)>,

    pub default_address_space: AddressSpace,
    pub default_address_space_pointer_spec: PointerSpec,

    /// Address space information of all known address spaces.
    ///
    /// # Note
    ///
    /// This vector does not contain the [`PointerSpec`] relative to the default address space,
    /// which instead lives in [`Self::default_address_space_pointer_spec`].
    address_space_info: Vec<(AddressSpace, PointerSpec)>,

    pub instruction_address_space: AddressSpace,

    /// Minimum size of `#[repr(C)]` enums (default `c_int::BITS`, usually 32).
    /// Note: this isn't part of LLVM's data layout string; it corresponds to
    /// `short_enum`, so the only values valid for LLVM are `c_int::BITS` or 8.
    pub c_enum_min_size: Integer,
}

impl Default for TargetDataLayout {
    /// Creates an instance of `TargetDataLayout`.
    fn default() -> TargetDataLayout {
        let align = |bits| Align::from_bits(bits).unwrap();
        TargetDataLayout {
            endian: Endian::Big,
            i1_align: AbiAlign::new(align(8)),
            i8_align: AbiAlign::new(align(8)),
            i16_align: AbiAlign::new(align(16)),
            i32_align: AbiAlign::new(align(32)),
            i64_align: AbiAlign::new(align(32)),
            i128_align: AbiAlign::new(align(32)),
            f16_align: AbiAlign::new(align(16)),
            f32_align: AbiAlign::new(align(32)),
            f64_align: AbiAlign::new(align(64)),
            f128_align: AbiAlign::new(align(128)),
            aggregate_align: AbiAlign { abi: align(8) },
            vector_align: vec![
                (Size::from_bits(64), AbiAlign::new(align(64))),
                (Size::from_bits(128), AbiAlign::new(align(128))),
            ],
            default_address_space: AddressSpace::ZERO,
            default_address_space_pointer_spec: PointerSpec {
                pointer_size: Size::from_bits(64),
                pointer_align: AbiAlign::new(align(64)),
                pointer_offset: Size::from_bits(64),
                _is_fat: false,
            },
            address_space_info: vec![],
            instruction_address_space: AddressSpace::ZERO,
            c_enum_min_size: Integer::I32,
        }
    }
}

pub enum TargetDataLayoutErrors<'a> {
    InvalidAddressSpace { addr_space: &'a str, cause: &'a str, err: ParseIntError },
    InvalidBits { kind: &'a str, bit: &'a str, cause: &'a str, err: ParseIntError },
    MissingAlignment { cause: &'a str },
    InvalidAlignment { cause: &'a str, err: AlignFromBytesError },
    InconsistentTargetArchitecture { dl: &'a str, target: &'a str },
    InconsistentTargetPointerWidth { pointer_size: u64, target: u32 },
    InvalidBitsSize { err: String },
    UnknownPointerSpecification { err: String },
}

impl TargetDataLayout {
    /// Parse data layout from an
    /// [llvm data layout string](https://llvm.org/docs/LangRef.html#data-layout)
    ///
    /// This function doesn't fill `c_enum_min_size`; it will always be `I32`, since it cannot
    /// be determined from the LLVM string.
    pub fn parse_from_llvm_datalayout_string<'a>(
        input: &'a str,
        default_address_space: AddressSpace,
    ) -> Result<TargetDataLayout, TargetDataLayoutErrors<'a>> {
        // Parse an address space index from a string.
        let parse_address_space = |s: &'a str, cause: &'a str| {
            s.parse::<u32>().map(AddressSpace).map_err(|err| {
                TargetDataLayoutErrors::InvalidAddressSpace { addr_space: s, cause, err }
            })
        };

        // Parse a bit count from a string.
        let parse_bits = |s: &'a str, kind: &'a str, cause: &'a str| {
            s.parse::<u64>().map_err(|err| TargetDataLayoutErrors::InvalidBits {
                kind,
                bit: s,
                cause,
                err,
            })
        };

        // Parse a size string.
        let parse_size =
            |s: &'a str, cause: &'a str| parse_bits(s, "size", cause).map(Size::from_bits);

        // Parse an alignment string.
        let parse_align_str = |s: &'a str, cause: &'a str| {
            let align_from_bits = |bits| {
                Align::from_bits(bits)
                    .map_err(|err| TargetDataLayoutErrors::InvalidAlignment { cause, err })
            };
            let abi = parse_bits(s, "alignment", cause)?;
            Ok(AbiAlign::new(align_from_bits(abi)?))
        };

        // Parse an alignment sequence, possibly in the form `<align>[:<preferred_alignment>]`,
        // ignoring the secondary alignment specifications.
        let parse_align_seq = |s: &[&'a str], cause: &'a str| {
            if s.is_empty() {
                return Err(TargetDataLayoutErrors::MissingAlignment { cause });
            }
            parse_align_str(s[0], cause)
        };

        let mut dl = TargetDataLayout::default();
        dl.default_address_space = default_address_space;

        let mut i128_align_src = 64;
        for spec in input.split('-') {
            let spec_parts = spec.split(':').collect::<Vec<_>>();

            match &*spec_parts {
                ["e"] => dl.endian = Endian::Little,
                ["E"] => dl.endian = Endian::Big,
                [p] if p.starts_with('P') => {
                    dl.instruction_address_space = parse_address_space(&p[1..], "P")?
                }
                ["a", a @ ..] => dl.aggregate_align = parse_align_seq(a, "a")?,
                ["f16", a @ ..] => dl.f16_align = parse_align_seq(a, "f16")?,
                ["f32", a @ ..] => dl.f32_align = parse_align_seq(a, "f32")?,
                ["f64", a @ ..] => dl.f64_align = parse_align_seq(a, "f64")?,
                ["f128", a @ ..] => dl.f128_align = parse_align_seq(a, "f128")?,
                [p, s, a @ ..] if p.starts_with("p") => {
                    let mut p = p.strip_prefix('p').unwrap();
                    let mut _is_fat = false;

                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
                    if p.starts_with('f') {
                        p = p.strip_prefix('f').unwrap();
                        _is_fat = true;
                    }

                    // However, we currently don't take further specifications into account:
                    // an error is emitted instead.
                    if p.starts_with(char::is_alphabetic) {
                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
                            err: p.to_string(),
                        });
                    }

                    let addr_space = if !p.is_empty() {
                        parse_address_space(p, "p-")?
                    } else {
                        AddressSpace::ZERO
                    };

                    let pointer_size = parse_size(s, "p-")?;
                    let pointer_align = parse_align_seq(a, "p-")?;
                    let info = PointerSpec {
                        pointer_offset: pointer_size,
                        pointer_size,
                        pointer_align,
                        _is_fat,
                    };
                    if addr_space == default_address_space {
                        dl.default_address_space_pointer_spec = info;
                    } else {
                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
                            Some(e) => e.1 = info,
                            None => {
                                dl.address_space_info.push((addr_space, info));
                            }
                        }
                    }
                }
                [p, s, a, _pr, i] if p.starts_with("p") => {
                    let mut p = p.strip_prefix('p').unwrap();
                    let mut _is_fat = false;

                    // Some targets, such as CHERI, use the 'f' suffix in the p- spec to signal that
                    // they use 'fat' pointers. The resulting prefix may look like `pf<addr_space>`.
                    if p.starts_with('f') {
                        p = p.strip_prefix('f').unwrap();
                        _is_fat = true;
                    }

                    // However, we currently don't take further specifications into account:
                    // an error is emitted instead.
                    if p.starts_with(char::is_alphabetic) {
                        return Err(TargetDataLayoutErrors::UnknownPointerSpecification {
                            err: p.to_string(),
                        });
                    }

                    let addr_space = if !p.is_empty() {
                        parse_address_space(p, "p")?
                    } else {
                        AddressSpace::ZERO
                    };

                    let info = PointerSpec {
                        pointer_size: parse_size(s, "p-")?,
                        pointer_align: parse_align_str(a, "p-")?,
                        pointer_offset: parse_size(i, "p-")?,
                        _is_fat,
                    };

                    if addr_space == default_address_space {
                        dl.default_address_space_pointer_spec = info;
                    } else {
                        match dl.address_space_info.iter_mut().find(|(a, _)| *a == addr_space) {
                            Some(e) => e.1 = info,
                            None => {
                                dl.address_space_info.push((addr_space, info));
                            }
                        }
                    }
                }

                [s, a @ ..] if s.starts_with('i') => {
                    let Ok(bits) = s[1..].parse::<u64>() else {
                        parse_size(&s[1..], "i")?; // For the user error.
                        continue;
                    };
                    let a = parse_align_seq(a, s)?;
                    match bits {
                        1 => dl.i1_align = a,
                        8 => dl.i8_align = a,
                        16 => dl.i16_align = a,
                        32 => dl.i32_align = a,
                        64 => dl.i64_align = a,
                        _ => {}
                    }
                    if bits >= i128_align_src && bits <= 128 {
                        // Default alignment for i128 is decided by taking the alignment of
                        // the largest-sized i{64..=128}.
                        i128_align_src = bits;
                        dl.i128_align = a;
                    }
                }
                [s, a @ ..] if s.starts_with('v') => {
                    let v_size = parse_size(&s[1..], "v")?;
                    let a = parse_align_seq(a, s)?;
                    if let Some(v) = dl.vector_align.iter_mut().find(|v| v.0 == v_size) {
                        v.1 = a;
                        continue;
                    }
                    // No existing entry, add a new one.
                    dl.vector_align.push((v_size, a));
                }
                _ => {} // Ignore everything else.
            }
        }

        // If not explicitly given, inherit the pointer spec for specific LLVM elements (such as
        // the instruction address space) from the default data address space.
        if (dl.instruction_address_space != dl.default_address_space)
            && dl
                .address_space_info
                .iter()
                .find(|(a, _)| *a == dl.instruction_address_space)
                .is_none()
        {
            dl.address_space_info.push((
                dl.instruction_address_space,
                dl.default_address_space_pointer_spec.clone(),
            ));
        }

        Ok(dl)
    }

    /// Returns the **exclusive** upper bound on object size in bytes, in the default data
    /// address space.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only about
    /// bytes, so we adopt this more constrained size bound due to that technical limitation.
    #[inline]
    pub fn obj_size_bound(&self) -> u64 {
        match self.pointer_size().bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    /// Returns the **exclusive** upper bound on object size in bytes, in the given address
    /// space.
    ///
    /// The theoretical maximum object size is defined as the maximum positive `isize` value.
    /// This ensures that the `offset` semantics remain well-defined by allowing it to correctly
    /// index every address within an object along with one byte past the end, along with allowing
    /// `isize` to store the difference between any two pointers into an object.
    ///
    /// LLVM uses a 64-bit integer to represent object size in *bits*, but we care only about
    /// bytes, so we adopt this more constrained size bound due to that technical limitation.
    #[inline]
    pub fn obj_size_bound_in(&self, address_space: AddressSpace) -> u64 {
        match self.pointer_size_in(address_space).bits() {
            16 => 1 << 15,
            32 => 1 << 31,
            64 => 1 << 61,
            bits => panic!("obj_size_bound: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer(&self) -> Integer {
        use Integer::*;
        match self.pointer_offset().bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    #[inline]
    pub fn ptr_sized_integer_in(&self, address_space: AddressSpace) -> Integer {
        use Integer::*;
        match self.pointer_offset_in(address_space).bits() {
            16 => I16,
            32 => I32,
            64 => I64,
            bits => panic!("ptr_sized_integer: unknown pointer bit size {bits}"),
        }
    }

    /// psABI-mandated alignment for a vector type, if any.
    #[inline]
    fn cabi_vector_align(&self, vec_size: Size) -> Option<AbiAlign> {
        self.vector_align
            .iter()
            .find(|(size, _align)| *size == vec_size)
            .map(|(_size, align)| *align)
    }

    /// An alignment resembling the one LLVM would pick for a vector.
    #[inline]
    pub fn llvmlike_vector_align(&self, vec_size: Size) -> AbiAlign {
        self.cabi_vector_align(vec_size).unwrap_or(AbiAlign::new(
            Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap(),
        ))
    }

    /// Get the pointer size in the default data address space.
    #[inline]
    pub fn pointer_size(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_size
    }

    /// Get the pointer size in a specific address space.
    #[inline]
    pub fn pointer_size_in(&self, c: AddressSpace) -> Size {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_size;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_size
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }

    /// Get the pointer offset (LLVM's "index size") in the default data address space.
    #[inline]
    pub fn pointer_offset(&self) -> Size {
        self.default_address_space_pointer_spec.pointer_offset
    }

    /// Get the pointer offset (LLVM's "index size") in a specific address space.
    #[inline]
    pub fn pointer_offset_in(&self, c: AddressSpace) -> Size {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_offset;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_offset
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }

    /// Get the pointer alignment in the default data address space.
    #[inline]
    pub fn pointer_align(&self) -> AbiAlign {
        self.default_address_space_pointer_spec.pointer_align
    }

    /// Get the pointer alignment in a specific address space.
    #[inline]
    pub fn pointer_align_in(&self, c: AddressSpace) -> AbiAlign {
        if c == self.default_address_space {
            return self.default_address_space_pointer_spec.pointer_align;
        }

        if let Some(e) = self.address_space_info.iter().find(|(a, _)| a == &c) {
            e.1.pointer_align
        } else {
            panic!("Use of unknown address space {c:?}");
        }
    }
}
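
// A minimal sketch of driving the parser above with a hypothetical, x86_64-style
// data layout string (the string is illustrative, not copied from any target
// spec). `TargetDataLayoutErrors` doesn't implement `Debug`, hence the manual
// `panic!` instead of `unwrap`.
#[cfg(test)]
mod datalayout_parse_example {
    use super::{AddressSpace, Endian, Integer, Size, TargetDataLayout};

    #[test]
    fn parse_little_endian_64_bit_layout() {
        let dl = TargetDataLayout::parse_from_llvm_datalayout_string(
            "e-p:64:64-i64:64-i128:128-n32:64-S128",
            AddressSpace::ZERO,
        )
        .unwrap_or_else(|_| panic!("example data layout string should parse"));
        assert_eq!(dl.endian, Endian::Little);
        assert_eq!(dl.pointer_size(), Size::from_bits(64));
        assert_eq!(dl.ptr_sized_integer(), Integer::I64);
    }
}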

pub trait HasDataLayout {
    fn data_layout(&self) -> &TargetDataLayout;
}

impl HasDataLayout for TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        self
    }
}

// used by rust-analyzer
impl HasDataLayout for &TargetDataLayout {
    #[inline]
    fn data_layout(&self) -> &TargetDataLayout {
        (**self).data_layout()
    }
}

/// Endianness of the target, which must match cfg(target-endian).
#[derive(Copy, Clone, PartialEq, Eq)]
pub enum Endian {
    Little,
    Big,
}

impl Endian {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Little => "little",
            Self::Big => "big",
        }
    }
}

impl fmt::Debug for Endian {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.write_str(self.as_str())
    }
}

impl FromStr for Endian {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "little" => Ok(Self::Little),
            "big" => Ok(Self::Big),
            _ => Err(format!(r#"unknown endian: "{s}""#)),
        }
    }
}
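
// A small illustrative round-trip between `FromStr` and `as_str` above
// (exposition only; mirrors exactly the strings the impls accept).
#[cfg(test)]
mod endian_example {
    use super::Endian;

    #[test]
    fn parse_round_trip() {
        assert!(matches!("little".parse::<Endian>(), Ok(Endian::Little)));
        assert_eq!(Endian::Big.as_str(), "big");
        assert!("middle".parse::<Endian>().is_err());
    }
}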

/// Size of a type in bytes.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Size {
    raw: u64,
}

#[cfg(feature = "nightly")]
impl StableOrd for Size {
    const CAN_USE_UNSTABLE_SORT: bool = true;

    // `Ord` is implemented as just comparing numerical values and numerical values
    // are not changed by (de-)serialization.
    const THIS_IMPLEMENTATION_HAS_BEEN_TRIPLE_CHECKED: () = ();
}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Size {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Size({} bytes)", self.bytes())
    }
}

impl Size {
    pub const ZERO: Size = Size { raw: 0 };

    /// Rounds `bits` up to the next-higher byte boundary, if `bits` is
    /// not a multiple of 8.
    pub fn from_bits(bits: impl TryInto<u64>) -> Size {
        let bits = bits.try_into().ok().unwrap();
        Size { raw: bits.div_ceil(8) }
    }

    #[inline]
    pub fn from_bytes(bytes: impl TryInto<u64>) -> Size {
        let bytes: u64 = bytes.try_into().ok().unwrap();
        Size { raw: bytes }
    }

    #[inline]
    pub fn bytes(self) -> u64 {
        self.raw
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub fn bits(self) -> u64 {
        #[cold]
        fn overflow(bytes: u64) -> ! {
            panic!("Size::bits: {bytes} bytes in bits doesn't fit in u64")
        }

        self.bytes().checked_mul(8).unwrap_or_else(|| overflow(self.bytes()))
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    #[inline]
    pub fn align_to(self, align: Align) -> Size {
        let mask = align.bytes() - 1;
        Size::from_bytes((self.bytes() + mask) & !mask)
    }

    #[inline]
    pub fn is_aligned(self, align: Align) -> bool {
        let mask = align.bytes() - 1;
        self.bytes() & mask == 0
    }

    #[inline]
    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_add(offset.bytes())?;

        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    #[inline]
    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: &C) -> Option<Size> {
        let dl = cx.data_layout();

        let bytes = self.bytes().checked_mul(count)?;
        if bytes < dl.obj_size_bound() { Some(Size::from_bytes(bytes)) } else { None }
    }

    /// Truncates `value` to `self` bits and then sign-extends it to 128 bits
    /// (i.e., if it is negative, fill with 1's on the left).
    #[inline]
    pub fn sign_extend(self, value: u128) -> i128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        // Sign-extend it.
        let shift = 128 - size;
        // Shift the unsigned value to the left, then shift back to the right as signed
        // (essentially fills with sign bit on the left).
        ((value << shift) as i128) >> shift
    }

    /// Truncates `value` to `self` bits.
    #[inline]
    pub fn truncate(self, value: u128) -> u128 {
        let size = self.bits();
        if size == 0 {
            // Truncated until nothing is left.
            return 0;
        }
        let shift = 128 - size;
        // Truncate (shift left to drop out leftover values, shift right to fill with zeroes).
        (value << shift) >> shift
    }

    #[inline]
    pub fn signed_int_min(&self) -> i128 {
        self.sign_extend(1_u128 << (self.bits() - 1))
    }

    #[inline]
    pub fn signed_int_max(&self) -> i128 {
        i128::MAX >> (128 - self.bits())
    }

    #[inline]
    pub fn unsigned_int_max(&self) -> u128 {
        u128::MAX >> (128 - self.bits())
    }
}
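
// A minimal sketch of the two's-complement helpers above (illustrative values):
// with an 8-bit size, `sign_extend` reinterprets the low byte as signed and
// `truncate` keeps only the low byte.
#[cfg(test)]
mod size_bit_ops_example {
    use super::{Align, Size};

    #[test]
    fn sign_extend_truncate_align() {
        let s = Size::from_bits(8);
        // 0xFF as a signed 8-bit value is -1.
        assert_eq!(s.sign_extend(0xFF), -1);
        // Truncation drops everything above the low 8 bits.
        assert_eq!(s.truncate(0x1FF), 0xFF);
        // `align_to` rounds up to the next multiple of the alignment.
        let four = Align::from_bytes(4).unwrap();
        assert_eq!(Size::from_bytes(5).align_to(four), Size::from_bytes(8));
    }
}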

// Panicking addition, subtraction and multiplication for convenience.
// Avoid during layout computation, return `LayoutError` instead.

impl Add for Size {
    type Output = Size;
    #[inline]
    fn add(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_add(other.bytes()).unwrap_or_else(|| {
            panic!("Size::add: {} + {} doesn't fit in u64", self.bytes(), other.bytes())
        }))
    }
}

impl Sub for Size {
    type Output = Size;
    #[inline]
    fn sub(self, other: Size) -> Size {
        Size::from_bytes(self.bytes().checked_sub(other.bytes()).unwrap_or_else(|| {
            panic!("Size::sub: {} - {} would result in negative size", self.bytes(), other.bytes())
        }))
    }
}

impl Mul<Size> for u64 {
    type Output = Size;
    #[inline]
    fn mul(self, size: Size) -> Size {
        size * self
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    #[inline]
    fn mul(self, count: u64) -> Size {
        match self.bytes().checked_mul(count) {
            Some(bytes) => Size::from_bytes(bytes),
            None => panic!("Size::mul: {} * {} doesn't fit in u64", self.bytes(), count),
        }
    }
}

impl AddAssign for Size {
    #[inline]
    fn add_assign(&mut self, other: Size) {
        *self = *self + other;
    }
}

#[cfg(feature = "nightly")]
impl Step for Size {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u64::steps_between(&start.bytes(), &end.bytes())
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u64::forward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn forward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::forward(start.bytes(), count))
    }

    #[inline]
    unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::forward_unchecked(start.bytes(), count) })
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u64::backward_checked(start.bytes(), count).map(Self::from_bytes)
    }

    #[inline]
    fn backward(start: Self, count: usize) -> Self {
        Self::from_bytes(u64::backward(start.bytes(), count))
    }

    #[inline]
    unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
        Self::from_bytes(unsafe { u64::backward_unchecked(start.bytes(), count) })
    }
}

/// Alignment of a type in bytes (always a power of two).
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub struct Align {
    pow2: u8,
}

// This is debug-printed a lot in larger structs, don't waste too much space there
impl fmt::Debug for Align {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Align({} bytes)", self.bytes())
    }
}

#[derive(Clone, Copy)]
pub enum AlignFromBytesError {
    NotPowerOfTwo(u64),
    TooLarge(u64),
}

impl AlignFromBytesError {
    pub fn diag_ident(self) -> &'static str {
        match self {
            Self::NotPowerOfTwo(_) => "not_power_of_two",
            Self::TooLarge(_) => "too_large",
        }
    }

    pub fn align(self) -> u64 {
        let (Self::NotPowerOfTwo(align) | Self::TooLarge(align)) = self;
        align
    }
}

impl fmt::Debug for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

impl fmt::Display for AlignFromBytesError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            AlignFromBytesError::NotPowerOfTwo(align) => write!(f, "`{align}` is not a power of 2"),
            AlignFromBytesError::TooLarge(align) => write!(f, "`{align}` is too large"),
        }
    }
}

impl Align {
    pub const ONE: Align = Align { pow2: 0 };
    pub const EIGHT: Align = Align { pow2: 3 };
    // LLVM has a maximal supported alignment of 2^29; we inherit that.
    pub const MAX: Align = Align { pow2: 29 };

    #[inline]
    pub fn from_bits(bits: u64) -> Result<Align, AlignFromBytesError> {
        Align::from_bytes(Size::from_bits(bits).bytes())
    }

    #[inline]
    pub const fn from_bytes(align: u64) -> Result<Align, AlignFromBytesError> {
        // Treat an alignment of 0 bytes like 1-byte alignment.
        if align == 0 {
            return Ok(Align::ONE);
        }

        #[cold]
        const fn not_power_of_2(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::NotPowerOfTwo(align)
        }

        #[cold]
        const fn too_large(align: u64) -> AlignFromBytesError {
            AlignFromBytesError::TooLarge(align)
        }

        let tz = align.trailing_zeros();
        if align != (1 << tz) {
            return Err(not_power_of_2(align));
        }

        let pow2 = tz as u8;
        if pow2 > Self::MAX.pow2 {
            return Err(too_large(align));
        }

        Ok(Align { pow2 })
    }

    #[inline]
    pub const fn bytes(self) -> u64 {
        1 << self.pow2
    }

    #[inline]
    pub fn bytes_usize(self) -> usize {
        self.bytes().try_into().unwrap()
    }

    #[inline]
    pub const fn bits(self) -> u64 {
        self.bytes() * 8
    }

    #[inline]
    pub fn bits_usize(self) -> usize {
        self.bits().try_into().unwrap()
    }

    /// Obtain the greatest factor of `size` that is an alignment
    /// (the largest power of two the `Size` is a multiple of).
    ///
    /// Note that all numbers are factors of 0.
    #[inline]
    pub fn max_aligned_factor(size: Size) -> Align {
        Align { pow2: size.bytes().trailing_zeros() as u8 }
    }

    /// Reduces `self` to an aligned factor of `size`.
    #[inline]
    pub fn restrict_for_offset(self, size: Size) -> Align {
        self.min(Align::max_aligned_factor(size))
    }
}
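
// A brief illustration of the two factor helpers above (exposition only):
// 12 = 4 * 3, so the largest power-of-two factor of a 12-byte size is 4.
#[cfg(test)]
mod align_factor_example {
    use super::{Align, Size};

    #[test]
    fn factors_and_offset_restriction() {
        assert_eq!(Align::max_aligned_factor(Size::from_bytes(12)).bytes(), 4);
        // An 8-byte alignment cannot survive at offset 12; it drops to 4.
        let eight = Align::from_bytes(8).unwrap();
        assert_eq!(eight.restrict_for_offset(Size::from_bytes(12)).bytes(), 4);
    }
}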

/// A pair of alignments, ABI-mandated and preferred.
///
/// The "preferred" alignment is an LLVM concept that is virtually meaningless to Rust code:
/// it is not exposed semantically to programmers nor can they meaningfully affect it.
/// The only concern for us is that preferred alignment must not be less than the mandated
/// alignment, and thus in practice the two values are almost always identical.
///
/// An example of a rare thing actually affected by preferred alignment is the alignment of
/// statics. It is of effectively no consequence for layout in structs and on the stack.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AbiAlign {
    pub abi: Align,
}

impl AbiAlign {
    #[inline]
    pub fn new(align: Align) -> AbiAlign {
        AbiAlign { abi: align }
    }

    #[inline]
    pub fn min(self, other: AbiAlign) -> AbiAlign {
        AbiAlign { abi: self.abi.min(other.abi) }
    }

    #[inline]
    pub fn max(self, other: AbiAlign) -> AbiAlign {
        AbiAlign { abi: self.abi.max(other.abi) }
    }
}

impl Deref for AbiAlign {
    type Target = Align;

    fn deref(&self) -> &Self::Target {
        &self.abi
    }
}

/// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(
    feature = "nightly",
    derive(Encodable_NoContext, Decodable_NoContext, HashStable_Generic)
)]
pub enum Integer {
    I8,
    I16,
    I32,
    I64,
    I128,
}

impl Integer {
    pub fn int_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "i8",
            I16 => "i16",
            I32 => "i32",
            I64 => "i64",
            I128 => "i128",
        }
    }

    pub fn uint_ty_str(self) -> &'static str {
        use Integer::*;
        match self {
            I8 => "u8",
            I16 => "u16",
            I32 => "u32",
            I64 => "u64",
            I128 => "u128",
        }
    }

    #[inline]
    pub fn size(self) -> Size {
        use Integer::*;
        match self {
            I8 => Size::from_bytes(1),
            I16 => Size::from_bytes(2),
            I32 => Size::from_bytes(4),
            I64 => Size::from_bytes(8),
            I128 => Size::from_bytes(16),
        }
    }

    /// Gets the `Integer` type from an `IntegerType`.
    pub fn from_attr<C: HasDataLayout>(cx: &C, ity: IntegerType) -> Integer {
        let dl = cx.data_layout();

        match ity {
            IntegerType::Pointer(_) => dl.ptr_sized_integer(),
            IntegerType::Fixed(x, _) => x,
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Integer::*;
        let dl = cx.data_layout();

        match self {
            I8 => dl.i8_align,
            I16 => dl.i16_align,
            I32 => dl.i32_align,
            I64 => dl.i64_align,
            I128 => dl.i128_align,
        }
    }

    /// Returns the largest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_max(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MAX as i128,
            I16 => i16::MAX as i128,
            I32 => i32::MAX as i128,
            I64 => i64::MAX as i128,
            I128 => i128::MAX,
        }
    }

    /// Returns the smallest signed value that can be represented by this Integer.
    #[inline]
    pub fn signed_min(self) -> i128 {
        use Integer::*;
        match self {
            I8 => i8::MIN as i128,
            I16 => i16::MIN as i128,
            I32 => i32::MIN as i128,
            I64 => i64::MIN as i128,
            I128 => i128::MIN,
        }
    }

    /// Finds the smallest Integer type which can represent the signed value.
    #[inline]
    pub fn fit_signed(x: i128) -> Integer {
        use Integer::*;
        match x {
            -0x0000_0000_0000_0080..=0x0000_0000_0000_007f => I8,
            -0x0000_0000_0000_8000..=0x0000_0000_0000_7fff => I16,
            -0x0000_0000_8000_0000..=0x0000_0000_7fff_ffff => I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest Integer type which can represent the unsigned value.
    #[inline]
    pub fn fit_unsigned(x: u128) -> Integer {
        use Integer::*;
        match x {
            0..=0x0000_0000_0000_00ff => I8,
            0..=0x0000_0000_0000_ffff => I16,
            0..=0x0000_0000_ffff_ffff => I32,
            0..=0xffff_ffff_ffff_ffff => I64,
            _ => I128,
        }
    }

    /// Finds the smallest integer with the given alignment.
    pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
        use Integer::*;
        let dl = cx.data_layout();

        [I8, I16, I32, I64, I128].into_iter().find(|&candidate| {
            wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes()
        })
    }

    /// Find the largest integer with the given alignment or less.
    pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
        use Integer::*;
        let dl = cx.data_layout();

        // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
        for candidate in [I64, I32, I16] {
            if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
                return candidate;
            }
        }
        I8
    }

    // FIXME(eddyb) consolidate this and other methods that find the appropriate
    // `Integer` given some requirements.
    #[inline]
    pub fn from_size(size: Size) -> Result<Self, String> {
        match size.bits() {
            8 => Ok(Integer::I8),
            16 => Ok(Integer::I16),
            32 => Ok(Integer::I32),
            64 => Ok(Integer::I64),
            128 => Ok(Integer::I128),
            _ => Err(format!("rust does not support integers with {} bits", size.bits())),
        }
    }
}
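
// A minimal sketch of the value-fitting helpers above (exposition only):
// `fit_signed`/`fit_unsigned` pick the narrowest variant that can hold a value.
#[cfg(test)]
mod integer_fit_example {
    use super::Integer;

    #[test]
    fn smallest_fitting_integer() {
        // -129 is one below i8::MIN, so it needs I16.
        assert_eq!(Integer::fit_signed(-129), Integer::I16);
        assert_eq!(Integer::fit_unsigned(255), Integer::I8);
        assert_eq!(Integer::fit_unsigned(256), Integer::I16);
    }
}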

/// Floating-point types.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Float {
    F16,
    F32,
    F64,
    F128,
}

impl Float {
    pub fn size(self) -> Size {
        use Float::*;

        match self {
            F16 => Size::from_bits(16),
            F32 => Size::from_bits(32),
            F64 => Size::from_bits(64),
            F128 => Size::from_bits(128),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Float::*;
        let dl = cx.data_layout();

        match self {
            F16 => dl.f16_align,
            F32 => dl.f32_align,
            F64 => dl.f64_align,
            F128 => dl.f128_align,
        }
    }
}

/// Fundamental unit of memory access and layout.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Primitive {
    /// The `bool` is the signedness of the `Integer` type.
    ///
    /// One would think we would not care about such details this low down,
    /// but some ABIs are described in terms of C types and ISAs where the
    /// integer arithmetic is done on {sign,zero}-extended registers, e.g.
    /// a negative integer passed by zero-extension will appear positive in
    /// the callee, and most operations on it will produce the wrong values.
    Int(Integer, bool),
    Float(Float),
    Pointer(AddressSpace),
}

impl Primitive {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.size(),
            Float(f) => f.size(),
            Pointer(a) => dl.pointer_size_in(a),
        }
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAlign {
        use Primitive::*;
        let dl = cx.data_layout();

        match self {
            Int(i, _) => i.align(dl),
            Float(f) => f.align(dl),
            Pointer(a) => dl.pointer_align_in(a),
        }
    }
}

/// Inclusive wrap-around range of valid values, that is, if
/// start > end, it represents `start..=MAX`, followed by `0..=end`.
///
/// That is, for an i8 primitive, a range of `254..=2` means the following
/// sequence:
///
///    254 (-2), 255 (-1), 0, 1, 2
///
/// This is intended specifically to mirror LLVM’s `!range` metadata semantics.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

impl WrappingRange {
    pub fn full(size: Size) -> Self {
        Self { start: 0, end: size.unsigned_int_max() }
    }

    /// Returns `true` if `v` is contained in the range.
    #[inline(always)]
    pub fn contains(&self, v: u128) -> bool {
        if self.start <= self.end {
            self.start <= v && v <= self.end
        } else {
            self.start <= v || v <= self.end
        }
    }

    /// Returns `true` if all the values in `other` are contained in this range,
    /// when the values are considered as having width `size`.
    #[inline(always)]
    pub fn contains_range(&self, other: Self, size: Size) -> bool {
        if self.is_full_for(size) {
            true
        } else {
            let trunc = |x| size.truncate(x);

            let delta = self.start;
            let max = trunc(self.end.wrapping_sub(delta));

            let other_start = trunc(other.start.wrapping_sub(delta));
            let other_end = trunc(other.end.wrapping_sub(delta));

            // Having shifted both input ranges by `delta`, now we only need to check
            // whether `0..=max` contains `other_start..=other_end`, which can only
            // happen if the other doesn't wrap since `self` isn't everything.
            (other_start <= other_end) && (other_end <= max)
        }
    }

    /// Returns `self` with replaced `start`.
    #[inline(always)]
    fn with_start(mut self, start: u128) -> Self {
        self.start = start;
        self
    }

    /// Returns `self` with replaced `end`.
    #[inline(always)]
    fn with_end(mut self, end: u128) -> Self {
        self.end = end;
        self
    }

    /// Returns `true` if `size` completely fills the range.
    ///
    /// Note that this is *not* the same as `self == WrappingRange::full(size)`.
    /// Niche calculations can produce full ranges which are not the canonical one;
    /// for example `Option<NonZero<u16>>` gets `valid_range: (..=0) | (1..)`.
    #[inline]
    fn is_full_for(&self, size: Size) -> bool {
        let max_value = size.unsigned_int_max();
        debug_assert!(self.start <= max_value && self.end <= max_value);
        self.start == (self.end.wrapping_add(1) & max_value)
    }

    /// Checks whether this range is considered non-wrapping when the values are
    /// interpreted as *unsigned* numbers of width `size`.
    ///
    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
    /// and `Err(..)` if the range is full so it depends how you think about it.
    #[inline]
    pub fn no_unsigned_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
        if self.is_full_for(size) { Err(..) } else { Ok(self.start <= self.end) }
    }

    /// Checks whether this range is considered non-wrapping when the values are
    /// interpreted as *signed* numbers of width `size`.
    ///
    /// This is heavily dependent on the `size`, as `100..=200` does wrap when
    /// interpreted as `i8`, but doesn't when interpreted as `i16`.
    ///
    /// Returns `Ok(true)` if there's no wrap-around, `Ok(false)` if there is,
    /// and `Err(..)` if the range is full so it depends how you think about it.
    #[inline]
    pub fn no_signed_wraparound(&self, size: Size) -> Result<bool, RangeFull> {
        if self.is_full_for(size) {
            Err(..)
        } else {
            let start: i128 = size.sign_extend(self.start);
            let end: i128 = size.sign_extend(self.end);
            Ok(start <= end)
        }
    }
}
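
// A minimal sketch of the wrap-around semantics documented above, using the
// `254..=2` range over 8-bit values from the doc comment (exposition only).
#[cfg(test)]
mod wrapping_range_example {
    use super::{Size, WrappingRange};

    #[test]
    fn wraparound_contains() {
        let r = WrappingRange { start: 254, end: 2 };
        assert!(r.contains(255));
        assert!(r.contains(0));
        assert!(!r.contains(100));
        // It wraps as unsigned, but as signed i8 it is just -2..=2.
        assert_eq!(r.no_unsigned_wraparound(Size::from_bits(8)), Ok(false));
        assert_eq!(r.no_signed_wraparound(Size::from_bits(8)), Ok(true));
    }
}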
1469
1470impl fmt::Debug for WrappingRange {
1471    fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
1472        if self.start > self.end {
1473            write!(fmt, "(..={}) | ({}..)", self.end, self.start)?;
1474        } else {
1475            write!(fmt, "{}..={}", self.start, self.end)?;
1476        }
1477        Ok(())
1478    }
1479}
1480
1481/// Information about one scalar component of a Rust type.
1482#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
1483#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
1484pub enum Scalar {
1485    Initialized {
1486        value: Primitive,
1487
1488        // FIXME(eddyb) always use the shortest range, e.g., by finding
1489        // the largest space between two consecutive valid values and
1490        // taking everything else as the (shortest) valid range.
1491        valid_range: WrappingRange,
1492    },
1493    Union {
1494        /// Even for unions, we need to use the correct registers for the kind of
1495        /// values inside the union, so we keep the `Primitive` type around. We
1496        /// also use it to compute the size of the scalar.
1497        /// However, unions never have niches and even allow undef,
1498        /// so there is no `valid_range`.
1499        value: Primitive,
1500    },
1501}
1502
1503impl Scalar {
    #[inline]
    pub fn is_bool(&self) -> bool {
        use Integer::*;
        matches!(
            self,
            Scalar::Initialized {
                value: Primitive::Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 }
            }
        )
    }

    /// Get the primitive representation of this type, ignoring the valid range and whether the
    /// value is allowed to be undefined (due to being a union).
    pub fn primitive(&self) -> Primitive {
        match *self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }

    pub fn align(self, cx: &impl HasDataLayout) -> AbiAlign {
        self.primitive().align(cx)
    }

    pub fn size(self, cx: &impl HasDataLayout) -> Size {
        self.primitive().size(cx)
    }

    #[inline]
    pub fn to_union(&self) -> Self {
        Self::Union { value: self.primitive() }
    }

    #[inline]
    pub fn valid_range(&self, cx: &impl HasDataLayout) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }

    /// Allows the caller to mutate the valid range. This operation will panic if attempted on a
    /// union.
    #[inline]
    pub fn valid_range_mut(&mut self) -> &mut WrappingRange {
        match self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            Scalar::Union { .. } => panic!("cannot change the valid range of a union"),
        }
    }

    /// Returns `true` if all possible numbers are valid, i.e. `valid_range` covers the whole
    /// layout.
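    ///
    /// For example (illustrative): a `u8`-like scalar whose valid range is `0..=255`
    /// is always valid, while the `bool` scalar (`0..=1` in 8 bits) is not.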
    #[inline]
    pub fn is_always_valid<C: HasDataLayout>(&self, cx: &C) -> bool {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range.is_full_for(self.size(cx)),
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this type can be left uninit.
    #[inline]
    pub fn is_uninit_valid(&self) -> bool {
        match *self {
            Scalar::Initialized { .. } => false,
            Scalar::Union { .. } => true,
        }
    }

    /// Returns `true` if this is a signed integer scalar
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self.primitive() {
            Primitive::Int(_, signed) => signed,
            _ => false,
        }
    }
}

// NOTE: This struct is generic over the FieldIdx for rust-analyzer usage.
/// Describes how the fields of a type are located in memory.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum FieldsShape<FieldIdx: Idx> {
    /// Scalar primitives and `!`, which never have fields.
    Primitive,

    /// All fields start at no offset. The `usize` is the field count.
    Union(NonZeroUsize),

    /// Array/vector-like placement, with all fields of identical types.
    Array { stride: Size, count: u64 },

    /// Struct-like placement, with precomputed offsets.
    ///
    /// Fields are guaranteed to not overlap, but note that gaps
    /// before, between and after all the fields are NOT always
    /// padding, and as such their contents may not be discarded.
    /// For example, enum variants leave a gap at the start,
    /// where the discriminant field in the enum layout goes.
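    ///
    /// An illustrative sketch (a hypothetical layout, not taken from the layout
    /// algorithm): `struct S { a: u8, b: u32, c: u16 }` may be reordered so that
    /// `b` sits at offset 0, `c` at offset 4, and `a` at offset 6:
    /// ```ignore (illustrative)
    /// // In source order (a, b, c):
    /// let offsets = [Size::from_bytes(6), Size::from_bytes(0), Size::from_bytes(4)];
    /// // a is 3rd in memory, b is 1st, c is 2nd:
    /// let memory_index = [2, 0, 1];
    /// ```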
    Arbitrary {
        /// Offsets for the first byte of each field,
        /// ordered to match the source definition order.
        /// This vector is not necessarily sorted by offset.
        // FIXME(eddyb) use small vector optimization for the common case.
        offsets: IndexVec<FieldIdx, Size>,

        /// Maps source order field indices to memory order indices,
        /// depending on how the fields were reordered (if at all).
        /// This is a permutation, with both the source order and the
        /// memory order using the same (0..n) index ranges.
        ///
        /// Note that during computation of `memory_index`, sometimes
        /// it is easier to operate on the inverse mapping (that is,
        /// from memory order to source order), and that is usually
        /// named `inverse_memory_index`.
        ///
        // FIXME(eddyb) build a better abstraction for permutations, if possible.
        // FIXME(camlorn) also consider small vector optimization here.
        memory_index: IndexVec<FieldIdx, u32>,
    },
}

impl<FieldIdx: Idx> FieldsShape<FieldIdx> {
    #[inline]
    pub fn count(&self) -> usize {
        match *self {
            FieldsShape::Primitive => 0,
            FieldsShape::Union(count) => count.get(),
            FieldsShape::Array { count, .. } => count.try_into().unwrap(),
            FieldsShape::Arbitrary { ref offsets, .. } => offsets.len(),
        }
    }

    #[inline]
    pub fn offset(&self, i: usize) -> Size {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::offset: `Primitive`s have no fields")
            }
            FieldsShape::Union(count) => {
                assert!(i < count.get(), "tried to access field {i} of union with {count} fields");
                Size::ZERO
            }
            FieldsShape::Array { stride, count } => {
                let i = u64::try_from(i).unwrap();
                assert!(i < count, "tried to access field {i} of array with {count} fields");
                stride * i
            }
            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::new(i)],
        }
    }

    #[inline]
    pub fn memory_index(&self, i: usize) -> usize {
        match *self {
            FieldsShape::Primitive => {
                unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
            }
            FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { ref memory_index, .. } => {
                memory_index[FieldIdx::new(i)].try_into().unwrap()
            }
        }
    }

    /// Gets source indices of the fields by increasing offsets.
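    ///
    /// Continuing the illustrative `struct S` sketch on [`FieldsShape::Arbitrary`],
    /// this would yield `1, 2, 0`: first `b` (offset 0), then `c`, then `a`.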
    #[inline]
    pub fn index_by_increasing_offset(&self) -> impl ExactSizeIterator<Item = usize> {
        let mut inverse_small = [0u8; 64];
        let mut inverse_big = IndexVec::new();
        let use_small = self.count() <= inverse_small.len();

        // We have to write this logic twice in order to keep the array small.
        if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
            if use_small {
                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
                    inverse_small[mem_idx as usize] = field_idx.index() as u8;
                }
            } else {
                inverse_big = memory_index.invert_bijective_mapping();
            }
        }

        // Primitives don't really have fields in the way that structs do,
        // but having this return an empty iterator for them is unhelpful
        // since that makes them look kinda like ZSTs, which they're not.
        let pseudofield_count = if let FieldsShape::Primitive = self { 1 } else { self.count() };

        (0..pseudofield_count).map(move |i| match *self {
            FieldsShape::Primitive | FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
            FieldsShape::Arbitrary { .. } => {
                if use_small {
                    inverse_small[i] as usize
                } else {
                    inverse_big[i as u32].index()
                }
            }
        })
    }
}

/// An identifier that specifies the address space that some operation
/// should operate on. Special address spaces have an effect on code generation,
/// depending on the target and the address spaces it implements.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct AddressSpace(pub u32);

impl AddressSpace {
    /// LLVM's `0` address space.
    pub const ZERO: Self = AddressSpace(0);
}

/// The way we represent values to the backend
///
/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
/// In reality, this implies little about that, but is mostly used to describe the syntactic form
/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
/// how the value will be lowered to the calling convention, in itself.
///
/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
/// while larger values are usually represented as memory.
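///
/// An illustrative mapping (an assumption; the exact choice depends on the
/// target and the layout algorithm):
/// ```ignore (illustrative)
/// // i32                -> BackendRepr::Scalar(..)
/// // (i32, i32), &[u8]  -> BackendRepr::ScalarPair(..)
/// // 4-lane SIMD f32    -> BackendRepr::SimdVector { count: 4, .. }
/// // [u8; 32]           -> BackendRepr::Memory { sized: true }
/// // [u8]               -> BackendRepr::Memory { sized: false }
/// ```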
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum BackendRepr {
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    SimdVector {
        element: Scalar,
        count: u64,
    },
    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
    Memory {
        /// If true, the size is exact, otherwise it's only a lower bound.
        sized: bool,
    },
}

impl BackendRepr {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        match *self {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => !sized,
        }
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        !self.is_unsized()
    }

    /// Returns `true` if this is a single signed integer scalar.
    /// Sanity check: panics if this is not a scalar type (see PR #70189).
    #[inline]
    pub fn is_signed(&self) -> bool {
        match self {
            BackendRepr::Scalar(scal) => scal.is_signed(),
            _ => panic!("`is_signed` on non-scalar ABI {self:?}"),
        }
    }

    /// Returns `true` if this is a scalar type
    #[inline]
    pub fn is_scalar(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(_))
    }

    /// Returns `true` if this is a bool
    #[inline]
    pub fn is_bool(&self) -> bool {
        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
    }

    /// The psABI alignment for a `Scalar` or `ScalarPair`
    ///
    /// `None` for other variants.
    pub fn scalar_align<C: HasDataLayout>(&self, cx: &C) -> Option<Align> {
        match *self {
            BackendRepr::Scalar(s) => Some(s.align(cx).abi),
            BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi),
            // The align of a Vector can vary in surprising ways
            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
        }
    }

    /// The psABI size for a `Scalar` or `ScalarPair`
    ///
    /// `None` for other variants
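    ///
    /// For example (illustrative): a `ScalarPair` of `i8` and `i32` places the
    /// `i8` at offset 0 and the `i32` at offset 4 (its alignment), so the total
    /// size is `(4 + 4).align_to(4) = 8` bytes.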
    pub fn scalar_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
        match *self {
            // No padding in scalars.
            BackendRepr::Scalar(s) => Some(s.size(cx)),
            // May have some padding between the pair.
            BackendRepr::ScalarPair(s1, s2) => {
                let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
                let size = (field2_offset + s2.size(cx)).align_to(
                    self.scalar_align(cx)
                        // We absolutely must have an answer here or everything is FUBAR.
                        .unwrap(),
                );
                Some(size)
            }
            // The size of a Vector can vary in surprising ways
            BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None,
        }
    }

    /// Discard validity range information and allow undef.
    pub fn to_union(&self) -> Self {
        match *self {
            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
            BackendRepr::ScalarPair(s1, s2) => {
                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
            }
            BackendRepr::SimdVector { element, count } => {
                BackendRepr::SimdVector { element: element.to_union(), count }
            }
            BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true },
        }
    }

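    /// Checks whether two representations are identical once validity ranges are
    /// ignored; e.g. a `u32`-like scalar and a `NonZeroU32`-like scalar compare
    /// equal here, since they differ only in their valid ranges.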
    pub fn eq_up_to_validity(&self, other: &Self) -> bool {
        match (self, other) {
            // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
            // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
            (
                BackendRepr::SimdVector { element: element_l, count: count_l },
                BackendRepr::SimdVector { element: element_r, count: count_r },
            ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
            }
            // Everything else must be strictly identical.
            _ => self == other,
        }
    }
}

// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum Variants<FieldIdx: Idx, VariantIdx: Idx> {
    /// A type with no valid variants. Must be uninhabited.
    Empty,

    /// Single enum variants, structs/tuples, unions, and all non-ADTs.
    Single {
        /// Always `0` for types that cannot have multiple variants.
        index: VariantIdx,
    },

    /// Enum-likes with more than one variant: each variant comes with
    /// a *discriminant* (usually the same as the variant index but the user can
    /// assign explicit discriminant values). That discriminant is encoded
    /// as a *tag* on the machine. The layout of each variant is
    /// a struct, and they all have space reserved for the tag.
    /// For enums, the tag is the sole field of the layout.
    Multiple {
        tag: Scalar,
        tag_encoding: TagEncoding<VariantIdx>,
        tag_field: FieldIdx,
        variants: IndexVec<VariantIdx, LayoutData<FieldIdx, VariantIdx>>,
    },
}

// NOTE: This struct is generic over the VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub enum TagEncoding<VariantIdx: Idx> {
    /// The tag directly stores the discriminant, but possibly with a smaller layout
    /// (so converting the tag to the discriminant can require sign extension).
    Direct,

    /// Niche (values invalid for a type) encoding the discriminant.
    /// Note that for this encoding, the discriminant and variant index of each variant coincide!
    /// This invariant is codified as part of [`layout_sanity_check`](../rustc_ty_utils/layout/invariant/fn.layout_sanity_check.html).
    ///
    /// The variant `untagged_variant` contains a niche at an arbitrary
    /// offset (field [`Variants::Multiple::tag_field`] of the enum).
    /// For a variant with variant index `i`, such that `i != untagged_variant`,
    /// the tag is set to `(i - niche_variants.start).wrapping_add(niche_start)`
    /// (this is wrapping arithmetic using the type of the niche field, cf. the
    /// [`tag_for_variant`](../rustc_const_eval/interpret/struct.InterpCx.html#method.tag_for_variant)
    /// query implementation).
    /// To recover the variant index `i` from a `tag`, the above formula has to be reversed,
    /// i.e. `i = tag.wrapping_sub(niche_start) + niche_variants.start`. If `i` ends up outside
    /// `niche_variants`, the tag must have encoded the `untagged_variant`.
    ///
    /// For example, `Option<(usize, &T)>` is represented such that the tag for `None`
    /// is a null pointer in the second tuple field, while `Some` is the identity function
    /// (with a non-null reference) and needs no additional tag: the reference being
    /// non-null uniquely identifies this variant.
    ///
    /// Other variants that are not `untagged_variant` and that are outside the `niche_variants`
    /// range cannot be represented; they must be uninhabited.
    /// Nonetheless, uninhabited variants can also fall into the range of `niche_variants`.
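    ///
    /// A worked sketch (illustrative): for `Option<bool>`, the payload `bool`
    /// uses values `0` and `1`, so `untagged_variant = Some`,
    /// `niche_variants = None..=None`, and `niche_start = 2`. Encoding `None`
    /// (variant index 0) gives tag `(0 - 0).wrapping_add(2) = 2`; decoding tag
    /// `2` gives `i = 2.wrapping_sub(2) + 0 = 0`, i.e. `None`.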
    Niche {
        untagged_variant: VariantIdx,
        /// This range *may* contain `untagged_variant` or uninhabited variants;
        /// these are then just "dead values" and not used to encode anything.
        niche_variants: RangeInclusive<VariantIdx>,
        /// This is inbounds of the type of the niche field
        /// (not sign-extended, i.e., all bits beyond the niche field size are 0).
        niche_start: u128,
    },
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct Niche {
    pub offset: Size,
    pub value: Primitive,
    pub valid_range: WrappingRange,
}

impl Niche {
    pub fn from_scalar<C: HasDataLayout>(cx: &C, offset: Size, scalar: Scalar) -> Option<Self> {
        let Scalar::Initialized { value, valid_range } = scalar else { return None };
        let niche = Niche { offset, value, valid_range };
        if niche.available(cx) > 0 { Some(niche) } else { None }
    }

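    /// Returns how many values outside `valid_range` are available for niche use.
    /// For example (illustrative): a `bool`-like niche (`0..=1` in 8 bits) has
    /// `0u128.wrapping_sub(2) & 0xff = 254` available values.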
    pub fn available<C: HasDataLayout>(&self, cx: &C) -> u128 {
        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        // Find out how many values are outside the valid range.
        let niche = v.end.wrapping_add(1)..v.start;
        niche.end.wrapping_sub(niche.start) & max_value
    }

    pub fn reserve<C: HasDataLayout>(&self, cx: &C, count: u128) -> Option<(u128, Scalar)> {
        assert!(count > 0);

        let Self { value, valid_range: v, .. } = *self;
        let size = value.size(cx);
        assert!(size.bits() <= 128);
        let max_value = size.unsigned_int_max();

        let niche = v.end.wrapping_add(1)..v.start;
        let available = niche.end.wrapping_sub(niche.start) & max_value;
        if count > available {
            return None;
        }

        // Extend the range of valid values being reserved by moving either the `v.start` or
        // `v.end` bound. Given an eventual `Option<T>`, we try to maximize the chance for `None`
        // to occupy the niche of zero. This is accomplished by preferring enums with 2 variants
        // (`count == 1`) and always taking the shortest path to niche zero. Having `None` in
        // niche zero can enable some special optimizations.
        //
        // Bound selection criteria:
        // 1. Select closest to zero given wrapping semantics.
        // 2. Avoid moving past zero if possible.
        //
        // In practice this means that enums with `count > 1` are unlikely to claim niche zero,
        // since they have to fit perfectly. If niche zero is already reserved, the selection of
        // bounds is of little interest.
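        //
        // A worked sketch (illustrative): reserving `count == 1` from a
        // `bool`-like niche (`0..=1`, 8 bits) takes the `move_end` path below,
        // returning tag value 2 and the new valid range `0..=2` (the end bound
        // moves, since moving the start from 0 would wrap past zero).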
        let move_start = |v: WrappingRange| {
            let start = v.start.wrapping_sub(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_start(start) }))
        };
        let move_end = |v: WrappingRange| {
            let start = v.end.wrapping_add(1) & max_value;
            let end = v.end.wrapping_add(count) & max_value;
            Some((start, Scalar::Initialized { value, valid_range: v.with_end(end) }))
        };
        let distance_end_zero = max_value - v.end;
        if v.start > v.end {
            // zero is unavailable because wrapping occurs
            move_end(v)
        } else if v.start <= distance_end_zero {
            if count <= v.start {
                move_start(v)
            } else {
                // moved past zero, use other bound
                move_end(v)
            }
        } else {
            let end = v.end.wrapping_add(count) & max_value;
            let overshot_zero = (1..=v.end).contains(&end);
            if overshot_zero {
                // moved past zero, use other bound
                move_start(v)
            } else {
                move_end(v)
            }
        }
    }
}

// NOTE: This struct is generic over the FieldIdx and VariantIdx for rust-analyzer usage.
#[derive(PartialEq, Eq, Hash, Clone)]
#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
    /// Says where the fields are located within the layout.
    pub fields: FieldsShape<FieldIdx>,

    /// Encodes information about multi-variant layouts.
    /// Even with `Multiple` variants, a layout still has its own fields! Those are then
    /// shared between all variants. One of them will be the discriminant,
    /// but e.g. coroutines can have more.
    ///
    /// To access all fields of this layout, both `fields` and the fields of the active variant
    /// must be taken into account.
    pub variants: Variants<FieldIdx, VariantIdx>,

    /// The `backend_repr` defines how this data will be represented to the codegen backend,
    /// and encodes value restrictions via `valid_range`.
    ///
    /// Note that this is entirely orthogonal to the recursive structure defined by
    /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
    /// `BackendRepr::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and
    /// `variants` have to be taken into account to find all fields of this layout.
    pub backend_repr: BackendRepr,

    /// The leaf scalar with the largest number of invalid values
    /// (i.e. outside of its `valid_range`), if it exists.
    pub largest_niche: Option<Niche>,
    /// Is this type known to be uninhabited?
    ///
    /// This is separate from BackendRepr because uninhabited return types can affect ABI,
    /// especially in the case of by-pointer struct returns, which allocate stack even when unused.
    pub uninhabited: bool,

    pub align: AbiAlign,
    pub size: Size,

    /// The largest alignment explicitly requested with `repr(align)` on this type or any field.
    /// Only used on i686-windows, where the argument passing ABI is different when alignment is
    /// requested, even if the requested alignment is equal to the natural alignment.
    pub max_repr_align: Option<Align>,

    /// The alignment the type would have, ignoring any `repr(align)` but including `repr(packed)`.
    /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment
    /// in some cases.
    pub unadjusted_abi_align: Align,

    /// The randomization seed based on this type's own repr and its fields.
    ///
    /// Since randomization is toggled on a per-crate basis, even crates that do not have
    /// randomization enabled should still calculate a seed, so that downstream users can use it
    /// to distinguish different types.
    ///
    /// For every `T` and `U` for which we do not guarantee that a `repr(Rust)` `Foo<T>` can be
    /// coerced or transmuted to `Foo<U>`, we aim to create probabilistically distinct seeds so
    /// that `Foo` can choose to reorder its fields based on that information. The current
    /// implementation is a conservative approximation of this goal.
    pub randomization_seed: Hash64,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if this is an aggregate type (including a ScalarPair!)
    pub fn is_aggregate(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false,
            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
        }
    }

    /// Returns `true` if this is an uninhabited type
    pub fn is_uninhabited(&self) -> bool {
        self.uninhabited
    }
}

impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx>
where
    FieldsShape<FieldIdx>: fmt::Debug,
    Variants<FieldIdx, VariantIdx>: fmt::Debug,
{
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // This is how `Layout` used to print before it became
        // `Interned<LayoutData>`. We print it like this to avoid having to update
        // expected output in a lot of tests.
        let LayoutData {
            size,
            align,
            backend_repr,
            fields,
            largest_niche,
            uninhabited,
            variants,
            max_repr_align,
            unadjusted_abi_align,
            randomization_seed,
        } = self;
        f.debug_struct("Layout")
            .field("size", size)
            .field("align", align)
            .field("backend_repr", backend_repr)
            .field("fields", fields)
            .field("largest_niche", largest_niche)
            .field("uninhabited", uninhabited)
            .field("variants", variants)
            .field("max_repr_align", max_repr_align)
            .field("unadjusted_abi_align", unadjusted_abi_align)
            .field("randomization_seed", randomization_seed)
            .finish()
    }
}

#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum PointerKind {
    /// Shared reference. `frozen` indicates the absence of any `UnsafeCell`.
    SharedRef { frozen: bool },
    /// Mutable reference. `unpin` indicates the absence of any pinned data.
    MutableRef { unpin: bool },
    /// Box. `unpin` indicates the absence of any pinned data. `global` indicates whether this box
    /// uses the global allocator or a custom one.
    Box { unpin: bool, global: bool },
}

/// Encodes extra information we have about a pointer.
/// Note that this information is advisory only, and backends are free to ignore it:
/// if the information is wrong, that can cause UB, but if the information is absent,
/// that must always be okay.
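///
/// As an illustrative sketch, a `&u8` argument might be described as:
/// ```ignore (illustrative)
/// PointeeInfo {
///     safe: Some(PointerKind::SharedRef { frozen: true }),
///     size: Size::from_bytes(1),
///     align: Align::ONE,
/// }
/// ```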
#[derive(Copy, Clone, Debug)]
pub struct PointeeInfo {
    /// If this is `None`, then this is a raw pointer, so size and alignment are not guaranteed to
    /// be reliable.
    pub safe: Option<PointerKind>,
    /// If `safe` is `Some`, then the pointer is either null or dereferenceable for this many bytes.
    /// On a function argument, "dereferenceable" here means "dereferenceable for the entire duration
    /// of this function call", i.e. it is UB to free the memory that this pointer points to
    /// while this function is still running.
    /// The size can be zero if the pointer is not dereferenceable.
    pub size: Size,
    /// If `safe` is `Some`, then the pointer is aligned as indicated.
    pub align: Align,
}

impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
    /// Returns `true` if the layout corresponds to an unsized type.
    #[inline]
    pub fn is_unsized(&self) -> bool {
        self.backend_repr.is_unsized()
    }

    #[inline]
    pub fn is_sized(&self) -> bool {
        self.backend_repr.is_sized()
    }

    /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
    pub fn is_1zst(&self) -> bool {
        self.is_sized() && self.size.bytes() == 0 && self.align.abi.bytes() == 1
    }

    /// Returns `true` if the type is a ZST and not unsized.
    ///
    /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
    /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
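    ///
    /// For example (illustrative): `[u64; 0]` is a ZST but not a 1-ZST, since it
    /// has alignment 8 on typical targets, while `()` is both.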
    pub fn is_zst(&self) -> bool {
        match self.backend_repr {
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::SimdVector { .. } => false,
            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
        }
    }

    /// Checks if these two `Layout` are equal enough to be considered "the same for all function
    /// call ABIs". Note however that real ABIs depend on more details that are not reflected in the
    /// `Layout`; the `PassMode` needs to be compared as well. Also note that we assume
    /// aggregates are passed via `PassMode::Indirect` or `PassMode::Cast`; more strict
    /// checks would otherwise be required.
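    ///
    /// For example (illustrative): `u32` and `NonZeroU32` compare equal here, since
    /// they differ only in validity, but `u8` and `bool` do not, because some ABIs
    /// give `bool` special treatment.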
    pub fn eq_abi(&self, other: &Self) -> bool {
        // The one thing that we are not capturing here is that for unsized types, the metadata must
        // also have the same ABI, and moreover that the same metadata leads to the same size. The
        // 2nd point is quite hard to check though.
        self.size == other.size
            && self.is_sized() == other.is_sized()
            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
            && self.align.abi == other.align.abi
            && self.max_repr_align == other.max_repr_align
            && self.unadjusted_abi_align == other.unadjusted_abi_align
    }
}

#[derive(Copy, Clone, Debug)]
pub enum StructKind {
    /// A tuple, closure, or univariant which cannot be coerced to unsized.
    AlwaysSized,
    /// A univariant, the last field of which may be coerced to unsized.
    MaybeUnsized,
    /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
    Prefixed(Size, Align),
}

#[derive(Clone, Debug)]
pub enum AbiFromStrErr {
    /// not a known ABI
    Unknown,
    /// no "-unwind" variant can be used here
    NoExplicitUnwind,
}