// core/intrinsics/fallback.rs

1#![unstable(
2    feature = "core_intrinsics_fallbacks",
3    reason = "The fallbacks will never be stable, as they exist only to be called \
4              by the fallback MIR, but they're exported so they can be tested on \
5              platforms where the fallback MIR isn't actually used",
6    issue = "none"
7)]
8#![allow(missing_docs)]
9
/// Fallback for the `carrying_mul_add` intrinsic: computes
/// `self * multiplicand + addend + carry` without losing the overflow,
/// returning the double-width result as `(low unsigned half, high half)`.
///
/// We just need the trait indirection to handle different types since
/// calling intrinsics with generics doesn't work; each supported type
/// gets its own impl in this file.
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
#[cfg(not(feature = "ferrocene_certified"))]
pub const trait CarryingMulAdd: Copy + 'static {
    /// The unsigned counterpart of `Self`, used for the low half of the result.
    type Unsigned: Copy + 'static;
    fn carrying_mul_add(
        self,
        multiplicand: Self,
        addend: Self,
        carry: Self,
    ) -> (Self::Unsigned, Self);
}
21
#[cfg(not(feature = "ferrocene_certified"))]
/// Implements `CarryingMulAdd` for a type by doing the whole computation in a
/// wider primitive. Each row is `self-type unsigned-type widening-type`.
macro_rules! impl_carrying_mul_add_by_widening {
    ($($narrow:ident $unsigned:ident $wide:ident,)+) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const CarryingMulAdd for $narrow {
            type Unsigned = $unsigned;
            #[inline]
            fn carrying_mul_add(self, m: Self, add: Self, carry: Self) -> ($unsigned, $narrow) {
                // Compute everything in the double-width type, then split the
                // result into its low (unsigned) and high halves.
                let product = (self as $wide) * (m as $wide);
                let full = product + (add as $wide) + (carry as $wide);
                let low = full as $unsigned;
                let high = (full >> Self::BITS) as $narrow;
                (low, high)
            }
        }
    )+};
}
// Each row is the implementing type, its unsigned counterpart (the low half
// of the result), and a double-width type the whole computation fits in.
// `u128`/`i128` have no wider primitive, so they get hand-written impls
// elsewhere in this file.
#[cfg(not(feature = "ferrocene_certified"))]
impl_carrying_mul_add_by_widening! {
    u8 u8 u16,
    u16 u16 u32,
    u32 u32 u64,
    u64 u64 u128,
    usize usize UDoubleSize,
    i8 u8 i16,
    i16 u16 i32,
    i32 u32 i64,
    i64 u64 i128,
    isize usize UDoubleSize,
}
49
// Unsigned type with twice the width of `usize`/`isize`, selected per target
// pointer width; used as the widening type for the pointer-sized
// `CarryingMulAdd` impls.
#[cfg(not(feature = "ferrocene_certified"))]
#[cfg(target_pointer_width = "16")]
type UDoubleSize = u32;
#[cfg(not(feature = "ferrocene_certified"))]
#[cfg(target_pointer_width = "32")]
type UDoubleSize = u64;
#[cfg(not(feature = "ferrocene_certified"))]
#[cfg(target_pointer_width = "64")]
type UDoubleSize = u128;
59
/// Portable 128x128 -> 256-bit unsigned multiplication.
///
/// Splits each operand into two 64-bit digits, forms the four partial
/// products, and recombines them, returning the `(low, high)` 128-bit
/// halves of the full product.
#[cfg(not(feature = "ferrocene_certified"))]
#[inline]
const fn wide_mul_u128(a: u128, b: u128) -> (u128, u128) {
    // Mask selecting the low 64-bit digit of a `u128`.
    const LOW: u128 = u64::MAX as u128;

    // Split both operands into 64-bit digits: x == (x_hi << 64) | x_lo.
    let (a_lo, a_hi) = (a & LOW, a >> 64);
    let (b_lo, b_hi) = (b & LOW, b >> 64);

    // The four 64x64 partial products; each fits in a `u128` because both
    // factors are at most `u64::MAX`.
    let ll = a_lo * b_lo;
    let lh = a_lo * b_hi;
    let hl = a_hi * b_lo;
    let hh = a_hi * b_hi;

    // Sum of the digit-1 column: three terms of at most `u64::MAX` each,
    // so this cannot overflow a `u128`.
    let mid = (ll >> 64) + (lh & LOW) + (hl & LOW);

    // Reassemble: digits 0 and 1 form the low half; digits 2 and 3 plus the
    // carries out of the middle column form the high half (which cannot
    // overflow, since the true product is below 2^256).
    let low = (ll & LOW) | (mid << 64);
    let high = hh + (lh >> 64) + (hl >> 64) + (mid >> 64);
    (low, high)
}
88
89#[cfg(not(feature = "ferrocene_certified"))]
90#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
91impl const CarryingMulAdd for u128 {
92    type Unsigned = u128;
93    #[inline]
94    fn carrying_mul_add(self, b: u128, c: u128, d: u128) -> (u128, u128) {
95        let (low, mut high) = wide_mul_u128(self, b);
96        let (low, carry) = u128::overflowing_add(low, c);
97        high += carry as u128;
98        let (low, carry) = u128::overflowing_add(low, d);
99        high += carry as u128;
100        (low, high)
101    }
102}
103
#[cfg(not(feature = "ferrocene_certified"))]
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
impl const CarryingMulAdd for i128 {
    type Unsigned = u128;
    #[inline]
    fn carrying_mul_add(self, b: i128, c: i128, d: i128) -> (u128, i128) {
        // Start from the unsigned wide product of the raw bit patterns; its
        // low half is already correct for the signed product.
        let (low, high) = wide_mul_u128(self as u128, b as u128);
        let mut high = high as i128;
        // Fix the high half for signedness. `x >> 127` is an arithmetic
        // shift, so it is 0 for non-negative `x` and -1 for negative `x`;
        // each line therefore subtracts the other operand exactly when this
        // one is negative:
        //   signed_high = unsigned_high - (self < 0 ? b : 0) - (b < 0 ? self : 0)
        high = high.wrapping_add(i128::wrapping_mul(self >> 127, b));
        high = high.wrapping_add(i128::wrapping_mul(self, b >> 127));
        // Add `c`: its low bits go into `low`; the carry-out plus its sign
        // extension (`c >> 127` is 0 or -1) adjust the high half.
        let (low, carry) = u128::overflowing_add(low, c as u128);
        high = high.wrapping_add((carry as i128) + (c >> 127));
        // Same for `d`.
        let (low, carry) = u128::overflowing_add(low, d as u128);
        high = high.wrapping_add((carry as i128) + (d >> 127));
        (low, high)
    }
}
121
/// Fallback for the `disjoint_bitor` intrinsic.
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait DisjointBitOr: Copy + 'static {
    /// See [`super::disjoint_bitor`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    unsafe fn disjoint_bitor(self, other: Self) -> Self;
}
/// Expands to the all-zero-bits value of the given type: `false` for `bool`,
/// the literal `0` for the integer types.
macro_rules! zero {
    (bool) => { false };
    ($int:ident) => { 0 };
}
/// Implements `DisjointBitOr` for each listed type as a plain `|`, with an
/// `assume` carrying the no-shared-bits precondition to the backend.
macro_rules! impl_disjoint_bitor {
    ($($ty:ident,)+) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const DisjointBitOr for $ty {
            #[cfg_attr(miri, track_caller)]
            #[inline]
            unsafe fn disjoint_bitor(self, rhs: Self) -> Self {
                // The `assume` below is what lets Miri detect the UB when the
                // operands share a set bit!

                // SAFETY: the caller guarantees `self` and `rhs` have no bits
                // in common, so this just passes that fact to the backend.
                unsafe { super::assume((self & rhs) == zero!($ty)) };
                self | rhs
            }
        }
    )+};
}
// `disjoint_bitor` is implemented for `bool` and every primitive integer
// type, signed and unsigned.
impl_disjoint_bitor! {
    bool,
    u8, u16, u32, u64, u128, usize,
    i8, i16, i32, i64, i128, isize,
}
158
/// Fallback for the funnel-shift intrinsics.
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait FunnelShift: Copy + 'static {
    /// See [`super::unchecked_funnel_shl`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self;

    /// See [`super::unchecked_funnel_shr`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self;
}
169
/// Implements `FunnelShift` for each listed (unsigned) type by combining an
/// `unchecked_shl` and an `unchecked_shr` via `disjoint_bitor`.
macro_rules! impl_funnel_shifts {
    ($($t:ident),*) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const FunnelShift for $t {
            #[cfg_attr(miri, track_caller)]
            #[inline]
            unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
                // Miri also runs this implementation, so the precondition is
                // checked here rather than assumed silently.
                // SAFETY: this is guaranteed by the caller
                unsafe { super::assume(shift < $t::BITS) };
                if shift == 0 {
                    // `BITS - 0` would be a full-width shift below, so the
                    // degenerate case is handled separately.
                    return self;
                }
                // SAFETY:
                //  - `0 < shift < BITS` holds here, so both `shift` (for
                //    `unchecked_shl`) and `BITS - shift` (for `unchecked_shr`)
                //    stay strictly below the type's width
                //  - these are unsigned types, so the shifted-in bits are
                //    zeros and the two halves occupy disjoint bit positions
                //    (a signed SHR would fill with the sign bit instead and
                //    break that)
                unsafe {
                    super::disjoint_bitor(
                        super::unchecked_shl(self, shift),
                        super::unchecked_shr(rhs, $t::BITS - shift),
                    )
                }
            }

            #[cfg_attr(miri, track_caller)]
            #[inline]
            unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
                // Miri also runs this implementation, so the precondition is
                // checked here rather than assumed silently.
                // SAFETY: this is guaranteed by the caller
                unsafe { super::assume(shift < $t::BITS) };
                if shift == 0 {
                    // `BITS - 0` would be a full-width shift below, so the
                    // degenerate case is handled separately.
                    return rhs;
                }
                // SAFETY:
                //  - `0 < shift < BITS` holds here, so both `shift` (for
                //    `unchecked_shr`) and `BITS - shift` (for `unchecked_shl`)
                //    stay strictly below the type's width
                //  - these are unsigned types, so the shifted-in bits are
                //    zeros and the two halves occupy disjoint bit positions
                //    (a signed SHR would fill with the sign bit instead and
                //    break that)
                unsafe {
                    super::disjoint_bitor(
                        super::unchecked_shl(self, $t::BITS - shift),
                        super::unchecked_shr(rhs, shift),
                    )
                }
            }
        }
    )*};
}
226
// Funnel shifts are only implemented for the unsigned integer types: the
// implementation relies on zero-filling right shifts (see the SAFETY notes
// in the macro).
impl_funnel_shifts! {
    u8, u16, u32, u64, u128, usize
}