//! `core/intrinsics/fallback.rs` — fallback implementations for intrinsics.
1#![unstable(
2    feature = "core_intrinsics_fallbacks",
3    reason = "The fallbacks will never be stable, as they exist only to be called \
4              by the fallback MIR, but they're exported so they can be tested on \
5              platforms where the fallback MIR isn't actually used",
6    issue = "none"
7)]
8#![allow(missing_docs)]
9
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait CarryingMulAdd: Copy + 'static {
    /// The unsigned counterpart of `Self`, used to carry the low half of the
    /// double-width result (`Self` itself for the unsigned implementations,
    /// e.g. `u128` for `i128`).
    type Unsigned: Copy + 'static;
    /// Computes `self * multiplicand + addend + carry` exactly over the
    /// double-width range and returns `(low_half, high_half)` of the result.
    fn carrying_mul_add(
        self,
        multiplicand: Self,
        addend: Self,
        carry: Self,
    ) -> (Self::Unsigned, Self);
}
20
// Implements `CarryingMulAdd` by performing the arithmetic in a wider type.
// Macro columns: `$t` is the implementing type, `$u` its unsigned counterpart
// (the low half of the result), and `$w` a type wide enough that the whole
// expression below cannot overflow.
macro_rules! impl_carrying_mul_add_by_widening {
    ($($t:ident $u:ident $w:ident,)+) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const CarryingMulAdd for $t {
            type Unsigned = $u;
            #[inline]
            #[ferrocene::prevalidated]
            fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
                // For signed `$t`, `as $w` sign-extends, so `wide` is the
                // mathematically exact value; the low half is its truncation
                // and the high half is the remaining upper bits.
                let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
                (wide as _, (wide >> Self::BITS) as _)
            }
        }
    )+};
}
// Columns: implementing type, unsigned low-half type, double-width work type.
// `u128`/`i128` have no wider primitive, so they get manual impls below.
impl_carrying_mul_add_by_widening! {
    u8 u8 u16,
    u16 u16 u32,
    u32 u32 u64,
    u64 u64 u128,
    usize usize UDoubleSize,
    i8 u8 i16,
    i16 u16 i32,
    i32 u32 i64,
    i64 u64 i128,
    isize usize UDoubleSize,
}
47
// An unsigned integer exactly twice the width of `usize` on the current
// target; used as the widening work type for the `usize`/`isize` impls above.
#[cfg(target_pointer_width = "16")]
type UDoubleSize = u32;
#[cfg(target_pointer_width = "32")]
type UDoubleSize = u64;
#[cfg(target_pointer_width = "64")]
type UDoubleSize = u128;
54
55#[inline]
56#[ferrocene::prevalidated]
57const fn wide_mul_u128(a: u128, b: u128) -> (u128, u128) {
58    #[inline]
59    #[ferrocene::prevalidated]
60    const fn to_low_high(x: u128) -> [u128; 2] {
61        const MASK: u128 = u64::MAX as _;
62        [x & MASK, x >> 64]
63    }
64    #[inline]
65    #[ferrocene::prevalidated]
66    const fn from_low_high(x: [u128; 2]) -> u128 {
67        x[0] | (x[1] << 64)
68    }
69    #[inline]
70    #[ferrocene::prevalidated]
71    const fn scalar_mul(low_high: [u128; 2], k: u128) -> [u128; 3] {
72        let [x, c] = to_low_high(k * low_high[0]);
73        let [y, z] = to_low_high(k * low_high[1] + c);
74        [x, y, z]
75    }
76    let a = to_low_high(a);
77    let b = to_low_high(b);
78    let low = scalar_mul(a, b[0]);
79    let high = scalar_mul(a, b[1]);
80    let r0 = low[0];
81    let [r1, c] = to_low_high(low[1] + high[0]);
82    let [r2, c] = to_low_high(low[2] + high[1] + c);
83    let r3 = high[2] + c;
84    (from_low_high([r0, r1]), from_low_high([r2, r3]))
85}
86
87#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
88impl const CarryingMulAdd for u128 {
89    type Unsigned = u128;
90    #[inline]
91    #[ferrocene::prevalidated]
92    fn carrying_mul_add(self, b: u128, c: u128, d: u128) -> (u128, u128) {
93        let (low, mut high) = wide_mul_u128(self, b);
94        let (low, carry) = u128::overflowing_add(low, c);
95        high += carry as u128;
96        let (low, carry) = u128::overflowing_add(low, d);
97        high += carry as u128;
98        (low, high)
99    }
100}
101
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
impl const CarryingMulAdd for i128 {
    type Unsigned = u128;
    #[inline]
    #[ferrocene::prevalidated]
    fn carrying_mul_add(self, b: i128, c: i128, d: i128) -> (u128, i128) {
        // Start from the unsigned 256-bit product of the two's-complement bit
        // patterns; the low half is already correct for signed inputs.
        let (low, high) = wide_mul_u128(self as u128, b as u128);
        let mut high = high as i128;
        // Signed correction of the high half: `x >> 127` is an arithmetic
        // shift, i.e. -1 when `x` is negative and 0 otherwise. The unsigned
        // product effectively used `self + 2^128` when `self` was negative,
        // so subtract `b` from the high half in that case (via `-1 * b`),
        // and symmetrically for `b`.
        high = high.wrapping_add(i128::wrapping_mul(self >> 127, b));
        high = high.wrapping_add(i128::wrapping_mul(self, b >> 127));
        // Fold each addend into the low half, then add the carry plus the
        // addend's sign extension (`c >> 127` is -1 or 0) to the high half.
        let (low, carry) = u128::overflowing_add(low, c as u128);
        high = high.wrapping_add((carry as i128) + (c >> 127));
        let (low, carry) = u128::overflowing_add(low, d as u128);
        high = high.wrapping_add((carry as i128) + (d >> 127));
        (low, high)
    }
}
119
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait DisjointBitOr: Copy + 'static {
    /// See [`super::disjoint_bitor`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    ///
    /// # Safety
    /// `self` and `other` must have no bits in common, i.e.
    /// `self & other` must be all zeros (the implementations below pass
    /// exactly this condition to `assume`).
    unsafe fn disjoint_bitor(self, other: Self) -> Self;
}
// Expands to the all-zero value of the given primitive: `false` for `bool`,
// the literal `0` for any other (integer) type.
macro_rules! zero {
    (bool) => {
        false
    };
    ($t:ident) => {
        0
    };
}
// Implements `DisjointBitOr` for each listed primitive: state the caller's
// "no bits in common" precondition to the backend (and to Miri) via `assume`,
// then perform a plain bitwise OR.
macro_rules! impl_disjoint_bitor {
    ($($t:ident,)+) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const DisjointBitOr for $t {
            #[cfg_attr(miri, track_caller)]
            #[inline]
            #[ferrocene::prevalidated]
            unsafe fn disjoint_bitor(self, other: Self) -> Self {
                // Note that the assume here is required for UB detection in Miri!

                // SAFETY: our precondition is that there are no bits in common,
                // so this is just telling that to the backend.
                unsafe { super::assume((self & other) == zero!($t)) };
                self | other
            }
        }
    )+};
}
// `bool` plus every primitive integer type.
impl_disjoint_bitor! {
    bool,
    u8, u16, u32, u64, u128, usize,
    i8, i16, i32, i64, i128, isize,
}
157
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait FunnelShift: Copy + 'static {
    /// See [`super::unchecked_funnel_shl`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    ///
    /// # Safety
    /// `shift` must be strictly less than `Self::BITS` (the implementations
    /// below pass exactly this condition to `assume`).
    unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self;

    /// See [`super::unchecked_funnel_shr`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    ///
    /// # Safety
    /// `shift` must be strictly less than `Self::BITS` (the implementations
    /// below pass exactly this condition to `assume`).
    unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self;
}
168
// Implements `FunnelShift` for the listed types by combining two in-range
// ordinary shifts with `disjoint_bitor`. The `shift == 0` case must be
// special-cased: the complementary shift would then be by the full width
// (`$type::BITS - 0`), which `unchecked_shl`/`unchecked_shr` forbid.
macro_rules! impl_funnel_shifts {
    ($($type:ident),*) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const FunnelShift for $type {
            #[cfg_attr(miri, track_caller)]
            #[inline]
            #[ferrocene::prevalidated]
            unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
                // This implementation is also used by Miri so we have to check the precondition.
                // SAFETY: this is guaranteed by the caller
                unsafe { super::assume(shift < $type::BITS) };
                if shift == 0 {
                    self
                } else {
                    // SAFETY:
                    //  - `shift < T::BITS`, which satisfies `unchecked_shl`
                    //  - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
                    //    above), which satisfies `unchecked_shr`
                    //  - because the types are unsigned, the combination are disjoint bits (this is
                    //    not true if they're signed, since SHR will fill in the empty space with a
                    //    sign bit, not zero)
                    unsafe {
                        super::disjoint_bitor(
                            super::unchecked_shl(self, shift),
                            super::unchecked_shr(rhs, $type::BITS - shift),
                        )
                    }
                }
            }

            #[cfg_attr(miri, track_caller)]
            #[inline]
            #[ferrocene::prevalidated]
            unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
                // This implementation is also used by Miri so we have to check the precondition.
                // SAFETY: this is guaranteed by the caller
                unsafe { super::assume(shift < $type::BITS) };
                if shift == 0 {
                    rhs
                } else {
                    // SAFETY:
                    //  - `shift < T::BITS`, which satisfies `unchecked_shr`
                    //  - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
                    //    above), which satisfies `unchecked_shl`
                    //  - because the types are unsigned, the combination are disjoint bits (this is
                    //    not true if they're signed, since SHR will fill in the empty space with a
                    //    sign bit, not zero)
                    unsafe {
                        super::disjoint_bitor(
                            super::unchecked_shl(self, $type::BITS - shift),
                            super::unchecked_shr(rhs, shift),
                        )
                    }
                }
            }
        }
    )*};
}
227
// Unsigned types only: the disjoint-bit argument in the macro relies on
// zero-filling right shifts.
impl_funnel_shifts! {
    u8, u16, u32, u64, u128, usize
}
231
#[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
pub const trait CarrylessMul: Copy + 'static {
    /// See [`super::carryless_mul`]; we just need the trait indirection to handle
    /// different types since calling intrinsics with generics doesn't work.
    ///
    /// Returns the low `Self::BITS` bits of the carryless (XOR-based)
    /// product of `self` and `rhs`.
    fn carryless_mul(self, rhs: Self) -> Self;
}
238
// Implements `CarrylessMul` for each listed type via shift-and-xor
// multiplication: every set bit of `rhs` XORs a correspondingly shifted copy
// of `self` into the accumulator.
macro_rules! impl_carryless_mul {
    ($($type:ident),*) => {$(
        #[rustc_const_unstable(feature = "core_intrinsics_fallbacks", issue = "none")]
        impl const CarrylessMul for $type {
            #[inline]
            #[ferrocene::prevalidated]
            fn carryless_mul(self, rhs: Self) -> Self {
                let mut acc: Self = 0;
                // Shifted copy of `self`; bits shifted out of the top are
                // discarded, giving the truncated low-half product.
                let mut addend = self;
                // Remaining multiplier bits, consumed low bit first.
                let mut bits = rhs;
                let mut pos = 0;

                while pos < $type::BITS {
                    if bits & 1 != 0 {
                        acc ^= addend;
                    }
                    addend <<= 1;
                    bits >>= 1;
                    pos += 1;
                }

                acc
            }
        }
    )*};
}
263
264impl_carryless_mul! {
265    u8, u16, u32, u64, u128, usize
266}