// core/slice/cmp.rs

1//! Comparison traits for `[T]`.
2
3#[cfg(not(feature = "ferrocene_certified"))]
4use super::{from_raw_parts, memchr};
5#[cfg(not(feature = "ferrocene_certified"))]
6use crate::ascii;
7#[cfg(not(feature = "ferrocene_certified"))]
8use crate::cmp::{self, BytewiseEq, Ordering};
9#[cfg(not(feature = "ferrocene_certified"))]
10use crate::intrinsics::compare_bytes;
11#[cfg(not(feature = "ferrocene_certified"))]
12use crate::num::NonZero;
13#[cfg(not(feature = "ferrocene_certified"))]
14use crate::ops::ControlFlow;
15
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
// Slice equality delegates to the internal `SlicePartialEq` helper trait,
// which is specialized below: a generic element-by-element loop, plus a
// single `compare_bytes` call when the element types are `BytewiseEq`.
impl<T, U> const PartialEq<[U]> for [T]
where
    T: [const] PartialEq<U>,
{
    fn eq(&self, other: &[U]) -> bool {
        SlicePartialEq::equal(self, other)
    }

    // `ne` is forwarded explicitly (rather than relying on the default
    // `!eq`) so the helper trait controls how negation is performed.
    fn ne(&self, other: &[U]) -> bool {
        SlicePartialEq::not_equal(self, other)
    }
}
30
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
// A slice is a total equivalence whenever its elements are; marker impl only.
impl<T: [const] Eq> const Eq for [T] {}
34
/// Implements comparison of slices [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<T: Ord> Ord for [T] {
    // Delegates to the internal `SliceOrd` helper trait, specialized below
    // for element types that can be compared as unsigned bytes.
    fn cmp(&self, other: &[T]) -> Ordering {
        SliceOrd::compare(self, other)
    }
}
43
/// Reinterprets a `ControlFlow<bool>` as its underlying one-byte encoding.
///
/// Used by the `PartialOrd` methods below so the result can be tested as a
/// raw byte instead of via a discriminant check, which LLVM currently
/// pessimizes (see the comment in `lt`).
#[inline]
#[cfg(not(feature = "ferrocene_certified"))]
const fn as_underlying(x: ControlFlow<bool>) -> u8 {
    // SAFETY: This will only compile if `bool` and `ControlFlow<bool>` have the same
    // size (which isn't guaranteed but this is libcore). Because they have the same
    // size, it's a niched implementation, which in one byte means there can't be
    // any uninitialized memory. The callers then only check for `0` or `1` from this,
    // which must necessarily match the `Break` variant, and we're fine no matter
    // what ends up getting picked as the value representing `Continue(())`.
    unsafe { crate::mem::transmute(x) }
}
55
/// Implements comparison of slices [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<T: PartialOrd> PartialOrd for [T] {
    #[inline]
    fn partial_cmp(&self, other: &[T]) -> Option<Ordering> {
        SlicePartialOrd::partial_compare(self, other)
    }
    #[inline]
    fn lt(&self, other: &Self) -> bool {
        // This is certainly not the obvious way to implement these methods.
        // Unfortunately, using anything that looks at the discriminant means that
        // LLVM sees a check for `2` (aka `ControlFlow<bool>::Continue(())`) and
        // gets very distracted by that, ending up generating extraneous code.
        // This should be changed to something simpler once either LLVM is smarter,
        // see <https://github.com/llvm/llvm-project/issues/132678>, or we generate
        // niche discriminant checks in a way that doesn't trigger it.

        // `Break(true)` is the only value whose underlying byte is `1`
        // (see the SAFETY comment on `as_underlying`).
        as_underlying(self.__chaining_lt(other)) == 1
    }
    #[inline]
    fn le(&self, other: &Self) -> bool {
        // "Not `Break(false)`": either `Break(true)` or `Continue(())`,
        // the latter meaning the slices compared fully equal, which
        // satisfies `<=`.
        as_underlying(self.__chaining_le(other)) != 0
    }
    #[inline]
    fn gt(&self, other: &Self) -> bool {
        as_underlying(self.__chaining_gt(other)) == 1
    }
    #[inline]
    fn ge(&self, other: &Self) -> bool {
        as_underlying(self.__chaining_ge(other)) != 0
    }
    // The `__chaining_*` hooks below forward to the specializable
    // `SliceChain` helper so bytewise-comparable elements get a single
    // `compare_bytes` call instead of a per-element loop.
    #[inline]
    fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
        SliceChain::chaining_lt(self, other)
    }
    #[inline]
    fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
        SliceChain::chaining_le(self, other)
    }
    #[inline]
    fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
        SliceChain::chaining_gt(self, other)
    }
    #[inline]
    fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
        SliceChain::chaining_ge(self, other)
    }
}
105
#[doc(hidden)]
// intermediate trait for specialization of slice's PartialEq
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
const trait SlicePartialEq<B> {
    // Returns `true` iff `self` and `other` have equal length and elements.
    fn equal(&self, other: &[B]) -> bool;

    // Default negation; specializations inherit this, so `ne` is always
    // the exact complement of `eq`.
    fn not_equal(&self, other: &[B]) -> bool {
        !self.equal(other)
    }
}
116
// Generic slice equality
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
impl<A, B> const SlicePartialEq<B> for [A]
where
    A: [const] PartialEq<B>,
{
    default fn equal(&self, other: &[B]) -> bool {
        // Unequal lengths can never compare equal, and checking first lets
        // the loop below index both slices with a single bound.
        if self.len() != other.len() {
            return false;
        }

        // Implemented as explicit indexing rather
        // than zipped iterators for performance reasons.
        // See PR https://github.com/rust-lang/rust/pull/116846
        // FIXME(const_hack): make this a `for idx in 0..self.len()` loop.
        let mut idx = 0;
        while idx < self.len() {
            // bound checks are optimized away
            if self[idx] != other[idx] {
                return false;
            }
            idx += 1;
        }

        true
    }
}
144
// When each element can be compared byte-wise, we can compare all the bytes
// from the whole size in one call to the intrinsics.
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<A, B> const SlicePartialEq<B> for [A]
where
    A: [const] BytewiseEq<B>,
{
    fn equal(&self, other: &[B]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // SAFETY: `self` and `other` are references and are thus guaranteed to be valid.
        // The two slices have been checked to have the same size above.
        unsafe {
            // `size_of_val` of a slice is `len * size_of::<A>()`.
            // NOTE(review): equal element sizes for `A`/`B` are assumed to be
            // guaranteed by `BytewiseEq` — confirm against its definition.
            let size = size_of_val(self);
            compare_bytes(self.as_ptr() as *const u8, other.as_ptr() as *const u8, size) == 0
        }
    }
}
166
#[doc(hidden)]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
// intermediate trait for specialization of slice's PartialOrd
const trait SlicePartialOrd: Sized {
    // Lexicographic partial comparison of two slices of `Self`.
    fn partial_compare(left: &[Self], right: &[Self]) -> Option<Ordering>;
}
174
#[doc(hidden)]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
// intermediate trait for specialization of slice's PartialOrd chaining methods
const trait SliceChain: Sized {
    // Each method mirrors one `PartialOrd::__chaining_*` hook:
    // `Break(answer)` when the slices differ, `Continue(())` when they
    // compared fully equal (so an outer chained comparison may continue).
    fn chaining_lt(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
    fn chaining_le(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
    fn chaining_gt(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
    fn chaining_ge(left: &[Self], right: &[Self]) -> ControlFlow<bool>;
}
185
// A `ControlFlow` whose `Continue` type is uninhabited: such a value can only
// be `Break`, so callers may destructure it irrefutably with
// `let AlwaysBreak::Break(b) = ...`.
#[cfg(not(feature = "ferrocene_certified"))]
type AlwaysBreak<B> = ControlFlow<B, crate::convert::Infallible>;
188
#[cfg(not(feature = "ferrocene_certified"))]
impl<A: PartialOrd> SlicePartialOrd for A {
    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        // Stop at the first non-equal (or incomparable) element pair.
        let elem_chain = |a, b| match PartialOrd::partial_cmp(a, b) {
            Some(Ordering::Equal) => ControlFlow::Continue(()),
            non_eq => ControlFlow::Break(non_eq),
        };
        // Common prefix was equal: the length comparison is the answer.
        let len_chain = |a: &_, b: &_| ControlFlow::Break(usize::partial_cmp(a, b));
        // `AlwaysBreak` makes this `let` irrefutable — `len_chain` always breaks.
        let AlwaysBreak::Break(b) = chaining_impl(left, right, elem_chain, len_chain);
        b
    }
}
201
#[cfg(not(feature = "ferrocene_certified"))]
impl<A: PartialOrd> SliceChain for A {
    // All four methods share `chaining_impl`: compare the common prefix with
    // the per-element chaining hook, then chain into the length comparison.
    default fn chaining_lt(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        chaining_impl(left, right, PartialOrd::__chaining_lt, usize::__chaining_lt)
    }
    default fn chaining_le(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        chaining_impl(left, right, PartialOrd::__chaining_le, usize::__chaining_le)
    }
    default fn chaining_gt(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        chaining_impl(left, right, PartialOrd::__chaining_gt, usize::__chaining_gt)
    }
    default fn chaining_ge(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        chaining_impl(left, right, PartialOrd::__chaining_ge, usize::__chaining_ge)
    }
}
217
/// Shared loop for all chained/lexicographic slice comparisons.
///
/// Runs `elem_chain` over the common prefix, short-circuiting (via `?`) on
/// the first `Break`; if the whole prefix continues, resolves the result by
/// applying `len_chain` to the two lengths.
#[inline]
#[cfg(not(feature = "ferrocene_certified"))]
fn chaining_impl<'l, 'r, A: PartialOrd, B, C>(
    left: &'l [A],
    right: &'r [A],
    elem_chain: impl Fn(&'l A, &'r A) -> ControlFlow<B>,
    len_chain: impl for<'a> FnOnce(&'a usize, &'a usize) -> ControlFlow<B, C>,
) -> ControlFlow<B, C> {
    let l = cmp::min(left.len(), right.len());

    // Slice to the loop iteration range to enable bound check
    // elimination in the compiler
    let lhs = &left[..l];
    let rhs = &right[..l];

    for i in 0..l {
        elem_chain(&lhs[i], &rhs[i])?;
    }

    len_chain(&left.len(), &right.len())
}
239
240// This is the impl that we would like to have. Unfortunately it's not sound.
241// See `partial_ord_slice.rs`.
242/*
243impl<A> SlicePartialOrd for A
244where
245    A: Ord,
246{
247    default fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
248        Some(SliceOrd::compare(left, right))
249    }
250}
251*/
252
// Sound replacement for the commented-out blanket `A: Ord` impl above:
// restricted to the `AlwaysApplicableOrd` marker types, the total order
// can safely implement the partial comparison.
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<A: [const] AlwaysApplicableOrd> const SlicePartialOrd for A {
    fn partial_compare(left: &[A], right: &[A]) -> Option<Ordering> {
        Some(SliceOrd::compare(left, right))
    }
}
260
// Marker trait for types whose `Ord` can always stand in for `PartialOrd`
// (see `partial_ord_slice.rs` for why this is not sound for arbitrary `Ord`
// types). `#[rustc_specialization_trait]` restricts which impls are legal so
// it can be used as a specialization bound.
#[rustc_specialization_trait]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
const trait AlwaysApplicableOrd: [const] SliceOrd + [const] Ord {}
265
#[cfg(not(feature = "ferrocene_certified"))]
// Generates `AlwaysApplicableOrd` marker impls; each entry is a bracketed
// generic-parameter list followed by the implementing type.
macro_rules! always_applicable_ord {
    ($([$($p:tt)*] $t:ty,)*) => {
        $(impl<$($p)*> AlwaysApplicableOrd for $t {})*
    }
}
272
#[cfg(not(feature = "ferrocene_certified"))]
// Primitive types with a canonical total order, plus references and
// `Option`s of such types, all qualify.
always_applicable_ord! {
    [] u8, [] u16, [] u32, [] u64, [] u128, [] usize,
    [] i8, [] i16, [] i32, [] i64, [] i128, [] isize,
    [] bool, [] char,
    [T: ?Sized] *const T, [T: ?Sized] *mut T,
    [T: AlwaysApplicableOrd] &T,
    [T: AlwaysApplicableOrd] &mut T,
    [T: AlwaysApplicableOrd] Option<T>,
}
283
#[doc(hidden)]
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
// intermediate trait for specialization of slice's Ord
const trait SliceOrd: Sized {
    // Total lexicographic comparison of two slices of `Self`.
    fn compare(left: &[Self], right: &[Self]) -> Ordering;
}
291
#[cfg(not(feature = "ferrocene_certified"))]
impl<A: Ord> SliceOrd for A {
    default fn compare(left: &[Self], right: &[Self]) -> Ordering {
        // Stop at the first element pair that is not equal.
        let elem_chain = |a, b| match Ord::cmp(a, b) {
            Ordering::Equal => ControlFlow::Continue(()),
            non_eq => ControlFlow::Break(non_eq),
        };
        // Equal prefix: the shorter slice orders first.
        let len_chain = |a: &_, b: &_| ControlFlow::Break(usize::cmp(a, b));
        // `AlwaysBreak` makes this `let` irrefutable — `len_chain` always breaks.
        let AlwaysBreak::Break(b) = chaining_impl(left, right, elem_chain, len_chain);
        b
    }
}
304
/// Marks that a type should be treated as an unsigned byte for comparisons.
///
/// # Safety
/// * The type must be readable as an `u8`, meaning it has to have the same
///   layout as `u8` and always be initialized.
/// * For every `x` and `y` of this type, `Ord(x, y)` must return the same
///   value as `Ord::cmp(transmute::<_, u8>(x), transmute::<_, u8>(y))`.
// `#[rustc_specialization_trait]` restricts which impls are legal so this
// can soundly be used as a specialization bound below.
#[rustc_specialization_trait]
#[cfg(not(feature = "ferrocene_certified"))]
const unsafe trait UnsignedBytewiseOrd: [const] Ord {}
315
// SAFETY (all impls below): each type is a single, always-initialized byte
// whose `Ord` agrees with unsigned byte order (`bool`: 0 < 1; `u8`:
// trivially). NOTE(review): for `NonZero<u8>`, `Option<NonZero<u8>>` and
// `ascii::Char` this relies on their niche/layout guarantees in core —
// confirm against their definitions.
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl const UnsignedBytewiseOrd for bool {}
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl const UnsignedBytewiseOrd for u8 {}
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl const UnsignedBytewiseOrd for NonZero<u8> {}
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl const UnsignedBytewiseOrd for Option<NonZero<u8>> {}
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl const UnsignedBytewiseOrd for ascii::Char {}
331
// `compare_bytes` compares a sequence of unsigned bytes lexicographically, so
// use it if the requirements for `UnsignedBytewiseOrd` are fulfilled.
#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<A: [const] Ord + [const] UnsignedBytewiseOrd> const SliceOrd for A {
    #[inline]
    fn compare(left: &[Self], right: &[Self]) -> Ordering {
        // Since the length of a slice is always less than or equal to
        // isize::MAX, this never underflows.
        let diff = left.len() as isize - right.len() as isize;
        // This comparison gets optimized away (on x86_64 and ARM) because the
        // subtraction updates flags.
        let len = if left.len() < right.len() { left.len() } else { right.len() };
        let left = left.as_ptr().cast();
        let right = right.as_ptr().cast();
        // SAFETY: `left` and `right` are references and are thus guaranteed to
        // be valid. `UnsignedBytewiseOrd` is only implemented for types that
        // are valid u8s and can be compared the same way. We use the minimum
        // of both lengths which guarantees that both regions are valid for
        // reads in that interval.
        let mut order = unsafe { compare_bytes(left, right, len) as isize };
        // A zero `memcmp` result on the common prefix means the length
        // difference (possibly also zero) decides the ordering.
        if order == 0 {
            order = diff;
        }
        order.cmp(&0)
    }
}
359
// Don't generate our own chaining loops for `memcmp`-able things either.

#[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<A: [const] PartialOrd + [const] UnsignedBytewiseOrd> const SliceChain for A {
    // One total bytewise `compare` answers each chained predicate:
    // `Equal` maps to `Continue(())` (outer chain keeps going), anything
    // else breaks with the predicate applied to the ordering.
    #[inline]
    fn chaining_lt(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        match SliceOrd::compare(left, right) {
            Ordering::Equal => ControlFlow::Continue(()),
            ne => ControlFlow::Break(ne.is_lt()),
        }
    }
    #[inline]
    fn chaining_le(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        match SliceOrd::compare(left, right) {
            Ordering::Equal => ControlFlow::Continue(()),
            ne => ControlFlow::Break(ne.is_le()),
        }
    }
    #[inline]
    fn chaining_gt(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        match SliceOrd::compare(left, right) {
            Ordering::Equal => ControlFlow::Continue(()),
            ne => ControlFlow::Break(ne.is_gt()),
        }
    }
    #[inline]
    fn chaining_ge(left: &[Self], right: &[Self]) -> ControlFlow<bool> {
        match SliceOrd::compare(left, right) {
            Ordering::Equal => ControlFlow::Continue(()),
            ne => ControlFlow::Break(ne.is_ge()),
        }
    }
}
394
#[cfg(not(feature = "ferrocene_certified"))]
// Specializable element-search helper; `pub(super)` so sibling slice modules
// can dispatch through it. NOTE(review): presumably backs `[T]::contains` —
// confirm against the caller in the parent module.
pub(super) trait SliceContains: Sized {
    // Returns `true` iff `self` occurs in `x`.
    fn slice_contains(&self, x: &[Self]) -> bool;
}
399
400#[cfg(not(feature = "ferrocene_certified"))]
401impl<T> SliceContains for T
402where
403    T: PartialEq,
404{
405    default fn slice_contains(&self, x: &[Self]) -> bool {
406        x.iter().any(|y| *y == *self)
407    }
408}
409
#[cfg(not(feature = "ferrocene_certified"))]
impl SliceContains for u8 {
    // Bytes use the optimized `memchr` search instead of a linear `==` scan.
    #[inline]
    fn slice_contains(&self, x: &[Self]) -> bool {
        memchr::memchr(*self, x).is_some()
    }
}
417
#[cfg(not(feature = "ferrocene_certified"))]
impl SliceContains for i8 {
    // `i8` reuses the `u8` `memchr` path by viewing the slice as bytes;
    // equality of `i8`s coincides with equality of their `u8` bit patterns.
    #[inline]
    fn slice_contains(&self, x: &[Self]) -> bool {
        let byte = *self as u8;
        // SAFETY: `i8` and `u8` have the same memory layout, thus casting `x.as_ptr()`
        // as `*const u8` is safe. The `x.as_ptr()` comes from a reference and is thus guaranteed
        // to be valid for reads for the length of the slice `x.len()`, which cannot be larger
        // than `isize::MAX`. The returned slice is never mutated.
        let bytes: &[u8] = unsafe { from_raw_parts(x.as_ptr() as *const u8, x.len()) };
        memchr::memchr(byte, bytes).is_some()
    }
}
431
#[cfg(not(feature = "ferrocene_certified"))]
// Generates `SliceContains` impls that search in fixed-size chunks so the
// per-chunk comparison loop can be auto-vectorized.
macro_rules! impl_slice_contains {
    ($($t:ty),*) => {
        $(
            impl SliceContains for $t {
                #[inline]
                fn slice_contains(&self, arr: &[$t]) -> bool {
                    // Make our LANE_COUNT 4x the normal lane count (aiming for 128 bit vectors).
                    // The compiler will nicely unroll it.
                    const LANE_COUNT: usize = 4 * (128 / (size_of::<$t>() * 8));
                    // SIMD
                    let mut chunks = arr.chunks_exact(LANE_COUNT);
                    for chunk in &mut chunks {
                        // `fold` with bitwise `|` (instead of short-circuiting
                        // `any`) keeps the chunk loop branch-free so it can
                        // be vectorized.
                        if chunk.iter().fold(false, |acc, x| acc | (*x == *self)) {
                            return true;
                        }
                    }
                    // Scalar remainder
                    return chunks.remainder().iter().any(|x| *x == *self);
                }
            }
        )*
    };
}
456
#[cfg(not(feature = "ferrocene_certified"))]
// Chunked search for the wider primitives; `u8`/`i8` above use `memchr`.
impl_slice_contains!(u16, u32, u64, i16, i32, i64, f32, f64, usize, isize, char);