// core/slice/ascii.rs
1//! Operations on ASCII `[u8]`.
2
3/// Ferrocene addition: Hidden module to test crate-internal functionality
4#[doc(hidden)]
5#[unstable(feature = "ferrocene_test", issue = "none")]
6pub(crate) mod ferrocene_test;
7
8use core::ascii::EscapeDefault;
9
10use crate::fmt::{self, Write};
11#[cfg(not(all(target_arch = "loongarch64", target_feature = "lsx")))]
12use crate::intrinsics::const_eval_select;
13use crate::{ascii, iter, ops};
14
impl [u8] {
    /// Checks if all bytes in this slice are within the ASCII range.
    ///
    /// An empty slice returns `true`.
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
    #[must_use]
    #[inline]
    #[ferrocene::prevalidated]
    pub const fn is_ascii(&self) -> bool {
        // Delegates to the platform-selected free function later in this file
        // (SWAR fallback, SSE2, or LSX variant, chosen by `cfg`).
        is_ascii(self)
    }

    /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
    /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
        if self.is_ascii() {
            // SAFETY: Just checked that it's ASCII
            Some(unsafe { self.as_ascii_unchecked() })
        } else {
            None
        }
    }

    /// Converts this slice of bytes into a slice of ASCII characters,
    /// without checking whether they're valid.
    ///
    /// # Safety
    ///
    /// Every byte in the slice must be in `0..=127`, or else this is UB.
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
        // NOTE(review): this cast assumes `ascii::Char` is layout-compatible
        // with `u8` — confirm against its definition in `core::ascii`.
        let byte_ptr: *const [u8] = self;
        let ascii_ptr = byte_ptr as *const [ascii::Char];
        // SAFETY: The caller promised all the bytes are ASCII
        unsafe { &*ascii_ptr }
    }

    /// Checks that two slices are an ASCII case-insensitive match.
    ///
    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
    /// but without allocating and copying temporaries.
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_eq_ignore_ascii_case", since = "1.89.0")]
    #[must_use]
    #[inline]
    #[ferrocene::prevalidated]
    pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
        // Unequal lengths can never match, and the chunked path below relies
        // on the lengths being equal.
        if self.len() != other.len() {
            return false;
        }

        #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
        {
            const CHUNK_SIZE: usize = 16;
            // The following function has two invariants:
            // 1. The slice lengths must be equal, which we checked above.
            // 2. The slice lengths must be greater than or equal to N, which
            //    this if-statement is checking.
            if self.len() >= CHUNK_SIZE {
                return self.eq_ignore_ascii_case_chunks::<CHUNK_SIZE>(other);
            }
        }

        self.eq_ignore_ascii_case_simple(other)
    }

    /// ASCII case-insensitive equality check without chunk-at-a-time
    /// optimization.
    #[inline]
    #[ferrocene::prevalidated]
    const fn eq_ignore_ascii_case_simple(&self, other: &[u8]) -> bool {
        // FIXME(const-hack): This implementation can be reverted when
        // `core::iter::zip` is allowed in const. The original implementation:
        //  self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
        let mut a = self;
        let mut b = other;

        // Walk both slices in lockstep one byte at a time; the pattern match
        // ends as soon as either slice is exhausted.
        while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
            if first_a.eq_ignore_ascii_case(&first_b) {
                a = rest_a;
                b = rest_b;
            } else {
                return false;
            }
        }

        true
    }

    /// Optimized version of `eq_ignore_ascii_case` to process chunks at a time.
    ///
    /// Platforms that have SIMD instructions may benefit from this
    /// implementation over `eq_ignore_ascii_case_simple`.
    ///
    /// # Invariants
    ///
    /// The caller must guarantee that the slices are equal in length, and the
    /// slice lengths are greater than or equal to `N` bytes.
    #[ferrocene::prevalidated]
    #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
    #[inline]
    const fn eq_ignore_ascii_case_chunks<const N: usize>(&self, other: &[u8]) -> bool {
        // FIXME(const-hack): The while-loops that follow should be replaced by
        // for-loops when available in const.

        let (self_chunks, self_rem) = self.as_chunks::<N>();
        let (other_chunks, _) = other.as_chunks::<N>();

        // Branchless check to encourage auto-vectorization
        #[ferrocene::prevalidated]
        #[inline(always)]
        const fn eq_ignore_ascii_inner<const L: usize>(lhs: &[u8; L], rhs: &[u8; L]) -> bool {
            // Accumulate with `&=` instead of returning early so the loop has
            // no data-dependent branches.
            let mut equal_ascii = true;
            let mut j = 0;
            while j < L {
                equal_ascii &= lhs[j].eq_ignore_ascii_case(&rhs[j]);
                j += 1;
            }

            equal_ascii
        }

        // Process the chunks, returning early if an inequality is found
        let mut i = 0;
        while i < self_chunks.len() && i < other_chunks.len() {
            if !eq_ignore_ascii_inner(&self_chunks[i], &other_chunks[i]) {
                return false;
            }
            i += 1;
        }

        // Check the length invariant which is necessary for the tail-handling
        // logic to be correct. This should have been upheld by the caller,
        // otherwise lengths less than N will compare as true without any
        // checking.
        debug_assert!(self.len() >= N);

        // If there are remaining tails, load the last N bytes in the slices to
        // avoid falling back to per-byte checking. The last-N window overlaps
        // bytes already compared above, but re-comparing bytes that matched
        // case-insensitively is harmless for an equality check.
        if !self_rem.is_empty() {
            if let (Some(a_rem), Some(b_rem)) = (self.last_chunk::<N>(), other.last_chunk::<N>()) {
                if !eq_ignore_ascii_inner(a_rem, b_rem) {
                    return false;
                }
            }
        }

        true
    }

    /// Converts this slice to its ASCII upper case equivalent in-place.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new uppercased value without modifying the existing one, use
    /// [`to_ascii_uppercase`].
    ///
    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_uppercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_uppercase();
            i += 1;
        }
    }

    /// Converts this slice to its ASCII lower case equivalent in-place.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new lowercased value without modifying the existing one, use
    /// [`to_ascii_lowercase`].
    ///
    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_lowercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_lowercase();
            i += 1;
        }
    }

    /// Returns an iterator that produces an escaped version of this slice,
    /// treating it as an ASCII string.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = b"0\t\r\n'\"\\\x9d";
    /// let escaped = s.escape_ascii().to_string();
    /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
    /// ```
    #[must_use = "this returns the escaped bytes as an iterator, \
                  without modifying the original"]
    #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
    #[ferrocene::prevalidated]
    pub fn escape_ascii(&self) -> EscapeAscii<'_> {
        // Each byte expands to its `ascii::escape_default` sequence lazily.
        EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
    }

    /// Returns a byte slice with leading ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`]. Importantly, this definition excludes
    /// the `0x0B` byte even though it has the Unicode [`White_Space`] property
    /// and is removed by [`str::trim_start`].
    ///
    /// [`White_Space`]: https://www.unicode.org/reports/tr44/#White_Space
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
    /// assert_eq!(b"  ".trim_ascii_start(), b"");
    /// assert_eq!(b"".trim_ascii_start(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_start(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [first, rest @ ..] = bytes {
            if first.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with trailing ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`]. Importantly, this definition excludes
    /// the `0x0B` byte even though it has the Unicode [`White_Space`] property
    /// and is removed by [`str::trim_end`].
    ///
    /// [`White_Space`]: https://www.unicode.org/reports/tr44/#White_Space
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
    /// assert_eq!(b"  ".trim_ascii_end(), b"");
    /// assert_eq!(b"".trim_ascii_end(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_end(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [rest @ .., last] = bytes {
            if last.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with leading and trailing ASCII whitespace bytes
    /// removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`]. Importantly, this definition excludes
    /// the `0x0B` byte even though it has the Unicode [`White_Space`] property
    /// and is removed by [`str::trim`].
    ///
    /// [`White_Space`]: https://www.unicode.org/reports/tr44/#White_Space
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
    /// assert_eq!(b"  ".trim_ascii(), b"");
    /// assert_eq!(b"".trim_ascii(), b"");
    /// ```
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii(&self) -> &[u8] {
        self.trim_ascii_start().trim_ascii_end()
    }
}
323
// `EscapeByte` is a named zero-sized function type: the `impl_fn_for_zst!`
// macro generates the unit struct plus `Fn`/`FnMut`/`FnOnce` impls, so the
// type can be written out in `EscapeAscii`'s `FlatMap` field (an anonymous
// closure type could not be named there).
impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}
330
/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[ferrocene::prevalidated]
pub struct EscapeAscii<'a> {
    // Each input byte is expanded to its `ascii::escape_default` sequence;
    // all iterator methods simply delegate to this flat-map.
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}
342
343#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
344impl<'a> iter::Iterator for EscapeAscii<'a> {
345    type Item = u8;
346    #[inline]
347    fn next(&mut self) -> Option<u8> {
348        self.inner.next()
349    }
350    #[inline]
351    fn size_hint(&self) -> (usize, Option<usize>) {
352        self.inner.size_hint()
353    }
354    #[inline]
355    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
356    where
357        Fold: FnMut(Acc, Self::Item) -> R,
358        R: ops::Try<Output = Acc>,
359    {
360        self.inner.try_fold(init, fold)
361    }
362    #[inline]
363    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
364    where
365        Fold: FnMut(Acc, Self::Item) -> Acc,
366    {
367        self.inner.fold(init, fold)
368    }
369    #[inline]
370    fn last(mut self) -> Option<u8> {
371        self.next_back()
372    }
373}
374
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
    // Delegates to the inner flat-map's back end; also used by `last` above.
    fn next_back(&mut self) -> Option<u8> {
        self.inner.next_back()
    }
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
// NOTE(review): fusedness is inherited from the delegated-to inner `FlatMap`
// — confirm the `FusedIterator` impl for `FlatMap` covers these components.
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Display for EscapeAscii<'a> {
    #[ferrocene::prevalidated]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // disassemble iterator, including front/back parts of flatmap in case it has been partially consumed
        let (front, slice, back) = self.clone().inner.into_parts();
        let front = front.unwrap_or(EscapeDefault::empty());
        let mut bytes = slice.unwrap_or_default().as_slice();
        let back = back.unwrap_or(EscapeDefault::empty());

        // usually empty, so the formatter won't have to do any work
        for byte in front {
            f.write_char(byte as char)?;
        }

        // True for bytes that `ascii::escape_default` would expand: anything
        // outside the printable range 0x20..=0x7E, plus `\`, `'` and `"`.
        #[ferrocene::prevalidated]
        fn needs_escape(b: u8) -> bool {
            b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
        }

        while bytes.len() > 0 {
            // fast path for the printable, non-escaped subset of ascii
            let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
            // SAFETY: prefix length was derived by counting bytes in the same slice, so it's in-bounds
            let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
            // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
            let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };

            f.write_str(prefix)?; // the fast part

            bytes = remainder;

            if let Some(&b) = bytes.first() {
                // guaranteed to be non-empty, better to write it as a str
                fmt::Display::fmt(&ascii::escape_default(b), f)?;
                bytes = &bytes[1..];
            }
        }

        // also usually empty
        for byte in back {
            f.write_char(byte as char)?;
        }
        Ok(())
    }
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Debug for EscapeAscii<'a> {
    #[ferrocene::prevalidated]
    // Deliberately opaque: prints `EscapeAscii { .. }` without exposing the
    // internal flat-map state.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("EscapeAscii").finish_non_exhaustive()
    }
}
436
/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" ways produces under `-C opt-level=s`.  If you
/// touch it, be sure to run (and update if needed) the assembly test.
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
#[ferrocene::prevalidated]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    // Consumes the slice from the back one byte at a time; direction doesn't
    // matter for the result since ASCII-ness is a per-byte property.
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    // Empty iff every byte passed the check (an empty input is trivially ASCII).
    bytes.is_empty()
}
455
/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (above) returns true, then we know the answer is false.
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
#[ferrocene::prevalidated]
const fn is_ascii(s: &[u8]) -> bool {
    // The runtime version behaves the same as the compiletime version, it's
    // just more optimized.
    const_eval_select!(
        @capture { s: &[u8] } -> bool:
        if const {
            is_ascii_simple(s)
        } else {
            /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
            /// from `../str/mod.rs`, which does something similar for utf8 validation.
            #[ferrocene::prevalidated]
            const fn contains_nonascii(v: usize) -> bool {
                // 0x80 repeated into every byte lane: AND is nonzero iff some
                // byte has its high (non-ASCII) bit set.
                const NONASCII_MASK: usize = usize::repeat_u8(0x80);
                (NONASCII_MASK & v) != 0
            }

            const USIZE_SIZE: usize = size_of::<usize>();

            let len = s.len();
            let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

            // If we wouldn't gain anything from the word-at-a-time implementation, fall
            // back to a scalar loop.
            //
            // We also do this for architectures where `size_of::<usize>()` isn't
            // sufficient alignment for `usize`, because it's a weird edge case.
            if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
                return is_ascii_simple(s);
            }

            // We always read the first word unaligned, which means if `align_offset`
            // is 0 we'd read the same value again for the aligned read.
            let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

            let start = s.as_ptr();
            // SAFETY: We returned early above when `len < USIZE_SIZE`, so at this
            // point `len >= USIZE_SIZE` and reading one `usize` is in-bounds.
            let first_word = unsafe { (start as *const usize).read_unaligned() };

            if contains_nonascii(first_word) {
                return false;
            }
            // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
            // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
            // checked above.
            debug_assert!(offset_to_aligned <= len);

            // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
            // middle chunk of the slice.
            let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

            // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
            let mut byte_pos = offset_to_aligned;

            // Paranoia check about alignment, since we're about to do a bunch of
            // unaligned loads. In practice this should be impossible barring a bug in
            // `align_offset` though.
            // While this method is allowed to spuriously fail in CTFE, if it doesn't
            // have alignment information it should have given a `usize::MAX` for
            // `align_offset` earlier, sending things through the scalar path instead of
            // this one, so this check should pass if it's reachable.
            debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));

            // Read subsequent words until the last aligned word, excluding the last
            // aligned word by itself to be done in tail check later, to ensure that
            // tail is always one `usize` at most to extra branch `byte_pos == len`.
            while byte_pos < len - USIZE_SIZE {
                // Sanity check that the read is in bounds
                debug_assert!(byte_pos + USIZE_SIZE <= len);
                // And that our assumptions about `byte_pos` hold.
                debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

                // SAFETY: We know `word_ptr` is properly aligned (because of
                // `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
                let word = unsafe { word_ptr.read() };
                if contains_nonascii(word) {
                    return false;
                }

                byte_pos += USIZE_SIZE;
                // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
                // after this `add`, `word_ptr` will be at most one-past-the-end.
                word_ptr = unsafe { word_ptr.add(1) };
            }

            // Sanity check to ensure there really is only one `usize` left. This should
            // be guaranteed by our loop condition.
            debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

            // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
            let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

            !contains_nonascii(last_word)
        }
    )
}
570
/// Chunk size for SSE2 vectorized ASCII checking (4x 16-byte loads).
///
/// Also the cutoff below which `is_ascii` uses the scalar/word path instead
/// of calling into the SSE2 routine.
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
const SSE2_CHUNK_SIZE: usize = 64;
574
575#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
576#[inline]
577fn is_ascii_sse2(bytes: &[u8]) -> bool {
578    use crate::arch::x86_64::{__m128i, _mm_loadu_si128, _mm_movemask_epi8, _mm_or_si128};
579
580    let (chunks, rest) = bytes.as_chunks::<SSE2_CHUNK_SIZE>();
581
582    for chunk in chunks {
583        let ptr = chunk.as_ptr();
584        // SAFETY: chunk is 64 bytes. SSE2 is baseline on x86_64.
585        let mask = unsafe {
586            let a1 = _mm_loadu_si128(ptr as *const __m128i);
587            let a2 = _mm_loadu_si128(ptr.add(16) as *const __m128i);
588            let b1 = _mm_loadu_si128(ptr.add(32) as *const __m128i);
589            let b2 = _mm_loadu_si128(ptr.add(48) as *const __m128i);
590            // OR all chunks - if any byte has high bit set, combined will too.
591            let combined = _mm_or_si128(_mm_or_si128(a1, a2), _mm_or_si128(b1, b2));
592            // Create a mask from the MSBs of each byte.
593            // If any byte is >= 128, its MSB is 1, so the mask will be non-zero.
594            _mm_movemask_epi8(combined)
595        };
596        if mask != 0 {
597            return false;
598        }
599    }
600
601    // Handle remaining bytes
602    rest.iter().all(|b| b.is_ascii())
603}
604
/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64`.
///
/// Uses explicit SSE2 intrinsics to prevent LLVM from auto-vectorizing with
/// broken AVX-512 code that extracts mask bits one-by-one.
#[ferrocene::prevalidated]
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn is_ascii(bytes: &[u8]) -> bool {
    const USIZE_SIZE: usize = size_of::<usize>();
    // 0x80 in every byte lane (usize::MAX / 255 == 0x0101...); ANDing a word
    // with this is nonzero iff some byte has its high (non-ASCII) bit set.
    const NONASCII_MASK: usize = usize::MAX / 255 * 0x80;

    const_eval_select!(
        @capture { bytes: &[u8] } -> bool:
        if const {
            is_ascii_simple(bytes)
        } else {
            // For small inputs, use usize-at-a-time processing to avoid SSE2 call overhead.
            if bytes.len() < SSE2_CHUNK_SIZE {
                let chunks = bytes.chunks_exact(USIZE_SIZE);
                let remainder = chunks.remainder();
                for chunk in chunks {
                    // `chunk` is exactly USIZE_SIZE bytes, so the array
                    // conversion cannot fail.
                    let word = usize::from_ne_bytes(chunk.try_into().unwrap());
                    if (word & NONASCII_MASK) != 0 {
                        return false;
                    }
                }
                return remainder.iter().all(|b| b.is_ascii());
            }

            // Bug in the lint: is_ascii isn't validated, only the expansion of `is_ascii::runtime`
            #[allow(ferrocene::unvalidated)]
            is_ascii_sse2(bytes)
        }
    )
}
641
/// ASCII test optimized to use the `vmskltz.b` instruction on `loongarch64`.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(all(target_arch = "loongarch64", target_feature = "lsx"))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `vmskltz.b`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    let mut i = 0;

    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `vmskltz.b` instruction on loongarch64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset. Counting instead of branching per byte keeps the
        // inner loop branchless and vectorizable.
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127 so count is equal to chunk size.
        // (CHUNK_SIZE is 32, so the count always fits in a `u8`.)
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % N` bytes.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}