// core/slice/ascii.rs

1//! Operations on ASCII `[u8]`.
2
/// Ferrocene addition: Hidden module to test crate-internal functionality
// Only built outside the certified subset; exposed (hidden) so the test
// harness can reach internals that are otherwise `pub(crate)`.
#[doc(hidden)]
#[unstable(feature = "ferrocene_test", issue = "none")]
#[cfg(not(feature = "ferrocene_subset"))]
pub(crate) mod ferrocene_test;
8
9use core::ascii::EscapeDefault;
10
11use crate::fmt::{self, Write};
12#[cfg(not(all(target_arch = "loongarch64", target_feature = "lsx")))]
13use crate::intrinsics::const_eval_select;
14#[cfg(not(feature = "ferrocene_subset"))]
15use crate::{ascii, iter, ops};
16
17// Ferrocene addition: imports for the certified subset
18#[rustfmt::skip]
19#[cfg(feature = "ferrocene_subset")]
20use crate::{ascii, iter};
21
22impl [u8] {
23    /// Checks if all bytes in this slice are within the ASCII range.
24    ///
25    /// An empty slice returns `true`.
26    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
27    #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
28    #[must_use]
29    #[inline]
30    pub const fn is_ascii(&self) -> bool {
31        is_ascii(self)
32    }
33
34    /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
35    /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
36    #[cfg(not(feature = "ferrocene_subset"))]
37    #[unstable(feature = "ascii_char", issue = "110998")]
38    #[must_use]
39    #[inline]
40    pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
41        if self.is_ascii() {
42            // SAFETY: Just checked that it's ASCII
43            Some(unsafe { self.as_ascii_unchecked() })
44        } else {
45            None
46        }
47    }
48
49    /// Converts this slice of bytes into a slice of ASCII characters,
50    /// without checking whether they're valid.
51    ///
52    /// # Safety
53    ///
54    /// Every byte in the slice must be in `0..=127`, or else this is UB.
55    #[cfg(not(feature = "ferrocene_subset"))]
56    #[unstable(feature = "ascii_char", issue = "110998")]
57    #[must_use]
58    #[inline]
59    pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
60        let byte_ptr: *const [u8] = self;
61        let ascii_ptr = byte_ptr as *const [ascii::Char];
62        // SAFETY: The caller promised all the bytes are ASCII
63        unsafe { &*ascii_ptr }
64    }
65
66    /// Checks that two slices are an ASCII case-insensitive match.
67    ///
68    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
69    /// but without allocating and copying temporaries.
70    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
71    #[rustc_const_stable(feature = "const_eq_ignore_ascii_case", since = "1.89.0")]
72    #[must_use]
73    #[inline]
74    pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
75        if self.len() != other.len() {
76            return false;
77        }
78
79        #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
80        {
81            const CHUNK_SIZE: usize = 16;
82            // The following function has two invariants:
83            // 1. The slice lengths must be equal, which we checked above.
84            // 2. The slice lengths must greater than or equal to N, which this
85            //    if-statement is checking.
86            if self.len() >= CHUNK_SIZE {
87                return self.eq_ignore_ascii_case_chunks::<CHUNK_SIZE>(other);
88            }
89        }
90
91        self.eq_ignore_ascii_case_simple(other)
92    }
93
94    /// ASCII case-insensitive equality check without chunk-at-a-time
95    /// optimization.
96    #[inline]
97    const fn eq_ignore_ascii_case_simple(&self, other: &[u8]) -> bool {
98        // FIXME(const-hack): This implementation can be reverted when
99        // `core::iter::zip` is allowed in const. The original implementation:
100        //  self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
101        let mut a = self;
102        let mut b = other;
103
104        while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
105            if first_a.eq_ignore_ascii_case(&first_b) {
106                a = rest_a;
107                b = rest_b;
108            } else {
109                return false;
110            }
111        }
112
113        true
114    }
115
116    /// Optimized version of `eq_ignore_ascii_case` to process chunks at a time.
117    ///
118    /// Platforms that have SIMD instructions may benefit from this
119    /// implementation over `eq_ignore_ascii_case_simple`.
120    ///
121    /// # Invariants
122    ///
123    /// The caller must guarantee that the slices are equal in length, and the
124    /// slice lengths are greater than or equal to `N` bytes.
125    #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
126    #[inline]
127    const fn eq_ignore_ascii_case_chunks<const N: usize>(&self, other: &[u8]) -> bool {
128        // FIXME(const-hack): The while-loops that follow should be replaced by
129        // for-loops when available in const.
130
131        let (self_chunks, self_rem) = self.as_chunks::<N>();
132        let (other_chunks, _) = other.as_chunks::<N>();
133
134        // Branchless check to encourage auto-vectorization
135        #[inline(always)]
136        const fn eq_ignore_ascii_inner<const L: usize>(lhs: &[u8; L], rhs: &[u8; L]) -> bool {
137            let mut equal_ascii = true;
138            let mut j = 0;
139            while j < L {
140                equal_ascii &= lhs[j].eq_ignore_ascii_case(&rhs[j]);
141                j += 1;
142            }
143
144            equal_ascii
145        }
146
147        // Process the chunks, returning early if an inequality is found
148        let mut i = 0;
149        while i < self_chunks.len() && i < other_chunks.len() {
150            if !eq_ignore_ascii_inner(&self_chunks[i], &other_chunks[i]) {
151                return false;
152            }
153            i += 1;
154        }
155
156        // Check the length invariant which is necessary for the tail-handling
157        // logic to be correct. This should have been upheld by the caller,
158        // otherwise lengths less than N will compare as true without any
159        // checking.
160        debug_assert!(self.len() >= N);
161
162        // If there are remaining tails, load the last N bytes in the slices to
163        // avoid falling back to per-byte checking.
164        if !self_rem.is_empty() {
165            if let (Some(a_rem), Some(b_rem)) = (self.last_chunk::<N>(), other.last_chunk::<N>()) {
166                if !eq_ignore_ascii_inner(a_rem, b_rem) {
167                    return false;
168                }
169            }
170        }
171
172        true
173    }
174
175    /// Converts this slice to its ASCII upper case equivalent in-place.
176    ///
177    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
178    /// but non-ASCII letters are unchanged.
179    ///
180    /// To return a new uppercased value without modifying the existing one, use
181    /// [`to_ascii_uppercase`].
182    ///
183    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
184    #[cfg(not(feature = "ferrocene_subset"))]
185    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
186    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
187    #[inline]
188    pub const fn make_ascii_uppercase(&mut self) {
189        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
190        let mut i = 0;
191        while i < self.len() {
192            let byte = &mut self[i];
193            byte.make_ascii_uppercase();
194            i += 1;
195        }
196    }
197
198    /// Converts this slice to its ASCII lower case equivalent in-place.
199    ///
200    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
201    /// but non-ASCII letters are unchanged.
202    ///
203    /// To return a new lowercased value without modifying the existing one, use
204    /// [`to_ascii_lowercase`].
205    ///
206    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
207    #[cfg(not(feature = "ferrocene_subset"))]
208    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
209    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
210    #[inline]
211    pub const fn make_ascii_lowercase(&mut self) {
212        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
213        let mut i = 0;
214        while i < self.len() {
215            let byte = &mut self[i];
216            byte.make_ascii_lowercase();
217            i += 1;
218        }
219    }
220
221    /// Returns an iterator that produces an escaped version of this slice,
222    /// treating it as an ASCII string.
223    ///
224    /// # Examples
225    ///
226    /// ```
227    /// let s = b"0\t\r\n'\"\\\x9d";
228    /// let escaped = s.escape_ascii().to_string();
229    /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
230    /// ```
231    #[must_use = "this returns the escaped bytes as an iterator, \
232                  without modifying the original"]
233    #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
234    pub fn escape_ascii(&self) -> EscapeAscii<'_> {
235        EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
236    }
237
238    /// Returns a byte slice with leading ASCII whitespace bytes removed.
239    ///
240    /// 'Whitespace' refers to the definition used by
241    /// [`u8::is_ascii_whitespace`].
242    ///
243    /// # Examples
244    ///
245    /// ```
246    /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
247    /// assert_eq!(b"  ".trim_ascii_start(), b"");
248    /// assert_eq!(b"".trim_ascii_start(), b"");
249    /// ```
250    #[cfg(not(feature = "ferrocene_subset"))]
251    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
252    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
253    #[inline]
254    pub const fn trim_ascii_start(&self) -> &[u8] {
255        let mut bytes = self;
256        // Note: A pattern matching based approach (instead of indexing) allows
257        // making the function const.
258        while let [first, rest @ ..] = bytes {
259            if first.is_ascii_whitespace() {
260                bytes = rest;
261            } else {
262                break;
263            }
264        }
265        bytes
266    }
267
268    /// Returns a byte slice with trailing ASCII whitespace bytes removed.
269    ///
270    /// 'Whitespace' refers to the definition used by
271    /// [`u8::is_ascii_whitespace`].
272    ///
273    /// # Examples
274    ///
275    /// ```
276    /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
277    /// assert_eq!(b"  ".trim_ascii_end(), b"");
278    /// assert_eq!(b"".trim_ascii_end(), b"");
279    /// ```
280    #[cfg(not(feature = "ferrocene_subset"))]
281    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
282    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
283    #[inline]
284    pub const fn trim_ascii_end(&self) -> &[u8] {
285        let mut bytes = self;
286        // Note: A pattern matching based approach (instead of indexing) allows
287        // making the function const.
288        while let [rest @ .., last] = bytes {
289            if last.is_ascii_whitespace() {
290                bytes = rest;
291            } else {
292                break;
293            }
294        }
295        bytes
296    }
297
298    /// Returns a byte slice with leading and trailing ASCII whitespace bytes
299    /// removed.
300    ///
301    /// 'Whitespace' refers to the definition used by
302    /// [`u8::is_ascii_whitespace`].
303    ///
304    /// # Examples
305    ///
306    /// ```
307    /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
308    /// assert_eq!(b"  ".trim_ascii(), b"");
309    /// assert_eq!(b"".trim_ascii(), b"");
310    /// ```
311    #[cfg(not(feature = "ferrocene_subset"))]
312    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
313    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
314    #[inline]
315    pub const fn trim_ascii(&self) -> &[u8] {
316        self.trim_ascii_start().trim_ascii_end()
317    }
318}
319
// `EscapeByte` is a named zero-sized closure type mapping a byte to its
// `ascii::EscapeDefault` escape sequence. Naming the type (rather than using
// an anonymous closure) lets `EscapeAscii` spell out its `FlatMap` field
// type; `impl_fn_for_zst!` generates the `Fn` family of impls for it.
impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}
326
/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct EscapeAscii<'a> {
    // Flat-maps each input byte through `EscapeByte` (see above) so the
    // iterator yields the escape sequence of every byte in turn.
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}
337
338#[cfg(not(feature = "ferrocene_subset"))]
339#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
340impl<'a> iter::Iterator for EscapeAscii<'a> {
341    type Item = u8;
342    #[inline]
343    fn next(&mut self) -> Option<u8> {
344        self.inner.next()
345    }
346    #[inline]
347    fn size_hint(&self) -> (usize, Option<usize>) {
348        self.inner.size_hint()
349    }
350    #[inline]
351    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
352    where
353        Fold: FnMut(Acc, Self::Item) -> R,
354        R: ops::Try<Output = Acc>,
355    {
356        self.inner.try_fold(init, fold)
357    }
358    #[inline]
359    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
360    where
361        Fold: FnMut(Acc, Self::Item) -> Acc,
362    {
363        self.inner.fold(init, fold)
364    }
365    #[inline]
366    fn last(mut self) -> Option<u8> {
367        self.next_back()
368    }
369}
370
#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
    // Delegates to the inner flat-map iterator, which supports iteration
    // from the back.
    fn next_back(&mut self) -> Option<u8> {
        self.inner.next_back()
    }
}
// Marker impl: `EscapeAscii` inherits the fused property from the iterator
// it wraps and delegates to.
#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
381#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
382impl<'a> fmt::Display for EscapeAscii<'a> {
383    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
384        // disassemble iterator, including front/back parts of flatmap in case it has been partially consumed
385        let (front, slice, back) = self.clone().inner.into_parts();
386        let front = front.unwrap_or(EscapeDefault::empty());
387        let mut bytes = slice.unwrap_or_default().as_slice();
388        let back = back.unwrap_or(EscapeDefault::empty());
389
390        // usually empty, so the formatter won't have to do any work
391        for byte in front {
392            f.write_char(byte as char)?;
393        }
394
395        fn needs_escape(b: u8) -> bool {
396            b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
397        }
398
399        while bytes.len() > 0 {
400            // fast path for the printable, non-escaped subset of ascii
401            let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
402            // SAFETY: prefix length was derived by counting bytes in the same splice, so it's in-bounds
403            let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
404            // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
405            let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };
406
407            f.write_str(prefix)?; // the fast part
408
409            bytes = remainder;
410
411            if let Some(&b) = bytes.first() {
412                // guaranteed to be non-empty, better to write it as a str
413                fmt::Display::fmt(&ascii::escape_default(b), f)?;
414                bytes = &bytes[1..];
415            }
416        }
417
418        // also usually empty
419        for byte in back {
420            f.write_char(byte as char)?;
421        }
422        Ok(())
423    }
424}
425#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
426impl<'a> fmt::Debug for EscapeAscii<'a> {
427    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
428        f.debug_struct("EscapeAscii").finish_non_exhaustive()
429    }
430}
431
/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" ways produces under `-C opt-level=s`.  If you
/// touch it, be sure to run (and update if needed) the assembly test.
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    // Peel ASCII bytes off the back of the slice via slice patterns (which
    // keeps this usable in `const fn`), stopping at the first non-ASCII byte.
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    // The slice was fully consumed iff every byte was ASCII.
    bytes.is_empty()
}
449
/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (above) returns true, then we know the answer is false.
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn is_ascii(s: &[u8]) -> bool {
    // The runtime version behaves the same as the compiletime version, it's
    // just more optimized.
    const_eval_select!(
        @capture { s: &[u8] } -> bool:
        if const {
            is_ascii_simple(s)
        } else {
            /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
            /// from `../str/mod.rs`, which does something similar for utf8 validation.
            const fn contains_nonascii(v: usize) -> bool {
                // 0x80 repeated in every byte lane of a `usize`.
                const NONASCII_MASK: usize = usize::repeat_u8(0x80);
                (NONASCII_MASK & v) != 0
            }

            const USIZE_SIZE: usize = size_of::<usize>();

            let len = s.len();
            let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

            // If we wouldn't gain anything from the word-at-a-time implementation, fall
            // back to a scalar loop.
            //
            // We also do this for architectures where `size_of::<usize>()` isn't
            // sufficient alignment for `usize`, because it's a weird edge case.
            if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
                return is_ascii_simple(s);
            }

            // We always read the first word unaligned, which means that if
            // `align_offset` is 0, we'd read the same value again for the
            // aligned read.
            let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

            let start = s.as_ptr();
            // SAFETY: We verified `len >= USIZE_SIZE` above (the early return),
            // so reading one `usize` from the start is in bounds.
            let first_word = unsafe { (start as *const usize).read_unaligned() };

            if contains_nonascii(first_word) {
                return false;
            }
            // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
            // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
            // checked above.
            debug_assert!(offset_to_aligned <= len);

            // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
            // middle chunk of the slice.
            let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

            // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
            let mut byte_pos = offset_to_aligned;

            // Paranoia check about alignment, since we're about to do a bunch of
            // unaligned loads. In practice this should be impossible barring a bug in
            // `align_offset` though.
            // While this method is allowed to spuriously fail in CTFE, if it doesn't
            // have alignment information it should have given a `usize::MAX` for
            // `align_offset` earlier, sending things through the scalar path instead of
            // this one, so this check should pass if it's reachable.
            debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));

            // Read subsequent words until the last aligned word, excluding the last
            // aligned word by itself to be done in tail check later, to ensure that
            // the tail is always at most one `usize` without an extra
            // `byte_pos == len` branch.
            while byte_pos < len - USIZE_SIZE {
                // Sanity check that the read is in bounds
                debug_assert!(byte_pos + USIZE_SIZE <= len);
                // And that our assumptions about `byte_pos` hold.
                debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

                // SAFETY: We know `word_ptr` is properly aligned (because of
                // `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
                let word = unsafe { word_ptr.read() };
                if contains_nonascii(word) {
                    return false;
                }

                byte_pos += USIZE_SIZE;
                // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
                // after this `add`, `word_ptr` will be at most one-past-the-end.
                word_ptr = unsafe { word_ptr.add(1) };
            }

            // Sanity check to ensure there really is only one `usize` left. This should
            // be guaranteed by our loop condition.
            debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

            // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
            let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

            !contains_nonascii(last_word)
        }
    )
}
562
563/// Chunk size for SSE2 vectorized ASCII checking (4x 16-byte loads).
564#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
565const SSE2_CHUNK_SIZE: usize = 64;
566
567#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
568#[inline]
569fn is_ascii_sse2(bytes: &[u8]) -> bool {
570    use crate::arch::x86_64::{__m128i, _mm_loadu_si128, _mm_movemask_epi8, _mm_or_si128};
571
572    let (chunks, rest) = bytes.as_chunks::<SSE2_CHUNK_SIZE>();
573
574    for chunk in chunks {
575        let ptr = chunk.as_ptr();
576        // SAFETY: chunk is 64 bytes. SSE2 is baseline on x86_64.
577        let mask = unsafe {
578            let a1 = _mm_loadu_si128(ptr as *const __m128i);
579            let a2 = _mm_loadu_si128(ptr.add(16) as *const __m128i);
580            let b1 = _mm_loadu_si128(ptr.add(32) as *const __m128i);
581            let b2 = _mm_loadu_si128(ptr.add(48) as *const __m128i);
582            // OR all chunks - if any byte has high bit set, combined will too.
583            let combined = _mm_or_si128(_mm_or_si128(a1, a2), _mm_or_si128(b1, b2));
584            // Create a mask from the MSBs of each byte.
585            // If any byte is >= 128, its MSB is 1, so the mask will be non-zero.
586            _mm_movemask_epi8(combined)
587        };
588        if mask != 0 {
589            return false;
590        }
591    }
592
593    // Handle remaining bytes
594    rest.iter().all(|b| b.is_ascii())
595}
596
/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64`.
///
/// Uses explicit SSE2 intrinsics to prevent LLVM from auto-vectorizing with
/// broken AVX-512 code that extracts mask bits one-by-one.
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn is_ascii(bytes: &[u8]) -> bool {
    const USIZE_SIZE: usize = size_of::<usize>();
    // 0x80 repeated in every byte lane of a `usize`
    // (`usize::MAX / 255` is 0x0101...01).
    const NONASCII_MASK: usize = usize::MAX / 255 * 0x80;

    const_eval_select!(
        @capture { bytes: &[u8] } -> bool:
        if const {
            // Compile-time evaluation cannot use intrinsics; the simple
            // byte-at-a-time loop behaves identically.
            is_ascii_simple(bytes)
        } else {
            // For small inputs, use usize-at-a-time processing to avoid SSE2 call overhead.
            if bytes.len() < SSE2_CHUNK_SIZE {
                let chunks = bytes.chunks_exact(USIZE_SIZE);
                let remainder = chunks.remainder();
                for chunk in chunks {
                    // SWAR test: any byte with its high bit set makes the
                    // masked word non-zero.
                    let word = usize::from_ne_bytes(chunk.try_into().unwrap());
                    if (word & NONASCII_MASK) != 0 {
                        return false;
                    }
                }
                // Fewer than `USIZE_SIZE` bytes remain; check them directly.
                return remainder.iter().all(|b| b.is_ascii());
            }

            is_ascii_sse2(bytes)
        }
    )
}
630
/// ASCII test optimized to use the `vmskltz.b` instruction on `loongarch64`.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(all(target_arch = "loongarch64", target_feature = "lsx"))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `vmskltz.b`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    let mut i = 0;

    // NOTE: `while` loops (not `for`) are required here to keep this `const`;
    // the exact loop shape is also what LLVM recognizes for vectorization.
    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `vmskltz.b` instruction on loongarch64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset.
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127 so count is equal to chunk size.
        // (`count` cannot overflow: at most 32 increments fit in a `u8`.)
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % N` bytes.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}