core/slice/ascii.rs

//! Operations on ASCII `[u8]`.

#[cfg(not(feature = "ferrocene_subset"))]
use core::ascii::EscapeDefault;

#[cfg(not(feature = "ferrocene_subset"))]
use crate::fmt::{self, Write};
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
use crate::intrinsics::const_eval_select;
#[cfg(not(feature = "ferrocene_subset"))]
use crate::{ascii, iter, ops};

impl [u8] {
    /// Checks if all bytes in this slice are within the ASCII range.
    ///
    /// An empty slice returns `true`.
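    ///
    /// # Examples
    ///
    /// ```
    /// // Illustrative values (not from the original docs): any byte >= 0x80 fails the check.
    /// assert!(b"hello!\n".is_ascii());
    /// assert!(!b"caf\xc3\xa9".is_ascii());
    /// assert!(b"".is_ascii());
    /// ```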
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
    #[must_use]
    #[inline]
    pub const fn is_ascii(&self) -> bool {
        is_ascii(self)
    }

    /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
    /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
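    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ascii_char)]
    ///
    /// // Illustrative values: the conversion succeeds only for all-ASCII input.
    /// assert!(b"escargot".as_ascii().is_some());
    /// assert!(b"croissant \xf0\x9f\xa5\x90".as_ascii().is_none());
    /// ```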
    #[cfg(not(feature = "ferrocene_subset"))]
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
        if self.is_ascii() {
            // SAFETY: Just checked that it's ASCII
            Some(unsafe { self.as_ascii_unchecked() })
        } else {
            None
        }
    }

    /// Converts this slice of bytes into a slice of ASCII characters,
    /// without checking whether they're valid.
    ///
    /// # Safety
    ///
    /// Every byte in the slice must be in `0..=127`, or else this is UB.
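    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ascii_char)]
    ///
    /// // Illustrative sketch: the literal below is known to be pure ASCII.
    /// let bytes: &[u8] = b"ferris";
    /// // SAFETY: every byte in `bytes` is in `0..=127`.
    /// let chars = unsafe { bytes.as_ascii_unchecked() };
    /// assert_eq!(chars.len(), bytes.len());
    /// ```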
    #[cfg(not(feature = "ferrocene_subset"))]
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
        let byte_ptr: *const [u8] = self;
        let ascii_ptr = byte_ptr as *const [ascii::Char];
        // SAFETY: The caller promised all the bytes are ASCII
        unsafe { &*ascii_ptr }
    }

    /// Checks that two slices are an ASCII case-insensitive match.
    ///
    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
    /// but without allocating and copying temporaries.
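    ///
    /// # Examples
    ///
    /// ```
    /// // Illustrative values: only ASCII letters are compared case-insensitively.
    /// assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
    /// assert!(!b"Ferris".eq_ignore_ascii_case(b"FERRUM"));
    /// ```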
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_eq_ignore_ascii_case", since = "1.89.0")]
    #[must_use]
    #[inline]
    pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // FIXME(const-hack): This implementation can be reverted when
        // `core::iter::zip` is allowed in const. The original implementation:
        //  self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
        let mut a = self;
        let mut b = other;

        while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
            if first_a.eq_ignore_ascii_case(&first_b) {
                a = rest_a;
                b = rest_b;
            } else {
                return false;
            }
        }

        true
    }

    /// Converts this slice to its ASCII upper case equivalent in-place.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new uppercased value without modifying the existing one, use
    /// [`to_ascii_uppercase`].
    ///
    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
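    ///
    /// # Examples
    ///
    /// ```
    /// // Illustrative values: ASCII letters are uppercased in place, other bytes are untouched.
    /// let mut bytes = *b"hello, world!";
    /// bytes.make_ascii_uppercase();
    /// assert_eq!(&bytes, b"HELLO, WORLD!");
    /// ```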
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_uppercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_uppercase();
            i += 1;
        }
    }

    /// Converts this slice to its ASCII lower case equivalent in-place.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new lowercased value without modifying the existing one, use
    /// [`to_ascii_lowercase`].
    ///
    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
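    ///
    /// # Examples
    ///
    /// ```
    /// // Illustrative values: ASCII letters are lowercased in place, other bytes are untouched.
    /// let mut bytes = *b"HELLO, WORLD!";
    /// bytes.make_ascii_lowercase();
    /// assert_eq!(&bytes, b"hello, world!");
    /// ```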
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_lowercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_lowercase();
            i += 1;
        }
    }

    /// Returns an iterator that produces an escaped version of this slice,
    /// treating it as an ASCII string.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = b"0\t\r\n'\"\\\x9d";
    /// let escaped = s.escape_ascii().to_string();
    /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[must_use = "this returns the escaped bytes as an iterator, \
                  without modifying the original"]
    #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
    pub fn escape_ascii(&self) -> EscapeAscii<'_> {
        EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
    }

    /// Returns a byte slice with leading ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
    /// assert_eq!(b"  ".trim_ascii_start(), b"");
    /// assert_eq!(b"".trim_ascii_start(), b"");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_start(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [first, rest @ ..] = bytes {
            if first.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with trailing ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
    /// assert_eq!(b"  ".trim_ascii_end(), b"");
    /// assert_eq!(b"".trim_ascii_end(), b"");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_end(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [rest @ .., last] = bytes {
            if last.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with leading and trailing ASCII whitespace bytes
    /// removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
    /// assert_eq!(b"  ".trim_ascii(), b"");
    /// assert_eq!(b"".trim_ascii(), b"");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii(&self) -> &[u8] {
        self.trim_ascii_start().trim_ascii_end()
    }
}

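// `EscapeByte` is declared through `impl_fn_for_zst!` so that the escaping closure gets a
// nameable zero-sized type, which lets `EscapeAscii` spell out its `FlatMap` field type below.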
#[cfg(not(feature = "ferrocene_subset"))]
impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}

/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[cfg(not(feature = "ferrocene_subset"))]
pub struct EscapeAscii<'a> {
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}

#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::Iterator for EscapeAscii<'a> {
    type Item = u8;
    #[inline]
    fn next(&mut self) -> Option<u8> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Fold: FnMut(Acc, Self::Item) -> R,
        R: ops::Try<Output = Acc>,
    {
        self.inner.try_fold(init, fold)
    }
    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.inner.fold(init, fold)
    }
    #[inline]
    fn last(mut self) -> Option<u8> {
        self.next_back()
    }
}

#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
    fn next_back(&mut self) -> Option<u8> {
        self.inner.next_back()
    }
}
#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Display for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // disassemble iterator, including front/back parts of flatmap in case it has been partially consumed
        let (front, slice, back) = self.clone().inner.into_parts();
        let front = front.unwrap_or(EscapeDefault::empty());
        let mut bytes = slice.unwrap_or_default().as_slice();
        let back = back.unwrap_or(EscapeDefault::empty());

        // usually empty, so the formatter won't have to do any work
        for byte in front {
            f.write_char(byte as char)?;
        }

        fn needs_escape(b: u8) -> bool {
            b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
        }

        while bytes.len() > 0 {
            // fast path for the printable, non-escaped subset of ascii
            let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
            // SAFETY: `prefix` length was derived by counting bytes in the same slice, so it's in-bounds
            let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
            // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
            let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };

            f.write_str(prefix)?; // the fast part

            bytes = remainder;

            if let Some(&b) = bytes.first() {
                // guaranteed to be non-empty, better to write it as a str
                fmt::Display::fmt(&ascii::escape_default(b), f)?;
                bytes = &bytes[1..];
            }
        }

        // also usually empty
        for byte in back {
            f.write_char(byte as char)?;
        }
        Ok(())
    }
}
#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Debug for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("EscapeAscii").finish_non_exhaustive()
    }
}

/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" way produces under `-C opt-level=s`. If you
/// touch it, be sure to run (and update if needed) the assembly test.
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    bytes.is_empty()
}

/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (defined in the body below) returns true, then we know the answer is false.
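///
/// For example, on a 64-bit target `NONASCII_MASK` is `0x8080_8080_8080_8080`: a word
/// whose bytes are all `< 0x80` ANDs with the mask to zero, while any byte with its
/// high bit set (e.g. `0xF0`) leaves that bit standing and the AND is nonzero.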
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn is_ascii(s: &[u8]) -> bool {
    // The runtime version behaves the same as the compiletime version, it's
    // just more optimized.
    const_eval_select!(
        @capture { s: &[u8] } -> bool:
        if const {
            is_ascii_simple(s)
        } else {
            /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
            /// from `../str/mod.rs`, which does something similar for utf8 validation.
            const fn contains_nonascii(v: usize) -> bool {
                const NONASCII_MASK: usize = usize::repeat_u8(0x80);
                (NONASCII_MASK & v) != 0
            }

            const USIZE_SIZE: usize = size_of::<usize>();

            let len = s.len();
            let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

            // If we wouldn't gain anything from the word-at-a-time implementation, fall
            // back to a scalar loop.
            //
            // We also do this for architectures where `size_of::<usize>()` isn't
            // sufficient alignment for `usize`, because it's a weird edge case.
            if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
                return is_ascii_simple(s);
            }

            // We always read the first word unaligned, which means that if `align_offset`
            // is 0, we'd read the same value again for the aligned read.
            let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

            let start = s.as_ptr();
            // SAFETY: We handled `len < USIZE_SIZE` above, so `len >= USIZE_SIZE` here and
            // reading one (unaligned) `usize` from the start is in-bounds.
            let first_word = unsafe { (start as *const usize).read_unaligned() };

            if contains_nonascii(first_word) {
                return false;
            }
            // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
            // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
            // checked above.
            debug_assert!(offset_to_aligned <= len);

            // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
            // middle chunk of the slice.
            let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

            // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
            let mut byte_pos = offset_to_aligned;

            // Paranoia check about alignment, since we're about to do a bunch of
            // aligned loads. In practice this should be impossible barring a bug in
            // `align_offset` though.
            // While this method is allowed to spuriously fail in CTFE, if it doesn't
            // have alignment information it should have given a `usize::MAX` for
            // `align_offset` earlier, sending things through the scalar path instead of
            // this one, so this check should pass if it's reachable.
            debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));

            // Read subsequent words until the last aligned word, excluding that last word
            // so it can be handled by the unaligned tail read below. This keeps the tail
            // to at most one `usize` and avoids an extra `byte_pos == len` branch.
            while byte_pos < len - USIZE_SIZE {
                // Sanity check that the read is in bounds
                debug_assert!(byte_pos + USIZE_SIZE <= len);
                // And that our assumptions about `byte_pos` hold.
                debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

                // SAFETY: We know `word_ptr` is properly aligned (because of `align_offset`),
                // and we know that there are enough bytes between `word_ptr` and the end of
                // the slice for an aligned `usize` read.
                let word = unsafe { word_ptr.read() };
                if contains_nonascii(word) {
                    return false;
                }

                byte_pos += USIZE_SIZE;
                // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
                // after this `add`, `word_ptr` will be at most one-past-the-end.
                word_ptr = unsafe { word_ptr.add(1) };
            }

            // Sanity check to ensure there really is only one `usize` left. This should
            // be guaranteed by our loop condition.
            debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

            // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
            let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

            !contains_nonascii(last_word)
        }
    )
}

/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64` and the
/// `vmskltz.b` instruction on `loongarch64`.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `pmovmskb`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    let mut i = 0;

    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `pmovmskb` instruction on x86-64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset.
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127 so count is equal to chunk size.
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % CHUNK_SIZE` bytes.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}