core/slice/ascii.rs
//! Operations on ASCII `[u8]`.

#[cfg(not(feature = "ferrocene_subset"))]
use core::ascii::EscapeDefault;

#[cfg(not(feature = "ferrocene_subset"))]
use crate::fmt::{self, Write};
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
#[cfg(not(feature = "ferrocene_subset"))]
use crate::intrinsics::const_eval_select;
#[cfg(not(feature = "ferrocene_subset"))]
use crate::{ascii, iter, ops};

impl [u8] {
    /// Checks if all bytes in this slice are within the ASCII range.
    ///
    /// An empty slice returns `true`.
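    ///
    /// # Examples
    ///
    /// A quick check of the all-ASCII, empty, and non-ASCII cases:
    ///
    /// ```
    /// assert!(b"ferris".is_ascii());
    /// assert!(b"".is_ascii());
    /// assert!(!"café".as_bytes().is_ascii());
    /// ```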
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
    #[must_use]
    #[inline]
    pub const fn is_ascii(&self) -> bool {
        is_ascii(self)
    }

    /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
    /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
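    ///
    /// # Examples
    ///
    /// A small illustration of the `Some`/`None` split (the `ascii_char`
    /// nightly feature is required):
    ///
    /// ```
    /// #![feature(ascii_char)]
    ///
    /// assert!(b"ferris".as_ascii().is_some());
    /// assert!(b"caf\xc3\xa9".as_ascii().is_none());
    /// ```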
    #[cfg(not(feature = "ferrocene_subset"))]
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
        if self.is_ascii() {
            // SAFETY: Just checked that it's ASCII
            Some(unsafe { self.as_ascii_unchecked() })
        } else {
            None
        }
    }

    /// Converts this slice of bytes into a slice of ASCII characters,
    /// without checking whether they're valid.
    ///
    /// # Safety
    ///
    /// Every byte in the slice must be in `0..=127`, or else this is UB.
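    ///
    /// # Examples
    ///
    /// A minimal sketch of upholding the safety contract (the `ascii_char`
    /// nightly feature is required):
    ///
    /// ```
    /// #![feature(ascii_char)]
    ///
    /// let bytes = b"ferris";
    /// // SAFETY: every byte of `bytes` is in `0..=127`.
    /// let chars = unsafe { bytes.as_ascii_unchecked() };
    /// assert_eq!(chars.len(), bytes.len());
    /// ```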
    #[cfg(not(feature = "ferrocene_subset"))]
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
        let byte_ptr: *const [u8] = self;
        let ascii_ptr = byte_ptr as *const [ascii::Char];
        // SAFETY: The caller promised all the bytes are ASCII
        unsafe { &*ascii_ptr }
    }

    /// Checks that two slices are an ASCII case-insensitive match.
    ///
    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
    /// but without allocating and copying temporaries.
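    ///
    /// # Examples
    ///
    /// The comparison is ASCII case-insensitive but still length-sensitive:
    ///
    /// ```
    /// assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
    /// assert!(!b"Ferris".eq_ignore_ascii_case(b"FERRI"));
    /// ```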
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_eq_ignore_ascii_case", since = "1.89.0")]
    #[must_use]
    #[inline]
    pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        // FIXME(const-hack): This implementation can be reverted when
        // `core::iter::zip` is allowed in const. The original implementation:
        // self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
        let mut a = self;
        let mut b = other;

        while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
            if first_a.eq_ignore_ascii_case(&first_b) {
                a = rest_a;
                b = rest_b;
            } else {
                return false;
            }
        }

        true
    }

    /// Converts this slice to its ASCII upper case equivalent in-place.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new uppercased value without modifying the existing one, use
    /// [`to_ascii_uppercase`].
    ///
    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
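    ///
    /// # Examples
    ///
    /// In-place conversion of a mutable byte buffer:
    ///
    /// ```
    /// let mut bytes = *b"hello, ferris!";
    /// bytes.make_ascii_uppercase();
    /// assert_eq!(&bytes, b"HELLO, FERRIS!");
    /// ```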
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_uppercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this
        // isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_uppercase();
            i += 1;
        }
    }

    /// Converts this slice to its ASCII lower case equivalent in-place.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new lowercased value without modifying the existing one, use
    /// [`to_ascii_lowercase`].
    ///
    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
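    ///
    /// # Examples
    ///
    /// In-place conversion of a mutable byte buffer:
    ///
    /// ```
    /// let mut bytes = *b"HELLO, FERRIS!";
    /// bytes.make_ascii_lowercase();
    /// assert_eq!(&bytes, b"hello, ferris!");
    /// ```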
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_lowercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this
        // isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_lowercase();
            i += 1;
        }
    }

    /// Returns an iterator that produces an escaped version of this slice,
    /// treating it as an ASCII string.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = b"0\t\r\n'\"\\\x9d";
    /// let escaped = s.escape_ascii().to_string();
    /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[must_use = "this returns the escaped bytes as an iterator, \
                  without modifying the original"]
    #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
    pub fn escape_ascii(&self) -> EscapeAscii<'_> {
        EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
    }

    /// Returns a byte slice with leading ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
    /// assert_eq!(b" ".trim_ascii_start(), b"");
    /// assert_eq!(b"".trim_ascii_start(), b"");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_start(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [first, rest @ ..] = bytes {
            if first.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with trailing ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
    /// assert_eq!(b" ".trim_ascii_end(), b"");
    /// assert_eq!(b"".trim_ascii_end(), b"");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_end(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [rest @ .., last] = bytes {
            if last.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with leading and trailing ASCII whitespace bytes
    /// removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
    /// assert_eq!(b" ".trim_ascii(), b"");
    /// assert_eq!(b"".trim_ascii(), b"");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii(&self) -> &[u8] {
        self.trim_ascii_start().trim_ascii_end()
    }
}

#[cfg(not(feature = "ferrocene_subset"))]
impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}

/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
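///
/// # Examples
///
/// A short illustration; the iterator yields the escaped bytes one at a time
/// and also implements `Display`:
///
/// ```
/// let escaped = b"\0ab".escape_ascii().to_string();
/// assert_eq!(escaped, "\\x00ab");
/// ```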
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[cfg(not(feature = "ferrocene_subset"))]
pub struct EscapeAscii<'a> {
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}

#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::Iterator for EscapeAscii<'a> {
    type Item = u8;
    #[inline]
    fn next(&mut self) -> Option<u8> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Fold: FnMut(Acc, Self::Item) -> R,
        R: ops::Try<Output = Acc>,
    {
        self.inner.try_fold(init, fold)
    }
    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.inner.fold(init, fold)
    }
    #[inline]
    fn last(mut self) -> Option<u8> {
        self.next_back()
    }
}

#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
    fn next_back(&mut self) -> Option<u8> {
        self.inner.next_back()
    }
}
#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Display for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Disassemble the iterator, including the front/back parts of the
        // flatmap, in case it has been partially consumed.
        let (front, slice, back) = self.clone().inner.into_parts();
        let front = front.unwrap_or(EscapeDefault::empty());
        let mut bytes = slice.unwrap_or_default().as_slice();
        let back = back.unwrap_or(EscapeDefault::empty());

        // usually empty, so the formatter won't have to do any work
        for byte in front {
            f.write_char(byte as char)?;
        }

        fn needs_escape(b: u8) -> bool {
            b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
        }

        while bytes.len() > 0 {
            // fast path for the printable, non-escaped subset of ascii
            let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
            // SAFETY: prefix length was derived by counting bytes in the same slice, so it's in-bounds
            let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
            // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
            let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };

            f.write_str(prefix)?; // the fast part

            bytes = remainder;

            if let Some(&b) = bytes.first() {
                // The escape sequence is guaranteed to be non-empty; writing it
                // via `Display` emits it as a str rather than byte by byte.
                fmt::Display::fmt(&ascii::escape_default(b), f)?;
                bytes = &bytes[1..];
            }
        }

        // also usually empty
        for byte in back {
            f.write_char(byte as char)?;
        }
        Ok(())
    }
}
#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Debug for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("EscapeAscii").finish_non_exhaustive()
    }
}

/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" ways produce under `-C opt-level=s`. If you
/// touch it, be sure to run (and update if needed) the assembly test.
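///
/// For example, `is_ascii_simple(b"abc")` peels ASCII bytes off the back until
/// the slice is empty and returns `true`; if any byte is non-ASCII (say `0xFF`),
/// the loop stops there with a non-empty remainder and the function returns
/// `false`.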
#[cfg(not(feature = "ferrocene_subset"))]
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    bytes.is_empty()
}

/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (defined below) returns true, then we know the answer is false.
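///
/// For example, a 26-byte slice on a 64-bit target is covered by one unaligned
/// leading load, a few aligned loads in the middle (the exact count depends on
/// the pointer's alignment), and one unaligned load of the final 8 bytes; the
/// loads may overlap, which is fine because we only read and test bits.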
#[cfg(not(feature = "ferrocene_subset"))]
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn is_ascii(s: &[u8]) -> bool {
    // The runtime version behaves the same as the compile-time version; it's
    // just more optimized.
    const_eval_select!(
        @capture { s: &[u8] } -> bool:
        if const {
            is_ascii_simple(s)
        } else {
            /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
            /// from `../str/mod.rs`, which does something similar for utf8 validation.
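            ///
            /// For example, on a 64-bit target `NONASCII_MASK` is
            /// `0x8080_8080_8080_8080`; a word that contains a byte `>= 0x80`
            /// (say `0xFF`) has the high bit of that byte's lane set, so the
            /// mask picks it up and the AND is nonzero.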
            const fn contains_nonascii(v: usize) -> bool {
                const NONASCII_MASK: usize = usize::repeat_u8(0x80);
                (NONASCII_MASK & v) != 0
            }

            const USIZE_SIZE: usize = size_of::<usize>();

            let len = s.len();
            let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

            // If we wouldn't gain anything from the word-at-a-time implementation, fall
            // back to a scalar loop.
            //
            // We also do this for architectures where `size_of::<usize>()` isn't
            // sufficient alignment for `usize`, because it's a weird edge case.
            if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
                return is_ascii_simple(s);
            }

            // We always read the first word unaligned; when `align_offset` is 0, the
            // first aligned read would cover the same bytes again, so in that case we
            // start the aligned reads one full word in.
            let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

            let start = s.as_ptr();
            // SAFETY: We verified above that `len >= USIZE_SIZE`, so reading a whole
            // word from the start of the slice is in-bounds.
            let first_word = unsafe { (start as *const usize).read_unaligned() };

            if contains_nonascii(first_word) {
                return false;
            }
            // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
            // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
            // checked above.
            debug_assert!(offset_to_aligned <= len);

            // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
            // middle chunk of the slice.
            let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

            // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
            let mut byte_pos = offset_to_aligned;

            // Paranoia check about alignment, since the loop below does a bunch of
            // aligned loads through `word_ptr`. In practice this should be impossible
            // barring a bug in `align_offset` though.
            // While this method is allowed to spuriously fail in CTFE, if it doesn't
            // have alignment information it should have given a `usize::MAX` for
            // `align_offset` earlier, sending things through the scalar path instead of
            // this one, so this check should pass if it's reachable.
            debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));

            // Read subsequent words until the last aligned word, leaving that final
            // word for the unaligned tail check below. This keeps the tail at most one
            // `usize` long and avoids an extra `byte_pos == len` branch.
            while byte_pos < len - USIZE_SIZE {
                // Sanity check that the read is in bounds
                debug_assert!(byte_pos + USIZE_SIZE <= len);
                // And that our assumptions about `byte_pos` hold.
                debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

                // SAFETY: We know `word_ptr` is properly aligned (because of
                // `align_offset`), and we know that there are enough bytes between
                // `word_ptr` and the end of the slice to read a whole word.
                let word = unsafe { word_ptr.read() };
                if contains_nonascii(word) {
                    return false;
                }

                byte_pos += USIZE_SIZE;
                // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
                // after this `add`, `word_ptr` will be at most one-past-the-end.
                word_ptr = unsafe { word_ptr.add(1) };
            }

            // Sanity check to ensure there really is only one `usize` left. This should
            // be guaranteed by our loop condition.
            debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

            // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
            let last_word =
                unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

            !contains_nonascii(last_word)
        }
    )
}

/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64` and the
/// `vmskltz.b` instruction on `loongarch64`.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(not(feature = "ferrocene_subset"))]
#[cfg(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `pmovmskb`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    let mut i = 0;

    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `pmovmskb` instruction on x86-64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset.
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127, so `count` equals the chunk size.
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % CHUNK_SIZE` bytes.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}