core/slice/ascii.rs
1//! Operations on ASCII `[u8]`.
2
3use core::ascii::EscapeDefault;
4
5use crate::fmt::{self, Write};
6#[cfg(not(any(
7 all(target_arch = "x86_64", target_feature = "sse2"),
8 all(target_arch = "loongarch64", target_feature = "lsx")
9)))]
10use crate::intrinsics::const_eval_select;
11use crate::{ascii, iter, ops};
12
13impl [u8] {
14 /// Checks if all bytes in this slice are within the ASCII range.
15 ///
16 /// An empty slice returns `true`.
17 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
18 #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
19 #[must_use]
20 #[inline]
21 pub const fn is_ascii(&self) -> bool {
22 is_ascii(self)
23 }
24
25 /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
26 /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
27 #[unstable(feature = "ascii_char", issue = "110998")]
28 #[must_use]
29 #[inline]
30 pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
31 if self.is_ascii() {
32 // SAFETY: Just checked that it's ASCII
33 Some(unsafe { self.as_ascii_unchecked() })
34 } else {
35 None
36 }
37 }
38
39 /// Converts this slice of bytes into a slice of ASCII characters,
40 /// without checking whether they're valid.
41 ///
42 /// # Safety
43 ///
44 /// Every byte in the slice must be in `0..=127`, or else this is UB.
45 #[unstable(feature = "ascii_char", issue = "110998")]
46 #[must_use]
47 #[inline]
48 pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
49 let byte_ptr: *const [u8] = self;
50 let ascii_ptr = byte_ptr as *const [ascii::Char];
51 // SAFETY: The caller promised all the bytes are ASCII
52 unsafe { &*ascii_ptr }
53 }
54
55 /// Checks that two slices are an ASCII case-insensitive match.
56 ///
57 /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
58 /// but without allocating and copying temporaries.
59 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
60 #[rustc_const_stable(feature = "const_eq_ignore_ascii_case", since = "1.89.0")]
61 #[must_use]
62 #[inline]
63 pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
64 if self.len() != other.len() {
65 return false;
66 }
67
68 // FIXME(const-hack): This implementation can be reverted when
69 // `core::iter::zip` is allowed in const. The original implementation:
70 // self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
71 let mut a = self;
72 let mut b = other;
73
74 while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
75 if first_a.eq_ignore_ascii_case(&first_b) {
76 a = rest_a;
77 b = rest_b;
78 } else {
79 return false;
80 }
81 }
82
83 true
84 }
85
86 /// Converts this slice to its ASCII upper case equivalent in-place.
87 ///
88 /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
89 /// but non-ASCII letters are unchanged.
90 ///
91 /// To return a new uppercased value without modifying the existing one, use
92 /// [`to_ascii_uppercase`].
93 ///
94 /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
95 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
96 #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
97 #[inline]
98 pub const fn make_ascii_uppercase(&mut self) {
99 // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
100 let mut i = 0;
101 while i < self.len() {
102 let byte = &mut self[i];
103 byte.make_ascii_uppercase();
104 i += 1;
105 }
106 }
107
108 /// Converts this slice to its ASCII lower case equivalent in-place.
109 ///
110 /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
111 /// but non-ASCII letters are unchanged.
112 ///
113 /// To return a new lowercased value without modifying the existing one, use
114 /// [`to_ascii_lowercase`].
115 ///
116 /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
117 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
118 #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
119 #[inline]
120 pub const fn make_ascii_lowercase(&mut self) {
121 // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
122 let mut i = 0;
123 while i < self.len() {
124 let byte = &mut self[i];
125 byte.make_ascii_lowercase();
126 i += 1;
127 }
128 }
129
130 /// Returns an iterator that produces an escaped version of this slice,
131 /// treating it as an ASCII string.
132 ///
133 /// # Examples
134 ///
135 /// ```
136 /// let s = b"0\t\r\n'\"\\\x9d";
137 /// let escaped = s.escape_ascii().to_string();
138 /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
139 /// ```
140 #[must_use = "this returns the escaped bytes as an iterator, \
141 without modifying the original"]
142 #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
143 pub fn escape_ascii(&self) -> EscapeAscii<'_> {
144 EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
145 }
146
147 /// Returns a byte slice with leading ASCII whitespace bytes removed.
148 ///
149 /// 'Whitespace' refers to the definition used by
150 /// [`u8::is_ascii_whitespace`].
151 ///
152 /// # Examples
153 ///
154 /// ```
155 /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
156 /// assert_eq!(b" ".trim_ascii_start(), b"");
157 /// assert_eq!(b"".trim_ascii_start(), b"");
158 /// ```
159 #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
160 #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
161 #[inline]
162 pub const fn trim_ascii_start(&self) -> &[u8] {
163 let mut bytes = self;
164 // Note: A pattern matching based approach (instead of indexing) allows
165 // making the function const.
166 while let [first, rest @ ..] = bytes {
167 if first.is_ascii_whitespace() {
168 bytes = rest;
169 } else {
170 break;
171 }
172 }
173 bytes
174 }
175
176 /// Returns a byte slice with trailing ASCII whitespace bytes removed.
177 ///
178 /// 'Whitespace' refers to the definition used by
179 /// [`u8::is_ascii_whitespace`].
180 ///
181 /// # Examples
182 ///
183 /// ```
184 /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
185 /// assert_eq!(b" ".trim_ascii_end(), b"");
186 /// assert_eq!(b"".trim_ascii_end(), b"");
187 /// ```
188 #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
189 #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
190 #[inline]
191 pub const fn trim_ascii_end(&self) -> &[u8] {
192 let mut bytes = self;
193 // Note: A pattern matching based approach (instead of indexing) allows
194 // making the function const.
195 while let [rest @ .., last] = bytes {
196 if last.is_ascii_whitespace() {
197 bytes = rest;
198 } else {
199 break;
200 }
201 }
202 bytes
203 }
204
205 /// Returns a byte slice with leading and trailing ASCII whitespace bytes
206 /// removed.
207 ///
208 /// 'Whitespace' refers to the definition used by
209 /// [`u8::is_ascii_whitespace`].
210 ///
211 /// # Examples
212 ///
213 /// ```
214 /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
215 /// assert_eq!(b" ".trim_ascii(), b"");
216 /// assert_eq!(b"".trim_ascii(), b"");
217 /// ```
218 #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
219 #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
220 #[inline]
221 pub const fn trim_ascii(&self) -> &[u8] {
222 self.trim_ascii_start().trim_ascii_end()
223 }
224}
225
// `EscapeByte` is a named zero-sized closure type (generated by the
// `impl_fn_for_zst!` helper macro) that maps a `&u8` to that byte's
// `ascii::EscapeDefault` escape iterator. Having a nameable `Fn` type is what
// lets `EscapeAscii` spell out its `iter::FlatMap<..., EscapeByte>` field type.
impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}
232
/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct EscapeAscii<'a> {
    // Each input byte is flat-mapped through `EscapeByte` into its
    // `ascii::EscapeDefault` escape sequence.
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}
243
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::Iterator for EscapeAscii<'a> {
    type Item = u8;

    // Everything below forwards to the inner `FlatMap` iterator so its
    // implementations of `size_hint`/`try_fold`/`fold` are reused rather than
    // falling back to the `Iterator` defaults.
    #[inline]
    fn next(&mut self) -> Option<u8> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Fold: FnMut(Acc, Self::Item) -> R,
        R: ops::Try<Output = Acc>,
    {
        self.inner.try_fold(init, fold)
    }
    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.inner.fold(init, fold)
    }
    #[inline]
    fn last(mut self) -> Option<u8> {
        // The last item is the first item from the back, which avoids driving
        // the iterator forward through the entire escaped sequence.
        self.next_back()
    }
}
275
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
    fn next_back(&mut self) -> Option<u8> {
        // Forward to the inner `FlatMap`, which supports iteration from the
        // back as well.
        self.inner.next_back()
    }
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
// Marker impl: `EscapeAscii` only forwards to its inner iterator, so it keeps
// returning `None` after exhaustion as long as the inner `FlatMap` does.
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
284#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
285impl<'a> fmt::Display for EscapeAscii<'a> {
286 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
287 // disassemble iterator, including front/back parts of flatmap in case it has been partially consumed
288 let (front, slice, back) = self.clone().inner.into_parts();
289 let front = front.unwrap_or(EscapeDefault::empty());
290 let mut bytes = slice.unwrap_or_default().as_slice();
291 let back = back.unwrap_or(EscapeDefault::empty());
292
293 // usually empty, so the formatter won't have to do any work
294 for byte in front {
295 f.write_char(byte as char)?;
296 }
297
298 fn needs_escape(b: u8) -> bool {
299 b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
300 }
301
302 while bytes.len() > 0 {
303 // fast path for the printable, non-escaped subset of ascii
304 let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
305 // SAFETY: prefix length was derived by counting bytes in the same splice, so it's in-bounds
306 let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
307 // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
308 let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };
309
310 f.write_str(prefix)?; // the fast part
311
312 bytes = remainder;
313
314 if let Some(&b) = bytes.first() {
315 // guaranteed to be non-empty, better to write it as a str
316 fmt::Display::fmt(&ascii::escape_default(b), f)?;
317 bytes = &bytes[1..];
318 }
319 }
320
321 // also usually empty
322 for byte in back {
323 f.write_char(byte as char)?;
324 }
325 Ok(())
326 }
327}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Debug for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Renders as `EscapeAscii { .. }` without exposing the internal
        // iterator state; `finish_non_exhaustive` also handles the alternate
        // (`{:#?}`) formatting correctly.
        f.debug_struct("EscapeAscii").finish_non_exhaustive()
    }
}
334
/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" ways produces under `-C opt-level=s`. If you
/// touch it, be sure to run (and update if needed) the assembly test.
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    // Strip ASCII bytes off the *end* one at a time; the slice ends up empty
    // iff every byte was ASCII. Slice pattern matching (instead of indexing)
    // keeps this usable in `const fn`.
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    bytes.is_empty()
}
352
/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (above) returns true, then we know the answer is false.
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn is_ascii(s: &[u8]) -> bool {
    // The runtime version behaves the same as the compiletime version, it's
    // just more optimized.
    const_eval_select!(
        @capture { s: &[u8] } -> bool:
        if const {
            is_ascii_simple(s)
        } else {
            /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
            /// from `../str/mod.rs`, which does something similar for utf8 validation.
            const fn contains_nonascii(v: usize) -> bool {
                const NONASCII_MASK: usize = usize::repeat_u8(0x80);
                (NONASCII_MASK & v) != 0
            }

            const USIZE_SIZE: usize = size_of::<usize>();

            let len = s.len();
            let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

            // If we wouldn't gain anything from the word-at-a-time implementation, fall
            // back to a scalar loop.
            //
            // We also do this for architectures where `size_of::<usize>()` isn't
            // sufficient alignment for `usize`, because it's a weird edge case.
            if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
                return is_ascii_simple(s);
            }

            // We always read the first word unaligned, which means that if
            // `align_offset` is 0, we'd read the same value again for the
            // aligned read.
            let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

            let start = s.as_ptr();
            // SAFETY: We verified above that `len >= USIZE_SIZE` (the shorter
            // case already returned via `is_ascii_simple`), so reading one
            // `usize` from the start is in-bounds.
            let first_word = unsafe { (start as *const usize).read_unaligned() };

            if contains_nonascii(first_word) {
                return false;
            }
            // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
            // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
            // checked above.
            debug_assert!(offset_to_aligned <= len);

            // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
            // middle chunk of the slice.
            let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

            // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
            let mut byte_pos = offset_to_aligned;

            // Paranoia check about alignment, since we're about to do a bunch of
            // unaligned loads. In practice this should be impossible barring a bug in
            // `align_offset` though.
            // While this method is allowed to spuriously fail in CTFE, if it doesn't
            // have alignment information it should have given a `usize::MAX` for
            // `align_offset` earlier, sending things through the scalar path instead of
            // this one, so this check should pass if it's reachable.
            debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));

            // Read subsequent words until the last aligned word, excluding the last
            // aligned word by itself to be done in tail check later, to ensure that
            // tail is always one `usize` at most to extra branch `byte_pos == len`.
            while byte_pos < len - USIZE_SIZE {
                // Sanity check that the read is in bounds
                debug_assert!(byte_pos + USIZE_SIZE <= len);
                // And that our assumptions about `byte_pos` hold.
                debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

                // SAFETY: We know `word_ptr` is properly aligned (because of
                // `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
                let word = unsafe { word_ptr.read() };
                if contains_nonascii(word) {
                    return false;
                }

                byte_pos += USIZE_SIZE;
                // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
                // after this `add`, `word_ptr` will be at most one-past-the-end.
                word_ptr = unsafe { word_ptr.add(1) };
            }

            // Sanity check to ensure there really is only one `usize` left. This should
            // be guaranteed by our loop condition.
            debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

            // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
            let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

            !contains_nonascii(last_word)
        }
    )
}
465
/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64` and the
/// `vmskltz.b` instruction on `loongarch64`.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `pmovmskb`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    // NOTE: manual `while` loops (rather than `for`/iterators) keep this
    // function usable as a `const fn`.
    let mut i = 0;

    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `pmovmskb` instruction on x86-64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset.
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127 so count is equal to chunk size.
        // (`CHUNK_SIZE` is 32, so the `u8` counter cannot overflow.)
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % CHUNK_SIZE` tail bytes one at a
    // time, accumulating into a flag instead of branching per byte.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}