core/slice/ascii.rs
//! Operations on ASCII `[u8]`.

use core::ascii::EscapeDefault;

use crate::fmt::{self, Write};
#[cfg(not(all(target_arch = "loongarch64", target_feature = "lsx")))]
use crate::intrinsics::const_eval_select;
#[cfg(not(feature = "ferrocene_subset"))]
use crate::{ascii, iter, ops};

// Ferrocene addition: imports for the certified subset
#[rustfmt::skip]
#[cfg(feature = "ferrocene_subset")]
use crate::{ascii, iter};
impl [u8] {
    /// Checks if all bytes in this slice are within the ASCII range.
    ///
    /// An empty slice returns `true`.
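    ///
    /// # Examples
    ///
    /// ```
    /// assert!(b"hello!\n".is_ascii());
    /// assert!(b"".is_ascii());
    /// assert!(!"Grüße".as_bytes().is_ascii());
    /// ```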
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
    #[must_use]
    #[inline]
    pub const fn is_ascii(&self) -> bool {
        is_ascii(self)
    }

    /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
    /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
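    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ascii_char)]
    ///
    /// let bytes: &[u8] = b"hello";
    /// assert!(bytes.as_ascii().is_some());
    ///
    /// let non_ascii: &[u8] = b"hell\xc3\xb6";
    /// assert!(non_ascii.as_ascii().is_none());
    /// ```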
    #[cfg(not(feature = "ferrocene_subset"))]
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
        if self.is_ascii() {
            // SAFETY: Just checked that it's ASCII
            Some(unsafe { self.as_ascii_unchecked() })
        } else {
            None
        }
    }

    /// Converts this slice of bytes into a slice of ASCII characters,
    /// without checking whether they're valid.
    ///
    /// # Safety
    ///
    /// Every byte in the slice must be in `0..=127`, or else this is UB.
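    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(ascii_char)]
    ///
    /// let bytes: &[u8] = b"ascii";
    /// // SAFETY: every byte is in `0..=127`.
    /// let chars = unsafe { bytes.as_ascii_unchecked() };
    /// assert_eq!(chars.len(), bytes.len());
    /// ```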
    #[cfg(not(feature = "ferrocene_subset"))]
    #[unstable(feature = "ascii_char", issue = "110998")]
    #[must_use]
    #[inline]
    pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
        let byte_ptr: *const [u8] = self;
        let ascii_ptr = byte_ptr as *const [ascii::Char];
        // SAFETY: The caller promised all the bytes are ASCII
        unsafe { &*ascii_ptr }
    }

    /// Checks that two slices are an ASCII case-insensitive match.
    ///
    /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
    /// but without allocating and copying temporaries.
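    ///
    /// # Examples
    ///
    /// ```
    /// assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
    /// assert!(!b"Ferris".eq_ignore_ascii_case(b"FERRI"));
    /// ```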
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_eq_ignore_ascii_case", since = "1.89.0")]
    #[must_use]
    #[inline]
    pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
        if self.len() != other.len() {
            return false;
        }

        #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
        {
            const CHUNK_SIZE: usize = 16;
            // The following function has two invariants:
            // 1. The slice lengths must be equal, which we checked above.
            // 2. The slice lengths must be greater than or equal to N, which
            //    this if-statement is checking.
            if self.len() >= CHUNK_SIZE {
                return self.eq_ignore_ascii_case_chunks::<CHUNK_SIZE>(other);
            }
        }

        self.eq_ignore_ascii_case_simple(other)
    }
87
88 /// ASCII case-insensitive equality check without chunk-at-a-time
89 /// optimization.
90 #[inline]
91 const fn eq_ignore_ascii_case_simple(&self, other: &[u8]) -> bool {
92 // FIXME(const-hack): This implementation can be reverted when
93 // `core::iter::zip` is allowed in const. The original implementation:
94 // self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
95 let mut a = self;
96 let mut b = other;
97
98 while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
99 if first_a.eq_ignore_ascii_case(&first_b) {
100 a = rest_a;
101 b = rest_b;
102 } else {
103 return false;
104 }
105 }
106
107 true
108 }
109
110 /// Optimized version of `eq_ignore_ascii_case` to process chunks at a time.
111 ///
112 /// Platforms that have SIMD instructions may benefit from this
113 /// implementation over `eq_ignore_ascii_case_simple`.
114 ///
115 /// # Invariants
116 ///
117 /// The caller must guarantee that the slices are equal in length, and the
118 /// slice lengths are greater than or equal to `N` bytes.
119 #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
120 #[inline]
121 const fn eq_ignore_ascii_case_chunks<const N: usize>(&self, other: &[u8]) -> bool {
122 // FIXME(const-hack): The while-loops that follow should be replaced by
123 // for-loops when available in const.
124
125 let (self_chunks, self_rem) = self.as_chunks::<N>();
126 let (other_chunks, _) = other.as_chunks::<N>();
127
128 // Branchless check to encourage auto-vectorization
129 #[inline(always)]
130 const fn eq_ignore_ascii_inner<const L: usize>(lhs: &[u8; L], rhs: &[u8; L]) -> bool {
131 let mut equal_ascii = true;
132 let mut j = 0;
133 while j < L {
134 equal_ascii &= lhs[j].eq_ignore_ascii_case(&rhs[j]);
135 j += 1;
136 }
137
138 equal_ascii
139 }
140
141 // Process the chunks, returning early if an inequality is found
142 let mut i = 0;
143 while i < self_chunks.len() && i < other_chunks.len() {
144 if !eq_ignore_ascii_inner(&self_chunks[i], &other_chunks[i]) {
145 return false;
146 }
147 i += 1;
148 }
149
150 // Check the length invariant which is necessary for the tail-handling
151 // logic to be correct. This should have been upheld by the caller,
152 // otherwise lengths less than N will compare as true without any
153 // checking.
154 debug_assert!(self.len() >= N);
155
156 // If there are remaining tails, load the last N bytes in the slices to
157 // avoid falling back to per-byte checking.
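        // E.g. with `N == 16` and `len == 20`, the chunk loop covered bytes
        // `0..16` and this final load covers bytes `4..20`; re-checking the
        // overlapping bytes `4..16` is harmless.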
        if !self_rem.is_empty() {
            if let (Some(a_rem), Some(b_rem)) = (self.last_chunk::<N>(), other.last_chunk::<N>()) {
                if !eq_ignore_ascii_inner(a_rem, b_rem) {
                    return false;
                }
            }
        }

        true
    }

    /// Converts this slice to its ASCII upper case equivalent in-place.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new uppercased value without modifying the existing one, use
    /// [`to_ascii_uppercase`].
    ///
    /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
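    ///
    /// # Examples
    ///
    /// ```
    /// let mut bytes = *b"hello, world!";
    /// bytes.make_ascii_uppercase();
    /// assert_eq!(&bytes, b"HELLO, WORLD!");
    /// ```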
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_uppercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_uppercase();
            i += 1;
        }
    }

    /// Converts this slice to its ASCII lower case equivalent in-place.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To return a new lowercased value without modifying the existing one, use
    /// [`to_ascii_lowercase`].
    ///
    /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
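    ///
    /// # Examples
    ///
    /// ```
    /// let mut bytes = *b"HELLO, WORLD!";
    /// bytes.make_ascii_lowercase();
    /// assert_eq!(&bytes, b"hello, world!");
    /// ```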
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
    #[inline]
    pub const fn make_ascii_lowercase(&mut self) {
        // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
        let mut i = 0;
        while i < self.len() {
            let byte = &mut self[i];
            byte.make_ascii_lowercase();
            i += 1;
        }
    }

    /// Returns an iterator that produces an escaped version of this slice,
    /// treating it as an ASCII string.
    ///
    /// # Examples
    ///
    /// ```
    /// let s = b"0\t\r\n'\"\\\x9d";
    /// let escaped = s.escape_ascii().to_string();
    /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
    /// ```
    #[must_use = "this returns the escaped bytes as an iterator, \
                  without modifying the original"]
    #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
    pub fn escape_ascii(&self) -> EscapeAscii<'_> {
        EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
    }

    /// Returns a byte slice with leading ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
    /// assert_eq!(b" ".trim_ascii_start(), b"");
    /// assert_eq!(b"".trim_ascii_start(), b"");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_start(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [first, rest @ ..] = bytes {
            if first.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with trailing ASCII whitespace bytes removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
    /// assert_eq!(b" ".trim_ascii_end(), b"");
    /// assert_eq!(b"".trim_ascii_end(), b"");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii_end(&self) -> &[u8] {
        let mut bytes = self;
        // Note: A pattern matching based approach (instead of indexing) allows
        // making the function const.
        while let [rest @ .., last] = bytes {
            if last.is_ascii_whitespace() {
                bytes = rest;
            } else {
                break;
            }
        }
        bytes
    }

    /// Returns a byte slice with leading and trailing ASCII whitespace bytes
    /// removed.
    ///
    /// 'Whitespace' refers to the definition used by
    /// [`u8::is_ascii_whitespace`].
    ///
    /// # Examples
    ///
    /// ```
    /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
    /// assert_eq!(b" ".trim_ascii(), b"");
    /// assert_eq!(b"".trim_ascii(), b"");
    /// ```
    #[cfg(not(feature = "ferrocene_subset"))]
    #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
    #[inline]
    pub const fn trim_ascii(&self) -> &[u8] {
        self.trim_ascii_start().trim_ascii_end()
    }
}

impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}

/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
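///
/// # Examples
///
/// ```
/// let mut escaped = b"a\"".escape_ascii();
/// assert_eq!(escaped.next(), Some(b'a'));
/// assert_eq!(escaped.next(), Some(b'\\'));
/// assert_eq!(escaped.next(), Some(b'"'));
/// assert_eq!(escaped.next(), None);
/// ```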
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
pub struct EscapeAscii<'a> {
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}

#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::Iterator for EscapeAscii<'a> {
    type Item = u8;
    #[inline]
    fn next(&mut self) -> Option<u8> {
        self.inner.next()
    }
    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }
    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Fold: FnMut(Acc, Self::Item) -> R,
        R: ops::Try<Output = Acc>,
    {
        self.inner.try_fold(init, fold)
    }
    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.inner.fold(init, fold)
    }
    #[inline]
    fn last(mut self) -> Option<u8> {
        self.next_back()
    }
}

#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
    fn next_back(&mut self) -> Option<u8> {
        self.inner.next_back()
    }
}
#[cfg(not(feature = "ferrocene_subset"))]
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Display for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // disassemble iterator, including front/back parts of flatmap in case it has been partially consumed
        let (front, slice, back) = self.clone().inner.into_parts();
        let front = front.unwrap_or(EscapeDefault::empty());
        let mut bytes = slice.unwrap_or_default().as_slice();
        let back = back.unwrap_or(EscapeDefault::empty());

        // usually empty, so the formatter won't have to do any work
        for byte in front {
            f.write_char(byte as char)?;
        }

        fn needs_escape(b: u8) -> bool {
            b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
        }

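        // E.g. for b"ab\ncd" the loop writes "ab" with one `write_str` call,
        // then the escape sequence "\\n" via `Display`, then "cd" on the next
        // iteration.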
        while bytes.len() > 0 {
            // fast path for the printable, non-escaped subset of ascii
            let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
            // SAFETY: prefix length was derived by counting bytes in the same slice, so it's in-bounds
            let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
            // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
            let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };

            f.write_str(prefix)?; // the fast part

            bytes = remainder;

            if let Some(&b) = bytes.first() {
                // guaranteed to be non-empty, better to write it as a str
                fmt::Display::fmt(&ascii::escape_default(b), f)?;
                bytes = &bytes[1..];
            }
        }

        // also usually empty
        for byte in back {
            f.write_char(byte as char)?;
        }
        Ok(())
    }
}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Debug for EscapeAscii<'a> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("EscapeAscii").finish_non_exhaustive()
    }
}

/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" way produces under `-C opt-level=s`. If you
/// touch it, be sure to run (and update if needed) the assembly test.
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    bytes.is_empty()
}

/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (defined below) returns true, then we know the answer is false.
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
const fn is_ascii(s: &[u8]) -> bool {
    // The runtime version behaves the same as the compiletime version, it's
    // just more optimized.
    const_eval_select!(
        @capture { s: &[u8] } -> bool:
        if const {
            is_ascii_simple(s)
        } else {
            /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
            /// from `../str/mod.rs`, which does something similar for utf8 validation.
            const fn contains_nonascii(v: usize) -> bool {
                const NONASCII_MASK: usize = usize::repeat_u8(0x80);
                (NONASCII_MASK & v) != 0
            }
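            // E.g. on a 64-bit target `NONASCII_MASK` is 0x8080_8080_8080_8080;
            // a word loaded from b"hello wo" has no byte with the high bit set,
            // while any byte >= 0x80 sets its high bit and trips the mask.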

            const USIZE_SIZE: usize = size_of::<usize>();

            let len = s.len();
            let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

            // If we wouldn't gain anything from the word-at-a-time implementation, fall
            // back to a scalar loop.
            //
            // We also do this for architectures where `size_of::<usize>()` isn't
            // sufficient alignment for `usize`, because it's a weird edge case.
            if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
                return is_ascii_simple(s);
            }

            // We always read the first word unaligned, so if `align_offset` is 0
            // the aligned loop below would re-read that same word; skip a full
            // word ahead in that case.
            let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

            let start = s.as_ptr();
            // SAFETY: We verified `len >= USIZE_SIZE` above, so reading a whole
            // word from the start is in-bounds.
            let first_word = unsafe { (start as *const usize).read_unaligned() };

            if contains_nonascii(first_word) {
                return false;
            }
            // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
            // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
            // checked above.
            debug_assert!(offset_to_aligned <= len);

            // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
            // middle chunk of the slice.
            let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

            // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
            let mut byte_pos = offset_to_aligned;

            // Paranoia check about alignment, since we're about to do a bunch of
            // aligned loads. In practice this should be impossible barring a bug in
            // `align_offset` though.
            // While this method is allowed to spuriously fail in CTFE, if it doesn't
            // have alignment information it should have given a `usize::MAX` for
            // `align_offset` earlier, sending things through the scalar path instead of
            // this one, so this check should pass if it's reachable.
            debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));

            // Read subsequent words until the last aligned word, excluding the last
            // aligned word itself, which is handled in the tail check below. This
            // keeps the tail at most one `usize` long without needing an extra
            // `byte_pos == len` branch.
            while byte_pos < len - USIZE_SIZE {
                // Sanity check that the read is in bounds
                debug_assert!(byte_pos + USIZE_SIZE <= len);
                // And that our assumptions about `byte_pos` hold.
                debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

                // SAFETY: We know `word_ptr` is properly aligned (because of
                // `align_offset`), and we know that we have enough bytes between
                // `word_ptr` and the end of the slice to read a whole word.
                let word = unsafe { word_ptr.read() };
                if contains_nonascii(word) {
                    return false;
                }

                byte_pos += USIZE_SIZE;
                // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
                // after this `add`, `word_ptr` will be at most one-past-the-end.
                word_ptr = unsafe { word_ptr.add(1) };
            }

            // Sanity check to ensure there really is only one `usize` left. This should
            // be guaranteed by our loop condition.
            debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

            // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
            let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

            !contains_nonascii(last_word)
        }
    )
}

/// Chunk size for SSE2 vectorized ASCII checking (4x 16-byte loads).
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
const SSE2_CHUNK_SIZE: usize = 64;

#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
#[inline]
fn is_ascii_sse2(bytes: &[u8]) -> bool {
    use crate::arch::x86_64::{__m128i, _mm_loadu_si128, _mm_movemask_epi8, _mm_or_si128};

    let (chunks, rest) = bytes.as_chunks::<SSE2_CHUNK_SIZE>();

    for chunk in chunks {
        let ptr = chunk.as_ptr();
        // SAFETY: chunk is 64 bytes. SSE2 is baseline on x86_64.
        let mask = unsafe {
            let a1 = _mm_loadu_si128(ptr as *const __m128i);
            let a2 = _mm_loadu_si128(ptr.add(16) as *const __m128i);
            let b1 = _mm_loadu_si128(ptr.add(32) as *const __m128i);
            let b2 = _mm_loadu_si128(ptr.add(48) as *const __m128i);
            // OR all chunks - if any byte has high bit set, combined will too.
            let combined = _mm_or_si128(_mm_or_si128(a1, a2), _mm_or_si128(b1, b2));
            // Create a mask from the MSBs of each byte.
            // If any byte is >= 128, its MSB is 1, so the mask will be non-zero.
            _mm_movemask_epi8(combined)
        };
        if mask != 0 {
            return false;
        }
    }

    // Handle remaining bytes
    rest.iter().all(|b| b.is_ascii())
}

/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64`.
///
/// Uses explicit SSE2 intrinsics to prevent LLVM from auto-vectorizing with
/// broken AVX-512 code that extracts mask bits one-by-one.
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn is_ascii(bytes: &[u8]) -> bool {
    const USIZE_SIZE: usize = size_of::<usize>();
    const NONASCII_MASK: usize = usize::MAX / 255 * 0x80;
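    // `usize::MAX / 255` is 0x01 in every byte, so multiplying by 0x80 sets the
    // high bit of every byte: the same mask as `usize::repeat_u8(0x80)` in the
    // portable implementation.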

    const_eval_select!(
        @capture { bytes: &[u8] } -> bool:
        if const {
            is_ascii_simple(bytes)
        } else {
            // For small inputs, use usize-at-a-time processing to avoid SSE2 call overhead.
            if bytes.len() < SSE2_CHUNK_SIZE {
                let chunks = bytes.chunks_exact(USIZE_SIZE);
                let remainder = chunks.remainder();
                for chunk in chunks {
                    let word = usize::from_ne_bytes(chunk.try_into().unwrap());
                    if (word & NONASCII_MASK) != 0 {
                        return false;
                    }
                }
                return remainder.iter().all(|b| b.is_ascii());
            }

            is_ascii_sse2(bytes)
        }
    )
}

/// ASCII test optimized to use the `vmskltz.b` instruction on `loongarch64`.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(all(target_arch = "loongarch64", target_feature = "lsx"))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `vmskltz.b`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    let mut i = 0;

    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `vmskltz.b` instruction on loongarch64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset.
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127 so count is equal to chunk size.
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % CHUNK_SIZE` bytes.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}