core/slice/ascii.rs
1//! Operations on ASCII `[u8]`.
2
3/// Ferrocene addition: Hidden module to test crate-internal functionality
4#[doc(hidden)]
5#[unstable(feature = "ferrocene_test", issue = "none")]
6pub(crate) mod ferrocene_test;
7
8use core::ascii::EscapeDefault;
9
10use crate::fmt::{self, Write};
11#[cfg(not(all(target_arch = "loongarch64", target_feature = "lsx")))]
12use crate::intrinsics::const_eval_select;
13use crate::{ascii, iter, ops};
14
15impl [u8] {
16 /// Checks if all bytes in this slice are within the ASCII range.
17 ///
18 /// An empty slice returns `true`.
19 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
20 #[rustc_const_stable(feature = "const_slice_is_ascii", since = "1.74.0")]
21 #[must_use]
22 #[inline]
23 #[ferrocene::prevalidated]
24 pub const fn is_ascii(&self) -> bool {
25 is_ascii(self)
26 }
27
28 /// If this slice [`is_ascii`](Self::is_ascii), returns it as a slice of
29 /// [ASCII characters](`ascii::Char`), otherwise returns `None`.
30 #[unstable(feature = "ascii_char", issue = "110998")]
31 #[must_use]
32 #[inline]
33 pub const fn as_ascii(&self) -> Option<&[ascii::Char]> {
34 if self.is_ascii() {
35 // SAFETY: Just checked that it's ASCII
36 Some(unsafe { self.as_ascii_unchecked() })
37 } else {
38 None
39 }
40 }
41
42 /// Converts this slice of bytes into a slice of ASCII characters,
43 /// without checking whether they're valid.
44 ///
45 /// # Safety
46 ///
47 /// Every byte in the slice must be in `0..=127`, or else this is UB.
48 #[unstable(feature = "ascii_char", issue = "110998")]
49 #[must_use]
50 #[inline]
51 pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char] {
52 let byte_ptr: *const [u8] = self;
53 let ascii_ptr = byte_ptr as *const [ascii::Char];
54 // SAFETY: The caller promised all the bytes are ASCII
55 unsafe { &*ascii_ptr }
56 }
57
58 /// Checks that two slices are an ASCII case-insensitive match.
59 ///
60 /// Same as `to_ascii_lowercase(a) == to_ascii_lowercase(b)`,
61 /// but without allocating and copying temporaries.
62 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
63 #[rustc_const_stable(feature = "const_eq_ignore_ascii_case", since = "1.89.0")]
64 #[must_use]
65 #[inline]
66 #[ferrocene::prevalidated]
67 pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
68 if self.len() != other.len() {
69 return false;
70 }
71
72 #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
73 {
74 const CHUNK_SIZE: usize = 16;
75 // The following function has two invariants:
76 // 1. The slice lengths must be equal, which we checked above.
77 // 2. The slice lengths must greater than or equal to N, which this
78 // if-statement is checking.
79 if self.len() >= CHUNK_SIZE {
80 return self.eq_ignore_ascii_case_chunks::<CHUNK_SIZE>(other);
81 }
82 }
83
84 self.eq_ignore_ascii_case_simple(other)
85 }
86
87 /// ASCII case-insensitive equality check without chunk-at-a-time
88 /// optimization.
89 #[inline]
90 #[ferrocene::prevalidated]
91 const fn eq_ignore_ascii_case_simple(&self, other: &[u8]) -> bool {
92 // FIXME(const-hack): This implementation can be reverted when
93 // `core::iter::zip` is allowed in const. The original implementation:
94 // self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
95 let mut a = self;
96 let mut b = other;
97
98 while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
99 if first_a.eq_ignore_ascii_case(&first_b) {
100 a = rest_a;
101 b = rest_b;
102 } else {
103 return false;
104 }
105 }
106
107 true
108 }
109
110 /// Optimized version of `eq_ignore_ascii_case` to process chunks at a time.
111 ///
112 /// Platforms that have SIMD instructions may benefit from this
113 /// implementation over `eq_ignore_ascii_case_simple`.
114 ///
115 /// # Invariants
116 ///
117 /// The caller must guarantee that the slices are equal in length, and the
118 /// slice lengths are greater than or equal to `N` bytes.
119 #[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
120 #[inline]
121 const fn eq_ignore_ascii_case_chunks<const N: usize>(&self, other: &[u8]) -> bool {
122 // FIXME(const-hack): The while-loops that follow should be replaced by
123 // for-loops when available in const.
124
125 let (self_chunks, self_rem) = self.as_chunks::<N>();
126 let (other_chunks, _) = other.as_chunks::<N>();
127
128 // Branchless check to encourage auto-vectorization
129 #[inline(always)]
130 const fn eq_ignore_ascii_inner<const L: usize>(lhs: &[u8; L], rhs: &[u8; L]) -> bool {
131 let mut equal_ascii = true;
132 let mut j = 0;
133 while j < L {
134 equal_ascii &= lhs[j].eq_ignore_ascii_case(&rhs[j]);
135 j += 1;
136 }
137
138 equal_ascii
139 }
140
141 // Process the chunks, returning early if an inequality is found
142 let mut i = 0;
143 while i < self_chunks.len() && i < other_chunks.len() {
144 if !eq_ignore_ascii_inner(&self_chunks[i], &other_chunks[i]) {
145 return false;
146 }
147 i += 1;
148 }
149
150 // Check the length invariant which is necessary for the tail-handling
151 // logic to be correct. This should have been upheld by the caller,
152 // otherwise lengths less than N will compare as true without any
153 // checking.
154 debug_assert!(self.len() >= N);
155
156 // If there are remaining tails, load the last N bytes in the slices to
157 // avoid falling back to per-byte checking.
158 if !self_rem.is_empty() {
159 if let (Some(a_rem), Some(b_rem)) = (self.last_chunk::<N>(), other.last_chunk::<N>()) {
160 if !eq_ignore_ascii_inner(a_rem, b_rem) {
161 return false;
162 }
163 }
164 }
165
166 true
167 }
168
169 /// Converts this slice to its ASCII upper case equivalent in-place.
170 ///
171 /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
172 /// but non-ASCII letters are unchanged.
173 ///
174 /// To return a new uppercased value without modifying the existing one, use
175 /// [`to_ascii_uppercase`].
176 ///
177 /// [`to_ascii_uppercase`]: #method.to_ascii_uppercase
178 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
179 #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
180 #[inline]
181 pub const fn make_ascii_uppercase(&mut self) {
182 // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
183 let mut i = 0;
184 while i < self.len() {
185 let byte = &mut self[i];
186 byte.make_ascii_uppercase();
187 i += 1;
188 }
189 }
190
191 /// Converts this slice to its ASCII lower case equivalent in-place.
192 ///
193 /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
194 /// but non-ASCII letters are unchanged.
195 ///
196 /// To return a new lowercased value without modifying the existing one, use
197 /// [`to_ascii_lowercase`].
198 ///
199 /// [`to_ascii_lowercase`]: #method.to_ascii_lowercase
200 #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
201 #[rustc_const_stable(feature = "const_make_ascii", since = "1.84.0")]
202 #[inline]
203 pub const fn make_ascii_lowercase(&mut self) {
204 // FIXME(const-hack): We would like to simply iterate using `for` loops but this isn't currently allowed in constant expressions.
205 let mut i = 0;
206 while i < self.len() {
207 let byte = &mut self[i];
208 byte.make_ascii_lowercase();
209 i += 1;
210 }
211 }
212
213 /// Returns an iterator that produces an escaped version of this slice,
214 /// treating it as an ASCII string.
215 ///
216 /// # Examples
217 ///
218 /// ```
219 /// let s = b"0\t\r\n'\"\\\x9d";
220 /// let escaped = s.escape_ascii().to_string();
221 /// assert_eq!(escaped, "0\\t\\r\\n\\'\\\"\\\\\\x9d");
222 /// ```
223 #[must_use = "this returns the escaped bytes as an iterator, \
224 without modifying the original"]
225 #[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
226 #[ferrocene::prevalidated]
227 pub fn escape_ascii(&self) -> EscapeAscii<'_> {
228 EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
229 }
230
231 /// Returns a byte slice with leading ASCII whitespace bytes removed.
232 ///
233 /// 'Whitespace' refers to the definition used by
234 /// [`u8::is_ascii_whitespace`]. Importantly, this definition excludes
235 /// the `\0x0B` byte even though it has the Unicode [`White_Space`] property
236 /// and is removed by [`str::trim_start`].
237 ///
238 /// [`White_Space`]: https://www.unicode.org/reports/tr44/#White_Space
239 ///
240 /// # Examples
241 ///
242 /// ```
243 /// assert_eq!(b" \t hello world\n".trim_ascii_start(), b"hello world\n");
244 /// assert_eq!(b" ".trim_ascii_start(), b"");
245 /// assert_eq!(b"".trim_ascii_start(), b"");
246 /// ```
247 #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
248 #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
249 #[inline]
250 pub const fn trim_ascii_start(&self) -> &[u8] {
251 let mut bytes = self;
252 // Note: A pattern matching based approach (instead of indexing) allows
253 // making the function const.
254 while let [first, rest @ ..] = bytes {
255 if first.is_ascii_whitespace() {
256 bytes = rest;
257 } else {
258 break;
259 }
260 }
261 bytes
262 }
263
264 /// Returns a byte slice with trailing ASCII whitespace bytes removed.
265 ///
266 /// 'Whitespace' refers to the definition used by
267 /// [`u8::is_ascii_whitespace`]. Importantly, this definition excludes
268 /// the `\0x0B` byte even though it has the Unicode [`White_Space`] property
269 /// and is removed by [`str::trim_end`].
270 ///
271 /// [`White_Space`]: https://www.unicode.org/reports/tr44/#White_Space
272 ///
273 /// # Examples
274 ///
275 /// ```
276 /// assert_eq!(b"\r hello world\n ".trim_ascii_end(), b"\r hello world");
277 /// assert_eq!(b" ".trim_ascii_end(), b"");
278 /// assert_eq!(b"".trim_ascii_end(), b"");
279 /// ```
280 #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
281 #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
282 #[inline]
283 pub const fn trim_ascii_end(&self) -> &[u8] {
284 let mut bytes = self;
285 // Note: A pattern matching based approach (instead of indexing) allows
286 // making the function const.
287 while let [rest @ .., last] = bytes {
288 if last.is_ascii_whitespace() {
289 bytes = rest;
290 } else {
291 break;
292 }
293 }
294 bytes
295 }
296
297 /// Returns a byte slice with leading and trailing ASCII whitespace bytes
298 /// removed.
299 ///
300 /// 'Whitespace' refers to the definition used by
301 /// [`u8::is_ascii_whitespace`]. Importantly, this definition excludes
302 /// the `\0x0B` byte even though it has the Unicode [`White_Space`] property
303 /// and is removed by [`str::trim`].
304 ///
305 /// [`White_Space`]: https://www.unicode.org/reports/tr44/#White_Space
306 ///
307 /// # Examples
308 ///
309 /// ```
310 /// assert_eq!(b"\r hello world\n ".trim_ascii(), b"hello world");
311 /// assert_eq!(b" ".trim_ascii(), b"");
312 /// assert_eq!(b"".trim_ascii(), b"");
313 /// ```
314 #[stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
315 #[rustc_const_stable(feature = "byte_slice_trim_ascii", since = "1.80.0")]
316 #[inline]
317 pub const fn trim_ascii(&self) -> &[u8] {
318 self.trim_ascii_start().trim_ascii_end()
319 }
320}
321
// Named zero-sized `Fn` type used by `escape_ascii`. Using a nameable ZST
// instead of a real closure lets the `EscapeAscii` struct below spell out its
// `FlatMap` field type, which an anonymous closure type could not do.
impl_fn_for_zst! {
    #[derive(Clone)]
    struct EscapeByte impl Fn = |byte: &u8| -> ascii::EscapeDefault {
        ascii::escape_default(*byte)
    };
}
328
/// An iterator over the escaped version of a byte slice.
///
/// This `struct` is created by the [`slice::escape_ascii`] method. See its
/// documentation for more information.
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
#[derive(Clone)]
#[must_use = "iterators are lazy and do nothing unless consumed"]
#[ferrocene::prevalidated]
pub struct EscapeAscii<'a> {
    // Each source byte is expanded into its `ascii::EscapeDefault` escape
    // sequence via the zero-sized `EscapeByte` mapper.
    inner: iter::FlatMap<super::Iter<'a, u8>, ascii::EscapeDefault, EscapeByte>,
}
340
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::Iterator for EscapeAscii<'a> {
    type Item = u8;

    #[inline]
    fn next(&mut self) -> Option<u8> {
        self.inner.next()
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.inner.size_hint()
    }

    // `try_fold` and `fold` are forwarded explicitly so that the inner
    // `FlatMap`'s own implementations are used rather than the trait
    // defaults built on repeated `next` calls.
    #[inline]
    fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
    where
        Fold: FnMut(Acc, Self::Item) -> R,
        R: ops::Try<Output = Acc>,
    {
        self.inner.try_fold(init, fold)
    }

    #[inline]
    fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
    where
        Fold: FnMut(Acc, Self::Item) -> Acc,
    {
        self.inner.fold(init, fold)
    }

    #[inline]
    fn last(mut self) -> Option<u8> {
        // The last forward element equals the first back element here, so we
        // can answer in O(1)-ish via the double-ended impl instead of walking
        // the whole iterator forward.
        self.next_back()
    }
}
372
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::DoubleEndedIterator for EscapeAscii<'a> {
    fn next_back(&mut self) -> Option<u8> {
        // Pure delegation: the inner `FlatMap` over a slice iterator is
        // double-ended.
        self.inner.next_back()
    }
}
// Marker impl: once the escaped stream is exhausted, `next` keeps returning
// `None` (behavior delegated from the inner iterator).
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> iter::FusedIterator for EscapeAscii<'a> {}
381#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
382impl<'a> fmt::Display for EscapeAscii<'a> {
383 #[ferrocene::prevalidated]
384 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
385 // disassemble iterator, including front/back parts of flatmap in case it has been partially consumed
386 let (front, slice, back) = self.clone().inner.into_parts();
387 let front = front.unwrap_or(EscapeDefault::empty());
388 let mut bytes = slice.unwrap_or_default().as_slice();
389 let back = back.unwrap_or(EscapeDefault::empty());
390
391 // usually empty, so the formatter won't have to do any work
392 for byte in front {
393 f.write_char(byte as char)?;
394 }
395
396 #[ferrocene::prevalidated]
397 fn needs_escape(b: u8) -> bool {
398 b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
399 }
400
401 while bytes.len() > 0 {
402 // fast path for the printable, non-escaped subset of ascii
403 let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
404 // SAFETY: prefix length was derived by counting bytes in the same splice, so it's in-bounds
405 let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
406 // SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
407 let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };
408
409 f.write_str(prefix)?; // the fast part
410
411 bytes = remainder;
412
413 if let Some(&b) = bytes.first() {
414 // guaranteed to be non-empty, better to write it as a str
415 fmt::Display::fmt(&ascii::escape_default(b), f)?;
416 bytes = &bytes[1..];
417 }
418 }
419
420 // also usually empty
421 for byte in back {
422 f.write_char(byte as char)?;
423 }
424 Ok(())
425 }
426}
#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
impl<'a> fmt::Debug for EscapeAscii<'a> {
    #[ferrocene::prevalidated]
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Deliberately opaque: renders as `EscapeAscii { .. }` without
        // exposing the internal iterator state.
        f.debug_struct("EscapeAscii").finish_non_exhaustive()
    }
}
434
/// ASCII test *without* the chunk-at-a-time optimizations.
///
/// This is carefully structured to produce nice small code -- it's smaller in
/// `-O` than what the "obvious" ways produces under `-C opt-level=s`. If you
/// touch it, be sure to run (and update if needed) the assembly test.
#[unstable(feature = "str_internals", issue = "none")]
#[doc(hidden)]
#[inline]
#[ferrocene::prevalidated]
pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
    // Pop bytes off the *back* while they are ASCII; the slice is empty at
    // the end iff every byte passed. (The back-to-front order is part of the
    // codegen tuning described above -- do not "simplify" it.)
    while let [rest @ .., last] = bytes {
        if !last.is_ascii() {
            break;
        }
        bytes = rest;
    }
    bytes.is_empty()
}
453
/// Optimized ASCII test that will use usize-at-a-time operations instead of
/// byte-at-a-time operations (when possible).
///
/// The algorithm we use here is pretty simple. If `s` is too short, we just
/// check each byte and be done with it. Otherwise:
///
/// - Read the first word with an unaligned load.
/// - Align the pointer, read subsequent words until end with aligned loads.
/// - Read the last `usize` from `s` with an unaligned load.
///
/// If any of these loads produces something for which `contains_nonascii`
/// (below) returns true, then we know the answer is false.
#[cfg(not(any(
    all(target_arch = "x86_64", target_feature = "sse2"),
    all(target_arch = "loongarch64", target_feature = "lsx")
)))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)] // fallback impl has same behavior
#[ferrocene::prevalidated]
const fn is_ascii(s: &[u8]) -> bool {
    // The runtime version behaves the same as the compiletime version, it's
    // just more optimized.
    const_eval_select!(
        @capture { s: &[u8] } -> bool:
        if const {
            is_ascii_simple(s)
        } else {
            /// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
            /// from `../str/mod.rs`, which does something similar for utf8 validation.
            #[ferrocene::prevalidated]
            const fn contains_nonascii(v: usize) -> bool {
                const NONASCII_MASK: usize = usize::repeat_u8(0x80);
                (NONASCII_MASK & v) != 0
            }

            const USIZE_SIZE: usize = size_of::<usize>();

            let len = s.len();
            let align_offset = s.as_ptr().align_offset(USIZE_SIZE);

            // If we wouldn't gain anything from the word-at-a-time implementation, fall
            // back to a scalar loop.
            //
            // We also do this for architectures where `size_of::<usize>()` isn't
            // sufficient alignment for `usize`, because it's a weird edge case.
            if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
                return is_ascii_simple(s);
            }

            // We always read the first word unaligned; so if `align_offset` is 0,
            // skip a whole word ahead, otherwise the aligned read would load the
            // same value again.
            let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };

            let start = s.as_ptr();
            // SAFETY: We verified above that `len >= USIZE_SIZE`, so reading one
            // whole (unaligned) `usize` from the start is in-bounds.
            let first_word = unsafe { (start as *const usize).read_unaligned() };

            if contains_nonascii(first_word) {
                return false;
            }
            // We checked this above, somewhat implicitly. Note that `offset_to_aligned`
            // is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
            // checked above.
            debug_assert!(offset_to_aligned <= len);

            // SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
            // middle chunk of the slice.
            let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };

            // `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
            let mut byte_pos = offset_to_aligned;

            // Paranoia check about alignment, since we're about to do a bunch of
            // *aligned* loads (`word_ptr.read()` below requires it). In practice a
            // failure here should be impossible barring a bug in `align_offset`.
            // While `align_offset` is allowed to spuriously fail in CTFE, if it
            // doesn't have alignment information it should have given `usize::MAX`
            // earlier, sending things through the scalar path instead of this one,
            // so this check should pass if it's reachable.
            debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));

            // Read subsequent words up to -- but excluding -- the last aligned word,
            // which is handled by the tail check below. Excluding it here guarantees
            // the tail is at most one `usize`, avoiding an extra `byte_pos == len`
            // branch.
            while byte_pos < len - USIZE_SIZE {
                // Sanity check that the read is in bounds
                debug_assert!(byte_pos + USIZE_SIZE <= len);
                // And that our assumptions about `byte_pos` hold.
                debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));

                // SAFETY: We know `word_ptr` is properly aligned (because of
                // `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
                let word = unsafe { word_ptr.read() };
                if contains_nonascii(word) {
                    return false;
                }

                byte_pos += USIZE_SIZE;
                // SAFETY: We know that `byte_pos <= len - USIZE_SIZE`, which means that
                // after this `add`, `word_ptr` will be at most one-past-the-end.
                word_ptr = unsafe { word_ptr.add(1) };
            }

            // Sanity check to ensure there really is only one `usize` left. This should
            // be guaranteed by our loop condition.
            debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);

            // SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
            let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };

            !contains_nonascii(last_word)
        }
    )
}
568
/// Chunk size for SSE2 vectorized ASCII checking (4x 16-byte loads).
///
/// Also used by the x86_64 `is_ascii` below as the cutoff under which the
/// scalar `usize`-at-a-time path is taken instead of calling into SSE2 code.
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
const SSE2_CHUNK_SIZE: usize = 64;
572
573#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
574#[inline]
575fn is_ascii_sse2(bytes: &[u8]) -> bool {
576 use crate::arch::x86_64::{__m128i, _mm_loadu_si128, _mm_movemask_epi8, _mm_or_si128};
577
578 let (chunks, rest) = bytes.as_chunks::<SSE2_CHUNK_SIZE>();
579
580 for chunk in chunks {
581 let ptr = chunk.as_ptr();
582 // SAFETY: chunk is 64 bytes. SSE2 is baseline on x86_64.
583 let mask = unsafe {
584 let a1 = _mm_loadu_si128(ptr as *const __m128i);
585 let a2 = _mm_loadu_si128(ptr.add(16) as *const __m128i);
586 let b1 = _mm_loadu_si128(ptr.add(32) as *const __m128i);
587 let b2 = _mm_loadu_si128(ptr.add(48) as *const __m128i);
588 // OR all chunks - if any byte has high bit set, combined will too.
589 let combined = _mm_or_si128(_mm_or_si128(a1, a2), _mm_or_si128(b1, b2));
590 // Create a mask from the MSBs of each byte.
591 // If any byte is >= 128, its MSB is 1, so the mask will be non-zero.
592 _mm_movemask_epi8(combined)
593 };
594 if mask != 0 {
595 return false;
596 }
597 }
598
599 // Handle remaining bytes
600 rest.iter().all(|b| b.is_ascii())
601}
602
/// ASCII test optimized to use the `pmovmskb` instruction on `x86-64`.
///
/// Uses explicit SSE2 intrinsics to prevent LLVM from auto-vectorizing with
/// broken AVX-512 code that extracts mask bits one-by-one.
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn is_ascii(bytes: &[u8]) -> bool {
    const USIZE_SIZE: usize = size_of::<usize>();
    // `usize::MAX / 255` is 0x0101...01, so this constant is 0x80 repeated in
    // every byte: ANDing a word with it is non-zero iff some byte has its
    // high bit set, i.e. is non-ASCII.
    const NONASCII_MASK: usize = usize::MAX / 255 * 0x80;

    const_eval_select!(
        @capture { bytes: &[u8] } -> bool:
        if const {
            // Compile-time evaluation cannot use SIMD intrinsics; fall back
            // to the simple byte-at-a-time check.
            is_ascii_simple(bytes)
        } else {
            // For small inputs, use usize-at-a-time processing to avoid SSE2 call overhead.
            if bytes.len() < SSE2_CHUNK_SIZE {
                let chunks = bytes.chunks_exact(USIZE_SIZE);
                let remainder = chunks.remainder();
                for chunk in chunks {
                    // SWAR: test the high bit of all word bytes at once.
                    let word = usize::from_ne_bytes(chunk.try_into().unwrap());
                    if (word & NONASCII_MASK) != 0 {
                        return false;
                    }
                }
                // Fewer than USIZE_SIZE bytes left; check them individually.
                return remainder.iter().all(|b| b.is_ascii());
            }

            // Bug in the lint: is_ascii isn't validated, only the expansion of `is_ascii::runtime`
            #[allow(ferrocene::unvalidated)]
            is_ascii_sse2(bytes)
        }
    )
}
638
/// ASCII test optimized to use the `vmskltz.b` instruction on `loongarch64`.
///
/// Other platforms are not likely to benefit from this code structure, so they
/// use SWAR techniques to test for ASCII in `usize`-sized chunks.
#[cfg(all(target_arch = "loongarch64", target_feature = "lsx"))]
#[inline]
const fn is_ascii(bytes: &[u8]) -> bool {
    // Process chunks of 32 bytes at a time in the fast path to enable
    // auto-vectorization and use of `vmskltz.b`. Two 128-bit vector registers
    // can be OR'd together and then the resulting vector can be tested for
    // non-ASCII bytes.
    const CHUNK_SIZE: usize = 32;

    let mut i = 0;

    // NOTE: the counting-loop shape below is deliberate -- it is what LLVM
    // recognizes and vectorizes; do not rewrite it without checking codegen.
    while i + CHUNK_SIZE <= bytes.len() {
        let chunk_end = i + CHUNK_SIZE;

        // Get LLVM to produce a `vmskltz.b` instruction on loongarch64 which
        // creates a mask from the most significant bit of each byte.
        // ASCII bytes are less than 128 (0x80), so their most significant
        // bit is unset.
        let mut count = 0;
        while i < chunk_end {
            count += bytes[i].is_ascii() as u8;
            i += 1;
        }

        // All bytes should be <= 127 so count is equal to chunk size.
        // (`count` cannot overflow: it is at most CHUNK_SIZE == 32.)
        if count != CHUNK_SIZE as u8 {
            return false;
        }
    }

    // Process the remaining `bytes.len() % CHUNK_SIZE` tail bytes one by one.
    let mut is_ascii = true;
    while i < bytes.len() {
        is_ascii &= bytes[i].is_ascii();
        i += 1;
    }

    is_ascii
}