<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
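As a minimal sketch of the pattern these checks expand to (the real expansion uses the internal `assert_unsafe_precondition!` machinery and `panic_nounwind_fmt`, which user code cannot call; here `std::process::abort` stands in for the non-unwinding panic, and the function and message are illustrative):

fn precondition_check(index: usize, len: usize) {
    if !(index < len) {
        eprintln!(
            "unsafe precondition(s) violated: slice::get_unchecked requires \
             that the index is within the slice"
        );
        // A non-unwinding panic cannot be caught by `catch_unwind`;
        // aborting the process models that behavior.
        std::process::abort();
    }
}

fn main() {
    precondition_check(2, 5); // in bounds: returns normally
    // precondition_check(9, 5); // out of bounds: would abort the process
}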
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
<core::str::pattern::CharSearcher<'a> as core::str::pattern::Searcher<'a>>::next_match fn next_match(&mut self) -> Option<(usize, usize)> {
loop {
// get the haystack after the last character found
let bytes = self.haystack.as_bytes().get(self.finger..self.finger_back)?;
// the last byte of the utf8 encoded needle
// SAFETY: we have an invariant that `utf8_size < 5`
let last_byte = unsafe { *self.utf8_encoded.get_unchecked(self.utf8_size() - 1) };
if let Some(index) = memchr::memchr(last_byte, bytes) {
// The new finger is the index of the byte we found,
// plus one, since we memchr'd for the last byte of the character.
//
// Note that this doesn't always give us a finger on a UTF8 boundary.
// If we *didn't* find our character
// we may have indexed to the non-last byte of a 3-byte or 4-byte character.
// We can't just skip to the next valid starting byte because a character like
// ꁁ (U+A041 YI SYLLABLE PA), utf-8 `EA 81 81` will have us always find
// the second byte when searching for the third.
//
// However, this is totally okay. While we have the invariant that
// self.finger is on a UTF8 boundary, this invariant is not relied upon
// within this method (it is relied upon in CharSearcher::next()).
//
// We only exit this method when we reach the end of the string, or if we
// find something. When we find something the `finger` will be set
// to a UTF8 boundary.
self.finger += index + 1;
if self.finger >= self.utf8_size() {
let found_char = self.finger - self.utf8_size();
if let Some(slice) = self.haystack.as_bytes().get(found_char..self.finger) {
if slice == &self.utf8_encoded[0..self.utf8_size()] {
return Some((found_char, self.finger));
}
}
}
} else {
// found nothing, exit
self.finger = self.finger_back;
return None;
}
}
}
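A small illustration of the offsets this searcher reports, reusing the U+A041 example from the comment above; `str::find` with a `char` pattern is built on this machinery:

fn main() {
    let haystack = "aꁁb"; // 'ꁁ' (U+A041) encodes as three bytes: EA 81 81
    // `find` reports the starting byte offset of the match...
    assert_eq!(haystack.find('ꁁ'), Some(1));
    // ...and the match spans bytes 1..4, as `next_match` would report.
    assert_eq!('ꁁ'.len_utf8(), 3);
}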
<str as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.write_char('"')?;
// substring we know is printable
let mut printable_range = 0..0;
fn needs_escape(b: u8) -> bool {
b > 0x7E || b < 0x20 || b == b'\\' || b == b'"'
}
// the loop here first skips over runs of printable ASCII as a fast path.
// other chars (unicode, or ASCII that needs escaping) are then handled per-`char`.
let mut rest = self;
while rest.len() > 0 {
let Some(non_printable_start) = rest.as_bytes().iter().position(|&b| needs_escape(b))
else {
printable_range.end += rest.len();
break;
};
printable_range.end += non_printable_start;
// SAFETY: the position was derived from an iterator, so is known to be within bounds, and at a char boundary
rest = unsafe { rest.get_unchecked(non_printable_start..) };
let mut chars = rest.chars();
if let Some(c) = chars.next() {
let esc = c.escape_debug_ext(EscapeDebugExtArgs {
escape_grapheme_extended: true,
escape_single_quote: false,
escape_double_quote: true,
});
if esc.len() != 1 {
f.write_str(&self[printable_range.clone()])?;
Display::fmt(&esc, f)?;
printable_range.start = printable_range.end + c.len_utf8();
}
printable_range.end += c.len_utf8();
}
rest = chars.as_str();
}
f.write_str(&self[printable_range])?;
f.write_char('"')
}
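For example, printable ASCII is copied through the fast path, while quotes, backslashes, and control characters take the per-`char` escape path:

fn main() {
    assert_eq!(format!("{:?}", "ab\"c\n"), r#""ab\"c\n""#);
    assert_eq!(format!("{:?}", "plain"), "\"plain\"");
}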
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::alloc::layout::Layout::from_size_align_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::alloc::layout::Layout::from_size_alignment_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::char::convert::from_u32_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::cmp::impls::<impl core::cmp::Ord for bool>::cmp fn cmp(&self, other: &bool) -> Ordering {
// Casting to i8's and converting the difference to an Ordering generates
// more optimal assembly.
// See <https://github.com/rust-lang/rust/issues/66780> for more info.
match (*self as i8) - (*other as i8) {
-1 => Less,
0 => Equal,
1 => Greater,
#[ferrocene::annotation(
"This match arm cannot be covered because it is unreachable. See the safety comment below."
)]
// SAFETY: bool as i8 returns 0 or 1, so the difference can't be anything else
_ => unsafe { unreachable_unchecked() },
}
}
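Observable behavior: `false` sorts before `true`, matching the 0 and 1 produced by the `i8` casts:

use std::cmp::Ordering;

fn main() {
    assert_eq!(false.cmp(&true), Ordering::Less);
    assert_eq!(true.cmp(&true), Ordering::Equal);
    assert_eq!(true.cmp(&false), Ordering::Greater);
}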
core::fmt::num::exp_u128 fn $fmt_fn(
f: &mut fmt::Formatter<'_>,
n: $T,
is_nonnegative: bool,
letter_e: u8
) -> fmt::Result {
debug_assert!(letter_e.is_ascii_alphabetic(), "single-byte character");
// Print the integer as a coefficient in range (-10, 10).
let mut exp = n.checked_ilog10().unwrap_or(0) as usize;
debug_assert!(n / (10 as $T).pow(exp as u32) < 10);
// Precision is counted as the number of digits in the fraction.
let mut coef_prec = exp;
// Keep the digits as an integer (paired with its coef_prec count).
let mut coef = n;
// A Formatter may set the precision to a fixed number of decimals.
let more_prec = match f.precision() {
None => {
// Omit any and all trailing zeroes.
while coef_prec != 0 && coef % 10 == 0 {
coef /= 10;
coef_prec -= 1;
}
0
},
Some(fmt_prec) if fmt_prec >= coef_prec => {
// Count the number of additional zeroes needed.
fmt_prec - coef_prec
},
Some(fmt_prec) => {
// Count the number of digits to drop.
let less_prec = coef_prec - fmt_prec;
assert!(less_prec > 0);
// Scale down the coefficient/precision pair. For example,
// coef 123456 gets coef_prec 5 (to make 1.23456). To format
// the number with 2 decimals, i.e., fmt_prec 2, coef should
// be scaled by 10⁵⁻²=1000 to get coef 123 with coef_prec 2.
// SAFETY: `less_prec` does not exceed `coef_prec`, so this power of
// ten is at most the coef value and `checked_pow` cannot overflow.
let scale = unsafe {
(10 as $T).checked_pow(less_prec as u32).unwrap_unchecked()
};
let floor = coef / scale;
// Round half to even, as documented.
let over = coef % scale;
let half = scale / 2;
let round_up = if over < half {
0
} else if over > half {
1
} else {
floor & 1 // round odd up to even
};
// Adding one after a scale-down by at least 10 cannot overflow.
coef = floor + round_up;
coef_prec = fmt_prec;
// The round_up may have caused the coefficient to reach 10
// (which is not permitted). For example, anything in range
// [9.95, 10) becomes 10.0 when adjusted to precision 1.
if round_up != 0 && coef.checked_ilog10().unwrap_or(0) as usize > coef_prec {
debug_assert_eq!(coef, (10 as $T).pow(coef_prec as u32 + 1));
coef /= 10; // drop one trailing zero
exp += 1; // one power of ten higher
}
0
},
};
// Allocate a text buffer with lazy initialization.
const MAX_DEC_N: usize = $T::MAX.ilog10() as usize + 1;
const MAX_COEF_LEN: usize = MAX_DEC_N + ".".len();
const MAX_TEXT_LEN: usize = MAX_COEF_LEN + "e99".len();
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_TEXT_LEN];
// Encode the coefficient in buf[..coef_len].
let (lead_dec, coef_len) = if coef_prec == 0 && more_prec == 0 {
(coef, 1_usize) // single digit; no fraction
} else {
buf[1].write(b'.');
let fraction_range = 2..(2 + coef_prec);
// Consume the least-significant decimals from a working copy.
let mut remain = coef;
#[cfg(feature = "optimize_for_size")] {
for i in fraction_range.clone().rev() {
let digit = (remain % 10) as usize;
remain /= 10;
buf[i].write(b'0' + digit as u8);
}
}
#[cfg(not(feature = "optimize_for_size"))] {
// Write digits two at a time using a lookup table.
for i in fraction_range.clone().skip(1).rev().step_by(2) {
let pair = (remain % 100) as usize;
remain /= 100;
buf[i - 1].write(DECIMAL_PAIRS[pair * 2 + 0]);
buf[i - 0].write(DECIMAL_PAIRS[pair * 2 + 1]);
}
// An odd number of digits leaves one digit remaining.
if coef_prec & 1 != 0 {
let digit = (remain % 10) as usize;
remain /= 10;
buf[fraction_range.start].write(b'0' + digit as u8);
}
}
(remain, fraction_range.end)
};
debug_assert!(lead_dec < 10);
debug_assert!(lead_dec != 0 || coef == 0, "significant digits only");
buf[0].write(b'0' + lead_dec as u8);
// SAFETY: The number of decimals is bounded, as captured by MAX_COEF_LEN.
unsafe { core::hint::assert_unchecked(coef_len <= MAX_COEF_LEN) }
// Encode the scale factor in buf[coef_len..text_len].
buf[coef_len].write(letter_e);
let text_len: usize = match exp {
..10 => {
buf[coef_len + 1].write(b'0' + exp as u8);
coef_len + 2
},
10..100 => {
#[cfg(feature = "optimize_for_size")] {
buf[coef_len + 1].write(b'0' + (exp / 10) as u8);
buf[coef_len + 2].write(b'0' + (exp % 10) as u8);
}
#[cfg(not(feature = "optimize_for_size"))] {
buf[coef_len + 1].write(DECIMAL_PAIRS[exp * 2 + 0]);
buf[coef_len + 2].write(DECIMAL_PAIRS[exp * 2 + 1]);
}
coef_len + 3
},
#[ferrocene::annotation("Branch is unreachable. See SAFETY comment below.")]
_ => {
const { assert!($T::MAX.ilog10() < 100) };
// SAFETY: A `u256::MAX` would get exponent 77.
unsafe { core::hint::unreachable_unchecked() }
}
};
// SAFETY: All bytes up until text_len have been set.
let text = unsafe { buf[..text_len].assume_init_ref() };
if more_prec == 0 {
// SAFETY: The text consists of ASCII exclusively: decimal digits,
// the `letter_e` byte, or a dot. ASCII implies valid UTF-8.
let as_str = unsafe { str::from_utf8_unchecked(text) };
f.pad_integral(is_nonnegative, "", as_str)
} else {
let parts = &[
numfmt::Part::Copy(&text[..coef_len]),
numfmt::Part::Zero(more_prec),
numfmt::Part::Copy(&text[coef_len..]),
];
let sign = if !is_nonnegative {
"-"
} else if f.sign_plus() {
"+"
} else {
""
};
// SAFETY: The text consists of ASCII exclusively: decimal digits,
// the `letter_e` byte, or a dot. ASCII implies valid UTF-8.
unsafe { f.pad_formatted_parts(&numfmt::Formatted { sign, parts }) }
}
}
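User-visible effect of the three precision branches above, on a toolchain whose integer `{:e}` formatting honors precision as implemented here (older toolchains did not, so treat these as a sketch of this implementation's behavior):

fn main() {
    // No precision: trailing zeroes are trimmed.
    assert_eq!(format!("{:e}", 1230u128), "1.23e3");
    // Precision above the digit count: zeroes are appended.
    assert_eq!(format!("{:.4e}", 1230u128), "1.2300e3");
    // Precision below the digit count: round half to even; 9.95 -> 1.0e3.
    assert_eq!(format!("{:.1e}", 995u128), "1.0e3");
}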
core::fmt::num::imp::exp_u64 fn $fmt_fn(
f: &mut fmt::Formatter<'_>,
n: $T,
is_nonnegative: bool,
letter_e: u8
) -> fmt::Result {
debug_assert!(letter_e.is_ascii_alphabetic(), "single-byte character");
// Print the integer as a coefficient in range (-10, 10).
let mut exp = n.checked_ilog10().unwrap_or(0) as usize;
debug_assert!(n / (10 as $T).pow(exp as u32) < 10);
// Precision is counted as the number of digits in the fraction.
let mut coef_prec = exp;
// Keep the digits as an integer (paired with its coef_prec count).
let mut coef = n;
// A Formatter may set the precision to a fixed number of decimals.
let more_prec = match f.precision() {
None => {
// Omit any and all trailing zeroes.
while coef_prec != 0 && coef % 10 == 0 {
coef /= 10;
coef_prec -= 1;
}
0
},
Some(fmt_prec) if fmt_prec >= coef_prec => {
// Count the number of additional zeroes needed.
fmt_prec - coef_prec
},
Some(fmt_prec) => {
// Count the number of digits to drop.
let less_prec = coef_prec - fmt_prec;
assert!(less_prec > 0);
// Scale down the coefficient/precision pair. For example,
// coef 123456 gets coef_prec 5 (to make 1.23456). To format
// the number with 2 decimals, i.e., fmt_prec 2, coef should
// be scaled by 10⁵⁻²=1000 to get coef 123 with coef_prec 2.
// SAFETY: `less_prec` does not exceed `coef_prec`, so this power of
// ten is at most the coef value and `checked_pow` cannot overflow.
let scale = unsafe {
(10 as $T).checked_pow(less_prec as u32).unwrap_unchecked()
};
let floor = coef / scale;
// Round half to even, as documented.
let over = coef % scale;
let half = scale / 2;
let round_up = if over < half {
0
} else if over > half {
1
} else {
floor & 1 // round odd up to even
};
// Adding one after a scale-down by at least 10 cannot overflow.
coef = floor + round_up;
coef_prec = fmt_prec;
// The round_up may have caused the coefficient to reach 10
// (which is not permitted). For example, anything in range
// [9.95, 10) becomes 10.0 when adjusted to precision 1.
if round_up != 0 && coef.checked_ilog10().unwrap_or(0) as usize > coef_prec {
debug_assert_eq!(coef, (10 as $T).pow(coef_prec as u32 + 1));
coef /= 10; // drop one trailing zero
exp += 1; // one power of ten higher
}
0
},
};
// Allocate a text buffer with lazy initialization.
const MAX_DEC_N: usize = $T::MAX.ilog10() as usize + 1;
const MAX_COEF_LEN: usize = MAX_DEC_N + ".".len();
const MAX_TEXT_LEN: usize = MAX_COEF_LEN + "e99".len();
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_TEXT_LEN];
// Encode the coefficient in buf[..coef_len].
let (lead_dec, coef_len) = if coef_prec == 0 && more_prec == 0 {
(coef, 1_usize) // single digit; no fraction
} else {
buf[1].write(b'.');
let fraction_range = 2..(2 + coef_prec);
// Consume the least-significant decimals from a working copy.
let mut remain = coef;
#[cfg(feature = "optimize_for_size")] {
for i in fraction_range.clone().rev() {
let digit = (remain % 10) as usize;
remain /= 10;
buf[i].write(b'0' + digit as u8);
}
}
#[cfg(not(feature = "optimize_for_size"))] {
// Write digits two at a time using a lookup table.
for i in fraction_range.clone().skip(1).rev().step_by(2) {
let pair = (remain % 100) as usize;
remain /= 100;
buf[i - 1].write(DECIMAL_PAIRS[pair * 2 + 0]);
buf[i - 0].write(DECIMAL_PAIRS[pair * 2 + 1]);
}
// An odd number of digits leaves one digit remaining.
if coef_prec & 1 != 0 {
let digit = (remain % 10) as usize;
remain /= 10;
buf[fraction_range.start].write(b'0' + digit as u8);
}
}
(remain, fraction_range.end)
};
debug_assert!(lead_dec < 10);
debug_assert!(lead_dec != 0 || coef == 0, "significant digits only");
buf[0].write(b'0' + lead_dec as u8);
// SAFETY: The number of decimals is bounded, as captured by MAX_COEF_LEN.
unsafe { core::hint::assert_unchecked(coef_len <= MAX_COEF_LEN) }
// Encode the scale factor in buf[coef_len..text_len].
buf[coef_len].write(letter_e);
let text_len: usize = match exp {
..10 => {
buf[coef_len + 1].write(b'0' + exp as u8);
coef_len + 2
},
10..100 => {
#[cfg(feature = "optimize_for_size")] {
buf[coef_len + 1].write(b'0' + (exp / 10) as u8);
buf[coef_len + 2].write(b'0' + (exp % 10) as u8);
}
#[cfg(not(feature = "optimize_for_size"))] {
buf[coef_len + 1].write(DECIMAL_PAIRS[exp * 2 + 0]);
buf[coef_len + 2].write(DECIMAL_PAIRS[exp * 2 + 1]);
}
coef_len + 3
},
#[ferrocene::annotation("Branch is unreachable. See SAFETY comment below.")]
_ => {
const { assert!($T::MAX.ilog10() < 100) };
// SAFETY: A `u256::MAX` would get exponent 77.
unsafe { core::hint::unreachable_unchecked() }
}
};
// SAFETY: All bytes up until text_len have been set.
let text = unsafe { buf[..text_len].assume_init_ref() };
if more_prec == 0 {
// SAFETY: The text consists of ASCII exclusively: decimal digits,
// the `letter_e` byte, or a dot. ASCII implies valid UTF-8.
let as_str = unsafe { str::from_utf8_unchecked(text) };
f.pad_integral(is_nonnegative, "", as_str)
} else {
let parts = &[
numfmt::Part::Copy(&text[..coef_len]),
numfmt::Part::Zero(more_prec),
numfmt::Part::Copy(&text[coef_len..]),
];
let sign = if !is_nonnegative {
"-"
} else if f.sign_plus() {
"+"
} else {
""
};
// SAFETY: The text consists of ASCII exclusively: decimal digits,
// the `letter_e` byte, or a dot. ASCII implies valid UTF-8.
unsafe { f.pad_formatted_parts(&numfmt::Formatted { sign, parts }) }
}
}
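The sign itself is supplied by the caller through `is_nonnegative` and `sign_plus`; at the user level that corresponds to:

fn main() {
    assert_eq!(format!("{:e}", -1500i64), "-1.5e3");
    // `{:+e}` requests an explicit plus for nonnegative values.
    assert_eq!(format!("{:+e}", 1500i64), "+1.5e3");
}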
core::fmt::rt::Argument::<'_>::fmt pub(super) unsafe fn fmt(&self, f: &mut Formatter<'_>) -> Result {
match self.ty {
// SAFETY:
// Because of the invariant that if `formatter` had the type
// `fn(&T, _) -> _` then `value` has type `&'b T` where `'b` is
// the lifetime of the `ArgumentType`, and because references
// and `NonNull` are ABI-compatible, this is completely equivalent
// to calling the original function passed to `new` with the
// original reference, which is sound.
ArgumentType::Placeholder { formatter, value, .. } => unsafe { formatter(value, f) },
#[ferrocene::annotation(
"Cannot be covered as this code is unreachable. See the SAFETY comment."
)]
// SAFETY: the caller promised this.
ArgumentType::Count(_) => unsafe { unreachable_unchecked() },
}
}
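Every `{}` placeholder in a format string is one such `Argument`, and its `fmt` dispatches through the function pointer captured when the argument was constructed. A trivial demonstration at the public API level:

fn main() {
    let s = std::fmt::format(format_args!("{}+{}", 1, 2));
    assert_eq!(s, "1+2");
}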
core::hint::assert_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::hint::select_unpredictable pub const fn select_unpredictable<T>(condition: bool, true_val: T, false_val: T) -> T
where
T: [const] Destruct,
{
// FIXME(https://github.com/rust-lang/unsafe-code-guidelines/issues/245):
// Change this to use ManuallyDrop instead.
let mut true_val = MaybeUninit::new(true_val);
let mut false_val = MaybeUninit::new(false_val);
struct DropOnPanic<T> {
// Invariant: valid pointer and points to an initialized value that is not further used,
// i.e. it can be dropped by this guard.
inner: *mut T,
}
impl<T> Drop for DropOnPanic<T> {
fn drop(&mut self) {
// SAFETY: Must be guaranteed on construction of local type `DropOnPanic`.
unsafe { self.inner.drop_in_place() }
}
}
let true_ptr = true_val.as_mut_ptr();
let false_ptr = false_val.as_mut_ptr();
// SAFETY: The value that is not selected is dropped, and the selected one
// is returned. This is necessary because the intrinsic doesn't drop the
// value that is not selected.
unsafe {
// Extract the selected value first, and ensure it is dropped as well if dropping
// the unselected value panics. We construct a temporary by-pointer guard around
// the selected value while dropping the unselected value. The arguments overlap
// here, so we cannot use mutable references for them.
let guard = crate::intrinsics::select_unpredictable(condition, true_ptr, false_ptr);
let drop = crate::intrinsics::select_unpredictable(condition, false_ptr, true_ptr);
// SAFETY: both pointers are well-aligned and point to initialized values inside a
// `MaybeUninit` each. In both possible values for `condition` the pointer `guard` and
// `drop` do not alias (even though the two argument pairs we have selected from did alias
// each other).
let guard = DropOnPanic { inner: guard };
drop.drop_in_place();
crate::mem::forget(guard);
// Note that it is important to use the values here. Reading from the pointer we got makes
// LLVM forget the !unpredictable annotation sometimes (in tests, integer sized values in
// particular seemed to confuse it, also observed in llvm/llvm-project #82340).
crate::intrinsics::select_unpredictable(condition, true_val, false_val).assume_init()
}
}
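A sketch of the public entry point, assuming a toolchain where `std::hint::select_unpredictable` is stable (it was feature-gated on older releases):

fn main() {
    // Semantically `if condition { a } else { b }`, but hints to codegen
    // that the condition is unpredictable (favoring e.g. conditional moves).
    let label = std::hint::select_unpredictable(2 + 2 == 4, "yes", "no");
    assert_eq!(label, "yes");
}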
core::hint::unreachable_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::iter::traits::iterator::Iterator::collect fn collect<B: FromIterator<Self::Item>>(self) -> B
where
Self: Sized,
{
// This is too aggressive to turn on for everything all the time, but PR#137908
// happened to reveal that some rustc iterators had malformed `size_hint`s,
// so this will help catch such things in debug-assertions-std runners,
// even if users won't ever see it.
#[ferrocene::annotation("We ship `core` with debug assertions enabled")]
if cfg!(debug_assertions) {
let hint = self.size_hint();
assert!(hint.1.is_none_or(|high| high >= hint.0), "Malformed size_hint {hint:?}");
}
FromIterator::from_iter(self)
}
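Typical use, which simply forwards to `FromIterator::from_iter` after the optional `size_hint` sanity check:

fn main() {
    let doubled: Vec<i32> = (1..=3).map(|x| x * 2).collect();
    assert_eq!(doubled, [2, 4, 6]);
}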
core::mem::conjure_zst pub const unsafe fn conjure_zst<T>() -> T {
#[ferrocene::annotation(
"This assertion only runs in compilation, meaning that it cannot be covered in runtime"
)]
const_assert!(
size_of::<T>() == 0,
"mem::conjure_zst invoked on a non-zero-sized type",
"mem::conjure_zst invoked on type {name}, which is not zero-sized",
name: &str = crate::any::type_name::<T>()
);
// SAFETY: because the caller must guarantee that it's inhabited and zero-sized,
// there's nothing in the representation that needs to be set.
// `assume_init` calls `assert_inhabited`, so we don't need to here.
unsafe {
#[allow(clippy::uninit_assumed_init)]
MaybeUninit::uninit().assume_init()
}
}
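The underlying idea, shown for the concrete zero-sized type `()` (a sketch of the same `MaybeUninit` reasoning, not a call to the internal `conjure_zst`):

use std::mem::MaybeUninit;

fn main() {
    // A zero-sized type has no bytes to initialize, so for an inhabited
    // ZST like `()` this `assume_init` is sound.
    let unit: () = unsafe { MaybeUninit::<()>::uninit().assume_init() };
    let _ = unit;
}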
core::num::<impl i128>::unchecked_neg::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i128>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i128>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i16>::unchecked_neg::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i16>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i16>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i32>::unchecked_neg::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i32>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i32>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i64>::unchecked_neg::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i64>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i64>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i8>::unchecked_neg::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i8>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl i8>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl isize>::unchecked_neg::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl isize>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl isize>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u128>::unchecked_add::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u128>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u128>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u128>::unchecked_sub::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u16>::unchecked_add::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u16>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u16>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u16>::unchecked_sub::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u32>::unchecked_add::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u32>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u32>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u32>::unchecked_sub::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u64>::unchecked_add::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u64>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u64>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u64>::unchecked_sub::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u8>::unchecked_add::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u8>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u8>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl u8>::unchecked_sub::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl usize>::unchecked_add::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl usize>::unchecked_shl::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl usize>::unchecked_shr::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::<impl usize>::unchecked_sub::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::num::flt2dec::strategy::dragon::format_shortest pub fn format_shortest<'a>(
d: &Decoded,
buf: &'a mut [MaybeUninit<u8>],
) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
// the number `v` to format is known to be:
// - equal to `mant * 2^exp`;
// - preceded by `(mant - 2 * minus) * 2^exp` in the original type; and
// - followed by `(mant + 2 * plus) * 2^exp` in the original type.
//
// obviously, `minus` and `plus` cannot be zero. (for infinities, we use out-of-range values.)
// also we assume that at least one digit is generated, i.e., `mant` cannot be zero too.
//
// this also means that any number between `low = (mant - minus) * 2^exp` and
// `high = (mant + plus) * 2^exp` will map to this exact floating point number,
// with bounds included when the original mantissa was even (i.e., `!mant_was_odd`).
assert!(d.mant > 0);
assert!(d.minus > 0);
assert!(d.plus > 0);
assert!(d.mant.checked_add(d.plus).is_some());
assert!(d.mant.checked_sub(d.minus).is_some());
assert!(buf.len() >= MAX_SIG_DIGITS);
// `a.cmp(&b) < rounding` is `if d.inclusive {a <= b} else {a < b}`
let rounding = if d.inclusive { Ordering::Greater } else { Ordering::Equal };
// estimate `k_0` from original inputs satisfying `10^(k_0-1) < high <= 10^(k_0+1)`.
// the tight bound `k` satisfying `10^(k-1) < high <= 10^k` is calculated later.
let mut k = estimate_scaling_factor(d.mant + d.plus, d.exp);
// convert `{mant, plus, minus} * 2^exp` into the fractional form so that:
// - `v = mant / scale`
// - `low = (mant - minus) / scale`
// - `high = (mant + plus) / scale`
let mut mant = Big::from_u64(d.mant);
let mut minus = Big::from_u64(d.minus);
let mut plus = Big::from_u64(d.plus);
let mut scale = Big::from_small(1);
if d.exp < 0 {
scale.mul_pow2(-d.exp as usize);
} else {
mant.mul_pow2(d.exp as usize);
minus.mul_pow2(d.exp as usize);
plus.mul_pow2(d.exp as usize);
}
// divide `mant` by `10^k`. now `scale / 10 < mant + plus <= scale * 10`.
if k >= 0 {
mul_pow10(&mut scale, k as usize);
} else {
mul_pow10(&mut mant, -k as usize);
mul_pow10(&mut minus, -k as usize);
mul_pow10(&mut plus, -k as usize);
}
// fixup when `mant + plus > scale` (or `>=`).
// we are not actually modifying `scale`, since we can skip the initial multiplication instead.
// now `scale < mant + plus <= scale * 10` and we are ready to generate digits.
//
// note that `d[0]` *can* be zero, when `scale - plus < mant < scale`.
// in this case rounding-up condition (`up` below) will be triggered immediately.
if scale.cmp(mant.clone().add(&plus)) < rounding {
// equivalent to scaling `scale` by 10
k += 1;
} else {
mant.mul_small(10);
minus.mul_small(10);
plus.mul_small(10);
}
// cache `(2, 4, 8) * scale` for digit generation.
let mut scale2 = scale.clone();
scale2.mul_pow2(1);
let mut scale4 = scale.clone();
scale4.mul_pow2(2);
let mut scale8 = scale.clone();
scale8.mul_pow2(3);
let mut down;
let mut up;
let mut i = 0;
loop {
// invariants, where `d[0..n-1]` are digits generated so far:
// - `v = mant / scale * 10^(k-n-1) + d[0..n-1] * 10^(k-n)`
// - `v - low = minus / scale * 10^(k-n-1)`
// - `high - v = plus / scale * 10^(k-n-1)`
// - `(mant + plus) / scale <= 10` (thus `mant / scale < 10`)
// where `d[i..j]` is a shorthand for `d[i] * 10^(j-i) + ... + d[j-1] * 10 + d[j]`.
// generate one digit: `d[n] = floor(mant / scale) < 10`.
let (d, _) = div_rem_upto_16(&mut mant, &scale, &scale2, &scale4, &scale8);
debug_assert!(d < 10);
buf[i] = MaybeUninit::new(b'0' + d);
i += 1;
// this is a simplified description of the modified Dragon algorithm.
// many intermediate derivations and completeness arguments are omitted for convenience.
//
// start with modified invariants, as we've updated `n`:
// - `v = mant / scale * 10^(k-n) + d[0..n-1] * 10^(k-n)`
// - `v - low = minus / scale * 10^(k-n)`
// - `high - v = plus / scale * 10^(k-n)`
//
// assume that `d[0..n-1]` is the shortest representation between `low` and `high`,
// i.e., `d[0..n-1]` satisfies both of the following but `d[0..n-2]` doesn't:
// - `low < d[0..n-1] * 10^(k-n) < high` (bijectivity: digits round to `v`); and
// - `abs(v / 10^(k-n) - d[0..n-1]) <= 1/2` (the last digit is correct).
//
// the second condition simplifies to `2 * mant <= scale`.
// solving invariants in terms of `mant`, `low` and `high` yields
// a simpler version of the first condition: `-plus < mant < minus`.
// since `-plus < 0 <= mant`, we have the correct shortest representation
// when `mant < minus` and `2 * mant <= scale`.
// (the former becomes `mant <= minus` when the original mantissa is even.)
//
// when the second doesn't hold (`2 * mant > scale`), we need to increase the last digit.
// this is enough for restoring that condition: we already know that
// the digit generation guarantees `0 <= v / 10^(k-n) - d[0..n-1] < 1`.
// in this case, the first condition becomes `-plus < mant - scale < minus`.
// since `mant < scale` after the generation, we have `scale < mant + plus`.
// (again, this becomes `scale <= mant + plus` when the original mantissa is even.)
//
// in short:
// - stop and round `down` (keep digits as is) when `mant < minus` (or `<=`).
// - stop and round `up` (increase the last digit) when `scale < mant + plus` (or `<=`).
// - keep generating otherwise.
down = mant.cmp(&minus) < rounding;
up = scale.cmp(mant.clone().add(&plus)) < rounding;
if down || up {
break;
} // we have the shortest representation, proceed to the rounding
// restore the invariants.
// this makes the algorithm always terminating: `minus` and `plus` always increase,
// but `mant` is clipped modulo `scale` and `scale` is fixed.
mant.mul_small(10);
minus.mul_small(10);
plus.mul_small(10);
}
// rounding up happens when
// i) only the rounding-up condition was triggered, or
// ii) both conditions were triggered and tie breaking prefers rounding up.
if up && (!down || *mant.mul_pow2(1) >= scale) {
// if rounding up changes the length, the exponent should also change.
// it seems that this condition is very hard to satisfy (possibly impossible),
// but we are just being safe and consistent here.
// SAFETY: we initialized that memory above.
if let Some(c) = round_up(unsafe { buf[..i].assume_init_mut() }) {
buf[i] = MaybeUninit::new(c);
i += 1;
k += 1;
}
}
// SAFETY: we initialized that memory above.
(unsafe { buf[..i].assume_init_ref() }, k)
}
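At the user level, this shortest-round-trip guarantee is what makes `Display` for floats print the fewest digits that still parse back to the same value:

fn main() {
    // Shortest digits that round-trip: not the exact binary expansion.
    assert_eq!(format!("{}", 0.3_f64), "0.3");
    // When no shorter string would round-trip, all needed digits appear.
    assert_eq!(format!("{}", 0.1_f64 + 0.2_f64), "0.30000000000000004");
}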
core::num::nonzero::NonZero::<T>::new_unchecked pub const unsafe fn new_unchecked(n: T) -> Self {
match Self::new(n) {
Some(n) => n,
#[ferrocene::annotation(
"This line cannot be covered as reaching `intrinsics::unreachable` is undefined behavior."
)]
None => {
// SAFETY: The caller guarantees that `n` is non-zero, so this is unreachable.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"NonZero::new_unchecked requires the argument to be non-zero",
() => false,
);
intrinsics::unreachable()
}
}
}
}
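Checked versus unchecked construction at the public API:

use std::num::NonZero;

fn main() {
    // The safe constructor verifies the value; zero yields `None`.
    assert!(NonZero::new(0u32).is_none());
    let n = NonZero::new(5u32).unwrap();
    // SAFETY: 7 is nonzero; passing zero here would be undefined behavior.
    let m = unsafe { NonZero::new_unchecked(7u32) };
    assert_eq!(n.get() + m.get(), 12);
}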
core::num::nonzero::NonZero::<T>::new_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ops::index_range::IndexRange::new_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::option::Option::<T>::unwrap_unchecked pub const unsafe fn unwrap_unchecked(self) -> T {
match self {
Some(val) => val,
#[ferrocene::annotation(
"This line cannot be covered as reaching `unreachable_unchecked` is undefined behavior."
)]
// SAFETY: the safety contract must be upheld by the caller.
None => unsafe { hint::unreachable_unchecked() },
}
}
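Typical use, where the caller has already established the variant:

fn main() {
    let x: Option<i32> = Some(42);
    // SAFETY: `x` is known to be `Some`; calling `unwrap_unchecked` on
    // `None` would be undefined behavior.
    let v = unsafe { x.unwrap_unchecked() };
    assert_eq!(v, 42);
}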
core::panicking::panic_bounds_check fn panic_bounds_check(index: usize, len: usize) -> ! {
#[ferrocene::annotation(
"The `immediate-abort` behavior is not certified, we only support `abort`."
)]
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
}
panic!("index out of bounds: the len is {len} but the index is {index}")
}
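Out-of-bounds indexing funnels through this function; with the default `panic = "unwind"` strategy the resulting panic is catchable:

fn main() {
    let v = vec![1, 2, 3];
    let i = 5;
    // Panics with "index out of bounds: the len is 3 but the index is 5".
    let result = std::panic::catch_unwind(|| v[i]);
    assert!(result.is_err());
}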
core::panicking::panic_fmt pub const fn panic_fmt(fmt: fmt::Arguments<'_>) -> ! {
#[ferrocene::annotation(
"The `immediate-abort` behavior is not certified, we only support `abort`."
)]
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
};
// NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
// that gets resolved to the `#[panic_handler]` function.
unsafe extern "Rust" {
#[lang = "panic_impl"]
fn panic_impl(pi: &PanicInfo<'_>) -> !;
}
let pi = PanicInfo::new(
&fmt,
Location::caller(),
/* can_unwind */ true,
/* force_no_backtrace */ false,
);
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
unsafe { panic_impl(&pi) }
}
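For context, the `panic_impl` symbol that the extern block above resolves to is supplied by downstream code. A minimal sketch of how a `#![no_std]` binary typically provides it (illustrative crate skeleton, not part of `core`):

#![no_std]

use core::panic::PanicInfo;

// This function becomes the `panic_impl` lang item that `panic_fmt` declares and calls.
#[panic_handler]
fn panic(_info: &PanicInfo<'_>) -> ! {
    loop {}
}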
core::ptr::alignment::Alignment::new_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::const_ptr::<impl *const T>::add::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::const_ptr::<impl *const T>::guaranteed_eq pub const fn guaranteed_eq(self, other: *const T) -> Option<bool>
where
T: Sized,
{
match intrinsics::ptr_guaranteed_cmp(self, other) {
#[ferrocene::annotation(
"This cannot be reached in runtime code so it cannot be covered."
)]
2 => None,
other => Some(other == 1),
}
}
core::ptr::const_ptr::<impl *const T>::offset::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::copy::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::copy_nonoverlapping::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::mut_ptr::<impl *mut T>::add::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::mut_ptr::<impl *mut T>::offset::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::mut_ptr::<impl *mut T>::sub::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::non_null::NonNull::<T>::new_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::read::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::read_volatile::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::replace pub const unsafe fn replace<T>(dst: *mut T, src: T) -> T {
// SAFETY: the caller must guarantee that `dst` is valid to be
// cast to a mutable reference (valid for writes, aligned, initialized),
// and cannot overlap `src` since `dst` must point to a distinct
// allocation. We are excluding null (with a ZST check) before creating a reference.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::replace requires that the pointer argument is aligned and non-null",
(
addr: *const () = dst as *const (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
if T::IS_ZST {
// If `T` is a ZST, `dst` is allowed to be null. However, we also don't have to actually
// do anything since there isn't actually any data to be copied anyway. All values of
// type `T` are bit-identical, so we can just return `src` here.
return src;
}
mem::replace(&mut *dst, src)
}
}
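A minimal usage sketch of `ptr::replace` (hypothetical example): the old value comes back and the new one is written in place.

fn replace_example() {
    let mut slot = 1u32;
    // SAFETY: `&mut slot` is non-null, aligned, initialized, and cannot
    // overlap the by-value `src` argument.
    let old = unsafe { core::ptr::replace(&mut slot as *mut u32, 2) };
    assert_eq!((old, slot), (1, 2));
}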
core::ptr::replace::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::swap_nonoverlapping::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::write::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::write_bytes::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::ptr::write_volatile::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::result::Result::<T, E>::unwrap_err_unchecked pub unsafe fn unwrap_err_unchecked(self) -> E {
match self {
#[ferrocene::annotation(
"This line cannot be covered as reaching `unreachable_unchecked` is undefined behavior"
)]
// SAFETY: the safety contract must be upheld by the caller.
Ok(_) => unsafe { hint::unreachable_unchecked() },
Err(e) => e,
}
}
core::result::Result::<T, E>::unwrap_unchecked pub const unsafe fn unwrap_unchecked(self) -> T {
match self {
Ok(t) => t,
#[ferrocene::annotation(
"This line cannot be covered as reaching `unreachable_unchecked` is undefined behavior"
)]
Err(e) => {
// FIXME(const-hack): to avoid E: const Destruct bound
super::mem::forget(e);
// SAFETY: the safety contract must be upheld by the caller.
unsafe { hint::unreachable_unchecked() }
}
}
}
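A usage sketch covering both unchecked accessors (hypothetical example); in each case the caller's knowledge of the variant is what makes the other arm unreachable.

fn result_unchecked_example() {
    let ok: Result<u32, &str> = Ok(7);
    // SAFETY: `ok` is `Ok`, so the `Err` arm is unreachable.
    assert_eq!(unsafe { ok.unwrap_unchecked() }, 7);

    let err: Result<u32, &str> = Err("boom");
    // SAFETY: `err` is `Err`, so the `Ok` arm is unreachable.
    assert_eq!(unsafe { err.unwrap_err_unchecked() }, "boom");
}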
core::slice::<impl [T]>::align_to_offsets fn align_to_offsets<U>(&self) -> (usize, usize) {
        // What we do about `rest` is figure out the smallest multiple of `U`s that fits
        // in a whole number of `T`s, and how many `T`s we need for each such "multiple".
//
// Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
// for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
// place of every 3 Ts in the `rest` slice. A bit more complicated.
//
// Formula to calculate this is:
//
// Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
// Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
//
// Expanded and simplified:
//
// Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
// Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
//
// Luckily since all this is constant-evaluated... performance here matters not!
#[ferrocene::annotation(
"the only use of this function is in a const block, which means it cannot be reached during runtime"
)]
const fn gcd(a: usize, b: usize) -> usize {
if b == 0 { a } else { gcd(b, a % b) }
}
// Explicitly wrap the function call in a const block so it gets
// constant-evaluated even in debug mode.
let gcd: usize = const { gcd(size_of::<T>(), size_of::<U>()) };
let ts: usize = size_of::<U>() / gcd;
let us: usize = size_of::<T>() / gcd;
// Armed with this knowledge, we can find how many `U`s we can fit!
let us_len = self.len() / ts * us;
// And how many `T`s will be in the trailing slice!
let ts_len = self.len() % ts;
(us_len, ts_len)
}
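Plugging the sizes from the comment into those formulas makes the result concrete (illustrative sketch, not library code):

fn align_to_offsets_example() {
    const fn gcd(a: usize, b: usize) -> usize {
        if b == 0 { a } else { gcd(b, a % b) }
    }
    // size_of::<T>() = 16, size_of::<U>() = 24, as in the comment above.
    let g = gcd(16, 24); // 8
    let ts = 24 / g; // 3 `T`s hold ...
    let us = 16 / g; // ... exactly 2 `U`s
    assert_eq!((ts, us), (3, 2));
    // A slice of 10 `T`s then yields 10 / 3 * 2 = 6 `U`s, with 10 % 3 = 1 trailing `T`.
    assert_eq!((10 / ts * us, 10 % ts), (6, 1));
}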
core::slice::<impl [T]>::as_chunks_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::slice::<impl [T]>::split_at_mut_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::slice::<impl [T]>::split_at_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::slice::raw::from_raw_parts::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::slice::raw::from_raw_parts_mut::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::str::count::do_count_chars fn do_count_chars(s: &str) -> usize {
// For correctness, `CHUNK_SIZE` must be:
//
// - Less than or equal to 255, otherwise we'll overflow bytes in `counts`.
// - A multiple of `UNROLL_INNER`, otherwise our `break` inside the
// `body.chunks(CHUNK_SIZE)` loop is incorrect.
//
// For performance, `CHUNK_SIZE` should be:
// - Relatively cheap to `/` against (so some simple sum of powers of two).
// - Large enough to avoid paying for the cost of the `sum_bytes_in_usize`
// too often.
const CHUNK_SIZE: usize = 192;
// Check the properties of `CHUNK_SIZE` and `UNROLL_INNER` that are required
// for correctness.
const _: () = assert!(CHUNK_SIZE < 256);
const _: () = assert!(CHUNK_SIZE.is_multiple_of(UNROLL_INNER));
// SAFETY: transmuting `[u8]` to `[usize]` is safe except for size
// differences which are handled by `align_to`.
let (head, body, tail) = unsafe { s.as_bytes().align_to::<usize>() };
// This should be quite rare, and basically exists to handle the degenerate
// cases where align_to fails (as well as miri under symbolic alignment
// mode).
//
// The `unlikely` helps discourage LLVM from inlining the body, which is
// nice, as we would rather not mark the `char_count_general_case` function
// as cold.
if unlikely(body.is_empty() || head.len() > USIZE_SIZE || tail.len() > USIZE_SIZE) {
return char_count_general_case(s.as_bytes());
}
let mut total = char_count_general_case(head) + char_count_general_case(tail);
// Split `body` into `CHUNK_SIZE` chunks to reduce the frequency with which
// we call `sum_bytes_in_usize`.
for chunk in body.chunks(CHUNK_SIZE) {
// We accumulate intermediate sums in `counts`, where each byte contains
// a subset of the sum of this chunk, like a `[u8; size_of::<usize>()]`.
let mut counts = 0;
let (unrolled_chunks, remainder) = chunk.as_chunks::<UNROLL_INNER>();
for unrolled in unrolled_chunks {
for &word in unrolled {
// Because `CHUNK_SIZE` is < 256, this addition can't cause the
// count in any of the bytes to overflow into a subsequent byte.
counts += contains_non_continuation_byte(word);
}
}
// Sum the values in `counts` (which, again, is conceptually a `[u8;
// size_of::<usize>()]`), and accumulate the result into `total`.
total += sum_bytes_in_usize(counts);
// If there's any data in `remainder`, then handle it. This will only
// happen for the last `chunk` in `body.chunks()` (because `CHUNK_SIZE`
// is divisible by `UNROLL_INNER`), so we explicitly break at the end
// (which seems to help LLVM out).
if !remainder.is_empty() {
// Accumulate all the data in the remainder.
let mut counts = 0;
for &word in remainder {
counts += contains_non_continuation_byte(word);
}
total += sum_bytes_in_usize(counts);
break;
}
}
total
}
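The whole routine is an optimization of one observation: UTF-8 continuation bytes always match the bit pattern 10xxxxxx, so the character count is the number of bytes that do not match it. A scalar sketch of that invariant (illustrative, equivalent in spirit to the `char_count_general_case` fallback):

fn char_count_sketch(s: &str) -> usize {
    s.as_bytes()
        .iter()
        // Keep every byte that starts a character, i.e. is not 0b10xx_xxxx.
        .filter(|&&b| (b & 0b1100_0000) != 0b1000_0000)
        .count()
}

fn char_count_sketch_example() {
    assert_eq!(char_count_sketch("héllo"), 5); // 'é' is two bytes but one char
}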
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_unchecked::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_unchecked_mut::precondition_check #[ferrocene::annotation(
"This code cannot be covered because it causes an non-unwinding panic, which means it cannot be caught by any means in a test."
)]
const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
}
}
core::sync::atomic::Atomic::<bool>::compare_exchange pub fn compare_exchange(
&self,
current: bool,
new: bool,
success: Ordering,
failure: Ordering,
) -> Result<bool, bool> {
if EMULATE_ATOMIC_BOOL {
#[ferrocene::annotation(
"Cannot be covered as this code does not run in any of the platforms for which we track coverage"
)]
{
// Pick the strongest ordering from success and failure.
let order = match (success, failure) {
(SeqCst, _) => SeqCst,
(_, SeqCst) => SeqCst,
(AcqRel, _) => AcqRel,
(_, AcqRel) => {
panic!("there is no such thing as an acquire-release failure ordering")
}
(Release, Acquire) => AcqRel,
(Acquire, _) => Acquire,
(_, Acquire) => Acquire,
(Release, Relaxed) => Release,
(_, Release) => panic!("there is no such thing as a release failure ordering"),
(Relaxed, Relaxed) => Relaxed,
};
let old = if current == new {
// This is a no-op, but we still need to perform the operation
// for memory ordering reasons.
self.fetch_or(false, order)
} else {
// This sets the value to the new one and returns the old one.
self.swap(new, order)
};
if old == current { Ok(old) } else { Err(old) }
}
} else {
// SAFETY: data races are prevented by atomic intrinsics.
match unsafe {
atomic_compare_exchange(
self.v.get().cast::<u8>(),
current as u8,
new as u8,
success,
failure,
)
} {
Ok(x) => Ok(x != 0),
Err(x) => Err(x != 0),
}
}
}
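Observable behavior of `compare_exchange` from user code (hypothetical example): success returns the previous value in `Ok`, failure returns the observed value in `Err`.

fn compare_exchange_example() {
    use core::sync::atomic::{AtomicBool, Ordering};
    let flag = AtomicBool::new(false);
    // The current value matches `current`, so the store happens.
    assert_eq!(flag.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire), Ok(false));
    // The value is now `true`, so the exchange fails and reports what it saw.
    assert_eq!(flag.compare_exchange(false, true, Ordering::AcqRel, Ordering::Acquire), Err(true));
}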
core::sync::atomic::Atomic::<bool>::swap pub fn swap(&self, val: bool, order: Ordering) -> bool {
if EMULATE_ATOMIC_BOOL {
#[ferrocene::annotation(
"Cannot be covered as this code does not run in any of the platforms for which we track coverage"
)]
if val { self.fetch_or(true, order) } else { self.fetch_and(false, order) }
} else {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get().cast::<u8>(), val as u8, order) != 0 }
}
}
core::time::Duration::try_from_secs_f32 #[ferrocene::annotation("`mantissa_bits` cannot be covered due to a coverage tooling bug")]
pub fn try_from_secs_f32(secs: f32) -> Result<Duration, TryFromFloatSecsError> {
try_from_secs!(
secs = secs,
mantissa_bits = 23,
exponent_bits = 8,
offset = 41,
bits_ty = u32,
double_ty = u64,
)
}
<core::any::TypeId as core::cmp::PartialEq>::eq::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
<core::hint::select_unpredictable::DropOnPanic<T> as core::ops::drop::Drop>::drop fn drop(&mut self) {
// SAFETY: Must be guaranteed on construction of local type `DropOnPanic`.
unsafe { self.inner.drop_in_place() }
}
core::cell::panic_already_borrowed::do_panic::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::cell::panic_already_mutably_borrowed::do_panic::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::char::methods::encode_utf8_raw::do_panic::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::clone::impls::<impl core::clone::Clone for !>::clone #[ferrocene::annotation(
"This function cannot be executed because it is impossible to create a value of type `!`"
)]
fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for *const T>::clone #[ferrocene::annotation(
"This function is thoroughly tested inside the `test_clone` test in `coretests`. The fact that is shown as uncovered is a bug in our coverage tooling."
)]
fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for *mut T>::clone #[ferrocene::annotation(
"This function is thoroughly tested inside the `test_clone` test in `coretests`. The fact that is shown as uncovered is a bug in our coverage tooling."
)]
fn clone(&self) -> Self {
*self
}
core::ffi::c_str::CStr::from_bytes_with_nul_unchecked::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ffi::c_str::strlen::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::fmt::rt::Argument::<'_>::new_debug_noop #[ferrocene::annotation(
"This function is not publicly available in stable Rust. It is only available in nightly with the \"fmt_internals\" feature enabled. \
Also it is not emitted by the compiler unless the unstable compiler option `-Z fmt-debug=none` is passed to `rustc`. Unstable compiler options are not qualified, therefore this function will never be used in qualified code. \
See https://github.com/ferrocene/ferrocene/blob/205b3a16164939e0da17deb74285418b3d4991ae/compiler/rustc_ast_lowering/src/format.rs#L247."
)]
#[inline]
pub const fn new_debug_noop<T: Debug>(x: &T) -> Argument<'_> {
argument_new!(T, x, |_: &T, _| Ok(()))
}
core::intrinsics::assume #[ferrocene::annotation(
"Cannot be covered, since the purpose of the function is to never receive a `b` that is `false`, and if it does it will kill the process."
)]
pub const unsafe fn assume(b: bool) {
if !b {
// SAFETY: the caller must guarantee the argument is never `false`
unsafe { unreachable() }
}
}
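The stable surface over this intrinsic is `core::hint::assert_unchecked`; a minimal sketch of the intended use (hypothetical example):

fn div_assuming_nonzero(a: u32, b: u32) -> u32 {
    // SAFETY: the caller must guarantee `b != 0`; stating the assumption
    // lets the optimizer drop the division-by-zero check.
    unsafe { core::hint::assert_unchecked(b != 0) };
    a / b
}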
core::intrinsics::carrying_mul_add #[ferrocene::annotation(
"All calls to this function are replaced during code generation unless the target doesn't have this intrinsic. In the latter case, the body of this function remains unchanged, meaning that it calls `intrinsics::fallback::CarryingMulAdd::carrying_mul_add` which is thoroughly tested. The correctness of the code generation is tested in `tests/codegen-llvm/intrinsics/carrying_mul_add.rs`"
)]
#[unstable(feature = "core_intrinsics", issue = "none")]
#[rustc_const_unstable(feature = "const_carrying_mul_add", issue = "85532")]
#[rustc_nounwind]
#[rustc_intrinsic]
#[miri::intrinsic_fallback_is_spec]
pub const fn carrying_mul_add<T: [const] fallback::CarryingMulAdd<Unsigned = U>, U>(
multiplier: T,
multiplicand: T,
addend: T,
carry: T,
) -> (U, T) {
multiplier.carrying_mul_add(multiplicand, addend, carry)
}
core::intrinsics::carryless_mul #[ferrocene::annotation(
"All calls to this function are replaced during code generation unless the target doesn't have this intrinsic. In the latter case, the body of this function remains unchanged, meaning that it calls `intrinsics::fallback::CarryingMul::carryless_mul` which is thoroughly tested. "
)]
pub const fn carryless_mul<T: [const] fallback::CarrylessMul>(a: T, b: T) -> T {
a.carryless_mul(b)
}
core::intrinsics::cold_path #[ferrocene::annotation(
"All calls of this function are removed during code generation as this is only a hint used to do certain optimizations. The correctness of the code generation is tested in `tests/codegen-llvm/intrinsics/cold_path.rs`, `tests/codegen-llvm/intrinsics/cold_path2.rs` and `tests/codegen-llvm/intrinsics/cold_path3.rs`."
)]
#[rustc_intrinsic]
#[rustc_nounwind]
#[miri::intrinsic_fallback_is_spec]
#[cold]
pub const fn cold_path() {}
core::intrinsics::const_make_global #[ferrocene::annotation("This function is also a no-op at runtime, so we currently cannot cover it.")]
pub const unsafe fn const_make_global(ptr: *mut u8) -> *const u8 {
// const eval overrides this function; at runtime, it is a NOP.
ptr
}
core::intrinsics::disjoint_bitor #[ferrocene::annotation(
"All calls to this function are replaced during code generation unless the target doesn't have this intrinsic. In the latter case, the body of this function remains unchanged, meaning that it calls `intrinsics::fallback::DisjointBitOr::disjoint_bitor` which is thoroughly tested. The correctness of the code generation is tested in `tests/codegen-llvm/intrinsics/disjoint_bitor.rs`"
)]
pub const unsafe fn disjoint_bitor<T: [const] fallback::DisjointBitOr>(a: T, b: T) -> T {
// SAFETY: same preconditions as this function.
unsafe { fallback::DisjointBitOr::disjoint_bitor(a, b) }
}
core::intrinsics::is_val_statically_known #[ferrocene::annotation(
"All calls of this function are replaced during code generation, meaning that the code inside the function is never run. The correctness of the code generation is tested in `tests/codegen-llvm/is_val_statically_known.rs`"
)]
#[rustc_const_stable_indirect]
#[rustc_nounwind]
#[unstable(feature = "core_intrinsics", issue = "none")]
#[rustc_intrinsic]
pub const fn is_val_statically_known<T: Copy>(_arg: T) -> bool {
false
}
core::intrinsics::overflow_checks #[ferrocene::annotation(
"This function cannot trivially be tested since it depends on the build configuration. It was manually reviewed."
)]
pub const fn overflow_checks() -> bool {
cfg!(debug_assertions)
}
core::intrinsics::select_unpredictable #[ferrocene::annotation(
"All calls of this function are replaced during code generation, meaning that the code inside the function is never run. The correctness of the code generation is tested in `tests/codegen-llvm/intrinsics/select_unpredictable.rs`"
)]
#[unstable(feature = "core_intrinsics", issue = "none")]
#[rustc_const_unstable(feature = "const_select_unpredictable", issue = "145938")]
#[rustc_intrinsic]
#[rustc_nounwind]
#[miri::intrinsic_fallback_is_spec]
#[inline]
pub const fn select_unpredictable<T>(b: bool, true_val: T, false_val: T) -> T {
if b {
forget(false_val);
true_val
} else {
forget(true_val);
false_val
}
}
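The user-facing wrapper is `core::hint::select_unpredictable` (assuming a toolchain where it is available); both values are evaluated eagerly, and the hint encourages a conditional move rather than a branch:

fn branchless_max(a: u32, b: u32) -> u32 {
    // Hypothetical example: with an unpredictable comparison, a cmov-style
    // selection usually beats a mispredicted branch.
    core::hint::select_unpredictable(a > b, a, b)
}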
core::intrinsics::type_id_eq #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn type_id_eq(a: crate::any::TypeId, b: crate::any::TypeId) -> bool {
a.data == b.data
}
core::intrinsics::ub_checks #[ferrocene::annotation(
"This function is always used in `assert_unsafe_precondition` which produces an unwinding panic, meaning that we cannot cover it."
)]
pub const fn ub_checks() -> bool {
cfg!(ub_checks)
}
core::intrinsics::unchecked_funnel_shl #[ferrocene::annotation(
"All calls to this function are replaced during code generation unless the target doesn't have this intrinsic. In the latter case, the body of this function remains unchanged, meaning that it calls `intrinsics::fallback::FunnelShift::unchecked_funnel_shl` which is thoroughly tested. The correctness of the code generation is tested in `tests/codegen-llvm/intrinsics/rotate_left.rs`"
)]
pub const unsafe fn unchecked_funnel_shl<T: [const] fallback::FunnelShift>(
a: T,
b: T,
shift: u32,
) -> T {
// SAFETY: caller ensures that `shift` is in-range
unsafe { a.unchecked_funnel_shl(b, shift) }
}
core::intrinsics::unchecked_funnel_shr #[ferrocene::annotation(
"All calls to this function are replaced during code generation unless the target doesn't have this intrinsic. In the latter case, the body of this function remains unchanged, meaning that it calls `intrinsics::fallback::FunnelShift::unchecked_funnel_shr` which is thoroughly tested. "
)]
pub const unsafe fn unchecked_funnel_shr<T: [const] fallback::FunnelShift>(
a: T,
b: T,
shift: u32,
) -> T {
// SAFETY: caller ensures that `shift` is in-range
unsafe { a.unchecked_funnel_shr(b, shift) }
}
core::mem::conjure_zst::do_panic::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::num::<impl usize>::repeat_u16 #[ferrocene::annotation("This function is only used in constants and cannot be covered")]
pub(crate) const fn repeat_u16(x: u16) -> usize {
let mut r = 0usize;
let mut i = 0;
while i < size_of::<usize>() {
// Use `wrapping_shl` to make it work on targets with 16-bit `usize`
r = r.wrapping_shl(16) | (x as usize);
i += 2;
}
r
}
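On a 64-bit target the loop iterates four times, so the 16-bit pattern is repeated across the whole word. An illustrative re-statement (`repeat_u16` itself is crate-private, so the loop is copied here):

const fn repeat_u16_sketch(x: u16) -> usize {
    let mut r = 0usize;
    let mut i = 0;
    while i < core::mem::size_of::<usize>() {
        r = r.wrapping_shl(16) | (x as usize);
        i += 2;
    }
    r
}

#[cfg(target_pointer_width = "64")]
fn repeat_u16_example() {
    assert_eq!(repeat_u16_sketch(0x00FF), 0x00FF_00FF_00FF_00FF);
}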
core::num::dec2flt::float::pow2_to_pow10 #[ferrocene::annotation("This function is only used in constants and cannot be covered")]
const fn pow2_to_pow10(a: i64) -> i64 {
let res = (a as f64) / f64::consts::LOG2_10;
res as i64
}
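This is the usual change-of-base estimate: since 2^a = 10^(a * log10(2)), dividing `a` by log2(10) yields the decimal exponent. An illustrative check:

fn pow2_to_pow10_example() {
    // 2^10 = 1024, roughly 10^3: 10 / log2(10) = 3.01..., truncated to 3.
    let a = 10f64;
    assert_eq!((a / core::f64::consts::LOG2_10) as i64, 3);
}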
core::num::from_ascii_radix_panic::do_panic::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::panicking::panic_cannot_unwind #[ferrocene::annotation("Cannot be covered as it causes a non-unwinding panic")]
fn panic_cannot_unwind() -> ! {
// Keep the text in sync with `UnwindTerminateReason::as_str` in `rustc_middle`.
panic_nounwind("panic in a function that cannot unwind")
}
core::panicking::panic_const::panic_const_add_overflow #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_async_fn_resumed #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_async_fn_resumed_drop #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_async_fn_resumed_panic #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_async_gen_fn_resumed #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_async_gen_fn_resumed_drop #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_async_gen_fn_resumed_panic #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_coroutine_resumed #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_coroutine_resumed_drop #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_coroutine_resumed_panic #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_div_by_zero #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_div_overflow #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_gen_fn_none #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_gen_fn_none_drop #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_gen_fn_none_panic #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_mul_overflow #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_neg_overflow #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_rem_by_zero #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_rem_overflow #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_shl_overflow #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_shr_overflow #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_const::panic_const_sub_overflow #[ferrocene::annotation("Cannot be covered as this code cannot be reached during runtime.")]
pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
panic_fmt(fmt::Arguments::from_str($message));
}
core::panicking::panic_in_cleanup #[ferrocene::annotation("Cannot be covered as it causes a non-unwinding panic")]
fn panic_in_cleanup() -> ! {
// Keep the text in sync with `UnwindTerminateReason::as_str` in `rustc_middle`.
panic_nounwind_nobacktrace("panic in a destructor during cleanup")
}
core::panicking::panic_misaligned_pointer_dereference #[ferrocene::annotation("Cannot be covered as it causes a non-unwinding panic")]
fn panic_misaligned_pointer_dereference(required: usize, found: usize) -> ! {
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
}
panic_nounwind_fmt(
format_args!(
"misaligned pointer dereference: address must be a multiple of {required:#x} but is {found:#x}"
),
/* force_no_backtrace */ false,
);
}
core::panicking::panic_nounwind #[ferrocene::annotation("Cannot be covered as it causes a non-unwinding panic")]
pub const fn panic_nounwind(expr: &'static str) -> ! {
panic_nounwind_fmt(fmt::Arguments::from_str(expr), /* force_no_backtrace */ false);
}
core::panicking::panic_nounwind_fmt #[ferrocene::annotation("Cannot be covered as it causes a non-unwinding panic")]
pub const fn panic_nounwind_fmt(fmt: fmt::Arguments<'_>, _force_no_backtrace: bool) -> ! {
const_eval_select!(
@capture { fmt: fmt::Arguments<'_>, _force_no_backtrace: bool } -> !:
if const #[track_caller] {
// We don't unwind anyway at compile-time so we can call the regular `panic_fmt`.
panic_fmt(fmt)
} else #[track_caller] {
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
}
// NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
// that gets resolved to the `#[panic_handler]` function.
unsafe extern "Rust" {
#[lang = "panic_impl"]
fn panic_impl(pi: &PanicInfo<'_>) -> !;
}
// PanicInfo with the `can_unwind` flag set to false forces an abort.
let pi = PanicInfo::new(
&fmt,
Location::caller(),
/* can_unwind */ false,
_force_no_backtrace,
);
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
unsafe { panic_impl(&pi) }
}
)
}
core::panicking::panic_nounwind_fmt::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::panicking::panic_nounwind_nobacktrace #[ferrocene::annotation("Cannot be covered as it causes a non-unwinding panic")]
pub fn panic_nounwind_nobacktrace(expr: &'static str) -> ! {
panic_nounwind_fmt(fmt::Arguments::from_str(expr), /* force_no_backtrace */ true);
}
core::panicking::panic_null_pointer_dereference #[ferrocene::annotation("Cannot be covered as it causes a non-unwinding panic")]
fn panic_null_pointer_dereference() -> ! {
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
}
panic_nounwind_fmt(
fmt::Arguments::from_str("null pointer dereference occurred"),
/* force_no_backtrace */ false,
)
}
core::panicking::panic_str_2015 pub const fn panic_str_2015(expr: &str) -> ! {
panic_display(&expr);
}
core::panicking::unreachable_display pub fn unreachable_display<T: fmt::Display>(x: &T) -> ! {
panic_fmt(format_args!("internal error: entered unreachable code: {}", *x));
}
core::profiling::compiler_copy #[ferrocene::annotation(
"This function cannot be covered as it is never called at runtime, see documentation"
)]
pub fn compiler_copy<T, const SIZE: usize>(_src: *const T, _dst: *mut T) {
unreachable!(
"compiler_copy marks where the compiler-generated a memcpy for Copies. It is never actually called."
)
}
core::profiling::compiler_move #[ferrocene::annotation(
"This function cannot be covered as it is never called at runtime, see documentation"
)]
pub fn compiler_move<T, const SIZE: usize>(_src: *const T, _dst: *mut T) {
unreachable!(
"compiler_move marks where the compiler-generated a memcpy for moves. It is never actually called."
)
}
core::ptr::const_ptr::<impl *const T>::add::runtime_add_nowrap::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::const_ptr::<impl *const T>::is_null::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::const_ptr::<impl *const T>::offset::runtime_offset_nowrap::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned::runtime_ptr_ge::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::mut_ptr::<impl *mut T>::add::runtime_add_nowrap::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::mut_ptr::<impl *mut T>::offset::runtime_offset_nowrap::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::mut_ptr::<impl *mut T>::sub::runtime_sub_nowrap::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::swap_nonoverlapping::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::swap_nonoverlapping_const #[ferrocene::annotation(
"This function is only called inside `swap_nonoverlapping` as the `const` arm of a `const_eval_select!` so it cannot be covered"
)]
#[inline]
const unsafe fn swap_nonoverlapping_const<T>(x: *mut T, y: *mut T, count: usize) {
let mut i = 0;
while i < count {
        // SAFETY: By precondition, `i` is in-bounds because it's below `count`
let x = unsafe { x.add(i) };
        // SAFETY: By precondition, `i` is in-bounds because it's below `count`,
        // and it's distinct from `x` since the ranges are non-overlapping
let y = unsafe { y.add(i) };
// SAFETY: we're only ever given pointers that are valid to read/write,
// including being aligned, and nothing here panics so it's drop-safe.
unsafe {
// Note that it's critical that these use `copy_nonoverlapping`,
// rather than `read`/`write`, to avoid #134713 if T has padding.
let mut temp = MaybeUninit::<T>::uninit();
copy_nonoverlapping(x, temp.as_mut_ptr(), 1);
copy_nonoverlapping(y, x, 1);
copy_nonoverlapping(temp.as_ptr(), y, 1);
}
i += 1;
}
}
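The same effect, observed from a safe call site (hypothetical example); the `const` helper above is the loop form of this, routed through a `MaybeUninit` temporary with `copy_nonoverlapping` so that padding bytes are never read as a typed value:

fn swap_nonoverlapping_example() {
    let mut a = [1u8, 2, 3];
    let mut b = [4u8, 5, 6];
    // SAFETY: two distinct locals, so the buffers are valid, aligned, and
    // non-overlapping for 3 elements each.
    unsafe { core::ptr::swap_nonoverlapping(a.as_mut_ptr(), b.as_mut_ptr(), 3) };
    assert_eq!((a, b), ([4, 5, 6], [1, 2, 3]));
}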
core::slice::<impl [T]>::align_to_offsets::gcd #[ferrocene::annotation(
"the only use of this function is in a const block, which means it cannot be reached during runtime"
)]
const fn gcd(a: usize, b: usize) -> usize {
if b == 0 { a } else { gcd(b, a % b) }
}
core::slice::<impl [T]>::len #[ferrocene::annotation(
"this function is guaranteed to be constant-evaluated as the size of arrays is always available at compilation"
)]
pub const fn len(&self) -> usize {
ptr::metadata(self)
}
core::slice::ascii::is_ascii::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::slice::copy_from_slice_impl::len_mismatch_fail::do_panic::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::slice::index::slice_index_fail::do_panic::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::slice::memchr::memchr_aligned::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::str::slice_error_fail_ct #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn slice_error_fail_ct(_: &str, _: usize, _: usize) -> ! {
panic!("failed to slice string");
}
core::str::validations::run_utf8_validation::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ub_checks::check_language_ub::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ub_checks::maybe_is_aligned::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ub_checks::maybe_is_nonoverlapping::compiletime #[ferrocene::annotation("Cannot be covered as this only runs during compilation.")]
const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::unicode::unicode_data::ShortOffsetRunHeader::new #[ferrocene::annotation(
"The only uses of this function are inside statics without any inner mutability. Meaning that they can't be covered"
)]
const fn new(start_index: usize, prefix_sum: u32) -> Self {
assert!(start_index < (1 << 11));
assert!(prefix_sum < (1 << 21));
Self((start_index as u32) << 21 | prefix_sum)
}
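The constructor packs an 11-bit start index and a 21-bit prefix sum into a single u32 (11 + 21 = 32), which is what the two asserts enforce. A worked packing (illustrative):

fn short_offset_run_header_example() {
    let (start_index, prefix_sum) = (2u32, 5u32);
    let packed = (start_index << 21) | prefix_sum;
    assert_eq!(packed, 0x40_0005); // 2 * 2^21 + 5
    // Unpacking recovers both fields.
    assert_eq!(packed >> 21, start_index);
    assert_eq!(packed & ((1 << 21) - 1), prefix_sum);
}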
<&'b str as core::str::pattern::Pattern>::into_searcher fn into_searcher(self, haystack: &str) -> StrSearcher<'_, 'b> {
StrSearcher::new(haystack, self)
}
<&'b str as core::str::pattern::Pattern>::is_prefix_of fn is_prefix_of(self, haystack: &str) -> bool {
haystack.as_bytes().starts_with(self.as_bytes())
}
<&'b str as core::str::pattern::Pattern>::is_suffix_of fn is_suffix_of<'a>(self, haystack: &'a str) -> bool
where
Self::Searcher<'a>: ReverseSearcher<'a>,
{
haystack.as_bytes().ends_with(self.as_bytes())
}
<&T as core::borrow::Borrow<T>>::borrow fn borrow(&self) -> &T {
self
}
<&T as core::convert::AsRef<U>>::as_ref fn as_ref(&self) -> &U {
<T as AsRef<U>>::as_ref(*self)
}
<&T as core::fmt::Binary>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&T as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&T as core::fmt::Display>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&T as core::fmt::LowerExp>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&T as core::fmt::LowerHex>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&T as core::fmt::Octal>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&T as core::fmt::Pointer>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Pointer::fmt(&(*self as *const T), f)
}
<&T as core::fmt::UpperExp>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&T as core::fmt::UpperHex>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&T as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
self
}
<&bool as core::ops::bit::BitAnd<&bool>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&bool as core::ops::bit::BitAnd<bool>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&bool as core::ops::bit::BitOr<&bool>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&bool as core::ops::bit::BitOr<bool>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&bool as core::ops::bit::BitXor<&bool>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&bool as core::ops::bit::BitXor<bool>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&bool as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f128 as core::ops::arith::Add<&f128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Add<f128>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Div<&f128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Div<f128>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Mul<&f128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Mul<f128>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f128 as core::ops::arith::Rem<&f128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Rem<f128>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Sub<&f128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Sub<f128>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Add<&f16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Add<f16>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Div<&f16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Div<f16>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Mul<&f16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Mul<f16>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f16 as core::ops::arith::Rem<&f16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Rem<f16>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Sub<&f16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Sub<f16>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Add<&f32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Add<f32>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Div<&f32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Div<f32>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Mul<&f32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Mul<f32>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f32 as core::ops::arith::Rem<&f32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Rem<f32>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Sub<&f32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Sub<f32>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Add<&f64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Add<f64>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Div<&f64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Div<f64>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Mul<&f64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Mul<f64>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f64 as core::ops::arith::Rem<&f64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Rem<f64>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Sub<&f64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Sub<f64>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Add<&i128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Add<i128>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Div<&i128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Div<i128>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Mul<&i128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Mul<i128>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i128 as core::ops::arith::Rem<&i128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Rem<i128>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Sub<&i128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Sub<i128>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::BitAnd<&i128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::BitAnd<i128>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::BitOr<&i128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::BitOr<i128>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::BitXor<&i128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::BitXor<i128>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Add<&i16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Add<i16>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Div<&i16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Div<i16>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Mul<&i16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Mul<i16>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i16 as core::ops::arith::Rem<&i16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Rem<i16>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Sub<&i16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Sub<i16>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::BitAnd<&i16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::BitAnd<i16>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::BitOr<&i16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::BitOr<i16>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::BitXor<&i16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::BitXor<i16>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Add<&i32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Add<i32>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Div<&i32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Div<i32>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Mul<&i32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Mul<i32>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i32 as core::ops::arith::Rem<&i32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Rem<i32>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Sub<&i32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Sub<i32>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::BitAnd<&i32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::BitAnd<i32>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::BitOr<&i32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::BitOr<i32>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::BitXor<&i32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::BitXor<i32>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Add<&i64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Add<i64>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Div<&i64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Div<i64>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Mul<&i64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Mul<i64>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i64 as core::ops::arith::Rem<&i64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Rem<i64>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Sub<&i64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Sub<i64>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::BitAnd<&i64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::BitAnd<i64>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::BitOr<&i64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::BitOr<i64>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::BitXor<&i64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::BitXor<i64>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Add<&i8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Add<i8>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Div<&i8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Div<i8>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Mul<&i8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Mul<i8>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i8 as core::ops::arith::Rem<&i8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Rem<i8>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Sub<&i8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Sub<i8>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::BitAnd<&i8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::BitAnd<i8>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::BitOr<&i8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::BitOr<i8>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::BitXor<&i8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::BitXor<i8>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Add<&isize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Add<isize>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Div<&isize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Div<isize>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Mul<&isize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Mul<isize>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&isize as core::ops::arith::Rem<&isize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Rem<isize>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Sub<&isize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Sub<isize>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::BitAnd<&isize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::BitAnd<isize>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::BitOr<&isize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::BitOr<isize>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::BitXor<&isize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::BitXor<isize>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&isize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&mut I as core::iter::traits::exact_size::ExactSizeIterator>::is_empty fn is_empty(&self) -> bool {
(**self).is_empty()
}
<&mut I as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
(**self).len()
}
<&mut I as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
(**self).advance_by(n)
}
<&mut I as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<I::Item> {
(**self).next()
}
<&mut I as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
(**self).nth(n)
}
<&mut I as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
(**self).size_hint()
}
<&mut I as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.spec_try_fold(init, f)
}
<&mut I as core::iter::traits::iterator::IteratorRefSpec>::spec_try_fold default fn spec_try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
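A brief usage note on the `&mut I` forwarding impls above (a sketch of ordinary standard-library usage, not part of the listing): because a mutable reference to an iterator is itself an `Iterator`, an adapter can borrow the iterator without consuming it.

fn main() {
    let mut it = [1, 2, 3, 4].into_iter();
    // `take(2)` consumes its receiver, but the receiver here is `&mut it`;
    // its `next`/`size_hint`/`try_fold` all forward to `it` itself.
    let front: Vec<i32> = (&mut it).take(2).collect();
    assert_eq!(front, [1, 2]);
    assert_eq!(it.next(), Some(3)); // `it` remains usable afterwards
}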
<&mut T as core::borrow::Borrow<T>>::borrow fn borrow(&self) -> &T {
self
}
<&mut T as core::borrow::BorrowMut<T>>::borrow_mut fn borrow_mut(&mut self) -> &mut T {
self
}
<&mut T as core::convert::AsMut<U>>::as_mut fn as_mut(&mut self) -> &mut U {
(*self).as_mut()
}
<&mut T as core::convert::AsRef<U>>::as_ref fn as_ref(&self) -> &U {
<T as AsRef<U>>::as_ref(*self)
}
<&mut T as core::fmt::Binary>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&mut T as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&mut T as core::fmt::Display>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&mut T as core::fmt::LowerExp>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&mut T as core::fmt::LowerHex>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&mut T as core::fmt::Octal>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&mut T as core::fmt::Pointer>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Pointer::fmt(&(&**self as *const T), f)
}
<&mut T as core::fmt::UpperExp>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&mut T as core::fmt::UpperHex>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result { $tr::fmt(&**self, f) }
<&mut T as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
self
}
<&mut T as core::ops::deref::DerefMut>::deref_mut fn deref_mut(&mut self) -> &mut T {
self
}
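Editor's note: taken together, the `Borrow`, `AsRef`/`AsMut`, and `Deref`/`DerefMut` impls above let a `&mut T` stand in wherever a view of `T` is expected. A small sketch (standard library only):

fn str_len(s: impl AsRef<str>) -> usize {
    s.as_ref().len()
}

let mut owned = String::from("hello");
let exclusive: &mut String = &mut owned;
// `&mut String: AsRef<str>` via the forwarding impl, since `String: AsRef<str>`;
// `Deref`/`DerefMut` similarly make `String` methods callable through `exclusive`.
assert_eq!(str_len(exclusive), 5);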
<&mut W as core::fmt::Write::write_fmt::SpecWriteFmt>::spec_write_fmt default fn spec_write_fmt(mut self, args: Arguments<'_>) -> Result {
if let Some(s) = args.as_statically_known_str() {
self.write_str(s)
} else {
write(&mut self, args)
}
}
<&mut W as core::fmt::Write::write_fmt::SpecWriteFmt>::spec_write_fmt fn spec_write_fmt(self, args: Arguments<'_>) -> Result {
if let Some(s) = args.as_statically_known_str() {
self.write_str(s)
} else {
write(self, args)
}
}
<&mut W as core::fmt::Write>::write_char fn write_char(&mut self, c: char) -> Result {
(**self).write_char(c)
}
<&mut W as core::fmt::Write>::write_fmt fn write_fmt(&mut self, args: Arguments<'_>) -> Result {
(**self).write_fmt(args)
}
<&mut W as core::fmt::Write>::write_str fn write_str(&mut self, s: &str) -> Result {
(**self).write_str(s)
}
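Editor's note: the `Write` forwarding impls above are what let `write!` take a writer by mutable reference without giving it up. A self-contained sketch with a hypothetical `Counter` writer (not part of this listing):

use core::fmt::{self, Write};

struct Counter(usize);

impl Write for Counter {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.0 += s.len();
        Ok(())
    }
}

fn main() -> fmt::Result {
    let mut c = Counter(0);
    // `&mut Counter` is itself a `Write` thanks to the forwarding impls,
    // so `c` remains usable after the call.
    write!(&mut c, "{}-{}", 12, 34)?;
    assert_eq!(c.0, 5); // "12-34"
    Ok(())
}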
<&mut core::array::drain::Drain<'_, '_, T, N, F> as core::ops::function::FnMut<(usize,)>>::call_mut extern "rust-call" fn call_mut(
&mut self,
(_ /* ignore argument */,): (usize,),
) -> Self::Output {
if T::IS_ZST {
// it's UB to call this more than N times, so returning more ZSTs is valid.
// SAFETY: it's a ZST, so we can conjure a value out of thin air.
(self.f)(unsafe { conjure_zst::<T>() })
} else {
// advance the pointer before reading, so that if `f` panics the remaining elements are still dropped.
let p = self.ptr;
// SAFETY: caller guarantees never called more than N times (see `Drain::new`)
self.ptr = unsafe { self.ptr.add(1) };
// SAFETY: we are allowed to move this.
(self.f)(unsafe { p.read() })
}
}
<&mut core::array::drain::Drain<'_, '_, T, N, F> as core::ops::function::FnOnce<(usize,)>>::call_once extern "rust-call" fn call_once(mut self, args: (usize,)) -> Self::Output {
self.call_mut(args)
}
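Editor's note: `Drain` here is an internal adapter for consuming an array's elements by value; exactly which public APIs route through it is an implementation detail of `core`. A hedged sketch of the user-visible behavior it supports (`<[T; N]>::map` is one plausible consumer):

let doubled = [1u32, 2, 3].map(|x| x * 2);
assert_eq!(doubled, [2, 4, 6]);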
<&u128 as core::ops::arith::Add<&u128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Add<u128>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Div<&u128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Div<u128>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Mul<&u128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Mul<u128>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Rem<&u128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Rem<u128>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Sub<&u128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Sub<u128>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
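Editor's note: these `&u128` forwarding impls exist so arithmetic works uniformly over references, for example when operating on borrowed values from an iterator without copying out first; the same macro expands for each integer width, so the `u16` through `usize` blocks below are identical modulo the type. A minimal sketch:

let a: u128 = 7;
let b: u128 = 5;
assert_eq!(&a + &b, 12); // <&u128 as Add<&u128>>::add
assert_eq!(&a - b, 2);   // <&u128 as Sub<u128>>::sub
assert_eq!(&a * &b, 35);
assert_eq!(&a % b, 2);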
<&u128 as core::ops::bit::BitAnd<&u128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::BitAnd<u128>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::BitOr<&u128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::BitOr<u128>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::BitXor<&u128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::BitXor<u128>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
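Editor's note: shifts are the one binary operation whose right-hand side may be any integer type, which is why the listing enumerates `Shl`/`Shr` for every combination of value and reference operands. A brief sketch:

let x: u128 = 1;
assert_eq!(&x << 8u8, 256);
assert_eq!(&x << 8i64, 256);
assert_eq!((&x << 9usize) >> &1u32, 256);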
<&u16 as core::ops::arith::Add<&u16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Add<u16>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Div<&u16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Div<u16>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Mul<&u16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Mul<u16>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Rem<&u16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Rem<u16>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Sub<&u16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Sub<u16>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::BitAnd<&u16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::BitAnd<u16>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::BitOr<&u16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::BitOr<u16>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::BitXor<&u16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::BitXor<u16>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Add<&u32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Add<u32>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Div<&u32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Div<u32>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Mul<&u32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Mul<u32>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Rem<&u32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Rem<u32>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Sub<&u32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Sub<u32>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::BitAnd<&u32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::BitAnd<u32>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::BitOr<&u32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::BitOr<u32>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::BitXor<&u32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::BitXor<u32>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Add<&u64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Add<u64>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Div<&u64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Div<u64>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Mul<&u64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Mul<u64>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Rem<&u64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Rem<u64>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Sub<&u64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Sub<u64>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::BitAnd<&u64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::BitAnd<u64>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::BitOr<&u64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::BitOr<u64>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::BitXor<&u64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::BitXor<u64>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Add<&u8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Add<u8>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Div<&u8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Div<u8>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Mul<&u8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Mul<u8>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Rem<&u8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Rem<u8>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Sub<&u8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Sub<u8>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::BitAnd<&u8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::BitAnd<u8>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::BitOr<&u8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::BitOr<u8>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::BitXor<&u8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::BitXor<u8>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Add<&usize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Add<usize>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Div<&usize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Div<usize>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Mul<&usize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Mul<usize>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Rem<&usize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Rem<usize>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Sub<&usize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Sub<usize>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::BitAnd<&usize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::BitAnd<usize>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::BitOr<&usize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::BitOr<usize>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::BitXor<&usize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::BitXor<usize>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&usize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<() as core::default::Default>::default fn default() -> $t {
$v
}
<() as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.pad("()")
}
<() as core::unit::IsUnit>::is_unit fn is_unit() -> bool {
true
}
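Editor's note: `()` participates in generic code like any other type, with a `Default` (the unit value itself) and a `Debug` form; the internal `IsUnit` helper above is consulted by the raw-pointer `Pointer` impl later in this listing (see `<*const T as core::fmt::Pointer>::fmt`). A tiny sketch:

let u: () = Default::default();
assert_eq!(format!("{:?}", u), "()");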
<(A, Z, Y, X, W, V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(B, A, Z, Y, X, W, V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(C, B, A, Z, Y, X, W, V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(D, C, B, A, Z, Y, X, W, V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(E, D, C, B, A, Z, Y, X, W, V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(T,) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(W, V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(X, W, V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(Y, X, W, V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
<(Z, Y, X, W, V, U, T) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut builder = f.debug_tuple("");
let ($(ref $name,)+) = *self;
$(
builder.field(&$name);
)+
builder.finish()
}
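Editor's note: all the tuple `Debug` impls above come from one macro, instantiated per arity; fields print in declaration order inside an unnamed tuple. For example:

assert_eq!(format!("{:?}", (1, "two", 3.0)), r#"(1, "two", 3.0)"#);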
<(core::ops::range::Bound<T>, core::ops::range::Bound<T>) as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
self
}
<(core::ops::range::Bound<T>, core::ops::range::Bound<T>) as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
match *self {
(_, Included(ref end)) => Included(end),
(_, Excluded(ref end)) => Excluded(end),
(_, Unbounded) => Unbounded,
}
}
<(core::ops::range::Bound<T>, core::ops::range::Bound<T>) as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
match *self {
(Included(ref start), _) => Included(start),
(Excluded(ref start), _) => Excluded(start),
(Unbounded, _) => Unbounded,
}
}
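Editor's note: a `(Bound<T>, Bound<T>)` pair is the fully general range, useful when the kind of bound is only known at run time. A short sketch:

use core::ops::{Bound, RangeBounds};

let r = (Bound::Excluded(2), Bound::Included(5));
assert!(r.contains(&3));
assert!(!r.contains(&2)); // the start is excluded
assert_eq!(r.start_bound(), Bound::Excluded(&2));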
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&Self::Output> {
try_into_slice_range(slice.len(), self)?.get(slice)
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut Self::Output> {
try_into_slice_range(slice.len(), self)?.get_mut(slice)
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { into_range_unchecked(slice.len(), self).get_unchecked(slice) }
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { into_range_unchecked(slice.len(), self).get_unchecked_mut(slice) }
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &Self::Output {
into_slice_range(slice.len(), self).index(slice)
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut Self::Output {
into_slice_range(slice.len(), self).index_mut(slice)
}
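// [Editorial example] A hedged sketch, not part of the core listing: a
// `(Bound<usize>, Bound<usize>)` pair indexes a slice like a range by first
// converting itself via `into_slice_range`. The function name below is ours,
// chosen for illustration.
fn example_bound_pair_index() {
    use core::ops::Bound;
    let v = [10, 20, 30, 40];
    // Excluded start 0 with included end 2 selects indices 1..=2.
    assert_eq!(&v[(Bound::Excluded(0), Bound::Included(2))], &[20, 30]);
    // `get` returns `None` instead of panicking for out-of-range bounds.
    assert_eq!(v.get((Bound::Unbounded, Bound::Excluded(1))), Some(&[10][..]));
    assert_eq!(v.get((Bound::Included(3), Bound::Included(9))), None);
}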
<(dyn core::any::Any + 'static) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Any").finish_non_exhaustive()
}
<(dyn core::any::Any + core::marker::Send + 'static) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Any").finish_non_exhaustive()
}
<(dyn core::any::Any + core::marker::Send + core::marker::Sync + 'static) as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Any").finish_non_exhaustive()
}
<*const T as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Pointer::fmt(self, f)
}
<*const T as core::fmt::Pointer>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
if <<T as core::ptr::Pointee>::Metadata as core::unit::IsUnit>::is_unit() {
pointer_fmt_inner(self.expose_provenance(), f)
} else {
f.debug_struct("Pointer")
.field_with("addr", |f| pointer_fmt_inner(self.expose_provenance(), f))
.field("metadata", &core::ptr::metadata(*self))
.finish()
}
}
<*mut T as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Pointer::fmt(self, f)
}
<*mut T as core::fmt::Pointer>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Pointer::fmt(&(*self as *const T), f)
}
<A as core::iter::traits::iterator::SpecIterEq<B>>::spec_iter_eq default fn spec_iter_eq<F>(self, b: B, f: F) -> bool
where
F: FnMut(Self::Item, <B as Iterator>::Item) -> ControlFlow<()>,
{
iter_eq(self, b, f)
}
<A as core::iter::traits::iterator::SpecIterEq<B>>::spec_iter_eq fn spec_iter_eq<F>(self, b: B, f: F) -> bool
where
F: FnMut(Self::Item, <B as Iterator>::Item) -> ControlFlow<()>,
{
// we *can't* short-circuit if:
match (self.size_hint(), b.size_hint()) {
// ... both iterators have the same length
((_, Some(a)), (_, Some(b))) if a == b => {}
// ... or both of them are longer than `usize::MAX` (i.e. have an unknown length).
((_, None), (_, None)) => {}
// otherwise, we can ascertain that they are unequal without actually comparing items
_ => return false,
}
iter_eq(self, b, f)
}
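// [Editorial example] Hedged sketch, not part of the core listing: when both
// sides report an exact upper bound, `Iterator::eq` can return `false` from
// the size hints alone, without comparing a single item.
fn example_iter_eq_short_circuit() {
    let a = [1, 2, 3];
    let b = [1, 2, 3, 4];
    // Both `slice::Iter`s are exact-size with different lengths, so the
    // specialization bails out before looking at any elements.
    assert!(!a.iter().eq(b.iter()));
    assert!(a.iter().eq([1, 2, 3].iter()));
}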
<A as core::slice::cmp::SlicePartialEq<B>>::equal_same_length default unsafe fn equal_same_length(lhs: *const Self, rhs: *const B, len: usize) -> bool {
// Implemented as explicit indexing rather
// than zipped iterators for performance reasons.
// See PR https://github.com/rust-lang/rust/pull/116846
// FIXME(const_hack): make this a `for idx in 0..len` loop.
let mut idx = 0;
while idx < len {
// SAFETY: idx < len, so both are in-bounds and readable
if unsafe { *lhs.add(idx) != *rhs.add(idx) } {
return false;
}
idx += 1;
}
true
}
<A as core::slice::cmp::SlicePartialEq<B>>::equal_same_length unsafe fn equal_same_length(lhs: *const Self, rhs: *const B, len: usize) -> bool {
// SAFETY: by our precondition, `lhs` and `rhs` are guaranteed to be valid
        // for reading `len` values, which also means the total size in bytes
        // cannot overflow, because a buffer of that size exists in memory.
unsafe {
let size = crate::intrinsics::unchecked_mul(len, Self::SIZE);
compare_bytes(lhs as _, rhs as _, size) == 0
}
}
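// [Editorial example] Hedged sketch, not part of the core listing: for element
// types with byte-wise equality (such as `u8`), slice comparison takes the
// `compare_bytes` path, i.e. one bulk memcmp-style call rather than an
// element-by-element loop.
fn example_slice_eq() {
    let a: &[u8] = b"hello";
    let b: &[u8] = b"hello";
    // Lengths match, so equality is decided by a single byte comparison.
    assert!(a == b);
    assert!(a != b"help!".as_slice());
}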
<I as core::iter::adapters::filter::SpecAssumeCount>::assume_count_le_upper_bound default unsafe fn assume_count_le_upper_bound(count: usize, upper: usize) {
        // In the default impl we can't trust `upper` for soundness
        // because it came from an untrusted `size_hint`.
        // In debug mode we might as well check that the size_hint wasn't too small.
let _ = upper - count;
}
<I as core::iter::adapters::filter::SpecAssumeCount>::assume_count_le_upper_bound unsafe fn assume_count_le_upper_bound(count: usize, upper: usize) {
// SAFETY: The `upper` is trusted because it came from a `TrustedLen` iterator.
unsafe { crate::hint::assert_unchecked(count <= upper) }
}
<I as core::iter::traits::collect::IntoIterator>::into_iter fn into_iter(self) -> I {
self
}
<I as core::iter::traits::iterator::Iterator::advance_by::SpecAdvanceBy>::spec_advance_by default fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
for i in 0..n {
if self.next().is_none() {
// SAFETY: `i` is always less than `n`.
return Err(unsafe { NonZero::new_unchecked(n - i) });
}
}
Ok(())
}
<I as core::iter::traits::iterator::Iterator::advance_by::SpecAdvanceBy>::spec_advance_by fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let Some(n) = NonZero::new(n) else {
return Ok(());
};
let res = self.try_fold(n, |n, _| NonZero::new(n.get() - 1));
match res {
None => Ok(()),
Some(n) => Err(n),
}
}
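// [Editorial example] Hedged sketch, not part of the core listing; it assumes
// a nightly toolchain, since `Iterator::advance_by` is unstable
// (`feature(iter_advance_by)`) at the time of writing.
fn example_advance_by() {
    let mut it = [1, 2, 3].into_iter();
    assert_eq!(it.advance_by(2), Ok(()));
    assert_eq!(it.next(), Some(3));
    // Advancing past the end reports how many steps could not be taken.
    assert_eq!(it.advance_by(5).unwrap_err().get(), 5);
}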
<T as core::array::SpecArrayClone>::clone default fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
from_trusted_iterator(array.iter().cloned())
}
<T as core::array::SpecArrayClone>::clone fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
// SAFETY: `TrivialClone` implies that this is equivalent to calling
// `Clone` on every element.
unsafe { ptr::read(array) }
}
<T as core::array::equality::SpecArrayEq<Other, N>>::spec_eq default fn spec_eq(a: &[Self; N], b: &[Other; N]) -> bool {
a[..] == b[..]
}
<T as core::array::equality::SpecArrayEq<Other, N>>::spec_ne default fn spec_ne(a: &[Self; N], b: &[Other; N]) -> bool {
a[..] != b[..]
}
<T as core::borrow::Borrow<T>>::borrow fn borrow(&self) -> &T {
self
}
<T as core::borrow::BorrowMut<T>>::borrow_mut fn borrow_mut(&mut self) -> &mut T {
self
}
<T as core::convert::From<T>>::from fn from(t: T) -> T {
t
}
<T as core::convert::Into<U>>::into fn into(self) -> U {
U::from(self)
}
<T as core::convert::TryFrom<U>>::try_from fn try_from(value: U) -> Result<Self, Self::Error> {
Ok(U::into(value))
}
<T as core::convert::TryInto<U>>::try_into fn try_into(self) -> Result<U, U::Error> {
U::try_from(self)
}
<T as core::iter::adapters::step_by::SpecRangeSetup<T>>::setup default fn setup(inner: T, _step: usize) -> T {
inner
}
<T as core::unit::IsUnit>::is_unit default fn is_unit() -> bool {
false
}
<[T] as core::convert::AsMut<[T]>>::as_mut fn as_mut(&mut self) -> &mut [T] {
self
}
<[T] as core::convert::AsRef<[T]>>::as_ref fn as_ref(&self) -> &[T] {
self
}
<[T] as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.debug_list().entries(self.iter()).finish()
}
<[T] as core::slice::CloneFromSpec<T>>::spec_clone_from default fn spec_clone_from(&mut self, src: &[T]) {
assert!(self.len() == src.len(), "destination and source slices have different lengths");
// NOTE: We need to explicitly slice them to the same length
// to make it easier for the optimizer to elide bounds checking.
        // But since that can't be relied on, we also have an explicit specialization for `T: Copy`.
let len = self.len();
let src = &src[..len];
// FIXME(const_hack): make this a `for idx in 0..self.len()` loop.
let mut idx = 0;
while idx < self.len() {
self[idx].clone_from(&src[idx]);
idx += 1;
}
}
<[T] as core::slice::specialize::SpecFill<T>>::spec_fill default fn spec_fill(&mut self, value: T) {
if let Some((last, elems)) = self.split_last_mut() {
for el in elems {
el.clone_from(&value);
}
*last = value
}
}
<[T] as core::slice::specialize::SpecFill<T>>::spec_fill default fn spec_fill(&mut self, value: T) {
for item in self.iter_mut() {
// SAFETY: `TrivialClone` indicates that this is equivalent to
// calling `Clone::clone`
*item = unsafe { ptr::read(&value) };
}
}
<[core::ascii::ascii_char::AsciiChar]>::as_bytes pub const fn as_bytes(&self) -> &[u8] {
self.as_str().as_bytes()
}
<[core::ascii::ascii_char::AsciiChar]>::as_str pub const fn as_str(&self) -> &str {
let ascii_ptr: *const Self = self;
let str_ptr = ascii_ptr as *const str;
// SAFETY: Each ASCII codepoint in UTF-8 is encoded as one single-byte
// code unit having the same value as the ASCII byte.
unsafe { &*str_ptr }
}
<[core::mem::maybe_uninit::MaybeUninit<T>; N] as core::array::iter::iter_inner::PartialDrop>::partial_drop unsafe fn partial_drop(&mut self, alive: IndexRange) {
let slice: &mut [MaybeUninit<T>] = self;
// SAFETY: Initialized elements in the array are also initialized in the slice.
unsafe { slice.partial_drop(alive) }
}
<[core::mem::maybe_uninit::MaybeUninit<T>; N]>::transpose pub const fn transpose(self) -> MaybeUninit<[T; N]> {
// SAFETY: T and MaybeUninit<T> have the same layout
unsafe { intrinsics::transmute_unchecked(self) }
}
<[core::mem::maybe_uninit::MaybeUninit<T>] as core::array::iter::iter_inner::PartialDrop>::partial_drop unsafe fn partial_drop(&mut self, alive: IndexRange) {
// SAFETY: We know that all elements within `alive` are properly initialized.
unsafe { self.get_unchecked_mut(alive).assume_init_drop() }
}
<[core::mem::maybe_uninit::MaybeUninit<T>]>::assume_init_drop pub const unsafe fn assume_init_drop(&mut self)
where
T: [const] Destruct,
{
if !self.is_empty() {
// SAFETY: the caller must guarantee that every element of `self`
// is initialized and satisfies all invariants of `T`.
// Dropping the value in place is safe if that is the case.
unsafe { ptr::drop_in_place(self as *mut [MaybeUninit<T>] as *mut [T]) }
}
}
<[core::mem::maybe_uninit::MaybeUninit<T>]>::assume_init_mut pub const unsafe fn assume_init_mut(&mut self) -> &mut [T] {
// SAFETY: similar to safety notes for `slice_get_ref`, but we have a
// mutable reference which is also guaranteed to be valid for writes.
unsafe { &mut *(self as *mut Self as *mut [T]) }
}
<[core::mem::maybe_uninit::MaybeUninit<T>]>::assume_init_ref pub const unsafe fn assume_init_ref(&self) -> &[T] {
// SAFETY: casting `slice` to a `*const [T]` is safe since the caller guarantees that
// `slice` is initialized, and `MaybeUninit` is guaranteed to have the same layout as `T`.
// The pointer obtained is valid since it refers to memory owned by `slice` which is a
// reference and thus guaranteed to be valid for reads.
unsafe { &*(self as *const Self as *const [T]) }
}
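// [Editorial example] Hedged sketch, not part of the core listing: the same
// cast the listing relies on, written out for a fully initialized buffer.
// The `assume_init_ref` method shown above is unstable, so the raw cast,
// which is the listed implementation's core idea, is used directly here.
fn example_slice_assume_init() {
    use core::mem::MaybeUninit;
    let mut buf = [MaybeUninit::<u8>::uninit(); 4];
    for (i, slot) in buf.iter_mut().enumerate() {
        slot.write(i as u8);
    }
    let slice: &[MaybeUninit<u8>] = &buf;
    // SAFETY: every element was initialized in the loop above, and
    // `MaybeUninit<u8>` has the same layout as `u8`.
    let init: &[u8] = unsafe { &*(slice as *const [MaybeUninit<u8>] as *const [u8]) };
    assert_eq!(init, &[0, 1, 2, 3]);
}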
<bool as core::default::Default>::default fn default() -> $t {
$v
}
<bool as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Display::fmt(self, f)
}
<bool as core::fmt::Display>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Display::fmt(if *self { "true" } else { "false" }, f)
}
<bool as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<bool as core::ops::bit::BitAnd<&bool>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<bool as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<bool as core::ops::bit::BitAndAssign<&bool>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<bool as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<bool as core::ops::bit::BitOr<&bool>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<bool as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<bool as core::ops::bit::BitOrAssign<&bool>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<bool as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<bool as core::ops::bit::BitXor<&bool>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<bool as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<bool as core::ops::bit::BitXorAssign<&bool>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<bool as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<bool as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<char as core::default::Default>::default fn default() -> $t {
$v
}
<char as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.write_char('\'')?;
let esc = self.escape_debug_ext(EscapeDebugExtArgs {
escape_grapheme_extended: true,
escape_single_quote: true,
escape_double_quote: false,
});
Display::fmt(&esc, f)?;
f.write_char('\'')
}
<char as core::fmt::Display>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
if f.options.flags & (flags::WIDTH_FLAG | flags::PRECISION_FLAG) == 0 {
f.write_char(*self)
} else {
f.pad(self.encode_utf8(&mut [0; char::MAX_LEN_UTF8]))
}
}
<char as core::str::pattern::Pattern>::into_searcher fn into_searcher<'a>(self, haystack: &'a str) -> Self::Searcher<'a> {
let mut utf8_encoded = [0; char::MAX_LEN_UTF8];
let utf8_size = self
.encode_utf8(&mut utf8_encoded)
.len()
.try_into()
.expect("char len should be less than 255");
CharSearcher {
haystack,
finger: 0,
finger_back: haystack.len(),
needle: self,
utf8_size,
utf8_encoded,
}
}
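// [Editorial example] Hedged sketch, not part of the core listing: a `char`
// pattern is UTF-8-encoded once into the searcher's small buffer, and
// matching then scans for the last byte of that encoding.
fn example_char_pattern() {
    // 'œ' (U+0153) encodes as the two bytes C5 93; it starts at byte 1.
    assert_eq!("cœur".find('œ'), Some(1));
    assert!("cœur".starts_with('c'));
    assert!("cœur".ends_with('r'));
}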
<char as core::str::pattern::Pattern>::is_prefix_of fn is_prefix_of(self, haystack: &str) -> bool {
self.encode_utf8(&mut [0u8; 4]).is_prefix_of(haystack)
}
<char as core::str::pattern::Pattern>::is_suffix_of fn is_suffix_of<'a>(self, haystack: &'a str) -> bool
where
Self::Searcher<'a>: ReverseSearcher<'a>,
{
self.encode_utf8(&mut [0u8; 4]).is_suffix_of(haystack)
}
<core::any::TypeId as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
#[cfg(miri)]
return crate::intrinsics::type_id_eq(*self, *other);
#[cfg(not(miri))]
{
let this = self;
crate::intrinsics::const_eval_select!(
@capture { this: &TypeId, other: &TypeId } -> bool:
if const {
crate::intrinsics::type_id_eq(*this, *other)
} else {
// Ideally we would just invoke `type_id_eq` unconditionally here,
// but since we do not MIR inline intrinsics, because backends
// may want to override them (and miri does!), MIR opts do not
// clean up this call sufficiently for LLVM to turn repeated calls
// of `TypeId` comparisons against one specific `TypeId` into
// a lookup table.
// SAFETY: We know that at runtime none of the bits have provenance and all bits
// are initialized. So we can just convert the whole thing to a `u128` and compare that.
unsafe {
crate::mem::transmute::<_, u128>(*this) == crate::mem::transmute::<_, u128>(*other)
}
}
)
}
}
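// [Editorial example] Hedged sketch, not part of the core listing: at runtime
// the comparison above degenerates to comparing 128-bit values, but the
// observable behavior is simply type identity.
fn example_type_id_eq() {
    use core::any::TypeId;
    assert_eq!(TypeId::of::<u32>(), TypeId::of::<u32>());
    assert_ne!(TypeId::of::<u32>(), TypeId::of::<i32>());
    // References with different pointees are different types, too.
    assert_ne!(TypeId::of::<&str>(), TypeId::of::<&[u8]>());
}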
<core::any::TypeId as core::cmp::PartialEq>::eq::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
<core::any::TypeId as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
write!(f, "TypeId({:#034x})", self.as_u128())
}
<core::array::Guard<'_, T> as core::ops::drop::Drop>::drop fn drop(&mut self) {
debug_assert!(self.initialized <= self.array_mut.len());
// SAFETY: this slice will contain only initialized objects.
unsafe {
self.array_mut.get_unchecked_mut(..self.initialized).assume_init_drop();
}
}
<core::array::TryFromSliceError as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"could not convert slice to array".fmt(f)
}
<core::array::drain::Drain<'_, '_, T, N, F> as core::ops::drop::Drop>::drop fn drop(&mut self) {
if !T::IS_ZST {
            // SAFETY: we can't read more than N elements
let slice = unsafe {
from_raw_parts_mut::<[T]>(
self.ptr.as_ptr(),
// SAFETY: `start <= end`
self.end.offset_from_unsigned(self.ptr.as_ptr()),
)
};
            // SAFETY: By the type invariant, we're allowed to drop all of these (we own them, after all).
unsafe { drop_in_place(slice) }
}
}
<core::array::iter::IntoIter<T, N> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.unsize().fmt(f)
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::double_ended::DoubleEndedIterator>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.unsize_mut().advance_back_by(n)
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<Self::Item> {
self.unsize_mut().next_back()
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::double_ended::DoubleEndedIterator>::rfold fn rfold<Acc, Fold>(mut self, init: Acc, rfold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
self.unsize_mut().rfold(init, rfold)
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::double_ended::DoubleEndedIterator>::try_rfold fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.unsize_mut().try_rfold(init, f)
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::exact_size::ExactSizeIterator>::is_empty fn is_empty(&self) -> bool {
self.inner.len() == 0
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
self.inner.len()
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.unsize_mut().advance_by(n)
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.len()
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
self.unsize_mut().fold(init, fold)
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::last fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
self.unsize_mut().next()
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.unsize().size_hint()
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.unsize_mut().try_fold(init, f)
}
<core::array::iter::IntoIter<T, N> as core::ops::drop::Drop>::drop fn drop(&mut self) {
if crate::mem::needs_drop::<T>() {
// SAFETY: This is the only place where we drop this field.
unsafe { ManuallyDrop::drop(&mut self.inner) }
}
}
<core::array::iter::iter_inner::PolymorphicIter<DATA> as core::ops::drop::Drop>::drop fn drop(&mut self) {
// SAFETY: by our type invariant `self.alive` is exactly the initialized
// items, and this is drop so nothing can use the items afterwards.
unsafe { self.data.partial_drop(self.alive.clone()) }
}
<core::array::iter::iter_inner::PolymorphicIter<[core::mem::maybe_uninit::MaybeUninit<T>; N]> as core::clone::Clone>::clone fn clone(&self) -> Self {
// Note, we don't really need to match the exact same alive range, so
// we can just clone into offset 0 regardless of where `self` is.
let mut new = Self::empty();
fn clone_into_new<U: Clone>(
source: &PolymorphicIter<[MaybeUninit<U>]>,
target: &mut PolymorphicIter<[MaybeUninit<U>]>,
) {
// Clone all alive elements.
for (src, dst) in iter::zip(source.as_slice(), &mut target.data) {
// Write a clone into the new array, then update its alive range.
// If cloning panics, we'll correctly drop the previous items.
dst.write(src.clone());
// This addition cannot overflow as we're iterating a slice,
// the length of which always fits in usize.
target.alive = IndexRange::zero_to(target.alive.end() + 1);
}
}
clone_into_new(self, &mut new);
new
}
<core::array::iter::iter_inner::PolymorphicIter<[core::mem::maybe_uninit::MaybeUninit<T>; N]> as core::clone::Clone>::clone::clone_into_new fn clone_into_new<U: Clone>(
source: &PolymorphicIter<[MaybeUninit<U>]>,
target: &mut PolymorphicIter<[MaybeUninit<U>]>,
) {
// Clone all alive elements.
for (src, dst) in iter::zip(source.as_slice(), &mut target.data) {
// Write a clone into the new array, then update its alive range.
// If cloning panics, we'll correctly drop the previous items.
dst.write(src.clone());
// This addition cannot overflow as we're iterating a slice,
// the length of which always fits in usize.
target.alive = IndexRange::zero_to(target.alive.end() + 1);
}
}
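// [Editorial example] Hedged sketch, not part of the core listing: cloning a
// partially consumed array iterator clones only the still-alive items,
// compacted to offset 0 of the fresh backing array.
fn example_array_into_iter_clone() {
    let mut it = [String::from("a"), String::from("b")].into_iter();
    it.next();
    let cloned = it.clone();
    assert_eq!(cloned.as_slice(), ["b"]);
}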
<core::array::iter::iter_inner::PolymorphicIter<[core::mem::maybe_uninit::MaybeUninit<T>]> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Only print the elements that were not yielded yet: we cannot
// access the yielded elements anymore.
f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
}
<core::ascii::EscapeDefault as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("EscapeDefault").finish_non_exhaustive()
}
<core::ascii::EscapeDefault as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
<core::ascii::EscapeDefault as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.0.advance_by(n)
}
<core::ascii::EscapeDefault as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.0.len()
}
<core::ascii::EscapeDefault as core::iter::traits::iterator::Iterator>::last fn last(mut self) -> Option<u8> {
self.0.next_back()
}
<core::ascii::EscapeDefault as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<u8> {
self.0.next()
}
<core::ascii::EscapeDefault as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.0.len();
(n, Some(n))
}
<core::ascii::ascii_char::AsciiChar as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use AsciiChar::{Apostrophe, Null, ReverseSolidus as Backslash};
fn backslash(a: AsciiChar) -> ([AsciiChar; 6], usize) {
([Apostrophe, Backslash, a, Apostrophe, Null, Null], 4)
}
let (buf, len) = match self {
AsciiChar::Null => backslash(AsciiChar::Digit0),
AsciiChar::CharacterTabulation => backslash(AsciiChar::SmallT),
AsciiChar::CarriageReturn => backslash(AsciiChar::SmallR),
AsciiChar::LineFeed => backslash(AsciiChar::SmallN),
AsciiChar::ReverseSolidus => backslash(AsciiChar::ReverseSolidus),
AsciiChar::Apostrophe => backslash(AsciiChar::Apostrophe),
_ if self.to_u8().is_ascii_control() => {
const HEX_DIGITS: [AsciiChar; 16] = *b"0123456789abcdef".as_ascii().unwrap();
let byte = self.to_u8();
let hi = HEX_DIGITS[usize::from(byte >> 4)];
let lo = HEX_DIGITS[usize::from(byte & 0xf)];
([Apostrophe, Backslash, AsciiChar::SmallX, hi, lo, Apostrophe], 6)
}
_ => ([Apostrophe, *self, Apostrophe, Null, Null, Null], 3),
};
f.write_str(buf[..len].as_str())
}
<core::ascii::ascii_char::AsciiChar as core::fmt::Debug>::fmt::backslash fn backslash(a: AsciiChar) -> ([AsciiChar; 6], usize) {
([Apostrophe, Backslash, a, Apostrophe, Null, Null], 4)
}
<core::ascii::ascii_char::AsciiChar as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<str as fmt::Display>::fmt(self.as_str(), f)
}
<core::bstr::ByteStr as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "\"")?;
for chunk in self.utf8_chunks() {
for c in chunk.valid().chars() {
match c {
'\0' => write!(f, "\\0")?,
'\x01'..='\x7f' => write!(f, "{}", (c as u8).escape_ascii())?,
_ => write!(f, "{}", c.escape_debug())?,
}
}
write!(f, "{}", chunk.invalid().escape_ascii())?;
}
write!(f, "\"")?;
Ok(())
}
<core::bstr::ByteStr as core::ops::deref::Deref>::deref fn deref(&self) -> &[u8] {
&self.0
}
<core::bstr::ByteStr as core::ops::deref::DerefMut>::deref_mut fn deref_mut(&mut self) -> &mut [u8] {
&mut self.0
}
<core::cell::BorrowError as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(feature = "debug_refcell")]
let res = write!(
f,
"RefCell already mutably borrowed; a previous borrow was at {}",
self.location
);
#[cfg(not(feature = "debug_refcell"))]
let res = Display::fmt("RefCell already mutably borrowed", f);
res
}
<core::cell::BorrowMutError as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(feature = "debug_refcell")]
let res = write!(f, "RefCell already borrowed; a previous borrow was at {}", self.location);
#[cfg(not(feature = "debug_refcell"))]
let res = Display::fmt("RefCell already borrowed", f);
res
}
<core::cell::BorrowRef<'_> as core::ops::drop::Drop>::drop fn drop(&mut self) {
let borrow = self.borrow.get();
debug_assert!(is_reading(borrow));
self.borrow.replace(borrow - 1);
}
<core::cell::BorrowRefMut<'_> as core::ops::drop::Drop>::drop fn drop(&mut self) {
let borrow = self.borrow.get();
debug_assert!(is_writing(borrow));
self.borrow.replace(borrow + 1);
}
<core::cell::Cell<T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.debug_struct("Cell").field("value", &self.get()).finish()
}
<core::cell::Ref<'_, T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Debug::fmt(&**self, f)
}
<core::cell::Ref<'_, T> as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
// SAFETY: the value is accessible as long as we hold our borrow.
unsafe { self.value.as_ref() }
}
<core::cell::RefCell<T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
let mut d = f.debug_struct("RefCell");
match self.try_borrow() {
Ok(borrow) => d.field("value", &borrow),
Err(_) => d.field("value", &format_args!("<borrowed>")),
};
d.finish()
}
<core::cell::RefMut<'_, T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Debug::fmt(&*(self.deref()), f)
}
<core::cell::RefMut<'_, T> as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
// SAFETY: the value is accessible as long as we hold our borrow.
unsafe { self.value.as_ref() }
}
<core::cell::RefMut<'_, T> as core::ops::deref::DerefMut>::deref_mut fn deref_mut(&mut self) -> &mut T {
// SAFETY: the value is accessible as long as we hold our borrow.
unsafe { self.value.as_mut() }
}
<core::cell::SyncUnsafeCell<T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.debug_struct("SyncUnsafeCell").finish_non_exhaustive()
}
<core::cell::UnsafeCell<T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.debug_struct("UnsafeCell").finish_non_exhaustive()
}
<core::char::EscapeDebug as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0, f)
}
<core::char::EscapeDebug as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
self.0.len()
}
<core::char::EscapeDebug as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.len()
}
<core::char::EscapeDebug as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<char> {
self.0.next()
}
<core::char::EscapeDebug as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.len();
(n, Some(n))
}
<core::char::decode::DecodeUtf16<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Result<char, DecodeUtf16Error>> {
let u = match self.buf.take() {
Some(buf) => buf,
None => self.iter.next()?,
};
if !u.is_utf16_surrogate() {
// SAFETY: not a surrogate
Some(Ok(unsafe { char::from_u32_unchecked(u as u32) }))
} else if u >= 0xDC00 {
// a trailing surrogate
Some(Err(DecodeUtf16Error { code: u }))
} else {
let u2 = match self.iter.next() {
Some(u2) => u2,
// eof
None => return Some(Err(DecodeUtf16Error { code: u })),
};
if u2 < 0xDC00 || u2 > 0xDFFF {
// not a trailing surrogate so we're not a valid
// surrogate pair, so rewind to redecode u2 next time.
self.buf = Some(u2);
return Some(Err(DecodeUtf16Error { code: u }));
}
            // all ok, so let's decode it.
let c = (((u & 0x3ff) as u32) << 10 | (u2 & 0x3ff) as u32) + 0x1_0000;
// SAFETY: we checked that it's a legal unicode value
Some(Ok(unsafe { char::from_u32_unchecked(c) }))
}
}
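// [Editorial example] Hedged sketch, not part of the core listing: a valid
// surrogate pair decodes to one scalar value, while a lone trailing surrogate
// is reported as an error carrying the offending code unit.
fn example_decode_utf16() {
    let mut it = char::decode_utf16([0xD83Du16, 0xDE00, 0xDC00]);
    assert_eq!(it.next().unwrap().unwrap(), '😀');
    assert!(it.next().unwrap().is_err());
    assert!(it.next().is_none());
}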
<core::char::decode::DecodeUtf16<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let (low, high) = self.iter.size_hint();
let (low_buf, high_buf) = match self.buf {
// buf is empty, no additional elements from it.
None => (0, 0),
            // `u` is a non-surrogate, so it's always an additional character.
Some(u) if !u.is_utf16_surrogate() => (1, 1),
// `u` is a leading surrogate (it can never be a trailing surrogate and
// it's a surrogate due to the previous branch) and `self.iter` is empty.
//
// `u` can't be paired, since the `self.iter` is empty,
// so it will always become an additional element (error).
Some(_u) if high == Some(0) => (1, 1),
// `u` is a leading surrogate and `iter` may be non-empty.
//
// `u` can either pair with a trailing surrogate, in which case no additional elements
// are produced, or it can become an error, in which case it's an additional character (error).
Some(_u) => (0, 1),
};
// `self.iter` could contain entirely valid surrogates (2 elements per
// char), or entirely non-surrogates (1 element per char).
//
// On odd lower bound, at least one element must stay unpaired
// (with other elements from `self.iter`), so we round up.
let low = low.div_ceil(2) + low_buf;
let high = high.and_then(|h| h.checked_add(high_buf));
(low, high)
}
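// [Editorial example] Hedged sketch, not part of the core listing: four code
// units can decode to as few as two chars (two surrogate pairs) or as many as
// four (no surrogates), hence the rounded-up lower bound.
fn example_decode_utf16_size_hint() {
    let it = char::decode_utf16([0x0041u16, 0x0042, 0x0043, 0x0044]);
    assert_eq!(it.size_hint(), (2, Some(4)));
}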
<core::escape::EscapeIterInner<N, core::escape::AlwaysEscaped> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("EscapeIterInner").field(&format_args!("'{}'", self)).finish()
}
<core::escape::EscapeIterInner<N, core::escape::AlwaysEscaped> as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// SAFETY: The `AlwaysEscaped` marker guarantees that `self.data`
// contains printable ASCII chars, and `self.alive` is
// guaranteed to be a valid range for `self.data`.
f.write_str(unsafe { self.to_str_unchecked() })
}
<core::escape::EscapeIterInner<N, core::escape::MaybeEscaped> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("EscapeIterInner").field(&format_args!("'{}'", self)).finish()
}
<core::escape::EscapeIterInner<N, core::escape::MaybeEscaped> as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(c) = self.to_char() {
return f.write_char(c);
}
// SAFETY: At this point, `self.data` must contain printable ASCII
// characters in its `escape_seq` variant, and `self.alive`
// is guaranteed to be a valid range for `self.data`.
f.write_str(unsafe { self.to_str_unchecked() })
}
<core::ffi::c_str::CStr as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(crate::bstr::ByteStr::from_bytes(self.to_bytes()), f)
}
<core::ffi::c_void as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("c_void").finish()
}
<core::fmt::Arguments<'_> as core::fmt::Debug>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
Display::fmt(self, fmt)
}
<core::fmt::Arguments<'_> as core::fmt::Display>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
write(fmt.buf, *self)
}
<core::fmt::Error as core::fmt::Display>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
Display::fmt("an error occurred when formatting an argument", f)
}
<core::fmt::Formatter<'_> as core::fmt::Write>::write_char fn write_char(&mut self, c: char) -> Result {
self.buf.write_char(c)
}
<core::fmt::Formatter<'_> as core::fmt::Write>::write_fmt fn write_fmt(&mut self, args: Arguments<'_>) -> Result {
if let Some(s) = args.as_statically_known_str() {
self.buf.write_str(s)
} else {
write(self.buf, args)
}
}
<core::fmt::Formatter<'_> as core::fmt::Write>::write_str fn write_str(&mut self, s: &str) -> Result {
self.buf.write_str(s)
}
<core::fmt::FormattingOptions as core::default::Default>::default fn default() -> Self {
// The `#[derive(Default)]` implementation would set `fill` to `\0` instead of space.
Self::new()
}
<core::fmt::builders::FromFn<F> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(self.0)(f)
}
<core::fmt::builders::FromFn<F> as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
(self.0)(f)
}
<core::fmt::builders::PadAdapter<'_, '_> as core::fmt::Write>::write_char fn write_char(&mut self, c: char) -> fmt::Result {
if self.state.on_newline {
self.buf.write_str(" ")?;
}
self.state.on_newline = c == '\n';
self.buf.write_char(c)
}
<core::fmt::builders::PadAdapter<'_, '_> as core::fmt::Write>::write_str fn write_str(&mut self, s: &str) -> fmt::Result {
for s in s.split_inclusive('\n') {
if self.state.on_newline {
self.buf.write_str(" ")?;
}
self.state.on_newline = s.ends_with('\n');
self.buf.write_str(s)?;
}
Ok(())
}
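// [Editorial example] Hedged sketch, not part of the core listing (and it
// uses `alloc`'s `format!` for brevity): alternate Debug formatting routes
// nested output through `PadAdapter`, which writes four spaces after each
// newline it has observed.
fn example_pad_adapter() {
    let s = format!("{:#?}", Some([1, 2]));
    assert_eq!(s, "Some(\n    [\n        1,\n        2,\n    ],\n)");
}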
<core::fmt::builders::PadAdapterState as core::default::Default>::default fn default() -> Self {
PadAdapterState { on_newline: true }
}
<core::iter::adapters::chain::Chain<A, B> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, mut n: usize) -> Result<(), NonZero<usize>> {
if let Some(ref mut a) = self.a {
n = match a.advance_by(n) {
Ok(()) => return Ok(()),
Err(k) => k.get(),
};
self.a = None;
}
if let Some(ref mut b) = self.b {
return b.advance_by(n);
// we don't fuse the second iterator
}
NonZero::new(n).map_or(Ok(()), Err)
}
<core::iter::adapters::chain::Chain<A, B> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
let a_count = match self.a {
Some(a) => a.count(),
None => 0,
};
let b_count = match self.b {
Some(b) => b.count(),
None => 0,
};
a_count + b_count
}
<core::iter::adapters::chain::Chain<A, B> as core::iter::traits::iterator::Iterator>::find fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
where
P: FnMut(&Self::Item) -> bool,
{
and_then_or_clear(&mut self.a, |a| a.find(&mut predicate))
.or_else(|| self.b.as_mut()?.find(predicate))
}
<core::iter::adapters::chain::Chain<A, B> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, mut acc: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
if let Some(a) = self.a {
acc = a.fold(acc, &mut f);
}
if let Some(b) = self.b {
acc = b.fold(acc, f);
}
acc
}
<core::iter::adapters::chain::Chain<A, B> as core::iter::traits::iterator::Iterator>::last fn last(self) -> Option<A::Item> {
// Must exhaust a before b.
let a_last = self.a.and_then(Iterator::last);
let b_last = self.b.and_then(Iterator::last);
b_last.or(a_last)
}
<core::iter::adapters::chain::Chain<A, B> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<A::Item> {
and_then_or_clear(&mut self.a, Iterator::next).or_else(|| self.b.as_mut()?.next())
}
<core::iter::adapters::chain::Chain<A, B> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, mut n: usize) -> Option<Self::Item> {
if let Some(ref mut a) = self.a {
n = match a.advance_by(n) {
Ok(()) => match a.next() {
None => 0,
x => return x,
},
Err(k) => k.get(),
};
self.a = None;
}
self.b.as_mut()?.nth(n)
}
<core::iter::adapters::chain::Chain<A, B> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
match self {
Chain { a: Some(a), b: Some(b) } => {
let (a_lower, a_upper) = a.size_hint();
let (b_lower, b_upper) = b.size_hint();
let lower = a_lower.saturating_add(b_lower);
let upper = match (a_upper, b_upper) {
(Some(x), Some(y)) => x.checked_add(y),
_ => None,
};
(lower, upper)
}
Chain { a: Some(a), b: None } => a.size_hint(),
Chain { a: None, b: Some(b) } => b.size_hint(),
Chain { a: None, b: None } => (0, Some(0)),
}
}
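// [Editorial example] Hedged sketch, not part of the core listing: lower
// bounds are added with saturation and upper bounds with `checked_add`, so
// chaining two exact-size iterators keeps an exact hint.
fn example_chain_size_hint() {
    let it = (0..3).chain(5..9);
    assert_eq!(it.size_hint(), (7, Some(7)));
}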
<core::iter::adapters::chain::Chain<A, B> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
where
Self: Sized,
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
if let Some(ref mut a) = self.a {
acc = a.try_fold(acc, &mut f)?;
self.a = None;
}
if let Some(ref mut b) = self.b {
acc = b.try_fold(acc, f)?;
// we don't fuse the second iterator
}
try { acc }
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<T> {
self.it.next_back().cloned()
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::double_ended::DoubleEndedIterator>::rfold fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.it.map(T::clone).rfold(init, f)
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::double_ended::DoubleEndedIterator>::try_rfold fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.it.try_rfold(init, clone_try_fold(f))
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.it.map(T::clone).fold(init, f)
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<T> {
self.it.next().cloned()
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.it.try_fold(init, clone_try_fold(f))
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> T {
// SAFETY: `Cloned` is 1:1 with the inner iterator, so if the caller promised
// that there's an element left, the inner iterator has one too.
let item = unsafe { self.it.next_unchecked() };
item.clone()
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::double_ended::DoubleEndedIterator>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.it.advance_back_by(n)
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<T> {
self.it.next_back().copied()
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::double_ended::DoubleEndedIterator>::rfold fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.it.rfold(init, copy_fold(f))
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::double_ended::DoubleEndedIterator>::try_rfold fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.it.try_rfold(init, copy_try_fold(f))
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::exact_size::ExactSizeIterator>::is_empty fn is_empty(&self) -> bool {
self.it.is_empty()
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
self.it.len()
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.it.advance_by(n)
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.it.count()
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.it.fold(init, copy_fold(f))
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::iterator::Iterator>::last fn last(self) -> Option<T> {
self.it.last().copied()
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<T> {
self.it.next().copied()
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<T> {
self.it.nth(n).copied()
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
<core::iter::adapters::copied::Copied<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.it.try_fold(init, copy_try_fold(f))
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let remaining = self.iter.advance_by(n);
let advanced = match remaining {
Ok(()) => n,
Err(rem) => n - rem.get(),
};
self.count += advanced;
remaining
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.iter.count()
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
#[inline]
fn enumerate<T, Acc>(
mut count: usize,
mut fold: impl FnMut(Acc, (usize, T)) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
#[rustc_inherit_overflow_checks]
move |acc, item| {
let acc = fold(acc, (count, item));
count += 1;
acc
}
}
self.iter.fold(init, enumerate(self.count, fold))
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::fold::enumerate fn enumerate<T, Acc>(
mut count: usize,
mut fold: impl FnMut(Acc, (usize, T)) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
#[rustc_inherit_overflow_checks]
move |acc, item| {
let acc = fold(acc, (count, item));
count += 1;
acc
}
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
let a = self.iter.next()?;
let i = self.count;
self.count += 1;
Some((i, a))
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> {
let a = self.iter.nth(n)?;
let i = self.count + n;
self.count = i + 1;
Some((i, a))
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
#[inline]
fn enumerate<'a, T, Acc, R>(
count: &'a mut usize,
mut fold: impl FnMut(Acc, (usize, T)) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
#[rustc_inherit_overflow_checks]
move |acc, item| {
let acc = fold(acc, (*count, item));
*count += 1;
acc
}
}
self.iter.try_fold(init, enumerate(&mut self.count, fold))
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::try_fold::enumerate fn enumerate<'a, T, Acc, R>(
count: &'a mut usize,
mut fold: impl FnMut(Acc, (usize, T)) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
#[rustc_inherit_overflow_checks]
move |acc, item| {
let acc = fold(acc, (*count, item));
*count += 1;
acc
}
}
<core::iter::adapters::filter::Filter<I, P> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Filter").field("iter", &self.iter).finish()
}
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
#[inline]
fn to_usize<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut(T) -> usize {
move |x| predicate(&x) as usize
}
let before = self.iter.size_hint().1.unwrap_or(usize::MAX);
let total = self.iter.map(to_usize(self.predicate)).sum();
// SAFETY: `total` and `before` came from the same iterator of type `I`
unsafe {
<I as SpecAssumeCount>::assume_count_le_upper_bound(total, before);
}
total
}
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::count::to_usize fn to_usize<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut(T) -> usize {
move |x| predicate(&x) as usize
}
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
self.iter.fold(init, filter_fold(self.predicate, fold))
}
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<I::Item> {
self.iter.find(&mut self.predicate)
}
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let (_, upper) = self.iter.size_hint();
(0, upper) // can't know a lower bound, due to the predicate
}
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.iter.try_fold(init, filter_try_fold(&mut self.predicate, fold))
}
<core::iter::adapters::flatten::FlatMap<I, U, F> as core::clone::Clone>::clone fn clone(&self) -> Self {
FlatMap { inner: self.inner.clone() }
}
<core::iter::adapters::flatten::FlatMap<I, U, F> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FlatMap").field("inner", &self.inner).finish()
}
<core::iter::adapters::map::Map<I, F> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Map").field("iter", &self.iter).finish()
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<B> {
self.iter.next_back().map(&mut self.f)
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::double_ended::DoubleEndedIterator>::rfold fn rfold<Acc, G>(self, init: Acc, g: G) -> Acc
where
G: FnMut(Acc, Self::Item) -> Acc,
{
self.iter.rfold(init, map_fold(self.f, g))
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::double_ended::DoubleEndedIterator>::try_rfold fn try_rfold<Acc, G, R>(&mut self, init: Acc, g: G) -> R
where
Self: Sized,
G: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.iter.try_rfold(init, map_try_fold(&mut self.f, g))
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, G>(self, init: Acc, g: G) -> Acc
where
G: FnMut(Acc, Self::Item) -> Acc,
{
self.iter.fold(init, map_fold(self.f, g))
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<B> {
self.iter.next().map(&mut self.f)
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, G, R>(&mut self, init: Acc, g: G) -> R
where
Self: Sized,
G: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.iter.try_fold(init, map_try_fold(&mut self.f, g))
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> B {
// SAFETY: `Map` is 1:1 with the inner iterator, so if the caller promised
// that there's an element left, the inner iterator has one too.
let item = unsafe { self.iter.next_unchecked() };
(self.f)(item)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.iter.advance_by(n)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
self.iter.next()
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::nth_back fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
self.iter.nth(n)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::rfind fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
where
P: FnMut(&Self::Item) -> bool,
{
self.iter.find(predicate)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::rfold fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.iter.fold(init, f)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::try_rfold fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.iter.try_fold(init, f)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.iter.advance_back_by(n)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::find fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
where
P: FnMut(&Self::Item) -> bool,
{
self.iter.rfind(predicate)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.iter.rfold(init, f)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<<I as Iterator>::Item> {
self.iter.next_back()
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
self.iter.nth_back(n)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.iter.try_rfold(init, f)
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::double_ended::DoubleEndedIterator>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let min = crate::cmp::min(self.len(), n);
let rem = self.iter.advance_back_by(min);
assert!(rem.is_ok(), "ExactSizeIterator contract violation");
NonZero::new(n - min).map_or(Ok(()), Err)
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<Self::Item> {
if self.len() > 0 { self.iter.next_back() } else { None }
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::double_ended::DoubleEndedIterator>::nth_back fn nth_back(&mut self, n: usize) -> Option<I::Item> {
let len = self.len();
if n < len {
self.iter.nth_back(n)
} else {
if len > 0 {
// consume the original iterator
self.iter.nth_back(len - 1);
}
None
}
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::double_ended::DoubleEndedIterator>::rfold fn $fold<AAA, FFF>(mut self, init: AAA, fold: FFF) -> AAA
where
FFF: FnMut(AAA, Self::Item) -> AAA,
{
use crate::ops::NeverShortCircuit;
self.$try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::double_ended::DoubleEndedIterator>::try_rfold fn try_rfold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
fn check<T, Acc, R: Try<Output = Acc>>(
mut n: usize,
mut fold: impl FnMut(Acc, T) -> R,
) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> {
move |acc, x| {
n -= 1;
let r = fold(acc, x);
if n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
}
}
let n = self.len();
if n == 0 { try { init } } else { self.iter.try_rfold(init, check(n, fold)).into_try() }
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::double_ended::DoubleEndedIterator>::try_rfold::check fn check<T, Acc, R: Try<Output = Acc>>(
mut n: usize,
mut fold: impl FnMut(Acc, T) -> R,
) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> {
move |acc, x| {
n -= 1;
let r = fold(acc, x);
if n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
}
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, mut n: usize) -> Result<(), NonZero<usize>> {
let skip_inner = self.n;
let skip_and_advance = skip_inner.saturating_add(n);
let remainder = match self.iter.advance_by(skip_and_advance) {
Ok(()) => 0,
Err(n) => n.get(),
};
let advanced_inner = skip_and_advance - remainder;
n -= advanced_inner.saturating_sub(skip_inner);
self.n = self.n.saturating_sub(advanced_inner);
// skip_and_advance may have saturated
if unlikely(remainder == 0 && n > 0) {
n = match self.iter.advance_by(n) {
Ok(()) => 0,
Err(n) => n.get(),
}
}
NonZero::new(n).map_or(Ok(()), Err)
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::count fn count(mut self) -> usize {
if self.n > 0 {
// nth(n) skips n+1
if self.iter.nth(self.n - 1).is_none() {
return 0;
}
}
self.iter.count()
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
if self.n > 0 {
// nth(n) skips n+1
if self.iter.nth(self.n - 1).is_none() {
return init;
}
}
self.iter.fold(init, fold)
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::last fn last(mut self) -> Option<I::Item> {
if self.n > 0 {
// nth(n) skips n+1
self.iter.nth(self.n - 1)?;
}
self.iter.last()
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<I::Item> {
if unlikely(self.n > 0) {
self.iter.nth(crate::mem::take(&mut self.n))
} else {
self.iter.next()
}
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<I::Item> {
if self.n > 0 {
let skip: usize = crate::mem::take(&mut self.n);
// Checked add to handle overflow case.
let n = match skip.checked_add(n) {
Some(nth) => nth,
None => {
                        // On overflow, advance past the `skip` elements first, and then
                        // past `n`. Because the number of elements to advance exceeds
                        // `usize::MAX`, the work is split into two `nth` calls, with the
                        // result of the `skip` call discarded.
self.iter.nth(skip - 1)?;
n
}
};
// Load nth element including skip.
self.iter.nth(n)
} else {
self.iter.nth(n)
}
}
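// [Editorial example] Hedged sketch, not part of the core listing: the
// pending skip is folded into the caller's `nth`, so `skip(2).nth(3)` issues
// a single `nth(5)` on the inner iterator.
fn example_skip_nth() {
    let mut it = (0..10).skip(2);
    assert_eq!(it.nth(3), Some(5));
    assert_eq!(it.next(), Some(6));
}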
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let (lower, upper) = self.iter.size_hint();
let lower = lower.saturating_sub(self.n);
let upper = match upper {
Some(x) => Some(x.saturating_sub(self.n)),
None => None,
};
(lower, upper)
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
let n = self.n;
self.n = 0;
if n > 0 {
// nth(n) skips n+1
if self.iter.nth(n - 1).is_none() {
return try { init };
}
}
self.iter.try_fold(init, fold)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_fold default fn spec_fold<Acc, F>(mut self, mut acc: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
#[inline]
fn nth<I: Iterator>(
iter: &mut I,
step_minus_one: usize,
) -> impl FnMut() -> Option<I::Item> + '_ {
move || iter.nth(step_minus_one)
}
if self.first_take {
self.first_take = false;
match self.iter.next() {
None => return acc,
Some(x) => acc = f(acc, x),
}
}
from_fn(nth(&mut self.iter, self.step_minus_one)).fold(acc, f)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_fold::nth fn nth<I: Iterator>(
iter: &mut I,
step_minus_one: usize,
) -> impl FnMut() -> Option<I::Item> + '_ {
move || iter.nth(step_minus_one)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_next default fn spec_next(&mut self) -> Option<I::Item> {
let step_size = if self.first_take { 0 } else { self.step_minus_one };
self.first_take = false;
self.iter.nth(step_size)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_nth default fn spec_nth(&mut self, mut n: usize) -> Option<I::Item> {
if self.first_take {
self.first_take = false;
let first = self.iter.next();
if n == 0 {
return first;
}
n -= 1;
}
// `n` and `self.step_minus_one` are indices; we add 1 to get the number of elements.
// When calling `.nth`, we subtract 1 again to convert back to an index.
let mut step = self.original_step().get();
// `n + 1` could overflow;
// thus, if `n` is `usize::MAX`, instead of adding one we first advance by
// `step` elements with `.nth(step - 1)`
if n == usize::MAX {
self.iter.nth(step - 1);
} else {
n += 1;
}
// overflow handling
loop {
let mul = n.checked_mul(step);
{
if intrinsics::likely(mul.is_some()) {
return self.iter.nth(mul.unwrap() - 1);
}
}
let div_n = usize::MAX / n;
let div_step = usize::MAX / step;
let nth_n = div_n * n;
let nth_step = div_step * step;
let nth = if nth_n > nth_step {
step -= div_n;
nth_n
} else {
n -= div_step;
nth_step
};
self.iter.nth(nth - 1);
}
}
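The loop above only matters when `n * step` overflows `usize`; it then advances in the largest multiples of `n` and `step` that still fit. A sketch of the ordinary path, where the whole jump is a single multiplication:
fn main() {
    let mut it = (0..100).step_by(7);
    // nth(3) lands on the fourth yielded element: 0, 7, 14, then 21.
    assert_eq!(it.nth(3), Some(21));
    // Stepping resumes from there.
    assert_eq!(it.next(), Some(28));
}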
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_size_hint default fn spec_size_hint(&self) -> (usize, Option<usize>) {
#[inline]
fn first_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
move |n| if n == 0 { 0 } else { 1 + (n - 1) / step }
}
#[inline]
fn other_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
move |n| n / step
}
let (low, high) = self.iter.size_hint();
if self.first_take {
let f = first_size(self.original_step());
(f(low), high.map(f))
} else {
let f = other_size(self.original_step());
(f(low), high.map(f))
}
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_size_hint::first_size fn first_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
move |n| if n == 0 { 0 } else { 1 + (n - 1) / step }
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_size_hint::other_size fn other_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
move |n| n / step
}
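`first_size` accounts for the element the adapter still owes before stepping begins, so a fresh `StepBy` over `n` source elements reports `1 + (n - 1) / step` items; once the first element has been taken, plain `n / step` applies. A sketch of the first formula:
fn main() {
    // 10 source elements stepped by 3: 1 + (10 - 1) / 3 = 4 items (0, 3, 6, 9).
    let it = (0..10).step_by(3);
    assert_eq!(it.size_hint(), (4, Some(4)));
    assert_eq!(it.count(), 4);
}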
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_try_fold default fn spec_try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
#[inline]
fn nth<I: Iterator>(
iter: &mut I,
step_minus_one: usize,
) -> impl FnMut() -> Option<I::Item> + '_ {
move || iter.nth(step_minus_one)
}
if self.first_take {
self.first_take = false;
match self.iter.next() {
None => return try { acc },
Some(x) => acc = f(acc, x)?,
}
}
from_fn(nth(&mut self.iter, self.step_minus_one)).try_fold(acc, f)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_try_fold::nth fn nth<I: Iterator>(
iter: &mut I,
step_minus_one: usize,
) -> impl FnMut() -> Option<I::Item> + '_ {
move || iter.nth(step_minus_one)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, acc: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.spec_fold(acc, f)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
self.spec_next()
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.spec_nth(n)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.spec_size_hint()
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.spec_try_fold(acc, f)
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
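In these specializations `self.iter.end` is no longer a range bound: the `SpecRangeSetup::setup` functions further below rewrite it into a count of remaining yields, which is why `spec_next` can simply decrement it and step `start` with `wrapping_add`. The wrap can only happen after the final yield, where the value is dead, e.g. near `u16::MAX`:
fn main() {
    // After yielding 65534, adding the step of 4 would overflow u16; the
    // specialization wraps instead, and the exhausted counter guarantees
    // the wrapped value is never observed.
    let v: Vec<u16> = (65530u16..65535).step_by(4).collect();
    assert_eq!(v, [65530, 65534]);
}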
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type's maximum value has been specified,
// fall back to $t::MAX, in which case remaining will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
<core::iter::adapters::take::Take<I> as core::iter::adapters::take::SpecTake>::spec_fold default fn spec_fold<B, F>(mut self, init: B, f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
use crate::ops::NeverShortCircuit;
self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0
}
<core::iter::adapters::take::Take<I> as core::iter::adapters::take::SpecTake>::spec_for_each default fn spec_for_each<F: FnMut(Self::Item)>(mut self, f: F) {
// The default implementation would use a unit accumulator, so we can
// avoid a stateful closure by folding over the remaining number
// of items we wish to return instead.
fn check<'a, Item>(
mut action: impl FnMut(Item) + 'a,
) -> impl FnMut(usize, Item) -> Option<usize> + 'a {
move |more, x| {
action(x);
more.checked_sub(1)
}
}
let remaining = self.n;
if remaining > 0 {
self.iter.try_fold(remaining - 1, check(f));
}
}
<core::iter::adapters::take::Take<I> as core::iter::adapters::take::SpecTake>::spec_for_each::check fn check<'a, Item>(
mut action: impl FnMut(Item) + 'a,
) -> impl FnMut(usize, Item) -> Option<usize> + 'a {
move |more, x| {
action(x);
more.checked_sub(1)
}
}
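Instead of a stateful closure, `spec_for_each` threads the remaining budget through `try_fold` as the accumulator; `checked_sub` hits `None` exactly when the budget is spent, stopping the inner iterator. A sketch showing that this terminates even on an unbounded source:
fn main() {
    let mut seen = Vec::new();
    // The countdown accumulator stops the inner try_fold after 3 items.
    (0..).take(3).for_each(|x| seen.push(x));
    assert_eq!(seen, [0, 1, 2]);
}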
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let min = self.n.min(n);
let rem = match self.iter.advance_by(min) {
Ok(()) => 0,
Err(rem) => rem.get(),
};
let advanced = min - rem;
self.n -= advanced;
NonZero::new(n - advanced).map_or(Ok(()), Err)
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::fold fn fold<B, F>(self, init: B, f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
Self::spec_fold(self, init, f)
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::for_each fn for_each<F: FnMut(Self::Item)>(self, f: F) {
Self::spec_for_each(self, f)
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<<I as Iterator>::Item> {
if self.n != 0 {
self.n -= 1;
self.iter.next()
} else {
None
}
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<I::Item> {
if self.n > n {
self.n -= n + 1;
self.iter.nth(n)
} else {
if self.n > 0 {
self.iter.nth(self.n - 1);
self.n = 0;
}
None
}
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.n == 0 {
return (0, Some(0));
}
let (lower, upper) = self.iter.size_hint();
let lower = cmp::min(lower, self.n);
let upper = match upper {
Some(x) if x < self.n => Some(x),
_ => Some(self.n),
};
(lower, upper)
}
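A sketch of the two entries above: `nth` answers from the remaining budget and drains it when asked past the end, and `size_hint` clamps both bounds of the inner hint to that budget.
fn main() {
    // Within budget: take(5) still has indices 0..5 available.
    assert_eq!((0..10).take(5).nth(2), Some(2));
    // Past the budget: the remaining budget is drained and None is returned.
    assert_eq!((0..10).take(3).nth(7), None);
    // Both bounds clamp to the budget, whichever side is smaller.
    assert_eq!((0..10).take(3).size_hint(), (3, Some(3)));
    assert_eq!((0..2).take(5).size_hint(), (2, Some(2)));
}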
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
fn check<'a, T, Acc, R: Try<Output = Acc>>(
n: &'a mut usize,
mut fold: impl FnMut(Acc, T) -> R + 'a,
) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
move |acc, x| {
*n -= 1;
let r = fold(acc, x);
if *n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
}
}
if self.n == 0 {
try { init }
} else {
let n = &mut self.n;
self.iter.try_fold(init, check(n, fold)).into_try()
}
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::try_fold::check fn check<'a, T, Acc, R: Try<Output = Acc>>(
n: &'a mut usize,
mut fold: impl FnMut(Acc, T) -> R + 'a,
) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
move |acc, x| {
*n -= 1;
let r = fold(acc, x);
if *n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
}
}
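The `check` wrapper decrements the budget alongside each fold step and switches to `ControlFlow::Break` the moment it reaches zero, so the inner iterator is never polled past the limit. A brief sketch with `Option` as the `Try` type:
fn main() {
    // An infinite source is fine: the break fires after 4 items.
    // 0 + 1 + 2 + 3 = 6.
    assert_eq!((0..).take(4).try_fold(0i32, i32::checked_add), Some(6));
}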
<core::iter::adapters::take_while::TakeWhile<I, P> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("TakeWhile").field("iter", &self.iter).field("flag", &self.flag).finish()
}
<core::iter::adapters::take_while::TakeWhile<I, P> as core::iter::traits::iterator::Iterator>::fold fn $fold<AAA, FFF>(mut self, init: AAA, fold: FFF) -> AAA
where
FFF: FnMut(AAA, Self::Item) -> AAA,
{
use crate::ops::NeverShortCircuit;
self.$try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0
}
<core::iter::adapters::take_while::TakeWhile<I, P> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<I::Item> {
if self.flag {
None
} else {
let x = self.iter.next()?;
if (self.predicate)(&x) {
Some(x)
} else {
self.flag = true;
None
}
}
}
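Once an element fails the predicate, `flag` keeps the adapter exhausted for good; note that the failing element itself has already been pulled from the inner iterator and is lost. An illustrative sketch:
fn main() {
    let mut it = [1, 2, 3, 2].into_iter().take_while(|&x| x < 3);
    assert_eq!(it.next(), Some(1));
    assert_eq!(it.next(), Some(2));
    // 3 fails the predicate: the flag is set and the 3 is consumed and dropped.
    assert_eq!(it.next(), None);
    // The flag keeps the adapter finished even though another 2 follows.
    assert_eq!(it.next(), None);
}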
<core::iter::adapters::take_while::TakeWhile<I, P> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.flag {
(0, Some(0))
} else {
let (_, upper) = self.iter.size_hint();
(0, upper) // can't know a lower bound, due to the predicate
}
}
<core::iter::adapters::take_while::TakeWhile<I, P> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
fn check<'a, T, Acc, R: Try<Output = Acc>>(
flag: &'a mut bool,
p: &'a mut impl FnMut(&T) -> bool,
mut fold: impl FnMut(Acc, T) -> R + 'a,
) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
move |acc, x| {
if p(&x) {
ControlFlow::from_try(fold(acc, x))
} else {
*flag = true;
ControlFlow::Break(try { acc })
}
}
}
if self.flag {
try { init }
} else {
let flag = &mut self.flag;
let p = &mut self.predicate;
self.iter.try_fold(init, check(flag, p, fold)).into_try()
}
}
<core::iter::adapters::take_while::TakeWhile<I, P> as core::iter::traits::iterator::Iterator>::try_fold::check fn check<'a, T, Acc, R: Try<Output = Acc>>(
flag: &'a mut bool,
p: &'a mut impl FnMut(&T) -> bool,
mut fold: impl FnMut(Acc, T) -> R + 'a,
) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
move |acc, x| {
if p(&x) {
ControlFlow::from_try(fold(acc, x))
} else {
*flag = true;
ControlFlow::Break(try { acc })
}
}
}
<core::iter::adapters::zip::Zip<A, B> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ZipFmt::fmt(self, f)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::SpecFold>::spec_fold default fn spec_fold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let mut accum = init;
while let Some(x) = ZipImpl::next(&mut self) {
accum = f(accum, x);
}
accum
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipFmt<A, B>>::fmt default fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Zip").field("a", &self.a).field("b", &self.b).finish()
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipFmt<A, B>>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// It's *not safe* to call fmt on the contained iterators, since once
// we start iterating they're in strange, potentially unsafe, states.
f.debug_struct("Zip").finish()
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::fold default fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
SpecFold::spec_fold(self, init, f)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::new default fn new(a: A, b: B) -> Self {
Zip {
a,
b,
index: 0, // unused
len: 0, // unused
}
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::next default fn next(&mut self) -> Option<(A::Item, B::Item)> {
let x = self.a.next()?;
let y = self.b.next()?;
Some((x, y))
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::nth default fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.super_nth(n)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::size_hint default fn size_hint(&self) -> (usize, Option<usize>) {
let (a_lower, a_upper) = self.a.size_hint();
let (b_lower, b_upper) = self.b.size_hint();
let lower = cmp::min(a_lower, b_lower);
let upper = match (a_upper, b_upper) {
(Some(x), Some(y)) => Some(cmp::min(x, y)),
(Some(x), None) => Some(x),
(None, Some(y)) => Some(y),
(None, None) => None,
};
(lower, upper)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
ZipImpl::fold(self, init, f)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
ZipImpl::next(self)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
ZipImpl::nth(self, n)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
ZipImpl::size_hint(self)
}
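A sketch of the zip behavior these delegations expose: `next` stops at the first exhausted side, and `size_hint` takes the pairwise minimum of the two operands' bounds.
fn main() {
    // Iteration ends with the shorter side; the unmatched 3 and 4 never
    // appear in a pair.
    let pairs: Vec<(i32, char)> = [1, 2, 3, 4].into_iter().zip(['a', 'b']).collect();
    assert_eq!(pairs, [(1, 'a'), (2, 'b')]);
    // Lower and upper bounds are both min(3, 10) = 3 here.
    assert_eq!([1, 2, 3].iter().zip(0..10).size_hint(), (3, Some(3)));
}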
<core::iter::sources::from_fn::FromFn<F> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("FromFn").finish()
}
<core::iter::sources::from_fn::FromFn<F> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
(self.0)()
}
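`FromFn` is just a closure called once per `next`; returning `None` ends the iteration. A minimal sketch:
fn main() {
    let mut count = 0;
    // The closure owns its state; from_fn turns it into an iterator.
    let it = std::iter::from_fn(move || {
        count += 1;
        if count <= 3 { Some(count) } else { None }
    });
    assert_eq!(it.collect::<Vec<_>>(), [1, 2, 3]);
}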
<core::marker::PhantomData<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
Self
}
<core::marker::PhantomData<T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
write!(f, "PhantomData<{}>", crate::any::type_name::<T>())
}
<core::marker::PhantomData<T> as core::hash::Hash>::hash fn hash<H: Hasher>(&self, _: &mut H) {}
<core::mem::Discriminant<T> as core::cmp::PartialEq>::eq fn eq(&self, rhs: &Self) -> bool {
self.0 == rhs.0
}
<core::mem::Discriminant<T> as core::fmt::Debug>::fmt fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_tuple("Discriminant").field(&self.0).finish()
}
<core::mem::manually_drop::ManuallyDrop<T> as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.value.as_ref().eq(other.value.as_ref())
}
<core::mem::manually_drop::ManuallyDrop<T> as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
self.value.as_ref()
}
<core::mem::manually_drop::ManuallyDrop<T> as core::ops::deref::DerefMut>::deref_mut fn deref_mut(&mut self) -> &mut T {
self.value.as_mut()
}
<core::mem::maybe_uninit::MaybeUninit<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
// Not calling `T::clone()`: we cannot know whether the value is initialized enough for that.
*self
}
<core::mem::maybe_uninit::MaybeUninit<T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// NB: there is no `.pad_fmt` so we can't use a simpler `format_args!("MaybeUninit<{..}>")`.
let full_name = type_name::<Self>();
let prefix_len = full_name.find("MaybeUninit").unwrap();
f.pad(&full_name[prefix_len..])
}
<core::num::bignum::Big32x40 as core::clone::Clone>::clone fn clone(&self) -> Self {
Self { size: self.size, base: self.base }
}
<core::num::bignum::Big32x40 as core::cmp::Ord>::cmp fn cmp(&self, other: &$name) -> crate::cmp::Ordering {
use crate::cmp::max;
let sz = max(self.size, other.size);
let lhs = self.base[..sz].iter().cloned().rev();
let rhs = other.base[..sz].iter().cloned().rev();
lhs.cmp(rhs)
}
<core::num::bignum::Big32x40 as core::cmp::PartialEq>::eq fn eq(&self, other: &$name) -> bool {
self.base[..] == other.base[..]
}
<core::num::bignum::Big32x40 as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &$name) -> crate::option::Option<crate::cmp::Ordering> {
crate::option::Option::Some(self.cmp(other))
}
<core::num::bignum::Big32x40 as core::fmt::Debug>::fmt fn fmt(&self, f: &mut crate::fmt::Formatter<'_>) -> crate::fmt::Result {
let sz = if self.size < 1 { 1 } else { self.size };
let digitlen = <$ty>::BITS as usize / 4;
write!(f, "{:#x}", self.base[sz - 1])?;
for &v in self.base[..sz - 1].iter().rev() {
write!(f, "_{:01$x}", v, digitlen)?;
}
crate::result::Result::Ok(())
}
<core::num::error::ParseIntError as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
IntErrorKind::Empty => "cannot parse integer from empty string",
IntErrorKind::InvalidDigit => "invalid digit found in string",
IntErrorKind::PosOverflow => "number too large to fit in target type",
IntErrorKind::NegOverflow => "number too small to fit in target type",
IntErrorKind::Zero => "number would be zero for non-zero type",
}
.fmt(f)
}
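Each `IntErrorKind` maps to a fixed message, observable through `to_string` on the error returned by `str::parse`. A sketch:
fn main() {
    assert_eq!(
        "".parse::<i32>().unwrap_err().to_string(),
        "cannot parse integer from empty string"
    );
    assert_eq!(
        "12a".parse::<i32>().unwrap_err().to_string(),
        "invalid digit found in string"
    );
    assert_eq!(
        "999".parse::<i8>().unwrap_err().to_string(),
        "number too large to fit in target type"
    );
}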
<core::num::error::TryFromIntError as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
"out of range integral type conversion attempted".fmt(f)
}
<core::num::niche_types::Nanoseconds as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::Nanoseconds as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::Nanoseconds as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::Nanoseconds as core::default::Default>::default fn default() -> Self {
Self::ZERO
}
<core::num::niche_types::Nanoseconds as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::Nanoseconds as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroCharInner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroCharInner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroCharInner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroCharInner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroCharInner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroI128Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI128Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI128Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI128Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroI128Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroI16Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI16Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI16Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI16Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroI16Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroI32Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI32Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI32Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI32Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroI32Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroI64Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI64Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI64Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI64Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroI64Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroI8Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI8Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI8Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI8Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroI8Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroIsizeInner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroIsizeInner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroIsizeInner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroIsizeInner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroIsizeInner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroU128Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU128Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU128Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU128Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroU128Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroU16Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU16Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU16Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU16Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroU16Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroU32Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU32Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU32Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU32Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroU32Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroU64Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU64Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU64Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU64Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroU64Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroU8Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU8Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU8Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU8Inner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroU8Inner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::NonZeroUsizeInner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroUsizeInner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroUsizeInner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroUsizeInner as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::NonZeroUsizeInner as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::niche_types::UsizeNoHighBit as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::UsizeNoHighBit as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::UsizeNoHighBit as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::UsizeNoHighBit as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
<$int as fmt::Debug>::fmt(&self.as_inner(), f)
}
<core::num::niche_types::UsizeNoHighBit as core::hash::Hash>::hash fn hash<H: Hasher>(&self, state: &mut H) {
Hash::hash(&self.as_inner(), state);
}
<core::num::nonzero::NonZero<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
*self
}
<core::num::nonzero::NonZero<T> as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.get() == other.get()
}
<core::num::nonzero::NonZero<T> as core::cmp::PartialEq>::ne fn ne(&self, other: &Self) -> bool {
self.get() != other.get()
}
<core::num::nonzero::NonZero<T> as core::fmt::Binary>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.get().fmt(f)
}
<core::num::nonzero::NonZero<T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.get().fmt(f)
}
<core::num::nonzero::NonZero<T> as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.get().fmt(f)
}
<core::num::nonzero::NonZero<T> as core::fmt::LowerExp>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.get().fmt(f)
}
<core::num::nonzero::NonZero<T> as core::fmt::LowerHex>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.get().fmt(f)
}
<core::num::nonzero::NonZero<T> as core::fmt::Octal>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.get().fmt(f)
}
<core::num::nonzero::NonZero<T> as core::fmt::UpperExp>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.get().fmt(f)
}
<core::num::nonzero::NonZero<T> as core::fmt::UpperHex>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.get().fmt(f)
}
<core::num::nonzero::NonZero<T> as core::hash::Hash>::hash fn hash<H>(&self, state: &mut H)
where
H: Hasher,
{
self.get().hash(state)
}
<core::ops::control_flow::ControlFlow<B, C> as core::ops::try_trait::FromResidual<core::ops::control_flow::ControlFlow<B, core::convert::Infallible>>>::from_residual fn from_residual(residual: ControlFlow<B, convert::Infallible>) -> Self {
match residual {
ControlFlow::Break(b) => ControlFlow::Break(b),
}
}
<core::ops::control_flow::ControlFlow<B, C> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
match self {
ControlFlow::Continue(c) => ControlFlow::Continue(c),
ControlFlow::Break(b) => ControlFlow::Break(ControlFlow::Break(b)),
}
}
<core::ops::control_flow::ControlFlow<B, C> as core::ops::try_trait::Try>::from_output fn from_output(output: Self::Output) -> Self {
ControlFlow::Continue(output)
}
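These impls are what make `?` work on `ControlFlow`: `branch` maps `Continue(c)` to the output and `Break(b)` to the residual, and `from_residual` rebuilds a `Break` with a possibly different `Continue` type. A sketch of the resulting short-circuiting:
use std::ops::ControlFlow;

fn first_even(xs: &[i32]) -> ControlFlow<i32, ()> {
    for &x in xs {
        if x % 2 == 0 {
            return ControlFlow::Break(x);
        }
    }
    ControlFlow::Continue(())
}

fn demo(xs: &[i32]) -> ControlFlow<i32, &'static str> {
    // On Continue(()), `?` unwraps the output and execution proceeds;
    // on Break(b), `?` returns Break(b) from `demo` via from_residual.
    first_even(xs)?;
    ControlFlow::Continue("no even numbers")
}

fn main() {
    assert_eq!(demo(&[1, 3, 4]), ControlFlow::Break(4));
    assert_eq!(demo(&[1, 3, 5]), ControlFlow::Continue("no even numbers"));
}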
<core::ops::index_range::IndexRange as core::iter::traits::double_ended::DoubleEndedIterator>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let taken = self.take_suffix(n);
NonZero::new(n - taken.len()).map_or(Ok(()), Err)
}
<core::ops::index_range::IndexRange as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<usize> {
if self.len() > 0 {
// SAFETY: We just checked that the range is non-empty
unsafe { Some(self.next_back_unchecked()) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::iter::traits::double_ended::DoubleEndedIterator>::rfold fn rfold<B, F: FnMut(B, usize) -> B>(mut self, init: B, f: F) -> B {
self.try_rfold(init, NeverShortCircuit::wrap_mut_2(f)).0
}
<core::ops::index_range::IndexRange as core::iter::traits::double_ended::DoubleEndedIterator>::try_rfold fn try_rfold<B, F, R>(&mut self, mut accum: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
// `Range` needs to check `start < end`, but thanks to our type invariant
// we can loop on the stricter `start != end`.
self.assume_range();
while self.start != self.end {
// SAFETY: We just checked that the range is non-empty
let i = unsafe { self.next_back_unchecked() };
accum = f(accum, i)?;
}
try { accum }
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let taken = self.take_prefix(n);
NonZero::new(n - taken.len()).map_or(Ok(()), Err)
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::fold fn fold<B, F: FnMut(B, usize) -> B>(mut self, init: B, f: F) -> B {
self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<usize> {
if self.len() > 0 {
// SAFETY: We just checked that the range is non-empty
unsafe { Some(self.next_unchecked()) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, mut accum: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
// `Range` needs to check `start < end`, but thanks to our type invariant
// we can loop on the stricter `start != end`.
self.assume_range();
while self.start != self.end {
// SAFETY: We just checked that the range is non-empty
let i = unsafe { self.next_unchecked() };
accum = f(accum, i)?;
}
try { accum }
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&*get_offset_len_noubcheck(slice, self.start(), self.len())) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&mut *get_offset_len_mut_noubcheck(slice, self.start(), self.len())) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked requires that the index is within the slice",
(end: usize = self.end(), len: usize = slice.len()) => end <= len
);
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe { get_offset_len_noubcheck(slice, self.start(), self.len()) }
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked_mut requires that the index is within the slice",
(end: usize = self.end(), len: usize = slice.len()) => end <= len
);
// SAFETY: see comments for `get_unchecked` above.
unsafe { get_offset_len_mut_noubcheck(slice, self.start(), self.len()) }
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*get_offset_len_noubcheck(slice, self.start(), self.len()) }
} else {
slice_index_fail(self.start(), self.end(), slice.len())
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start(), self.len()) }
} else {
slice_index_fail(self.start(), self.end(), slice.len())
}
}
<core::ops::range::Range<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(self.end)
}
<core::ops::range::Range<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(self.start)
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_advance_back_by default fn spec_advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let steps = Step::steps_between(&self.start, &self.end);
let available = steps.1.unwrap_or(steps.0);
let taken = available.min(n);
self.end =
Step::backward_checked(self.end.clone(), taken).expect("`Step` invariants not upheld");
NonZero::new(n - taken).map_or(Ok(()), Err)
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_advance_by default fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let steps = Step::steps_between(&self.start, &self.end);
let available = steps.1.unwrap_or(steps.0);
let taken = available.min(n);
self.start =
Step::forward_checked(self.start.clone(), taken).expect("`Step` invariants not upheld");
NonZero::new(n - taken).map_or(Ok(()), Err)
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_next default fn spec_next(&mut self) -> Option<A> {
if self.start < self.end {
let n =
Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
Some(mem::replace(&mut self.start, n))
} else {
None
}
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_next_back default fn spec_next_back(&mut self) -> Option<A> {
if self.start < self.end {
self.end =
Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
Some(self.end.clone())
} else {
None
}
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_nth default fn spec_nth(&mut self, n: usize) -> Option<A> {
if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
if plus_n < self.end {
self.start =
Step::forward_checked(plus_n.clone(), 1).expect("`Step` invariants not upheld");
return Some(plus_n);
}
}
self.start = self.end.clone();
None
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_nth_back default fn spec_nth_back(&mut self, n: usize) -> Option<A> {
if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
if minus_n > self.start {
self.end =
Step::backward_checked(minus_n, 1).expect("`Step` invariants not upheld");
return Some(self.end.clone());
}
}
self.end = self.start.clone();
None
}
<core::ops::range::Range<Idx> as core::fmt::Debug>::fmt fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
self.start.fmt(fmt)?;
write!(fmt, "..")?;
self.end.fmt(fmt)?;
Ok(())
}
<core::ops::range::Range<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Included(self.start), Excluded(self.end))
}
<core::ops::range::Range<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(&self.end)
}
<core::ops::range::Range<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(&self.start)
}
<core::ops::range::Range<u16> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<u16>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
<core::ops::range::Range<u32> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<u32>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
<core::ops::range::Range<u64> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<u64>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
<core::ops::range::Range<u8> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<u8>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
<core::ops::range::Range<usize> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<usize>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
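These `setup` functions are the counterpart of the range specializations above: the range end is replaced by the number of yields, `inner_len.div_ceil(step)`. A sketch of the resulting count:
fn main() {
    // 250 source elements stepped by 100: div_ceil(250, 100) = 3 yields.
    let v: Vec<u8> = (0u8..250).step_by(100).collect();
    assert_eq!(v, [0, 100, 200]);
    assert_eq!((0u8..250).step_by(100).size_hint(), (3, Some(3)));
}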
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&*get_offset_len_noubcheck(slice, self.start, new_len)) }
} else {
None
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&mut *get_offset_len_mut_noubcheck(slice, self.start, new_len)) }
} else {
None
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked requires that the range is within the slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len
);
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe and the length calculation cannot overflow.
unsafe {
// Using the intrinsic avoids a superfluous UB check,
// since the one on this method already checked `end >= start`.
let new_len = crate::intrinsics::unchecked_sub(self.end, self.start);
get_offset_len_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked_mut requires that the range is within the slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len
);
// SAFETY: see comments for `get_unchecked` above.
unsafe {
let new_len = crate::intrinsics::unchecked_sub(self.end, self.start);
get_offset_len_mut_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*get_offset_len_noubcheck(slice, self.start, new_len) }
} else {
slice_index_fail(self.start, self.end, slice.len())
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start, new_len) }
} else {
slice_index_fail(self.start, self.end, slice.len())
}
}
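A sketch of the contract shared by the range entries above: `get`/`get_mut` reject an inverted or out-of-bounds range with `None`, while `index`/`index_mut` (the `[]` operator) panic via `slice_index_fail` on the same inputs.
fn main() {
    let v = [10, 20, 30, 40];
    assert_eq!(v.get(1..3), Some(&[20, 30][..]));
    assert_eq!(v.get(2..6), None); // end out of bounds
    assert_eq!(v.get(3..1), None); // inverted range: checked_sub fails
    // The Index path returns the subslice directly, panicking on bad input.
    assert_eq!(&v[1..3], &[20, 30]);
}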
<core::ops::range::RangeFrom<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFrom<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(self.start)
}
<core::ops::range::RangeFrom<Idx> as core::fmt::Debug>::fmt fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
self.start.fmt(fmt)?;
write!(fmt, "..")?;
Ok(())
}
<core::ops::range::RangeFrom<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Included(self.start), Unbounded)
}
<core::ops::range::RangeFrom<T> as core::ops::range::OneSidedRange<T>>::bound fn bound(self) -> (OneSidedRangeBound, T) {
(OneSidedRangeBound::StartInclusive, self.start)
}
<core::ops::range::RangeFrom<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFrom<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(&self.start)
}
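A minimal illustration of these bounds (editorial example): `start..` reports an inclusive start and an unbounded end, so `contains` only checks the lower side.

use std::ops::{Bound, RangeBounds};

fn main() {
    let r = 2..;
    assert_eq!(r.start_bound(), Bound::Included(&2));
    assert_eq!(r.end_bound(), Bound::<&i32>::Unbounded);
    assert!(r.contains(&1000));
}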
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
(self.start..slice.len()).get(slice)
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(self.start..slice.len()).get_mut(slice)
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (self.start..slice.len()).get_unchecked(slice) }
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
if self.start > slice.len() {
slice_index_fail(self.start, slice.len(), slice.len())
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe {
let new_len = crate::intrinsics::unchecked_sub(slice.len(), self.start);
&*get_offset_len_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.start > slice.len() {
slice_index_fail(self.start, slice.len(), slice.len())
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe {
let new_len = crate::intrinsics::unchecked_sub(slice.len(), self.start);
&mut *get_offset_len_mut_noubcheck(slice, self.start, new_len)
}
}
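Illustrative behavior of the `RangeFrom` indexing above (editorial sketch): the range is re-sliced as `start..len`, so `start == len` yields an empty slice and only `start > len` fails.

fn main() {
    let v = [1, 2, 3];
    assert_eq!(&v[1..], &[2, 3]);
    assert_eq!(v.get(3..), Some(&[][..])); // start == len: empty slice
    assert_eq!(v.get(4..), None); // start > len
}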
<core::ops::range::RangeFull as core::fmt::Debug>::fmt fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "..")
}
<core::ops::range::RangeFull as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Unbounded, Unbounded)
}
<core::ops::range::RangeFull as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFull as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
Some(slice)
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
Some(slice)
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
slice
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
slice
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
slice
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
slice
}
<core::ops::range::RangeInclusive<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Included(self.end)
}
<core::ops::range::RangeInclusive<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(self.start)
}
<core::ops::range::RangeInclusive<A> as core::iter::range::RangeInclusiveIteratorImpl>::spec_next default fn spec_next(&mut self) -> Option<A> {
if self.is_empty() {
return None;
}
let is_iterating = self.start < self.end;
Some(if is_iterating {
let n =
Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
mem::replace(&mut self.start, n)
} else {
self.exhausted = true;
self.start.clone()
})
}
<core::ops::range::RangeInclusive<A> as core::iter::range::RangeInclusiveIteratorImpl>::spec_next_back default fn spec_next_back(&mut self) -> Option<A> {
if self.is_empty() {
return None;
}
let is_iterating = self.start < self.end;
Some(if is_iterating {
let n =
Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
mem::replace(&mut self.end, n)
} else {
self.exhausted = true;
self.end.clone()
})
}
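A behavioral sketch of the `exhausted` handling above (editorial example): the final item, where `start == end`, is yielded by setting the flag instead of stepping past `end`.

fn main() {
    let mut r = 1..=2;
    assert_eq!(r.next(), Some(1));
    assert_eq!(r.next(), Some(2)); // start == end: sets `exhausted`
    assert!(r.is_empty());
    assert_eq!(r.next(), None);
}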
<core::ops::range::RangeInclusive<A> as core::iter::range::RangeInclusiveIteratorImpl>::spec_try_fold default fn spec_try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, A) -> R,
R: Try<Output = B>,
{
if self.is_empty() {
return try { init };
}
let mut accum = init;
while self.start < self.end {
let n =
Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
let n = mem::replace(&mut self.start, n);
accum = f(accum, n)?;
}
self.exhausted = true;
if self.start == self.end {
accum = f(accum, self.start.clone())?;
}
try { accum }
}
<core::ops::range::RangeInclusive<A> as core::iter::range::RangeInclusiveIteratorImpl>::spec_try_rfold default fn spec_try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, A) -> R,
R: Try<Output = B>,
{
if self.is_empty() {
return try { init };
}
let mut accum = init;
while self.start < self.end {
let n =
Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
let n = mem::replace(&mut self.end, n);
accum = f(accum, n)?;
}
self.exhausted = true;
if self.start == self.end {
accum = f(accum, self.start.clone())?;
}
try { accum }
}
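The try-fold shape above can be observed through any short-circuiting fold (editorial sketch): the `start < end` loop folds all but the last item, and the final `start == end` item is folded after `exhausted` is set.

fn main() {
    let sum = (1..=4).try_fold(0i32, |acc, x| acc.checked_add(x));
    assert_eq!(sum, Some(10));
    let capped = (1..=4).try_fold(0i32, |acc, x| if acc >= 3 { None } else { Some(acc + x) });
    assert_eq!(capped, None); // short-circuits partway through
}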
<core::ops::range::RangeInclusive<Idx> as core::fmt::Debug>::fmt fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
self.start.fmt(fmt)?;
write!(fmt, "..=")?;
self.end.fmt(fmt)?;
if self.exhausted {
write!(fmt, " (exhausted)")?;
}
Ok(())
}
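Illustration of the Debug output above, including the "(exhausted)" marker (editorial example):

fn main() {
    let mut r = 0..=0;
    assert_eq!(format!("{r:?}"), "0..=0");
    r.next();
    assert_eq!(format!("{r:?}"), "0..=0 (exhausted)");
}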
<core::ops::range::RangeInclusive<T> as core::iter::range::RangeInclusiveIteratorImpl>::spec_next fn spec_next(&mut self) -> Option<T> {
if self.is_empty() {
return None;
}
let is_iterating = self.start < self.end;
Some(if is_iterating {
// SAFETY: just checked precondition
let n = unsafe { Step::forward_unchecked(self.start, 1) };
mem::replace(&mut self.start, n)
} else {
self.exhausted = true;
self.start
})
}
<core::ops::range::RangeInclusive<T> as core::iter::range::RangeInclusiveIteratorImpl>::spec_next_back fn spec_next_back(&mut self) -> Option<T> {
if self.is_empty() {
return None;
}
let is_iterating = self.start < self.end;
Some(if is_iterating {
// SAFETY: just checked precondition
let n = unsafe { Step::backward_unchecked(self.end, 1) };
mem::replace(&mut self.end, n)
} else {
self.exhausted = true;
self.end
})
}
<core::ops::range::RangeInclusive<T> as core::iter::range::RangeInclusiveIteratorImpl>::spec_try_fold fn spec_try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, T) -> R,
R: Try<Output = B>,
{
if self.is_empty() {
return try { init };
}
let mut accum = init;
while self.start < self.end {
// SAFETY: just checked precondition
let n = unsafe { Step::forward_unchecked(self.start, 1) };
let n = mem::replace(&mut self.start, n);
accum = f(accum, n)?;
}
self.exhausted = true;
if self.start == self.end {
accum = f(accum, self.start)?;
}
try { accum }
}
<core::ops::range::RangeInclusive<T> as core::iter::range::RangeInclusiveIteratorImpl>::spec_try_rfold fn spec_try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, T) -> R,
R: Try<Output = B>,
{
if self.is_empty() {
return try { init };
}
let mut accum = init;
while self.start < self.end {
// SAFETY: just checked precondition
let n = unsafe { Step::backward_unchecked(self.end, 1) };
let n = mem::replace(&mut self.end, n);
accum = f(accum, n)?;
}
self.exhausted = true;
if self.start == self.end {
accum = f(accum, self.start)?;
}
try { accum }
}
<core::ops::range::RangeInclusive<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(
Included(self.start),
if self.exhausted {
// When the iterator is exhausted, we usually have start == end,
// but we want the range to appear empty, containing nothing.
Excluded(self.end)
} else {
Included(self.end)
},
)
}
<core::ops::range::RangeInclusive<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
if self.exhausted {
// When the iterator is exhausted, we usually have start == end,
// but we want the range to appear empty, containing nothing.
Excluded(&self.end)
} else {
Included(&self.end)
}
}
<core::ops::range::RangeInclusive<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(&self.start)
}
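A minimal sketch of the exhaustion handling above: once the iterator is exhausted, `end_bound` flips to `Excluded`, so `contains` sees an empty range.

fn main() {
    let mut r = 5..=5;
    assert!(r.contains(&5));
    r.next();
    assert!(!r.contains(&5)); // end_bound() is now Excluded(&5)
}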
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
if *self.end() >= slice.len() { None } else { self.into_slice_range().get(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if *self.end() >= slice.len() { None } else { self.into_slice_range().get_mut(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { self.into_slice_range().get_unchecked(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { self.into_slice_range().get_unchecked_mut(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
let Self { mut start, mut end, exhausted } = self;
let len = slice.len();
if end < len {
end = end + 1;
start = if exhausted { end } else { start };
if let Some(new_len) = usize::checked_sub(end, start) {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { return &*get_offset_len_noubcheck(slice, start, new_len) }
}
}
slice_index_fail(start, end, slice.len())
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
let Self { mut start, mut end, exhausted } = self;
let len = slice.len();
if end < len {
end = end + 1;
start = if exhausted { end } else { start };
if let Some(new_len) = usize::checked_sub(end, start) {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { return &mut *get_offset_len_mut_noubcheck(slice, start, new_len) }
}
}
slice_index_fail(start, end, slice.len())
}
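Illustrative behavior of the inclusive indexing above (editorial sketch): the bound is rewritten to `start..end + 1` after the `end < len` check, so the largest valid inclusive end is `len - 1`.

fn main() {
    let v = [10, 20, 30, 40];
    assert_eq!(&v[1..=2], &[20, 30]);
    assert_eq!(v.get(1..=3), Some(&[20, 30, 40][..]));
    assert_eq!(v.get(1..=4), None); // end() == len() is rejected up front
}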
<core::ops::range::RangeTo<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(self.end)
}
<core::ops::range::RangeTo<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeTo<Idx> as core::fmt::Debug>::fmt fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "..")?;
self.end.fmt(fmt)?;
Ok(())
}
<core::ops::range::RangeTo<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Unbounded, Excluded(self.end))
}
<core::ops::range::RangeTo<T> as core::ops::range::OneSidedRange<T>>::bound fn bound(self) -> (OneSidedRangeBound, T) {
(OneSidedRangeBound::End, self.end)
}
<core::ops::range::RangeTo<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(&self.end)
}
<core::ops::range::RangeTo<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
(0..self.end).get(slice)
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..self.end).get_mut(slice)
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..self.end).get_unchecked(slice) }
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..self.end).get_unchecked_mut(slice) }
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
(0..self.end).index(slice)
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..self.end).index_mut(slice)
}
<core::ops::range::RangeToInclusive<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Included(self.end)
}
<core::ops::range::RangeToInclusive<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeToInclusive<Idx> as core::fmt::Debug>::fmt fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(fmt, "..=")?;
self.end.fmt(fmt)?;
Ok(())
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Unbounded, Included(self.end))
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::OneSidedRange<T>>::bound fn bound(self) -> (OneSidedRangeBound, T) {
(OneSidedRangeBound::EndInclusive, self.end)
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Included(&self.end)
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
(0..=self.end).get(slice)
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..=self.end).get_mut(slice)
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..=self.end).get_unchecked(slice) }
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..=self.end).get_unchecked_mut(slice) }
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
(0..=self.end).index(slice)
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..=self.end).index_mut(slice)
}
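Sketch of the delegation above: `..=n` is forwarded to `0..=n`, i.e. the first `n + 1` elements.

fn main() {
    let v = [1, 2, 3];
    assert_eq!(&v[..=1], &[1, 2]);
    assert_eq!(v.get(..=2), Some(&[1, 2, 3][..]));
    assert_eq!(v.get(..=3), None);
}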
<core::ops::try_trait::NeverShortCircuit<T> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<NeverShortCircuitResidual, T> {
ControlFlow::Continue(self.0)
}
<core::ops::try_trait::NeverShortCircuit<T> as core::ops::try_trait::Try>::from_output fn from_output(x: T) -> Self {
NeverShortCircuit(x)
}
<core::ops::try_trait::Wrapped<T, A, F> as core::ops::function::FnMut<(A,)>>::call_mut extern "rust-call" fn call_mut(&mut self, (args,): (A,)) -> Self::Output {
NeverShortCircuit((self.f)(args))
}
<core::ops::try_trait::Wrapped<T, A, F> as core::ops::function::FnOnce<(A,)>>::call_once extern "rust-call" fn call_once(mut self, args: (A,)) -> Self::Output {
self.call_mut(args)
}
<core::option::IntoIter<A> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<A> {
self.inner.next()
}
<core::option::IntoIter<A> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
<core::option::Item<A> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
self.opt.len()
}
<core::option::Item<A> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<A> {
self.opt.take()
}
<core::option::Item<A> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.opt.len();
(len, Some(len))
}
<core::option::Option<&'a T> as core::convert::From<&'a core::option::Option<T>>>::from fn from(o: &'a Option<T>) -> Option<&'a T> {
o.as_ref()
}
<core::option::Option<&'a mut T> as core::convert::From<&'a mut core::option::Option<T>>>::from fn from(o: &'a mut Option<T>) -> Option<&'a mut T> {
o.as_mut()
}
<core::option::Option<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
match self {
Some(x) => Some(x.clone()),
None => None,
}
}
<core::option::Option<T> as core::clone::Clone>::clone_from fn clone_from(&mut self, source: &Self) {
match (self, source) {
(Some(to), Some(from)) => to.clone_from(from),
(to, from) => *to = from.clone(),
}
}
<core::option::Option<T> as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
// Spelling out the cases explicitly optimizes better than
// `_ => false`
match (self, other) {
(Some(l), Some(r)) => *l == *r,
(Some(_), None) => false,
(None, Some(_)) => false,
(None, None) => true,
}
}
<core::option::Option<T> as core::convert::From<T>>::from fn from(val: T) -> Option<T> {
Some(val)
}
<core::option::Option<T> as core::default::Default>::default fn default() -> Option<T> {
None
}
<core::option::Option<T> as core::iter::traits::collect::IntoIterator>::into_iter fn into_iter(self) -> IntoIter<T> {
IntoIter { inner: Item { opt: self } }
}
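Illustration of the `Item`/`IntoIter` wrappers above (editorial example): `Some` yields exactly one item, `None` yields none, and the size hint is exact.

fn main() {
    let mut it = Some(7).into_iter();
    assert_eq!(it.size_hint(), (1, Some(1)));
    assert_eq!(it.next(), Some(7));
    assert_eq!(it.next(), None);
    assert_eq!(None::<i32>.into_iter().count(), 0);
}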
<core::option::Option<T> as core::ops::try_trait::FromResidual<core::option::Option<core::convert::Infallible>>>::from_residual fn from_residual(residual: Option<convert::Infallible>) -> Self {
match residual {
None => None,
}
}
<core::option::Option<T> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
match self {
Some(v) => ControlFlow::Continue(v),
None => ControlFlow::Break(None),
}
}
<core::option::Option<T> as core::ops::try_trait::Try>::from_output fn from_output(output: Self::Output) -> Self {
Some(output)
}
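A minimal sketch of the `Try` plumbing above: `?` on `None` produces the `Option<Infallible>` residual via `branch`, and `from_residual` maps it back to `None` in the caller.

fn first_char_upper(s: &str) -> Option<char> {
    let c = s.chars().next()?; // None short-circuits here
    Some(c.to_ascii_uppercase())
}

fn main() {
    assert_eq!(first_char_upper("hi"), Some('H'));
    assert_eq!(first_char_upper(""), None);
}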
<core::panic::location::Location<'_> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Location")
.field("file", &self.file())
.field("line", &self.line)
.field("column", &self.col)
.finish()
}
<core::panic::location::Location<'_> as core::fmt::Display>::fmt fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(formatter, "{}:{}:{}", self.file(), self.line, self.col)
}
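Illustration of the Display format above (editorial example; the printed path depends on where the code lives):

fn main() {
    let loc = std::panic::Location::caller();
    println!("{loc}"); // e.g. src/main.rs:4:15
}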
<core::panic::panic_info::PanicInfo<'_> as core::fmt::Display>::fmt fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("panicked at ")?;
self.location.fmt(formatter)?;
formatter.write_str(":\n")?;
formatter.write_fmt(*self.message)?;
Ok(())
}
<core::panic::panic_info::PanicMessage<'_> as core::fmt::Debug>::fmt fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_fmt(*self.message)
}
<core::panic::panic_info::PanicMessage<'_> as core::fmt::Display>::fmt fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_fmt(*self.message)
}
<core::panicking::assert_matches_failed::Pattern<'_> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.0)
}
<core::ptr::alignment::Alignment as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?} (1 << {:?})", self.as_nonzero(), self.log2())
}
<core::ptr::alignment::Alignment as core::hash::Hash>::hash fn hash<H: hash::Hasher>(&self, state: &mut H) {
self.as_nonzero().hash(state)
}
<core::ptr::metadata::DynMetadata<Dyn> as core::clone::Clone>::clone fn clone(&self) -> Self {
*self
}
<core::ptr::metadata::DynMetadata<Dyn> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("DynMetadata").field(&self.vtable_ptr()).finish()
}
<core::ptr::non_null::NonNull<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
*self
}
<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_ptr() == other.as_ptr()
}
<core::ptr::non_null::NonNull<T> as core::convert::From<&mut T>>::from fn from(r: &mut T) -> Self {
NonNull::from_mut(r)
}
<core::ptr::non_null::NonNull<T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Pointer::fmt(&self.as_ptr(), f)
}
<core::ptr::non_null::NonNull<T> as core::fmt::Pointer>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Pointer::fmt(&self.as_ptr(), f)
}
<core::result::Result<T, E> as core::clone::Clone>::clone fn clone(&self) -> Self {
match self {
Ok(x) => Ok(x.clone()),
Err(x) => Err(x.clone()),
}
}
<core::result::Result<T, E> as core::clone::Clone>::clone_from fn clone_from(&mut self, source: &Self) {
match (self, source) {
(Ok(to), Ok(from)) => to.clone_from(from),
(Err(to), Err(from)) => to.clone_from(from),
(to, from) => *to = from.clone(),
}
}
<core::result::Result<T, E> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
match self {
Ok(v) => ControlFlow::Continue(v),
Err(e) => ControlFlow::Break(Err(e)),
}
}
<core::result::Result<T, E> as core::ops::try_trait::Try>::from_output fn from_output(output: Self::Output) -> Self {
Ok(output)
}
<core::result::Result<T, F> as core::ops::try_trait::FromResidual<core::result::Result<core::convert::Infallible, E>>>::from_residual fn from_residual(residual: Result<convert::Infallible, E>) -> Self {
match residual {
Err(e) => Err(From::from(e)),
}
}
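Illustration of the residual conversion above (editorial sketch; `MyError` is a made-up example type): `from_residual` applies `From::from` to the error, which is what lets `?` widen error types across function boundaries.

use std::num::ParseIntError;

#[derive(Debug)]
struct MyError(String); // hypothetical error type for the example

impl From<ParseIntError> for MyError {
    fn from(e: ParseIntError) -> Self {
        MyError(e.to_string())
    }
}

fn parse(s: &str) -> Result<i32, MyError> {
    // `?` yields `Err(ParseIntError)` as the residual; `from_residual`
    // runs the `From` impl above to produce `Err(MyError)`.
    Ok(s.parse::<i32>()?)
}

fn main() {
    assert!(parse("42").is_ok());
    assert!(parse("x").is_err());
}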
<core::slice::ascii::EscapeAscii<'a> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("EscapeAscii").finish_non_exhaustive()
}
<core::slice::ascii::EscapeAscii<'a> as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // disassemble the iterator, including the front/back parts of the flat map, in case it has been partially consumed
let (front, slice, back) = self.clone().inner.into_parts();
let front = front.unwrap_or(EscapeDefault::empty());
let mut bytes = slice.unwrap_or_default().as_slice();
let back = back.unwrap_or(EscapeDefault::empty());
// usually empty, so the formatter won't have to do any work
for byte in front {
f.write_char(byte as char)?;
}
fn needs_escape(b: u8) -> bool {
b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
}
while bytes.len() > 0 {
// fast path for the printable, non-escaped subset of ascii
let prefix = bytes.iter().take_while(|&&b| !needs_escape(b)).count();
            // SAFETY: prefix length was derived by counting bytes in the same slice, so it's in-bounds
let (prefix, remainder) = unsafe { bytes.split_at_unchecked(prefix) };
// SAFETY: prefix is a valid utf8 sequence, as it's a subset of ASCII
let prefix = unsafe { crate::str::from_utf8_unchecked(prefix) };
f.write_str(prefix)?; // the fast part
bytes = remainder;
if let Some(&b) = bytes.first() {
// guaranteed to be non-empty, better to write it as a str
fmt::Display::fmt(&ascii::escape_default(b), f)?;
bytes = &bytes[1..];
}
}
// also usually empty
for byte in back {
f.write_char(byte as char)?;
}
Ok(())
}
<core::slice::ascii::EscapeAscii<'a> as core::fmt::Display>::fmt::needs_escape fn needs_escape(b: u8) -> bool {
b > 0x7E || b < 0x20 || b == b'\\' || b == b'\'' || b == b'"'
}
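Usage sketch for the fast-path formatting above (editorial example): printable ASCII is copied in bulk via `write_str`, and each remaining byte goes through `escape_default`.

fn main() {
    let s = b"ok\n\"end\"".escape_ascii().to_string();
    assert_eq!(s, "ok\\n\\\"end\\\"");
}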
<core::slice::ascii::EscapeByte as core::ops::function::Fn<(&u8,)>>::call extern "rust-call" fn call(&self, ($( $arg, )*): ($( $ArgTy, )*)) -> $ReturnTy {
$body
}
<core::slice::ascii::EscapeByte as core::ops::function::FnMut<(&u8,)>>::call_mut extern "rust-call" fn call_mut(
&mut self,
($( $arg, )*): ($( $ArgTy, )*)
) -> $ReturnTy {
Fn::call(&*self, ($( $arg, )*))
}
<core::slice::ascii::EscapeByte as core::ops::function::FnOnce<(&u8,)>>::call_once extern "rust-call" fn call_once(self, ($( $arg, )*): ($( $ArgTy, )*)) -> $ReturnTy {
Fn::call(&self, ($( $arg, )*))
}
<core::slice::iter::Chunks<'a, T> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.len()
}
<core::slice::iter::Chunks<'a, T> as core::iter::traits::iterator::Iterator>::last fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
Some(&self.v[start..])
}
}
<core::slice::iter::Chunks<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
let chunksz = cmp::min(self.v.len(), self.chunk_size);
let (fst, snd) = self.v.split_at(chunksz);
self.v = snd;
Some(fst)
}
}
<core::slice::iter::Chunks<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
if let Some(start) = n.checked_mul(self.chunk_size)
&& start < self.v.len()
{
let rest = &self.v[start..];
let (chunk, rest) = rest.split_at(self.chunk_size.min(rest.len()));
self.v = rest;
Some(chunk)
} else {
self.v = &self.v[..0]; // cheaper than &[]
None
}
}
<core::slice::iter::Chunks<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
let n = self.v.len().div_ceil(self.chunk_size);
(n, Some(n))
}
}
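Illustration of the chunk arithmetic above (editorial sketch): `chunks(2)` over five elements reports `5.div_ceil(2) == 3` chunks, and `last` computes the start of the final, possibly short, chunk directly.

fn main() {
    let v = [1, 2, 3, 4, 5];
    let it = v.chunks(2);
    assert_eq!(it.size_hint(), (3, Some(3)));
    assert_eq!(it.last(), Some(&[5][..]));
}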
<core::slice::iter::ChunksExact<'_, T> as core::iter::traits::exact_size::ExactSizeIterator>::is_empty fn is_empty(&self) -> bool {
self.v.is_empty()
}
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let (fst, snd) = self.v.split_at(self.v.len() - self.chunk_size);
self.v = fst;
Some(snd)
}
}
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::nth_back fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n < len {
let start = (len - 1 - n) * self.chunk_size;
let end = start + self.chunk_size;
let nth_back = &self.v[start..end];
self.v = &self.v[..start];
Some(nth_back)
} else {
self.v = &self.v[..0]; // cheaper than &[]
None
}
}
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.len()
}
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::iterator::Iterator>::last fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a [T]> {
self.v.split_at_checked(self.chunk_size).and_then(|(chunk, rest)| {
self.v = rest;
Some(chunk)
})
}
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
if let Some(start) = n.checked_mul(self.chunk_size)
&& start < self.v.len()
{
self.v = &self.v[start..];
self.next()
} else {
self.v = &self.v[..0]; // cheaper than &[]
None
}
}
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
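Sketch of the exact-chunk contract above: only full chunks are yielded, and the trailing partial chunk is exposed separately through `remainder`.

fn main() {
    let v = [1, 2, 3, 4, 5];
    let mut it = v.chunks_exact(2);
    assert_eq!(it.size_hint(), (2, Some(2)));
    assert_eq!(it.next(), Some(&[1, 2][..]));
    assert_eq!(it.next(), Some(&[3, 4][..]));
    assert_eq!(it.next(), None);
    assert_eq!(it.remainder(), &[5]);
}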
<core::slice::iter::ChunksExactMut<'_, T> as core::iter::traits::exact_size::ExactSizeIterator>::is_empty fn is_empty(&self) -> bool {
self.v.is_empty()
}
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// SAFETY: This subtraction is inbounds because of the check above
let (head, tail) = unsafe { self.v.split_at_mut(self.v.len() - self.chunk_size) };
self.v = head;
// SAFETY: Nothing else points to or will point to the contents of this slice.
Some(unsafe { &mut *tail })
}
}
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::nth_back fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
let len = self.len();
if n < len {
let start = (len - 1 - n) * self.chunk_size;
let end = start + self.chunk_size;
// SAFETY: The self.v contract ensures that any split_at_mut is valid.
let (temp, _tail) = unsafe { mem::replace(&mut self.v, &mut []).split_at_mut(end) };
// SAFETY: The self.v contract ensures that any split_at_mut is valid.
let (head, nth_back) = unsafe { temp.split_at_mut(start) };
self.v = head;
// SAFETY: Nothing else points to or will point to the contents of this slice.
Some(unsafe { &mut *nth_back })
} else {
self.v = &mut [];
None
}
}
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.len()
}
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::iterator::Iterator>::last fn last(mut self) -> Option<Self::Item> {
self.next_back()
}
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a mut [T]> {
// SAFETY: we have `&mut self`, so are allowed to temporarily materialize a mut slice
unsafe { &mut *self.v }.split_at_mut_checked(self.chunk_size).and_then(|(chunk, rest)| {
self.v = rest;
Some(chunk)
})
}
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
if let Some(start) = n.checked_mul(self.chunk_size)
&& start < self.v.len()
{
// SAFETY: `start < self.v.len()`
self.v = unsafe { self.v.split_at_mut(start).1 };
self.next()
} else {
self.v = &mut [];
None
}
}
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
<core::slice::iter::ChunksMut<'a, T> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.len()
}
<core::slice::iter::ChunksMut<'a, T> as core::iter::traits::iterator::Iterator>::last fn last(self) -> Option<Self::Item> {
if self.v.is_empty() {
None
} else {
let start = (self.v.len() - 1) / self.chunk_size * self.chunk_size;
// SAFETY: Nothing else points to or will point to the contents of this slice.
Some(unsafe { &mut *self.v.get_unchecked_mut(start..) })
}
}
<core::slice::iter::ChunksMut<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
let sz = cmp::min(self.v.len(), self.chunk_size);
// SAFETY: The self.v contract ensures that any split_at_mut is valid.
let (head, tail) = unsafe { self.v.split_at_mut(sz) };
self.v = tail;
// SAFETY: Nothing else points to or will point to the contents of this slice.
Some(unsafe { &mut *head })
}
}
<core::slice::iter::ChunksMut<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
if let Some(start) = n.checked_mul(self.chunk_size)
&& start < self.v.len()
{
// SAFETY: `start < self.v.len()` ensures this is in bounds
let (_, rest) = unsafe { self.v.split_at_mut(start) };
            // SAFETY: `.min(rest.len())` ensures this is in bounds
let (chunk, rest) = unsafe { rest.split_at_mut(self.chunk_size.min(rest.len())) };
self.v = rest;
// SAFETY: Nothing else points to or will point to the contents of this slice.
Some(unsafe { &mut *chunk })
} else {
self.v = &mut [];
None
}
}
<core::slice::iter::ChunksMut<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
let n = self.v.len().div_ceil(self.chunk_size);
(n, Some(n))
}
}
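Behavioral sketch of the mutable variant above: the unsafe splits hand out disjoint `&mut [T]` chunks, so each chunk can be modified independently.

fn main() {
    let mut v = [1, 2, 3, 4, 5];
    for chunk in v.chunks_mut(2) {
        chunk[0] *= 10;
    }
    assert_eq!(v, [10, 2, 30, 4, 50]);
}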
<core::slice::iter::Iter<'_, T> as core::clone::Clone>::clone fn clone(&self) -> Self {
Iter { ptr: self.ptr, end_or_len: self.end_or_len, _marker: self._marker }
}
<core::slice::iter::Iter<'_, T> as core::default::Default>::default fn default() -> Self {
(& $( $mut_ )? []).into_iter()
}
<core::slice::iter::Iter<'_, T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("Iter").field(&self.as_slice()).finish()
}
<core::slice::iter::Iter<'_, T> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
len!(self)
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
unsafe { self.pre_dec_end(advance) };
NonZero::new(n - advance).map_or(Ok(()), Err)
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<$elem> {
// could be implemented with slices, but this avoids bounds checks
// SAFETY: The call to `next_back_unchecked`
// is safe since we check if the iterator is empty first.
unsafe {
if is_empty!(self) {
None
} else {
Some(self.next_back_unchecked())
}
}
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::nth_back fn nth_back(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
if_zst!(mut self,
len => *len = 0,
end => *end = self.ptr,
);
return None;
}
// SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
unsafe {
self.pre_dec_end(n);
Some(self.next_back_unchecked())
}
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
unsafe { self.post_inc_start(advance) };
NonZero::new(n - advance).map_or(Ok(()), Err)
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::all fn all<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if !f(x) {
return false;
}
}
true
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::any fn any<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if f(x) {
return true;
}
}
false
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
len!(self)
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::find fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
while let Some(x) = self.next() {
if predicate(&x) {
return Some(x);
}
}
None
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::fold fn fold<B, F>(self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
// this implementation consists of the following optimizations compared to the
// default implementation:
        // - do-while loop, as is LLVM's preferred loop shape,
// see https://releases.llvm.org/16.0.0/docs/LoopTerminology.html#more-canonical-loops
// - bumps an index instead of a pointer since the latter case inhibits
// some optimizations, see #111603
// - avoids Option wrapping/matching
if is_empty!(self) {
return init;
}
let mut acc = init;
let mut i = 0;
let len = len!(self);
loop {
// SAFETY: the loop iterates `i in 0..len`, which always is in bounds of
// the slice allocation
acc = f(acc, unsafe { & $( $mut_ )? *self.ptr.add(i).as_ptr() });
// SAFETY: `i` can't overflow since it'll only reach usize::MAX if the
// slice had that length, in which case we'll break out of the loop
// after the increment
i = unsafe { i.unchecked_add(1) };
if i == len {
break;
}
}
acc
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::for_each fn for_each<F>(mut self, mut f: F)
where
Self: Sized,
F: FnMut(Self::Item),
{
while let Some(x) = self.next() {
f(x);
}
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::last fn last(mut self) -> Option<$elem> {
self.next_back()
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<$elem> {
// intentionally not using the helpers because this is
// one of the most mono'd things in the library.
let ptr = self.ptr;
let end_or_len = self.end_or_len;
// SAFETY: See inner comments. (For some reason having multiple
        // blocks breaks inlining this -- if you can fix that please do!)
unsafe {
if T::IS_ZST {
let len = end_or_len.addr();
if len == 0 {
return None;
}
// SAFETY: just checked that it's not zero, so subtracting one
// cannot wrap. (Ideally this would be `checked_sub`, which
// does the same thing internally, but as of 2025-02 that
// doesn't optimize quite as small in MIR.)
self.end_or_len = without_provenance_mut(len.unchecked_sub(1));
} else {
// SAFETY: by type invariant, the `end_or_len` field is always
// non-null for a non-ZST pointee. (This transmute ensures we
// get `!nonnull` metadata on the load of the field.)
if ptr == crate::intrinsics::transmute::<$ptr, NonNull<T>>(end_or_len) {
return None;
}
// SAFETY: since it's not empty, per the check above, moving
// forward one keeps us inside the slice, and this is valid.
self.ptr = ptr.add(1);
}
// SAFETY: Now that we know it wasn't empty and we've moved past
// the first one (to avoid giving a duplicate `&mut` next time),
// we can give out a reference to it.
Some({ptr}.$into_ref())
}
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
if_zst!(mut self,
len => *len = 0,
end => self.ptr = *end,
);
return None;
}
// SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
unsafe {
self.post_inc_start(n);
Some(self.next_unchecked())
}
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::position fn position<P>(&mut self, mut predicate: P) -> Option<usize> where
Self: Sized,
P: FnMut(Self::Item) -> bool,
{
let n = len!(self);
let mut i = 0;
while let Some(x) = self.next() {
if predicate(x) {
// SAFETY: we are guaranteed to be in bounds by the loop invariant:
// when `i >= n`, `self.next()` returns `None` and the loop breaks.
unsafe { assert_unchecked(i < n) };
return Some(i);
}
i += 1;
}
None
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::rposition fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where
P: FnMut(Self::Item) -> bool,
Self: Sized + ExactSizeIterator + DoubleEndedIterator
{
let n = len!(self);
let mut i = n;
while let Some(x) = self.next_back() {
i -= 1;
if predicate(x) {
// SAFETY: `i` must be lower than `n` since it starts at `n`
// and is only decreasing.
unsafe { assert_unchecked(i < n) };
return Some(i);
}
}
None
}
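Illustration of the index bookkeeping in `position`/`rposition` above (editorial example):

fn main() {
    let v = [1, 2, 3, 2];
    assert_eq!(v.iter().position(|&x| x == 2), Some(1));
    assert_eq!(v.iter().rposition(|&x| x == 2), Some(3));
}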
<core::slice::iter::Iter<'a, T> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> $elem {
// SAFETY: The caller promised there's at least one more item.
unsafe {
self.post_inc_start(1).$into_ref()
}
}
<core::slice::iter::IterMut<'_, T> as core::default::Default>::default fn default() -> Self {
(& $( $mut_ )? []).into_iter()
}
<core::slice::iter::IterMut<'_, T> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("IterMut").field(&self.make_slice()).finish()
}
<core::slice::iter::IterMut<'_, T> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
len!(self)
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
unsafe { self.pre_dec_end(advance) };
NonZero::new(n - advance).map_or(Ok(()), Err)
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<$elem> {
// could be implemented with slices, but this avoids bounds checks
// SAFETY: The call to `next_back_unchecked`
// is safe since we check if the iterator is empty first.
unsafe {
if is_empty!(self) {
None
} else {
Some(self.next_back_unchecked())
}
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::double_ended::DoubleEndedIterator>::nth_back fn nth_back(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
if_zst!(mut self,
len => *len = 0,
end => *end = self.ptr,
);
return None;
}
// SAFETY: We are in bounds. `pre_dec_end` does the right thing even for ZSTs.
unsafe {
self.pre_dec_end(n);
Some(self.next_back_unchecked())
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
unsafe { self.post_inc_start(advance) };
NonZero::new(n - advance).map_or(Ok(()), Err)
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::all fn all<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if !f(x) {
return false;
}
}
true
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::any fn any<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if f(x) {
return true;
}
}
false
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
len!(self)
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::find fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
while let Some(x) = self.next() {
if predicate(&x) {
return Some(x);
}
}
None
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::fold fn fold<B, F>(self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
// this implementation consists of the following optimizations compared to the
// default implementation:
        // - do-while loop, as is LLVM's preferred loop shape,
// see https://releases.llvm.org/16.0.0/docs/LoopTerminology.html#more-canonical-loops
// - bumps an index instead of a pointer since the latter case inhibits
// some optimizations, see #111603
// - avoids Option wrapping/matching
if is_empty!(self) {
return init;
}
let mut acc = init;
let mut i = 0;
let len = len!(self);
loop {
// SAFETY: the loop iterates `i in 0..len`, which always is in bounds of
// the slice allocation
acc = f(acc, unsafe { & $( $mut_ )? *self.ptr.add(i).as_ptr() });
// SAFETY: `i` can't overflow since it'll only reach usize::MAX if the
// slice had that length, in which case we'll break out of the loop
// after the increment
i = unsafe { i.unchecked_add(1) };
if i == len {
break;
}
}
acc
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::for_each fn for_each<F>(mut self, mut f: F)
where
Self: Sized,
F: FnMut(Self::Item),
{
while let Some(x) = self.next() {
f(x);
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::last fn last(mut self) -> Option<$elem> {
self.next_back()
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<$elem> {
// intentionally not using the helpers because this is
// one of the most mono'd things in the library.
let ptr = self.ptr;
let end_or_len = self.end_or_len;
// SAFETY: See inner comments. (For some reason having multiple
        // blocks breaks inlining this -- if you can fix that please do!)
unsafe {
if T::IS_ZST {
let len = end_or_len.addr();
if len == 0 {
return None;
}
// SAFETY: just checked that it's not zero, so subtracting one
// cannot wrap. (Ideally this would be `checked_sub`, which
// does the same thing internally, but as of 2025-02 that
// doesn't optimize quite as small in MIR.)
self.end_or_len = without_provenance_mut(len.unchecked_sub(1));
} else {
// SAFETY: by type invariant, the `end_or_len` field is always
// non-null for a non-ZST pointee. (This transmute ensures we
// get `!nonnull` metadata on the load of the field.)
if ptr == crate::intrinsics::transmute::<$ptr, NonNull<T>>(end_or_len) {
return None;
}
// SAFETY: since it's not empty, per the check above, moving
// forward one keeps us inside the slice, and this is valid.
self.ptr = ptr.add(1);
}
// SAFETY: Now that we know it wasn't empty and we've moved past
// the first one (to avoid giving a duplicate `&mut` next time),
// we can give out a reference to it.
Some({ptr}.$into_ref())
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
if_zst!(mut self,
len => *len = 0,
end => self.ptr = *end,
);
return None;
}
// SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
unsafe {
self.post_inc_start(n);
Some(self.next_unchecked())
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::position fn position<P>(&mut self, mut predicate: P) -> Option<usize> where
Self: Sized,
P: FnMut(Self::Item) -> bool,
{
let n = len!(self);
let mut i = 0;
while let Some(x) = self.next() {
if predicate(x) {
// SAFETY: we are guaranteed to be in bounds by the loop invariant:
// when `i >= n`, `self.next()` returns `None` and the loop breaks.
unsafe { assert_unchecked(i < n) };
return Some(i);
}
i += 1;
}
None
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::rposition fn rposition<P>(&mut self, mut predicate: P) -> Option<usize> where
P: FnMut(Self::Item) -> bool,
Self: Sized + ExactSizeIterator + DoubleEndedIterator
{
let n = len!(self);
let mut i = n;
while let Some(x) = self.next_back() {
i -= 1;
if predicate(x) {
// SAFETY: `i` must be lower than `n` since it starts at `n`
// and is only decreasing.
unsafe { assert_unchecked(i < n) };
return Some(i);
}
}
None
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> $elem {
// SAFETY: The caller promised there's at least one more item.
unsafe {
self.post_inc_start(1).$into_ref()
}
}
<core::slice::iter::Windows<'a, T> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.len()
}
<core::slice::iter::Windows<'a, T> as core::iter::traits::iterator::Iterator>::last fn last(self) -> Option<Self::Item> {
if self.size.get() > self.v.len() {
None
} else {
let start = self.v.len() - self.size.get();
Some(&self.v[start..])
}
}
<core::slice::iter::Windows<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a [T]> {
if self.size.get() > self.v.len() {
None
} else {
let ret = Some(&self.v[..self.size.get()]);
self.v = &self.v[1..];
ret
}
}
<core::slice::iter::Windows<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
let size = self.size.get();
if let Some(rest) = self.v.get(n..)
&& let Some(nth) = rest.get(..size)
{
self.v = &rest[1..];
Some(nth)
} else {
            // setting the length to 0 is cheaper than overwriting the pointer when assigning &[]
            self.v = &self.v[..0];
None
}
}
<core::slice::iter::Windows<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.size.get() > self.v.len() {
(0, Some(0))
} else {
let size = self.v.len() - self.size.get() + 1;
(size, Some(size))
}
}
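Sketch of the window arithmetic above: `windows(3)` over five elements yields `5 - 3 + 1 == 3` overlapping windows, advancing one element at a time.

fn main() {
    let v = [1, 2, 3, 4, 5];
    let mut it = v.windows(3);
    assert_eq!(it.size_hint(), (3, Some(3)));
    assert_eq!(it.next(), Some(&[1, 2, 3][..]));
    assert_eq!(it.nth(1), Some(&[3, 4, 5][..]));
    assert_eq!(it.next(), None);
}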
<core::str::error::Utf8Error as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if let Some(error_len) = self.error_len {
write!(
f,
"invalid utf-8 sequence of {} bytes from index {}",
error_len, self.valid_up_to
)
} else {
write!(f, "incomplete utf-8 byte sequence from index {}", self.valid_up_to)
}
}
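Illustration of the two Display cases above (editorial example): an invalid sequence reports its length, a truncated one reports only the offset.

fn main() {
    let err = std::str::from_utf8(&[b'a', 0x80]).unwrap_err();
    assert_eq!(err.to_string(), "invalid utf-8 sequence of 1 bytes from index 1");
    let err = std::str::from_utf8(&[b'a', 0xE2, 0x82]).unwrap_err();
    assert_eq!(err.to_string(), "incomplete utf-8 byte sequence from index 1");
}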
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::all fn all<F>(&mut self, f: F) -> bool
where
F: FnMut(Self::Item) -> bool,
{
self.0.all(f)
}
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::any fn any<F>(&mut self, f: F) -> bool
where
F: FnMut(Self::Item) -> bool,
{
self.0.any(f)
}
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.0.count()
}
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::find fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
where
P: FnMut(&Self::Item) -> bool,
{
self.0.find(predicate)
}
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::last fn last(self) -> Option<Self::Item> {
self.0.last()
}
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<u8> {
self.0.next()
}
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.0.nth(n)
}
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::position fn position<P>(&mut self, predicate: P) -> Option<usize>
where
P: FnMut(Self::Item) -> bool,
{
self.0.position(predicate)
}
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::rposition fn rposition<P>(&mut self, predicate: P) -> Option<usize>
where
P: FnMut(Self::Item) -> bool,
{
self.0.rposition(predicate)
}
<core::str::iter::Bytes<'_> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.0.size_hint()
}
<core::str::iter::CharIndices<'a> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<(usize, char)> {
self.iter.next_back().map(|ch| {
let index = self.front_offset + self.iter.iter.len();
(index, ch)
})
}
<core::str::iter::CharIndices<'a> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
self.iter.count()
}
<core::str::iter::CharIndices<'a> as core::iter::traits::iterator::Iterator>::last fn last(mut self) -> Option<(usize, char)> {
// No need to go through the entire string.
self.next_back()
}
<core::str::iter::CharIndices<'a> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<(usize, char)> {
let pre_len = self.iter.iter.len();
match self.iter.next() {
None => None,
Some(ch) => {
let index = self.front_offset;
let len = self.iter.iter.len();
self.front_offset += pre_len - len;
Some((index, ch))
}
}
}
<core::str::iter::CharIndices<'a> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
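Sketch of the offset bookkeeping above: `char_indices` reports byte offsets, so multi-byte characters advance the index by their UTF-8 length.

fn main() {
    let mut it = "aé€".char_indices();
    assert_eq!(it.next(), Some((0, 'a')));
    assert_eq!(it.next(), Some((1, 'é'))); // 'é' is 2 bytes
    assert_eq!(it.next(), Some((3, '€'))); // '€' is 3 bytes
    assert_eq!(it.next(), None);
}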
<core::str::iter::Chars<'_> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "Chars(")?;
f.debug_list().entries(self.clone()).finish()?;
write!(f, ")")?;
Ok(())
}
<core::str::iter::Chars<'a> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<char> {
// SAFETY: `str` invariant says `self.iter` is a valid UTF-8 string and
// the resulting `ch` is a valid Unicode Scalar Value.
unsafe { next_code_point_reverse(&mut self.iter).map(|ch| char::from_u32_unchecked(ch)) }
}
<core::str::iter::Chars<'a> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, mut remainder: usize) -> Result<(), NonZero<usize>> {
const CHUNK_SIZE: usize = 32;
if remainder >= CHUNK_SIZE {
let mut chunks = self.iter.as_slice().as_chunks::<CHUNK_SIZE>().0.iter();
let mut bytes_skipped: usize = 0;
while remainder > CHUNK_SIZE
&& let Some(chunk) = chunks.next()
{
bytes_skipped += CHUNK_SIZE;
let mut start_bytes = [false; CHUNK_SIZE];
for i in 0..CHUNK_SIZE {
start_bytes[i] = !super::validations::utf8_is_cont_byte(chunk[i]);
}
remainder -= start_bytes.into_iter().map(|i| i as u8).sum::<u8>() as usize;
}
            // SAFETY: These bytes exist, since we just iterated over them,
            // so `advance_by` will succeed.
unsafe { self.iter.advance_by(bytes_skipped).unwrap_unchecked() };
// skip trailing continuation bytes
while self.iter.len() > 0 {
let b = self.iter.as_slice()[0];
if !super::validations::utf8_is_cont_byte(b) {
break;
}
// SAFETY: We just peeked at the byte, therefore it exists
unsafe { self.iter.advance_by(1).unwrap_unchecked() };
}
}
while (remainder > 0) && (self.iter.len() > 0) {
remainder -= 1;
let b = self.iter.as_slice()[0];
let slurp = super::validations::utf8_char_width(b);
// SAFETY: utf8 validity requires that the string must contain
// the continuation bytes (if any)
unsafe { self.iter.advance_by(slurp).unwrap_unchecked() };
}
NonZero::new(remainder).map_or(Ok(()), Err)
}
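A behavioral sketch of the skipping logic above (the `advance_by` override is itself unstable API, but the default `Iterator::nth` is built on it): advancing counts characters, not bytes, by summing non-continuation bytes per chunk and then slurping whole characters.

fn main() {
    let s = "αβγδε"; // each Greek letter is 2 bytes in UTF-8
    assert_eq!(s.chars().nth(3), Some('δ'));
}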
<core::str::iter::Chars<'a> as core::iter::traits::iterator::Iterator>::count fn count(self) -> usize {
super::count::count_chars(self.as_str())
}
<core::str::iter::Chars<'a> as core::iter::traits::iterator::Iterator>::last fn last(mut self) -> Option<char> {
// No need to go through the entire string.
self.next_back()
}
<core::str::iter::Chars<'a> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<char> {
// SAFETY: `str` invariant says `self.iter` is a valid UTF-8 string and
// the resulting `ch` is a valid Unicode Scalar Value.
unsafe { next_code_point(&mut self.iter).map(|ch| char::from_u32_unchecked(ch)) }
}
<core::str::iter::Chars<'a> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.iter.len();
(len.div_ceil(4), Some(len))
}
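Illustration of the bounds above: a `char` occupies 1 to 4 bytes, so for an `n`-byte string the hint is `(n.div_ceil(4), Some(n))`.

fn main() {
    assert_eq!("hello".chars().size_hint(), (2, Some(5))); // ceil(5 / 4) = 2
}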
<core::str::iter::SplitInclusive<'a, P> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SplitInclusive").field("0", &self.0).finish()
}
<core::str::iter::SplitInclusive<'a, P> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a str> {
self.0.next_inclusive()
}
<core::str::iter::SplitInternal<'a, P> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("SplitInternal")
.field("start", &self.start)
.field("end", &self.end)
.field("matcher", &self.matcher)
.field("allow_trailing_empty", &self.allow_trailing_empty)
.field("finished", &self.finished)
.finish()
}
<core::str::lossy::Debug<'_> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.write_char('"')?;
for chunk in self.0.utf8_chunks() {
// Valid part.
// Here we partially parse UTF-8 again which is suboptimal.
{
let valid = chunk.valid();
let mut from = 0;
for (i, c) in valid.char_indices() {
let esc = c.escape_debug_ext(EscapeDebugExtArgs {
escape_grapheme_extended: true,
escape_single_quote: false,
escape_double_quote: true,
});
// If char needs escaping, flush backlog so far and write, else skip
if esc.len() != 1 {
f.write_str(&valid[from..i])?;
for c in esc {
f.write_char(c)?;
}
from = i + c.len_utf8();
}
}
f.write_str(&valid[from..])?;
}
// Broken parts of string as hex escape.
for &b in chunk.invalid() {
write!(f, "\\x{:02X}", b)?;
}
}
f.write_char('"')
}
<core::str::lossy::Utf8Chunks<'_> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
f.debug_struct("Utf8Chunks").field("source", &self.debug()).finish()
}
<core::str::lossy::Utf8Chunks<'a> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Utf8Chunk<'a>> {
if self.source.is_empty() {
return None;
}
const TAG_CONT_U8: u8 = 128;
fn safe_get(xs: &[u8], i: usize) -> u8 {
*xs.get(i).unwrap_or(&0)
}
let mut i = 0;
let mut valid_up_to = 0;
while i < self.source.len() {
// SAFETY: `i < self.source.len()` per previous line.
// For some reason the following are both significantly slower:
// while let Some(&byte) = self.source.get(i) {
// while let Some(byte) = self.source.get(i).copied() {
let byte = unsafe { *self.source.get_unchecked(i) };
i += 1;
if byte < 128 {
// This could be a `1 => ...` case in the match below, but for
// the common case of all-ASCII inputs, we bypass loading the
// sizeable UTF8_CHAR_WIDTH table into cache.
} else {
let w = utf8_char_width(byte);
match w {
2 => {
if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
break;
}
i += 1;
}
3 => {
match (byte, safe_get(self.source, i)) {
(0xE0, 0xA0..=0xBF) => (),
(0xE1..=0xEC, 0x80..=0xBF) => (),
(0xED, 0x80..=0x9F) => (),
(0xEE..=0xEF, 0x80..=0xBF) => (),
_ => break,
}
i += 1;
if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
break;
}
i += 1;
}
4 => {
match (byte, safe_get(self.source, i)) {
(0xF0, 0x90..=0xBF) => (),
(0xF1..=0xF3, 0x80..=0xBF) => (),
(0xF4, 0x80..=0x8F) => (),
_ => break,
}
i += 1;
if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
break;
}
i += 1;
if safe_get(self.source, i) & 192 != TAG_CONT_U8 {
break;
}
i += 1;
}
_ => break,
}
}
valid_up_to = i;
}
// SAFETY: `i <= self.source.len()` because it is only ever incremented
// via `i += 1` and in between every single one of those increments, `i`
// is compared against `self.source.len()`. That happens either
// literally by `i < self.source.len()` in the while-loop's condition,
// or indirectly by `safe_get(self.source, i) & 192 != TAG_CONT_U8`. The
// loop is terminated as soon as the latest `i += 1` has made `i` no
// longer less than `self.source.len()`, which means it'll be at most
// equal to `self.source.len()`.
let (inspected, remaining) = unsafe { self.source.split_at_unchecked(i) };
self.source = remaining;
// SAFETY: `valid_up_to <= i` because it is only ever assigned via
// `valid_up_to = i` and `i` only increases.
let (valid, invalid) = unsafe { inspected.split_at_unchecked(valid_up_to) };
Some(Utf8Chunk {
// SAFETY: All bytes up to `valid_up_to` are valid UTF-8.
valid: unsafe { from_utf8_unchecked(valid) },
invalid,
})
}
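// Illustrative sketch (assumes std): each chunk pairs a maximal valid
// prefix with the invalid byte sequence that stopped validation.
fn main() {
    let mut chunks = b"foo\xF1\x80bar".utf8_chunks();
    let first = chunks.next().unwrap();
    assert_eq!(first.valid(), "foo");
    assert_eq!(first.invalid(), b"\xF1\x80"); // truncated 4-byte sequence
    let second = chunks.next().unwrap();
    assert_eq!(second.valid(), "bar");
    assert!(second.invalid().is_empty());
    assert!(chunks.next().is_none());
}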
<core::str::lossy::Utf8Chunks<'a> as core::iter::traits::iterator::Iterator>::next::safe_get fn safe_get(xs: &[u8], i: usize) -> u8 {
*xs.get(i).unwrap_or(&0)
}
<core::str::pattern::CharSearcher<'a> as core::str::pattern::ReverseSearcher<'a>>::next_back fn next_back(&mut self) -> SearchStep {
let old_finger = self.finger_back;
// SAFETY: see the comment for next() above
let slice = unsafe { self.haystack.get_unchecked(self.finger..old_finger) };
let mut iter = slice.chars();
let old_len = iter.iter.len();
if let Some(ch) = iter.next_back() {
// subtract byte offset of current character
// without re-encoding as utf-8
self.finger_back -= old_len - iter.iter.len();
if ch == self.needle {
SearchStep::Match(self.finger_back, old_finger)
} else {
SearchStep::Reject(self.finger_back, old_finger)
}
} else {
SearchStep::Done
}
}
<core::str::pattern::CharSearcher<'a> as core::str::pattern::Searcher<'a>>::haystack fn haystack(&self) -> &'a str {
self.haystack
}
<core::str::pattern::CharSearcher<'a> as core::str::pattern::Searcher<'a>>::next fn next(&mut self) -> SearchStep {
let old_finger = self.finger;
// SAFETY: 1-4 guarantee safety of `get_unchecked`
// 1. `self.finger` and `self.finger_back` are kept on unicode boundaries
// (this is invariant)
// 2. `self.finger >= 0` since it starts at 0 and only increases
// 3. `self.finger < self.finger_back` because otherwise the char `iter`
// would return `SearchStep::Done`
// 4. `self.finger` comes before the end of the haystack because `self.finger_back`
// starts at the end and only decreases
let slice = unsafe { self.haystack.get_unchecked(old_finger..self.finger_back) };
let mut iter = slice.chars();
let old_len = iter.iter.len();
if let Some(ch) = iter.next() {
// add byte offset of current character
// without re-encoding as utf-8
self.finger += old_len - iter.iter.len();
if ch == self.needle {
SearchStep::Match(old_finger, self.finger)
} else {
SearchStep::Reject(old_finger, self.finger)
}
} else {
SearchStep::Done
}
}
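// Illustrative sketch (assumes std): in the current implementation this
// searcher backs `str::find`/`str::rfind` for `char` patterns, and the
// finger bookkeeping is what makes the returned offsets byte indices.
fn main() {
    let hay = "aßcß";
    assert_eq!(hay.find('ß'), Some(1)); // ß occupies bytes 1..3
    assert_eq!(hay.rfind('ß'), Some(4)); // and bytes 4..6 here
}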
<core::str::pattern::MatchOnly as core::str::pattern::TwoWayStrategy>::matching fn matching(a: usize, b: usize) -> Self::Output {
Some((a, b))
}
<core::str::pattern::MatchOnly as core::str::pattern::TwoWayStrategy>::rejecting fn rejecting(_a: usize, _b: usize) -> Self::Output {
None
}
<core::str::pattern::MatchOnly as core::str::pattern::TwoWayStrategy>::use_early_reject fn use_early_reject() -> bool {
false
}
<core::str::pattern::RejectAndMatch as core::str::pattern::TwoWayStrategy>::matching fn matching(a: usize, b: usize) -> Self::Output {
SearchStep::Match(a, b)
}
<core::str::pattern::RejectAndMatch as core::str::pattern::TwoWayStrategy>::rejecting fn rejecting(a: usize, b: usize) -> Self::Output {
SearchStep::Reject(a, b)
}
<core::str::pattern::RejectAndMatch as core::str::pattern::TwoWayStrategy>::use_early_reject fn use_early_reject() -> bool {
true
}
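// Note: `RejectAndMatch` feeds the step-by-step `Searcher::next` state
// machine, while `MatchOnly` (no early reject, `Option` output) serves
// `next_match`, which can skip Reject bookkeeping entirely.
// Illustrative sketch (assumes std):
fn main() {
    let hay = "abcabc";
    assert_eq!(hay.find("bc"), Some(1)); // goes through next_match
    let all: Vec<_> = hay.match_indices("bc").collect();
    assert_eq!(all, [(1, "bc"), (4, "bc")]);
}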
<core::str::pattern::StrSearcher<'a, 'b> as core::str::pattern::ReverseSearcher<'a>>::next_back fn next_back(&mut self) -> SearchStep {
match self.searcher {
StrSearcherImpl::Empty(ref mut searcher) => {
if searcher.is_finished {
return SearchStep::Done;
}
let is_match = searcher.is_match_bw;
searcher.is_match_bw = !searcher.is_match_bw;
let end = searcher.end;
match self.haystack[..end].chars().next_back() {
_ if is_match => SearchStep::Match(end, end),
None => {
searcher.is_finished = true;
SearchStep::Done
}
Some(ch) => {
searcher.end -= ch.len_utf8();
SearchStep::Reject(searcher.end, end)
}
}
}
StrSearcherImpl::TwoWay(ref mut searcher) => {
if searcher.end == 0 {
return SearchStep::Done;
}
let is_long = searcher.memory == usize::MAX;
match searcher.next_back::<RejectAndMatch>(
self.haystack.as_bytes(),
self.needle.as_bytes(),
is_long,
) {
SearchStep::Reject(mut a, b) => {
// skip to next char boundary
while !self.haystack.is_char_boundary(a) {
a -= 1;
}
searcher.end = cmp::min(a, searcher.end);
SearchStep::Reject(a, b)
}
otherwise => otherwise,
}
}
}
}
<core::str::pattern::StrSearcher<'a, 'b> as core::str::pattern::Searcher<'a>>::haystack fn haystack(&self) -> &'a str {
self.haystack
}
<core::str::pattern::StrSearcher<'a, 'b> as core::str::pattern::Searcher<'a>>::next fn next(&mut self) -> SearchStep {
match self.searcher {
StrSearcherImpl::Empty(ref mut searcher) => {
if searcher.is_finished {
return SearchStep::Done;
}
// empty needle rejects every char and matches every empty string between them
let is_match = searcher.is_match_fw;
searcher.is_match_fw = !searcher.is_match_fw;
let pos = searcher.position;
match self.haystack[pos..].chars().next() {
_ if is_match => SearchStep::Match(pos, pos),
None => {
searcher.is_finished = true;
SearchStep::Done
}
Some(ch) => {
searcher.position += ch.len_utf8();
SearchStep::Reject(pos, searcher.position)
}
}
}
StrSearcherImpl::TwoWay(ref mut searcher) => {
// TwoWaySearcher produces valid *Match* indices that split at char boundaries,
// provided it performs correct matching and both the haystack and the needle
// are valid UTF-8.
// *Rejects* from the algorithm can fall on any indices, but we walk them
// forward to the next character boundary manually, so that they are UTF-8 safe.
if searcher.position == self.haystack.len() {
return SearchStep::Done;
}
let is_long = searcher.memory == usize::MAX;
match searcher.next::<RejectAndMatch>(
self.haystack.as_bytes(),
self.needle.as_bytes(),
is_long,
) {
SearchStep::Reject(a, mut b) => {
// skip to next char boundary
while !self.haystack.is_char_boundary(b) {
b += 1;
}
searcher.position = cmp::max(b, searcher.position);
SearchStep::Reject(a, b)
}
otherwise => otherwise,
}
}
}
}
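// Illustrative sketch (assumes std): the empty-needle searcher alternates
// Match/Reject, so "" matches at every char boundary, both ends included.
fn main() {
    let idx: Vec<usize> = "ab".match_indices("").map(|(i, _)| i).collect();
    assert_eq!(idx, [0, 1, 2]);
    assert_eq!("ab".split("").collect::<Vec<_>>(), ["", "a", "b", ""]);
}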
<core::str::pattern::StrSearcher<'a, 'b> as core::str::pattern::Searcher<'a>>::next_match fn next_match(&mut self) -> Option<(usize, usize)> {
match self.searcher {
StrSearcherImpl::Empty(..) => loop {
match self.next() {
SearchStep::Match(a, b) => return Some((a, b)),
SearchStep::Done => return None,
SearchStep::Reject(..) => {}
}
},
StrSearcherImpl::TwoWay(ref mut searcher) => {
let is_long = searcher.memory == usize::MAX;
// write out `true` and `false` cases to encourage the compiler
// to specialize the two cases separately.
if is_long {
searcher.next::<MatchOnly>(
self.haystack.as_bytes(),
self.needle.as_bytes(),
true,
)
} else {
searcher.next::<MatchOnly>(
self.haystack.as_bytes(),
self.needle.as_bytes(),
false,
)
}
}
}
}
<core::sync::atomic::Atomic<bool> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
}
<core::sync::atomic::Atomic<u16> as core::convert::From<u16>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::Atomic<u16> as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<core::sync::atomic::Atomic<u16> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
}
<core::sync::atomic::Atomic<u32> as core::convert::From<u32>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::Atomic<u32> as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<core::sync::atomic::Atomic<u32> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
}
<core::sync::atomic::Atomic<u64> as core::convert::From<u64>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::Atomic<u64> as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<core::sync::atomic::Atomic<u64> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
}
<core::sync::atomic::Atomic<u8> as core::convert::From<u8>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::Atomic<u8> as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<core::sync::atomic::Atomic<u8> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
}
<core::sync::atomic::Atomic<usize> as core::convert::From<usize>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::Atomic<usize> as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<core::sync::atomic::Atomic<usize> as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
}
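// Illustrative sketch (assumes std's `format!`): `Debug` on an atomic is a
// Relaxed load of the current value; `From` and `Default` mirror the
// underlying integer (shown here with the stable `AtomicU32` alias).
use core::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let a = AtomicU32::from(7);
    assert_eq!(format!("{a:?}"), "7");
    assert_eq!(AtomicU32::default().load(Ordering::Relaxed), 0);
}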
<core::time::Duration as core::fmt::Debug>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
/// Formats a floating point number in decimal notation.
///
/// The number is given as the `integer_part` and a fractional part.
/// The value of the fractional part is `fractional_part / divisor`. So
/// `integer_part` = 3, `fractional_part` = 12 and `divisor` = 100
/// represents the number `3.012`. Trailing zeros are omitted.
///
/// `divisor` must not be above 100_000_000 and should be a power of 10;
/// other values don't make sense. `fractional_part` must be less than
/// `10 * divisor`.
///
/// A prefix and postfix may be added. The whole thing is padded
/// to the formatter's `width`, if specified.
fn fmt_decimal(
f: &mut fmt::Formatter<'_>,
integer_part: u64,
mut fractional_part: u32,
mut divisor: u32,
prefix: &str,
postfix: &str,
) -> fmt::Result {
// Encode the fractional part into a temporary buffer. The buffer
// only needs to hold 9 elements, because `fractional_part` has to
// be smaller than 10^9. The buffer is prefilled with '0' digits
// to simplify the code below.
let mut buf = [b'0'; 9];
// The next digit is written at this position
let mut pos = 0;
// We keep writing digits into the buffer while there are non-zero
// digits left and we haven't written enough digits yet.
while fractional_part > 0 && pos < f.precision().unwrap_or(9) {
// Write new digit into the buffer
buf[pos] = b'0' + (fractional_part / divisor) as u8;
fractional_part %= divisor;
divisor /= 10;
pos += 1;
}
// If a precision < 9 was specified, there may be some non-zero
// digits left that weren't written into the buffer. In that case we
// need to perform rounding to match the semantics of printing
// normal floating point numbers. However, we only need to do work
// when rounding up. This happens if the first digit of the
// remaining ones is >= 5. When the first digit is exactly 5, rounding
// follows IEEE-754 round-ties-to-even semantics: we only round up
// if the last written digit is odd.
let integer_part = if fractional_part > 0 && fractional_part >= divisor * 5 {
// For ties (fractional_part == divisor * 5), only round up if last digit is odd
let is_tie = fractional_part == divisor * 5;
let last_digit_is_odd = if pos > 0 {
(buf[pos - 1] - b'0') % 2 == 1
} else {
// No fractional digits - check the integer part
(integer_part % 2) == 1
};
if is_tie && !last_digit_is_odd {
Some(integer_part)
} else {
// Round up the number contained in the buffer. We go through
// the buffer backwards and keep track of the carry.
let mut rev_pos = pos;
let mut carry = true;
while carry && rev_pos > 0 {
rev_pos -= 1;
// If the digit in the buffer is not '9', we just need to
// increment it and can stop then (since we don't have a
// carry anymore). Otherwise, we set it to '0' (overflow)
// and continue.
if buf[rev_pos] < b'9' {
buf[rev_pos] += 1;
carry = false;
} else {
buf[rev_pos] = b'0';
}
}
// If we still have the carry bit set, that means that we set
// the whole buffer to '0's and need to increment the integer
// part.
if carry {
// If `integer_part == u64::MAX` and precision < 9, any
// carry of the overflow during rounding of the
// `fractional_part` into the `integer_part` will cause the
// `integer_part` itself to overflow. Avoid this by using an
// `Option<u64>`, with `None` representing `u64::MAX + 1`.
integer_part.checked_add(1)
} else {
Some(integer_part)
}
}
} else {
Some(integer_part)
};
// Determine the end of the buffer: if precision is set, we just
// use as many digits from the buffer (capped to 9). If it isn't
// set, we only use all digits up to the last non-zero one.
let end = f.precision().map(|p| crate::cmp::min(p, 9)).unwrap_or(pos);
// This closure emits the formatted duration without emitting any
// padding (padding is calculated below).
let emit_without_padding = |f: &mut fmt::Formatter<'_>| {
if let Some(integer_part) = integer_part {
write!(f, "{}{}", prefix, integer_part)?;
} else {
// u64::MAX + 1 == 18446744073709551616
write!(f, "{}18446744073709551616", prefix)?;
}
// Write the decimal point and the fractional part (if any).
if end > 0 {
// SAFETY: We are only writing ASCII digits into the buffer and
// it was initialized with '0's, so it contains valid UTF8.
let s = unsafe { crate::str::from_utf8_unchecked(&buf[..end]) };
// If the user requests a precision > 9, we pad '0's at the end.
let w = f.precision().unwrap_or(pos);
write!(f, ".{:0<width$}", s, width = w)?;
}
write!(f, "{}", postfix)
};
match f.width() {
None => {
// No `width` specified. There's no need to calculate the
// length of the output in this case, just emit it.
emit_without_padding(f)
}
Some(requested_w) => {
// A `width` was specified. Calculate the actual width of
// the output in order to calculate the required padding.
// It consists of 4 parts:
// 1. The prefix: is either "+" or "", so we can just use len().
// 2. The postfix: can be "µs" so we have to count UTF8 characters.
let mut actual_w = prefix.len() + postfix.chars().count();
// 3. The integer part:
if let Some(integer_part) = integer_part {
if let Some(log) = integer_part.checked_ilog10() {
// integer_part is > 0, so has length log10(x)+1
actual_w += 1 + log as usize;
} else {
// integer_part is 0, so has length 1.
actual_w += 1;
}
} else {
// integer_part is u64::MAX + 1, so has length 20
actual_w += 20;
}
// 4. The fractional part (if any):
if end > 0 {
let frac_part_w = f.precision().unwrap_or(pos);
actual_w += 1 + frac_part_w;
}
if requested_w <= actual_w {
// Output is already longer than `width`, so don't pad.
emit_without_padding(f)
} else {
// We need to add padding. Use the `Formatter::padding` helper function.
let default_align = fmt::Alignment::Left;
let post_padding =
f.padding((requested_w - actual_w) as u16, default_align)?;
emit_without_padding(f)?;
post_padding.write(f)
}
}
}
}
// Print leading '+' sign if requested
let prefix = if f.sign_plus() { "+" } else { "" };
if self.secs > 0 {
fmt_decimal(f, self.secs, self.nanos.as_inner(), NANOS_PER_SEC / 10, prefix, "s")
} else if self.nanos.as_inner() >= NANOS_PER_MILLI {
fmt_decimal(
f,
(self.nanos.as_inner() / NANOS_PER_MILLI) as u64,
self.nanos.as_inner() % NANOS_PER_MILLI,
NANOS_PER_MILLI / 10,
prefix,
"ms",
)
} else if self.nanos.as_inner() >= NANOS_PER_MICRO {
fmt_decimal(
f,
(self.nanos.as_inner() / NANOS_PER_MICRO) as u64,
self.nanos.as_inner() % NANOS_PER_MICRO,
NANOS_PER_MICRO / 10,
prefix,
"µs",
)
} else {
fmt_decimal(f, self.nanos.as_inner() as u64, 0, 1, prefix, "ns")
}
}
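// Illustrative sketch (assumes std's `format!`): precision rounds the
// fractional part with ties-to-even, and `width` pads the whole value.
use core::time::Duration;

fn main() {
    assert_eq!(format!("{:?}", Duration::new(2, 7_000_000)), "2.007s");
    assert_eq!(format!("{:.1?}", Duration::new(2, 7_000_000)), "2.0s");
    // Ties round to even: 2.5s stays at "2s", 1.5s rounds up to "2s".
    assert_eq!(format!("{:.0?}", Duration::new(2, 500_000_000)), "2s");
    assert_eq!(format!("{:.0?}", Duration::new(1, 500_000_000)), "2s");
    assert_eq!(format!("{:>8?}", Duration::from_millis(5)), "     5ms");
}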
<core::time::Duration as core::fmt::Debug>::fmt::fmt_decimal fn fmt_decimal(
f: &mut fmt::Formatter<'_>,
integer_part: u64,
mut fractional_part: u32,
mut divisor: u32,
prefix: &str,
postfix: &str,
) -> fmt::Result {
// Encode the fractional part into a temporary buffer. The buffer
// only needs to hold 9 elements, because `fractional_part` has to
// be smaller than 10^9. The buffer is prefilled with '0' digits
// to simplify the code below.
let mut buf = [b'0'; 9];
// The next digit is written at this position
let mut pos = 0;
// We keep writing digits into the buffer while there are non-zero
// digits left and we haven't written enough digits yet.
while fractional_part > 0 && pos < f.precision().unwrap_or(9) {
// Write new digit into the buffer
buf[pos] = b'0' + (fractional_part / divisor) as u8;
fractional_part %= divisor;
divisor /= 10;
pos += 1;
}
// If a precision < 9 was specified, there may be some non-zero
// digits left that weren't written into the buffer. In that case we
// need to perform rounding to match the semantics of printing
// normal floating point numbers. However, we only need to do work
// when rounding up. This happens if the first digit of the
// remaining ones is >= 5. When the first digit is exactly 5, rounding
// follows IEEE-754 round-ties-to-even semantics: we only round up
// if the last written digit is odd.
let integer_part = if fractional_part > 0 && fractional_part >= divisor * 5 {
// For ties (fractional_part == divisor * 5), only round up if last digit is odd
let is_tie = fractional_part == divisor * 5;
let last_digit_is_odd = if pos > 0 {
(buf[pos - 1] - b'0') % 2 == 1
} else {
// No fractional digits - check the integer part
(integer_part % 2) == 1
};
if is_tie && !last_digit_is_odd {
Some(integer_part)
} else {
// Round up the number contained in the buffer. We go through
// the buffer backwards and keep track of the carry.
let mut rev_pos = pos;
let mut carry = true;
while carry && rev_pos > 0 {
rev_pos -= 1;
// If the digit in the buffer is not '9', we just need to
// increment it and can stop then (since we don't have a
// carry anymore). Otherwise, we set it to '0' (overflow)
// and continue.
if buf[rev_pos] < b'9' {
buf[rev_pos] += 1;
carry = false;
} else {
buf[rev_pos] = b'0';
}
}
// If we still have the carry bit set, that means that we set
// the whole buffer to '0's and need to increment the integer
// part.
if carry {
// If `integer_part == u64::MAX` and precision < 9, any
// carry of the overflow during rounding of the
// `fractional_part` into the `integer_part` will cause the
// `integer_part` itself to overflow. Avoid this by using an
// `Option<u64>`, with `None` representing `u64::MAX + 1`.
integer_part.checked_add(1)
} else {
Some(integer_part)
}
}
} else {
Some(integer_part)
};
// Determine the end of the buffer: if precision is set, we just
// use as many digits from the buffer (capped to 9). If it isn't
// set, we only use all digits up to the last non-zero one.
let end = f.precision().map(|p| crate::cmp::min(p, 9)).unwrap_or(pos);
// This closure emits the formatted duration without emitting any
// padding (padding is calculated below).
let emit_without_padding = |f: &mut fmt::Formatter<'_>| {
if let Some(integer_part) = integer_part {
write!(f, "{}{}", prefix, integer_part)?;
} else {
// u64::MAX + 1 == 18446744073709551616
write!(f, "{}18446744073709551616", prefix)?;
}
// Write the decimal point and the fractional part (if any).
if end > 0 {
// SAFETY: We are only writing ASCII digits into the buffer and
// it was initialized with '0's, so it contains valid UTF8.
let s = unsafe { crate::str::from_utf8_unchecked(&buf[..end]) };
// If the user requests a precision > 9, we pad '0's at the end.
let w = f.precision().unwrap_or(pos);
write!(f, ".{:0<width$}", s, width = w)?;
}
write!(f, "{}", postfix)
};
match f.width() {
None => {
// No `width` specified. There's no need to calculate the
// length of the output in this case, just emit it.
emit_without_padding(f)
}
Some(requested_w) => {
// A `width` was specified. Calculate the actual width of
// the output in order to calculate the required padding.
// It consists of 4 parts:
// 1. The prefix: is either "+" or "", so we can just use len().
// 2. The postfix: can be "µs" so we have to count UTF8 characters.
let mut actual_w = prefix.len() + postfix.chars().count();
// 3. The integer part:
if let Some(integer_part) = integer_part {
if let Some(log) = integer_part.checked_ilog10() {
// integer_part is > 0, so has length log10(x)+1
actual_w += 1 + log as usize;
} else {
// integer_part is 0, so has length 1.
actual_w += 1;
}
} else {
// integer_part is u64::MAX + 1, so has length 20
actual_w += 20;
}
// 4. The fractional part (if any):
if end > 0 {
let frac_part_w = f.precision().unwrap_or(pos);
actual_w += 1 + frac_part_w;
}
if requested_w <= actual_w {
// Output is already longer than `width`, so don't pad.
emit_without_padding(f)
} else {
// We need to add padding. Use the `Formatter::padding` helper function.
let default_align = fmt::Alignment::Left;
let post_padding =
f.padding((requested_w - actual_w) as u16, default_align)?;
emit_without_padding(f)?;
post_padding.write(f)
}
}
}
}
<core::time::Duration as core::ops::arith::Add>::add fn add(self, rhs: Duration) -> Duration {
self.checked_add(rhs).expect("overflow when adding durations")
}
<core::time::Duration as core::ops::arith::Div<u32>>::div fn div(self, rhs: u32) -> Duration {
self.checked_div(rhs).expect("divide by zero error when dividing duration by scalar")
}
<core::time::Duration as core::ops::arith::Sub>::sub fn sub(self, rhs: Duration) -> Duration {
self.checked_sub(rhs).expect("overflow when subtracting durations")
}
<core::time::TryFromFloatSecsError as core::fmt::Display>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
TryFromFloatSecsErrorKind::Negative => {
"cannot convert float seconds to Duration: value is negative"
}
TryFromFloatSecsErrorKind::OverflowOrNan => {
"cannot convert float seconds to Duration: value is either too big or NaN"
}
}
.fmt(f)
}
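// Illustrative sketch (assumes std's `ToString`): both error kinds surface
// through the `Duration::try_from_secs_*` constructors.
use core::time::Duration;

fn main() {
    let err = Duration::try_from_secs_f64(-1.0).unwrap_err();
    assert_eq!(
        err.to_string(),
        "cannot convert float seconds to Duration: value is negative"
    );
    assert!(Duration::try_from_secs_f64(f64::NAN).is_err());
}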
<f128 as core::default::Default>::default fn default() -> $t {
$v
}
<f128 as core::iter::traits::accum::Sum<&'a f128>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
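// -0.0 is the additive identity here: IEEE-754 gives `x + -0.0 == x` for
// every `x`, including `-0.0` itself, whereas starting from `+0.0` would
// turn a sum of negative zeros into `+0.0`.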
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f128 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f128 as core::ops::arith::Add<&f128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f128 as core::ops::arith::AddAssign<&f128>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f128 as core::ops::arith::Div<&f128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f128 as core::ops::arith::DivAssign<&f128>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f128 as core::ops::arith::Mul<&f128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f128 as core::ops::arith::MulAssign<&f128>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f128 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f128 as core::ops::arith::Rem<&f128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f128 as core::ops::arith::RemAssign<&f128>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f128 as core::ops::arith::Sub<&f128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f128 as core::ops::arith::SubAssign<&f128>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<f16 as core::default::Default>::default fn default() -> $t {
$v
}
<f16 as core::fmt::float::GeneralFormat>::already_rounded_value_should_use_exponential fn already_rounded_value_should_use_exponential(&self) -> bool {
// `max_abs` rounds to infinity for `f16`. This is fine; it saves us from a more
// complex macro and just means a positive-exponent `f16` will never print as
// scientific notation by default (reasonably, the max is 65504.0).
#[allow(overflowing_literals)]
let max_abs = 1e+16;
let abs = $t::abs(*self);
(abs != 0.0 && abs < 1e-4) || abs >= max_abs
}
<f16 as core::iter::traits::accum::Sum<&'a f16>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f16 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f16 as core::num::dec2flt::float::RawFloat>::classify fn classify(self) -> FpCategory {
self.classify()
}
<f16 as core::num::dec2flt::float::RawFloat>::from_u64 fn from_u64(v: u64) -> Self {
debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH);
v as _
}
<f16 as core::num::dec2flt::float::RawFloat>::from_u64_bits fn from_u64_bits(v: u64) -> Self {
Self::from_bits((v & 0xFFFF) as u16)
}
<f16 as core::num::dec2flt::float::RawFloat>::pow10_fast_path fn pow10_fast_path(exponent: usize) -> Self {
#[allow(clippy::use_self)]
const TABLE: [f16; 8] = [1e0, 1e1, 1e2, 1e3, 1e4, 0.0, 0.0, 0.];
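// The zero entries are padding: masking with `& 7` keeps the index in
// bounds without a runtime check, and callers only take this fast path
// for exponents whose power of ten is exactly representable (1e0..=1e4
// for `f16`).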
TABLE[exponent & 7]
}
<f16 as core::num::dec2flt::float::RawFloat>::to_bits fn to_bits(self) -> Self::Int {
self.to_bits()
}
<f16 as core::num::flt2dec::decoder::DecodableFloat>::min_pos_norm_value fn min_pos_norm_value() -> Self {
f16::MIN_POSITIVE
}
<f16 as core::ops::arith::Add<&f16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f16 as core::ops::arith::AddAssign<&f16>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f16 as core::ops::arith::Div<&f16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f16 as core::ops::arith::DivAssign<&f16>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f16 as core::ops::arith::Mul<&f16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f16 as core::ops::arith::MulAssign<&f16>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f16 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f16 as core::ops::arith::Rem<&f16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f16 as core::ops::arith::RemAssign<&f16>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f16 as core::ops::arith::Sub<&f16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f16 as core::ops::arith::SubAssign<&f16>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<f32 as core::default::Default>::default fn default() -> $t {
$v
}
<f32 as core::fmt::float::GeneralFormat>::already_rounded_value_should_use_exponential fn already_rounded_value_should_use_exponential(&self) -> bool {
// `max_abs` rounds to infinity for `f16`. This is fine; it saves us from a more
// complex macro and just means a positive-exponent `f16` will never print as
// scientific notation by default (reasonably, the max is 65504.0).
#[allow(overflowing_literals)]
let max_abs = 1e+16;
let abs = $t::abs(*self);
(abs != 0.0 && abs < 1e-4) || abs >= max_abs
}
<f32 as core::iter::traits::accum::Sum<&'a f32>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f32 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f32 as core::num::dec2flt::float::RawFloat>::classify fn classify(self) -> FpCategory {
self.classify()
}
<f32 as core::num::dec2flt::float::RawFloat>::from_u64 fn from_u64(v: u64) -> Self {
debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH);
v as _
}
<f32 as core::num::dec2flt::float::RawFloat>::from_u64_bits fn from_u64_bits(v: u64) -> Self {
f32::from_bits((v & 0xFFFFFFFF) as u32)
}
<f32 as core::num::dec2flt::float::RawFloat>::pow10_fast_path fn pow10_fast_path(exponent: usize) -> Self {
#[allow(clippy::use_self)]
const TABLE: [f32; 16] =
[1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 0., 0., 0., 0., 0.];
TABLE[exponent & 15]
}
<f32 as core::num::dec2flt::float::RawFloat>::to_bits fn to_bits(self) -> Self::Int {
self.to_bits()
}
<f32 as core::num::flt2dec::decoder::DecodableFloat>::min_pos_norm_value fn min_pos_norm_value() -> Self {
f32::MIN_POSITIVE
}
<f32 as core::ops::arith::Add<&f32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f32 as core::ops::arith::AddAssign<&f32>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f32 as core::ops::arith::Div<&f32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f32 as core::ops::arith::DivAssign<&f32>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f32 as core::ops::arith::Mul<&f32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f32 as core::ops::arith::MulAssign<&f32>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f32 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f32 as core::ops::arith::Rem<&f32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f32 as core::ops::arith::RemAssign<&f32>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f32 as core::ops::arith::Sub<&f32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f32 as core::ops::arith::SubAssign<&f32>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<f64 as core::default::Default>::default fn default() -> $t {
$v
}
<f64 as core::fmt::float::GeneralFormat>::already_rounded_value_should_use_exponential fn already_rounded_value_should_use_exponential(&self) -> bool {
// `max_abs` rounds to infinity for `f16`. This is fine; it saves us from a more
// complex macro and just means a positive-exponent `f16` will never print as
// scientific notation by default (reasonably, the max is 65504.0).
#[allow(overflowing_literals)]
let max_abs = 1e+16;
let abs = $t::abs(*self);
(abs != 0.0 && abs < 1e-4) || abs >= max_abs
}
<f64 as core::iter::traits::accum::Sum<&'a f64>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f64 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f64 as core::num::dec2flt::float::RawFloat>::classify fn classify(self) -> FpCategory {
self.classify()
}
<f64 as core::num::dec2flt::float::RawFloat>::from_u64 fn from_u64(v: u64) -> Self {
debug_assert!(v <= Self::MAX_MANTISSA_FAST_PATH);
v as _
}
<f64 as core::num::dec2flt::float::RawFloat>::from_u64_bits fn from_u64_bits(v: u64) -> Self {
f64::from_bits(v)
}
<f64 as core::num::dec2flt::float::RawFloat>::pow10_fast_path fn pow10_fast_path(exponent: usize) -> Self {
const TABLE: [f64; 32] = [
1e0, 1e1, 1e2, 1e3, 1e4, 1e5, 1e6, 1e7, 1e8, 1e9, 1e10, 1e11, 1e12, 1e13, 1e14, 1e15,
1e16, 1e17, 1e18, 1e19, 1e20, 1e21, 1e22, 0., 0., 0., 0., 0., 0., 0., 0., 0.,
];
TABLE[exponent & 31]
}
<f64 as core::num::dec2flt::float::RawFloat>::to_bits fn to_bits(self) -> Self::Int {
self.to_bits()
}
<f64 as core::num::flt2dec::decoder::DecodableFloat>::min_pos_norm_value fn min_pos_norm_value() -> Self {
f64::MIN_POSITIVE
}
<f64 as core::ops::arith::Add<&f64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f64 as core::ops::arith::AddAssign<&f64>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f64 as core::ops::arith::Div<&f64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f64 as core::ops::arith::DivAssign<&f64>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f64 as core::ops::arith::Mul<&f64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f64 as core::ops::arith::MulAssign<&f64>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f64 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f64 as core::ops::arith::Rem<&f64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f64 as core::ops::arith::RemAssign<&f64>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f64 as core::ops::arith::Sub<&f64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f64 as core::ops::arith::SubAssign<&f64>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i128 as core::default::Default>::default fn default() -> $t {
$v
}
<i128 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, b: i128, c: i128, d: i128) -> (u128, i128) {
let (low, high) = wide_mul_u128(self as u128, b as u128);
let mut high = high as i128;
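// `x >> 127` is -1 for negative `x` and 0 otherwise; the next two lines
// subtract `b` from the high half when `self` is negative, and `self`
// when `b` is negative: the standard fix-up that turns an unsigned wide
// multiply into a signed one.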
high = high.wrapping_add(i128::wrapping_mul(self >> 127, b));
high = high.wrapping_add(i128::wrapping_mul(self, b >> 127));
let (low, carry) = u128::overflowing_add(low, c as u128);
high = high.wrapping_add((carry as i128) + (c >> 127));
let (low, carry) = u128::overflowing_add(low, d as u128);
high = high.wrapping_add((carry as i128) + (d >> 127));
(low, high)
}
<i128 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<i128 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<i128 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_sub(n as Self)
}
<i128 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.checked_sub_unsigned(n as $unsigned).unwrap_unchecked() }
}
<i128 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<i128 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_add(n as Self)
}
<i128 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.checked_add_unsigned(n as $unsigned).unwrap_unchecked() }
}
<i128 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
match end.checked_sub(*start) {
Some(result) => {
if let Ok(steps) = usize::try_from(result) {
(steps, Some(steps))
} else {
(usize::MAX, None)
}
}
// If the difference is too big for e.g. i128,
// it's also going to be too big for usize, which has fewer bits.
None => (usize::MAX, None),
}
} else {
(0, None)
}
}
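// Illustrative sketch (assumes std): ranges surface `steps_between`
// through `Iterator::size_hint`; a span wider than usize::MAX keeps the
// exact lower bound but loses the upper one.
fn main() {
    assert_eq!((0i128..10).size_hint(), (10, Some(10)));
    assert_eq!((i128::MIN..i128::MAX).size_hint(), (usize::MAX, None));
}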
<i128 as core::iter::traits::accum::Sum<&'a i128>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i128 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i128 as core::ops::arith::Add<&i128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i128 as core::ops::arith::AddAssign<&i128>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i128 as core::ops::arith::Div<&i128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i128 as core::ops::arith::DivAssign<&i128>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i128 as core::ops::arith::Mul<&i128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i128 as core::ops::arith::MulAssign<&i128>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i128 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i128 as core::ops::arith::Rem<&i128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i128 as core::ops::arith::RemAssign<&i128>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i128 as core::ops::arith::Sub<&i128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i128 as core::ops::arith::SubAssign<&i128>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i128 as core::ops::bit::BitAnd<&i128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i128 as core::ops::bit::BitAndAssign<&i128>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i128 as core::ops::bit::BitOr<&i128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i128 as core::ops::bit::BitOrAssign<&i128>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i128 as core::ops::bit::BitXor<&i128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i128 as core::ops::bit::BitXorAssign<&i128>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i128 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<i16 as core::default::Default>::default fn default() -> $t {
$v
}
<i16 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
<i16 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<i16 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<i16 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it is bigger than the entire range for i8 is wide
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
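A standalone sketch of the same logic, instantiated for i16 so that `$u_narrower` is u16 (an illustrative helper, not the unstable `Step` API itself). The middle case is the one the wrapping comment is about: the intermediate cast wraps, yet the final result is in range and exact:

    fn backward_checked_i16(start: i16, n: usize) -> Option<i16> {
        match u16::try_from(n) {
            Ok(n) => {
                let wrapped = start.wrapping_sub(n as i16);
                // Subtracting a non-negative count must not move the value up.
                if wrapped <= start { Some(wrapped) } else { None }
            }
            // n >= 2^16 spans more than the whole i16 range, so it always overflows.
            Err(_) => None,
        }
    }

    assert_eq!(backward_checked_i16(100, 50), Some(50));
    assert_eq!(backward_checked_i16(20_000, 50_000), Some(-30_000)); // cast wraps, result exact
    assert_eq!(backward_checked_i16(i16::MIN, 1), None);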
<i16 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.checked_sub_unsigned(n as $unsigned).unwrap_unchecked() }
}
<i16 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
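The `let _ = Self::MAX + 1;` line exists only to inherit the caller's overflow-check setting: it panics in debug builds and folds away in release builds, after which the wrapping add yields the mathematically correct value whenever it is representable. The i8 case cited in the comment, spelled out:

    // 255usize becomes -1i8 after the cast, and -128 + (-1) wraps to 127,
    // which is exactly -128 + 255.
    assert_eq!((-128i8).wrapping_add(255u8 as i8), 127);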
<i16 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it is bigger than the entire range for i8 is wide
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
<i16 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.checked_add_unsigned(n as $unsigned).unwrap_unchecked() }
}
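The unchecked variants wrap the checked mixed-sign helpers and convert the caller's no-overflow guarantee into `unwrap_unchecked`; violating that guarantee is undefined behavior. The safe counterpart shows the contract directly:

    assert_eq!(100i16.checked_add_unsigned(50), Some(150));
    assert_eq!(i16::MAX.checked_add_unsigned(1), None); // the case callers must rule out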
<i16 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
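The cast trick above, on concrete values: widening i16 to isize preserves the sign, and since both operands are in isize range the wrapping subtraction is exact, so the step count comes out right even when the range crosses zero:

    let (start, end) = (-5i16, 5i16);
    let steps = (end as isize).wrapping_sub(start as isize) as usize;
    assert_eq!(steps, 10);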
<i16 as core::iter::traits::accum::Sum<&'a i16>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i16 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
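Both impls fold with `+` starting from `$zero` and inherit the caller's overflow checks; the by-reference impl lets an iterator of `&i16` be summed without copying first. For example:

    let xs = [1i16, 2, 3];
    let by_val: i16 = xs.iter().copied().sum();
    let by_ref: i16 = xs.iter().sum(); // uses the `Sum<&'a i16>` impl above
    assert_eq!((by_val, by_ref), (6, 6));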
<i16 as core::ops::arith::Add<&i16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i16 as core::ops::arith::AddAssign<&i16>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
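The same forwarding pattern runs through all of the arithmetic and bitwise entries below: each `&$u` body dereferences a borrowed right-hand side and delegates to the by-value operator. For example:

    let mut a: i16 = 40;
    a += &1i16;                // AddAssign<&i16> forwards to AddAssign<i16>
    assert_eq!(a + &1i16, 42); // Add<&i16> forwards to Add<i16> the same way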
<i16 as core::ops::arith::Div<&i16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i16 as core::ops::arith::DivAssign<&i16>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i16 as core::ops::arith::Mul<&i16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i16 as core::ops::arith::MulAssign<&i16>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i16 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i16 as core::ops::arith::Rem<&i16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i16 as core::ops::arith::RemAssign<&i16>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i16 as core::ops::arith::Sub<&i16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i16 as core::ops::arith::SubAssign<&i16>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i16 as core::ops::bit::BitAnd<&i16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i16 as core::ops::bit::BitAndAssign<&i16>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i16 as core::ops::bit::BitOr<&i16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i16 as core::ops::bit::BitOrAssign<&i16>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i16 as core::ops::bit::BitXor<&i16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i16 as core::ops::bit::BitXorAssign<&i16>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i16 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<i32 as core::default::Default>::default fn default() -> $t {
$v
}
<i32 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
<i32 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<i32 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<i32 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it is bigger than the entire range for i8 is wide
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
<i32 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.checked_sub_unsigned(n as $unsigned).unwrap_unchecked() }
}
<i32 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<i32 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it is bigger than the entire range for i8 is wide
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
<i32 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.checked_add_unsigned(n as $unsigned).unwrap_unchecked() }
}
<i32 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<i32 as core::iter::traits::accum::Sum<&'a i32>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i32 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i32 as core::ops::arith::Add<&i32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i32 as core::ops::arith::AddAssign<&i32>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i32 as core::ops::arith::Div<&i32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i32 as core::ops::arith::DivAssign<&i32>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i32 as core::ops::arith::Mul<&i32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i32 as core::ops::arith::MulAssign<&i32>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i32 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i32 as core::ops::arith::Rem<&i32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i32 as core::ops::arith::RemAssign<&i32>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i32 as core::ops::arith::Sub<&i32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i32 as core::ops::arith::SubAssign<&i32>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i32 as core::ops::bit::BitAnd<&i32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i32 as core::ops::bit::BitAndAssign<&i32>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i32 as core::ops::bit::BitOr<&i32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i32 as core::ops::bit::BitOrAssign<&i32>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i32 as core::ops::bit::BitXor<&i32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i32 as core::ops::bit::BitXorAssign<&i32>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i32 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<i64 as core::default::Default>::default fn default() -> $t {
$v
}
<i64 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
<i64 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<i64 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<i64 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it is bigger than the entire range for i8 is wide
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
<i64 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.checked_sub_unsigned(n as $unsigned).unwrap_unchecked() }
}
<i64 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<i64 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it is bigger than the entire range for i8 is wide
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
<i64 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.checked_add_unsigned(n as $unsigned).unwrap_unchecked() }
}
<i64 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<i64 as core::iter::traits::accum::Sum<&'a i64>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i64 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i64 as core::ops::arith::Add<&i64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i64 as core::ops::arith::AddAssign<&i64>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i64 as core::ops::arith::Div<&i64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i64 as core::ops::arith::DivAssign<&i64>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i64 as core::ops::arith::Mul<&i64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i64 as core::ops::arith::MulAssign<&i64>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i64 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i64 as core::ops::arith::Rem<&i64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i64 as core::ops::arith::RemAssign<&i64>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i64 as core::ops::arith::Sub<&i64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i64 as core::ops::arith::SubAssign<&i64>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i64 as core::ops::bit::BitAnd<&i64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i64 as core::ops::bit::BitAndAssign<&i64>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i64 as core::ops::bit::BitOr<&i64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i64 as core::ops::bit::BitOrAssign<&i64>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i64 as core::ops::bit::BitXor<&i64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i64 as core::ops::bit::BitXorAssign<&i64>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i64 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<i8 as core::default::Default>::default fn default() -> $t {
$v
}
<i8 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
<i8 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<i8 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<i8 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it is bigger than the entire range for i8 is wide
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
<i8 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.checked_sub_unsigned(n as $unsigned).unwrap_unchecked() }
}
<i8 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<i8 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it is bigger than the entire range for i8 is wide
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
<i8 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.checked_add_unsigned(n as $unsigned).unwrap_unchecked() }
}
<i8 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
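A minimal sketch of the widen-then-wrapping-subtract pattern above, at the i8 instantiation. The cast back to usize matters for the isize instantiation, where the span can exceed isize::MAX:

fn steps_between_i8(start: i8, end: i8) -> (usize, Option<usize>) {
    if start <= end {
        let steps = (end as isize).wrapping_sub(start as isize) as usize;
        (steps, Some(steps))
    } else {
        (0, None)
    }
}

fn main() {
    assert_eq!(steps_between_i8(-128, 127), (255, Some(255))); // full i8 span
    assert_eq!(steps_between_i8(5, 1), (0, None));             // end before start
}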
<i8 as core::iter::traits::accum::Sum<&'a i8>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i8 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
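Usage sketch for the two fold-based `Sum` impls above: summing by value and by reference both reduce with `+`, and `#[rustc_inherit_overflow_checks]` makes the addition panic or wrap according to the caller's build settings:

fn main() {
    let xs = [1i8, 2, 3, 4];
    let by_value: i8 = xs.iter().copied().sum();
    let by_ref: i8 = xs.iter().sum(); // exercises the `Sum<&'a i8>` impl
    assert_eq!((by_value, by_ref), (10, 10));
}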
<i8 as core::ops::arith::Add<&i8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i8 as core::ops::arith::AddAssign<&i8>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i8 as core::ops::arith::Div<&i8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i8 as core::ops::arith::DivAssign<&i8>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i8 as core::ops::arith::Mul<&i8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i8 as core::ops::arith::MulAssign<&i8>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i8 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i8 as core::ops::arith::Rem<&i8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i8 as core::ops::arith::RemAssign<&i8>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i8 as core::ops::arith::Sub<&i8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i8 as core::ops::arith::SubAssign<&i8>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i8 as core::ops::bit::BitAnd<&i8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i8 as core::ops::bit::BitAndAssign<&i8>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i8 as core::ops::bit::BitOr<&i8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i8 as core::ops::bit::BitOrAssign<&i8>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i8 as core::ops::bit::BitXor<&i8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i8 as core::ops::bit::BitXorAssign<&i8>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i8 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<isize as core::default::Default>::default fn default() -> $t {
$v
}
<isize as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
<isize as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<isize as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<isize as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::backward(120_i8, 200) == Some(-80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it exceeds the width of the entire i8 range,
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
<isize as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.checked_sub_unsigned(n as $unsigned).unwrap_unchecked() }
}
<isize as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<isize as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it exceeds the width of the entire i8 range,
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
<isize as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.checked_add_unsigned(n as $unsigned).unwrap_unchecked() }
}
<isize as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<isize as core::iter::traits::accum::Sum<&'a isize>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<isize as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<isize as core::ops::arith::Add<&isize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<isize as core::ops::arith::AddAssign<&isize>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<isize as core::ops::arith::Div<&isize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<isize as core::ops::arith::DivAssign<&isize>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<isize as core::ops::arith::Mul<&isize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<isize as core::ops::arith::MulAssign<&isize>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<isize as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<isize as core::ops::arith::Rem<&isize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<isize as core::ops::arith::RemAssign<&isize>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<isize as core::ops::arith::Sub<&isize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<isize as core::ops::arith::SubAssign<&isize>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<isize as core::ops::bit::BitAnd<&isize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<isize as core::ops::bit::BitAndAssign<&isize>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<isize as core::ops::bit::BitOr<&isize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<isize as core::ops::bit::BitOrAssign<&isize>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<isize as core::ops::bit::BitXor<&isize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<isize as core::ops::bit::BitXorAssign<&isize>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<isize as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<isize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<str as core::convert::AsMut<str>>::as_mut fn as_mut(&mut self) -> &mut str {
self
}
<str as core::fmt::Debug>::fmt::needs_escape fn needs_escape(b: u8) -> bool {
b > 0x7E || b < 0x20 || b == b'\\' || b == b'"'
}
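A usage sketch of the fast-path predicate above: bytes outside printable ASCII, plus the two characters that are meaningful inside a quoted form, are routed to the slow escaping path:

fn needs_escape(b: u8) -> bool {
    b > 0x7E || b < 0x20 || b == b'\\' || b == b'"'
}

fn main() {
    assert!(needs_escape(b'\n'));  // control character
    assert!(needs_escape(b'"'));   // would terminate the quoted form
    assert!(!needs_escape(b'a'));  // printable ASCII passes through
}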
<str as core::fmt::Display>::fmt fn fmt(&self, f: &mut Formatter<'_>) -> Result {
f.pad(self)
}
<u128 as core::default::Default>::default fn default() -> $t {
$v
}
<u128 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, b: u128, c: u128, d: u128) -> (u128, u128) {
let (low, mut high) = wide_mul_u128(self, b);
let (low, carry) = u128::overflowing_add(low, c);
high += carry as u128;
let (low, carry) = u128::overflowing_add(low, d);
high += carry as u128;
(low, high)
}
<u128 as core::intrinsics::fallback::CarrylessMul>::carryless_mul fn carryless_mul(self, rhs: Self) -> Self {
let mut result = 0;
let mut i = 0;
while i < $type::BITS {
// If the i-th bit in rhs is set.
if (rhs >> i) & 1 != 0 {
// Then xor the result with `self` shifted to the left by i positions.
result ^= self << i;
}
i += 1;
}
result
}
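Carry-less multiplication XORs shifted partial products instead of adding them, i.e. polynomial multiplication over GF(2). A standalone sketch plus a worked case (`carryless_mul_u128` is an illustrative name):

fn carryless_mul_u128(a: u128, b: u128) -> u128 {
    let mut result = 0u128;
    for i in 0..128 {
        if (b >> i) & 1 != 0 {
            result ^= a << i; // XOR in a shifted copy; no carries propagate
        }
    }
    result
}

fn main() {
    // 0b11 ⊗ 0b11 = 0b101: (x + 1)(x + 1) = x^2 + 1 over GF(2).
    assert_eq!(carryless_mul_u128(0b11, 0b11), 0b101);
}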
<u128 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<u128 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted operands have disjoint
// bits (this would not hold for signed types, since SHR fills the vacated
// space with the sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<u128 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted operands have disjoint
// bits (this would not hold for signed types, since SHR fills the vacated
// space with the sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
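Conceptually, a funnel shift concatenates the two operands into a double-width value and shifts that, so bits vacated in one operand are refilled from the other. A safe sketch at u64 width, using u128 as the double-width container (`funnel_shl_u64` is illustrative, not the intrinsic):

fn funnel_shl_u64(hi: u64, lo: u64, shift: u32) -> u64 {
    assert!(shift < 64, "shift must be less than the bit width");
    let wide = ((hi as u128) << 64) | (lo as u128);
    ((wide << shift) >> 64) as u64
}

fn main() {
    assert_eq!(funnel_shl_u64(0x1, u64::MAX, 4), 0x1F); // 4 bits pulled from `lo`
    assert_eq!(funnel_shl_u64(0xAB, 0, 0), 0xAB);       // shift 0 returns `self`
}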
<u128 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<u128 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_sub(n as Self)
}
<u128 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.unchecked_sub(n as Self) }
}
<u128 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<u128 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_add(n as Self)
}
<u128 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.unchecked_add(n as Self) }
}
<u128 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
if let Ok(steps) = usize::try_from(*end - *start) {
(steps, Some(steps))
} else {
(usize::MAX, None)
}
} else {
(0, None)
}
}
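A sketch of the wide-unsigned `steps_between` behavior: spans that fit in usize are exact, while wider spans report a saturated lower bound with no exact count:

fn steps_between_u128(start: u128, end: u128) -> (usize, Option<usize>) {
    if start <= end {
        match usize::try_from(end - start) {
            Ok(steps) => (steps, Some(steps)),
            Err(_) => (usize::MAX, None), // more steps than usize can count
        }
    } else {
        (0, None)
    }
}

fn main() {
    assert_eq!(steps_between_u128(0, 10), (10, Some(10)));
    assert_eq!(steps_between_u128(0, u128::MAX), (usize::MAX, None));
}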
<u128 as core::iter::traits::accum::Sum<&'a u128>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u128 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u128 as core::ops::arith::Add<&u128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u128 as core::ops::arith::AddAssign<&u128>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u128 as core::ops::arith::Div<&u128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u128 as core::ops::arith::DivAssign<&u128>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u128 as core::ops::arith::Mul<&u128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u128 as core::ops::arith::MulAssign<&u128>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u128 as core::ops::arith::Rem<&u128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u128 as core::ops::arith::RemAssign<&u128>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u128 as core::ops::arith::Sub<&u128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u128 as core::ops::arith::SubAssign<&u128>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u128 as core::ops::bit::BitAnd<&u128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u128 as core::ops::bit::BitAndAssign<&u128>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u128 as core::ops::bit::BitOr<&u128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u128 as core::ops::bit::BitOrAssign<&u128>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u128 as core::ops::bit::BitXor<&u128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u128 as core::ops::bit::BitXorAssign<&u128>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u128 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<u16 as core::default::Default>::default fn default() -> $t {
$v
}
<u16 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
<u16 as core::intrinsics::fallback::CarrylessMul>::carryless_mul fn carryless_mul(self, rhs: Self) -> Self {
let mut result = 0;
let mut i = 0;
while i < $type::BITS {
// If the i-th bit in rhs is set.
if (rhs >> i) & 1 != 0 {
// Then xor the result with `self` shifted to the left by i positions.
result ^= self << i;
}
i += 1;
}
result
}
<u16 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<u16 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted operands have disjoint
// bits (this would not hold for signed types, since SHR fills the vacated
// space with the sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<u16 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted operands have disjoint
// bits (this would not hold for signed types, since SHR fills the vacated
// space with the sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<u16 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
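The `let _ = Self::MIN - 1;` line is how the method inherits the caller's overflow-check setting: with debug assertions the discarded subtraction panics, and in release it compiles away, leaving only the wrapping subtraction. A sketch of the same pattern for u16 (`backward_u16` is a hypothetical stand-in; the `allow` is needed because the overflow is deliberate):

#[allow(arithmetic_overflow)]
fn backward_u16(start: u16, n: usize) -> u16 {
    if n > u16::MAX as usize || start.checked_sub(n as u16).is_none() {
        // Panics under debug overflow checks; a discarded no-op in release.
        let _ = u16::MIN - 1;
    }
    start.wrapping_sub(n as u16)
}

fn main() {
    assert_eq!(backward_u16(300, 44), 256);
}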
<u16 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<u16 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.unchecked_sub(n as Self) }
}
<u16 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<u16 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
<u16 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.unchecked_add(n as Self) }
}
<u16 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
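`steps_between` is what gives integer ranges their exact `size_hint`: for `start <= end` the distance always fits in usize for a 16-bit type, so both bounds are known, while the `(0, None)` arm reports that no forward step count exists when `start > end`. Observable through stable iterator APIs:

fn main() {
    let r = 10u16..15;
    assert_eq!(r.size_hint(), (5, Some(5))); // steps_between(&10, &15)
    assert_eq!(r.len(), 5); // ExactSizeIterator relies on the exact upper bound
}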
<u16 as core::iter::traits::accum::Sum<&'a u16>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u16 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u16 as core::num::bignum::FullOps>::full_div_rem fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
debug_assert!(borrow < other);
// This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`.
let lhs = ((borrow as $bigty) << <$ty>::BITS) | (self as $bigty);
let rhs = other as $bigty;
((lhs / rhs) as $ty, (lhs % rhs) as $ty)
}
<u16 as core::num::bignum::FullOps>::full_mul_add fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
let (lo, hi) = self.carrying_mul_add(other, other2, carry);
(hi, lo)
}
<u16 as core::num::dec2flt::float::CastInto<i16>>::cast fn cast(self) -> i16 {
self as i16
}
<u16 as core::ops::arith::Add<&u16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u16 as core::ops::arith::AddAssign<&u16>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u16 as core::ops::arith::Div<&u16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u16 as core::ops::arith::DivAssign<&u16>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u16 as core::ops::arith::Mul<&u16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u16 as core::ops::arith::MulAssign<&u16>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u16 as core::ops::arith::Rem<&u16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u16 as core::ops::arith::RemAssign<&u16>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u16 as core::ops::arith::Sub<&u16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u16 as core::ops::arith::SubAssign<&u16>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
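The `fn $method(self, other: &$u)` bodies above come from the reference-forwarding macros: each dereferences the right-hand side and defers to the by-value impl, which is why mixed value/reference arithmetic works. For example:

fn main() {
    let b = 3u16;
    assert_eq!(2u16 + &b, 5); // Add<&u16> forwards to Add<u16>
    let mut a = 10u16;
    a -= &b; // SubAssign<&u16> forwards to SubAssign<u16>
    assert_eq!(a, 7);
}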
<u16 as core::ops::bit::BitAnd<&u16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u16 as core::ops::bit::BitAndAssign<&u16>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u16 as core::ops::bit::BitOr<&u16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u16 as core::ops::bit::BitOrAssign<&u16>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u16 as core::ops::bit::BitXor<&u16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u16 as core::ops::bit::BitXorAssign<&u16>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u16 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
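Unlike the arithmetic operators, `Shl`/`Shr` are implemented for every integer type on the right-hand side, so the shift amount does not need to match the shifted type; an amount of `u16::BITS` or more still panics in debug builds whatever the RHS type. For example:

fn main() {
    assert_eq!(1u16 << 3i8, 8); // Shl<i8>
    assert_eq!(1u16 << 3u128, 8); // Shl<u128>
    assert_eq!(0x8000u16 >> 15usize, 1); // Shr<usize>
}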
<u16 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<u32 as core::default::Default>::default fn default() -> $t {
$v
}
<u32 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
<u32 as core::intrinsics::fallback::CarrylessMul>::carryless_mul fn carryless_mul(self, rhs: Self) -> Self {
let mut result = 0;
let mut i = 0;
while i < $type::BITS {
// If the i-th bit in rhs is set.
if (rhs >> i) & 1 != 0 {
// Then xor the result with `self` shifted to the left by i positions.
result ^= self << i;
}
i += 1;
}
result
}
<u32 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<u32 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted values have disjoint bits (this is
// not true if they're signed, since SHR will fill in the empty space with a
// sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<u32 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted values have disjoint bits (this is
// not true if they're signed, since SHR will fill in the empty space with a
// sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<u32 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<u32 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<u32 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.unchecked_sub(n as Self) }
}
<u32 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<u32 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
<u32 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.unchecked_add(n as Self) }
}
<u32 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<u32 as core::iter::traits::accum::Sum<&'a u32>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u32 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u32 as core::num::bignum::FullOps>::full_div_rem fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
debug_assert!(borrow < other);
// This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`.
let lhs = ((borrow as $bigty) << <$ty>::BITS) | (self as $bigty);
let rhs = other as $bigty;
((lhs / rhs) as $ty, (lhs % rhs) as $ty)
}
<u32 as core::num::bignum::FullOps>::full_mul_add fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
let (lo, hi) = self.carrying_mul_add(other, other2, carry);
(hi, lo)
}
<u32 as core::num::dec2flt::float::CastInto<i16>>::cast fn cast(self) -> i16 {
self as i16
}
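This `as` cast truncates and reinterprets rather than checking range; `CastInto<i16>` is an internal float-parsing helper, and presumably its callers only pass exponents that fit. The raw cast behavior, for reference:

fn main() {
    assert_eq!(0x1234_u32 as i16, 0x1234); // in range: value preserved
    assert_eq!(u32::MAX as i16, -1); // out of range: truncated to the low 16 bits
}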
<u32 as core::ops::arith::Add<&u32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u32 as core::ops::arith::AddAssign<&u32>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u32 as core::ops::arith::Div<&u32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u32 as core::ops::arith::DivAssign<&u32>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u32 as core::ops::arith::Mul<&u32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u32 as core::ops::arith::MulAssign<&u32>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u32 as core::ops::arith::Rem<&u32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u32 as core::ops::arith::RemAssign<&u32>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u32 as core::ops::arith::Sub<&u32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u32 as core::ops::arith::SubAssign<&u32>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u32 as core::ops::bit::BitAnd<&u32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u32 as core::ops::bit::BitAndAssign<&u32>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u32 as core::ops::bit::BitOr<&u32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u32 as core::ops::bit::BitOrAssign<&u32>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u32 as core::ops::bit::BitXor<&u32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u32 as core::ops::bit::BitXorAssign<&u32>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u32 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<u64 as core::default::Default>::default fn default() -> $t {
$v
}
<u64 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
<u64 as core::intrinsics::fallback::CarrylessMul>::carryless_mul fn carryless_mul(self, rhs: Self) -> Self {
let mut result = 0;
let mut i = 0;
while i < $type::BITS {
// If the i-th bit in rhs is set.
if (rhs >> i) & 1 != 0 {
// Then xor the result with `self` shifted to the left by i positions.
result ^= self << i;
}
i += 1;
}
result
}
<u64 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<u64 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted values have disjoint bits (this is
// not true if they're signed, since SHR will fill in the empty space with a
// sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<u64 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted values have disjoint bits (this is
// not true if they're signed, since SHR will fill in the empty space with a
// sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<u64 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<u64 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<u64 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.unchecked_sub(n as Self) }
}
<u64 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<u64 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
<u64 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.unchecked_add(n as Self) }
}
<u64 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<u64 as core::iter::traits::accum::Sum<&'a u64>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u64 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u64 as core::num::bignum::FullOps>::full_div_rem fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
debug_assert!(borrow < other);
// This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`.
let lhs = ((borrow as $bigty) << <$ty>::BITS) | (self as $bigty);
let rhs = other as $bigty;
((lhs / rhs) as $ty, (lhs % rhs) as $ty)
}
<u64 as core::num::bignum::FullOps>::full_mul_add fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
let (lo, hi) = self.carrying_mul_add(other, other2, carry);
(hi, lo)
}
<u64 as core::num::dec2flt::float::CastInto<i16>>::cast fn cast(self) -> i16 {
self as i16
}
<u64 as core::ops::arith::Add<&u64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u64 as core::ops::arith::AddAssign<&u64>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u64 as core::ops::arith::Div<&u64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u64 as core::ops::arith::DivAssign<&u64>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u64 as core::ops::arith::Mul<&u64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u64 as core::ops::arith::MulAssign<&u64>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u64 as core::ops::arith::Rem<&u64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u64 as core::ops::arith::RemAssign<&u64>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u64 as core::ops::arith::Sub<&u64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u64 as core::ops::arith::SubAssign<&u64>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u64 as core::ops::bit::BitAnd<&u64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u64 as core::ops::bit::BitAndAssign<&u64>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u64 as core::ops::bit::BitOr<&u64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u64 as core::ops::bit::BitOrAssign<&u64>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u64 as core::ops::bit::BitXor<&u64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u64 as core::ops::bit::BitXorAssign<&u64>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u64 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<u8 as core::default::Default>::default fn default() -> $t {
$v
}
<u8 as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
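The identity behind this fallback is easy to check by hand with `u16` standing in for the macro's `$w`: `self * a + b + c` always fits in exactly two `u8` digits, because `255 * 255 + 255 + 255 == u16::MAX`. A minimal standalone sketch:

fn main() {
    let (x, a, b, c) = (200u8, 3u8, 7u8, 1u8);
    // Same computation as the fallback body above, with $w fixed to u16.
    let wide = (x as u16) * (a as u16) + (b as u16) + (c as u16); // 608
    let (lo, hi) = (wide as u8, (wide >> u8::BITS) as u8);
    assert_eq!((hi, lo), (2, 96)); // 608 == 2 * 256 + 96
}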
<u8 as core::intrinsics::fallback::CarrylessMul>::carryless_mul fn carryless_mul(self, rhs: Self) -> Self {
let mut result = 0;
let mut i = 0;
while i < $type::BITS {
// If the i-th bit in rhs is set.
if (rhs >> i) & 1 != 0 {
// Then xor the result with `self` shifted to the left by i positions.
result ^= self << i;
}
i += 1;
}
result
}
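Carry-less multiplication is polynomial multiplication over GF(2): partial products are combined with XOR, so no carries propagate. A concrete instance of the same shift-and-XOR loop with `$type` fixed to `u8` (the helper name is illustrative):

fn carryless_mul_u8(a: u8, rhs: u8) -> u8 {
    let mut result = 0;
    for i in 0..u8::BITS {
        // XOR in `a << i` whenever the i-th bit of rhs is set.
        if (rhs >> i) & 1 != 0 {
            result ^= a << i;
        }
    }
    result
}

fn main() {
    // (x^2 + 1) * (x + 1) = x^3 + x^2 + x + 1 over GF(2).
    assert_eq!(carryless_mul_u8(0b0101, 0b0011), 0b1111);
}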
<u8 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
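The precondition is what makes this profitable: when two operands share no set bits, `|`, `^`, and carry-free `+` all agree, so the backend may lower the OR to whichever is cheapest. A quick check of that equivalence:

fn main() {
    let (hi, lo) = (0b1010_0000u8, 0b0000_0101u8);
    assert_eq!(hi & lo, 0);       // the bit patterns are disjoint,
    assert_eq!(hi | lo, hi + lo); // so OR coincides with ADD
    assert_eq!(hi | lo, hi ^ lo); // and with XOR
}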
<u8 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
            // - because the types are unsigned, the two shifted values occupy
            //   disjoint bits (this is not true if they're signed, since SHR
            //   fills the vacated space with the sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
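A funnel shift concatenates two words and shifts bits across the seam; feeding the same word to both halves turns it into a rotate. A safe sketch of the same arithmetic, with the precondition checked eagerly (helper name illustrative):

fn funnel_shl_u8(a: u8, b: u8, shift: u32) -> u8 {
    assert!(shift < u8::BITS);
    if shift == 0 { a } else { (a << shift) | (b >> (u8::BITS - shift)) }
}

fn main() {
    let x = 0b1000_0001u8;
    // Same word in both halves: the funnel shift degenerates to a rotate.
    assert_eq!(funnel_shl_u8(x, x, 3), x.rotate_left(3));
    // With distinct halves, the vacated low bits come from the top of `b`.
    assert_eq!(funnel_shl_u8(0b0000_0001, 0b1000_0000, 1), 0b0000_0011);
}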
<u8 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
            // - because the types are unsigned, the two shifted values occupy
            //   disjoint bits (this is not true if they're signed, since SHR
            //   fills the vacated space with the sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<u8 as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<u8 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<u8 as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.unchecked_sub(n as Self) }
}
<u8 as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<u8 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
<u8 as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.unchecked_add(n as Self) }
}
<u8 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
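This method is what lets ranges over `u8` report an exact, overflow-free length, since `u8` is narrower than `usize`:

fn main() {
    let r = 10u8..250;
    assert_eq!(r.len(), 240); // exactly 250 - 10 steps, known up front
}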
<u8 as core::iter::traits::accum::Sum<&'a u8>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u8 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
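Both `Sum` impls fold with plain `+` under `#[rustc_inherit_overflow_checks]`, so an overflowing `u8` sum panics when overflow checks are enabled rather than wrapping silently. Typical use of the two impls:

fn main() {
    let by_ref: u8 = [1u8, 2, 3].iter().sum();      // Sum<&u8>
    let by_val: u8 = [1u8, 2, 3].into_iter().sum(); // Sum<u8>
    assert_eq!((by_ref, by_val), (6, 6));
}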
<u8 as core::num::bignum::FullOps>::full_div_rem fn full_div_rem(self, other: $ty, borrow: $ty) -> ($ty, $ty) {
debug_assert!(borrow < other);
// This cannot overflow; the output is between `0` and `other * (2^nbits - 1)`.
let lhs = ((borrow as $bigty) << <$ty>::BITS) | (self as $bigty);
let rhs = other as $bigty;
((lhs / rhs) as $ty, (lhs % rhs) as $ty)
}
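The `borrow < other` precondition is what keeps the quotient to a single digit: the two-digit dividend `(borrow, self)` is then strictly less than `other << BITS`. A worked `u8` instance (helper name illustrative):

fn full_div_rem_u8(lo: u8, other: u8, borrow: u8) -> (u8, u8) {
    debug_assert!(borrow < other);
    // Assemble the two-digit dividend in the wide type, then split the results.
    let lhs = ((borrow as u16) << u8::BITS) | (lo as u16);
    ((lhs / other as u16) as u8, (lhs % other as u16) as u8)
}

fn main() {
    // (1 * 256 + 44) / 10 == 30 remainder 0.
    assert_eq!(full_div_rem_u8(44, 10, 1), (30, 0));
}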
<u8 as core::num::bignum::FullOps>::full_mul_add fn full_mul_add(self, other: $ty, other2: $ty, carry: $ty) -> ($ty, $ty) {
// This cannot overflow;
// the output is between `0` and `2^nbits * (2^nbits - 1)`.
let (lo, hi) = self.carrying_mul_add(other, other2, carry);
(hi, lo)
}
<u8 as core::ops::arith::Add<&u8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u8 as core::ops::arith::AddAssign<&u8>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u8 as core::ops::arith::Div<&u8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u8 as core::ops::arith::DivAssign<&u8>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u8 as core::ops::arith::Mul<&u8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u8 as core::ops::arith::MulAssign<&u8>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u8 as core::ops::arith::Rem<&u8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u8 as core::ops::arith::RemAssign<&u8>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u8 as core::ops::arith::Sub<&u8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u8 as core::ops::arith::SubAssign<&u8>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u8 as core::ops::bit::BitAnd<&u8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u8 as core::ops::bit::BitAndAssign<&u8>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u8 as core::ops::bit::BitOr<&u8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u8 as core::ops::bit::BitOrAssign<&u8>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u8 as core::ops::bit::BitXor<&u8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u8 as core::ops::bit::BitXorAssign<&u8>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u8 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<usize as core::default::Default>::default fn default() -> $t {
$v
}
<usize as core::intrinsics::fallback::CarryingMulAdd>::carrying_mul_add fn carrying_mul_add(self, a: Self, b: Self, c: Self) -> ($u, $t) {
let wide = (self as $w) * (a as $w) + (b as $w) + (c as $w);
(wide as _, (wide >> Self::BITS) as _)
}
<usize as core::intrinsics::fallback::CarrylessMul>::carryless_mul fn carryless_mul(self, rhs: Self) -> Self {
let mut result = 0;
let mut i = 0;
while i < $type::BITS {
// If the i-th bit in rhs is set.
if (rhs >> i) & 1 != 0 {
// Then xor the result with `self` shifted to the left by i positions.
result ^= self << i;
}
i += 1;
}
result
}
<usize as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<usize as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
            // - because the types are unsigned, the two shifted values occupy
            //   disjoint bits (this is not true if they're signed, since SHR
            //   fills the vacated space with the sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<usize as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
            // - because the types are unsigned, the two shifted values occupy
            //   disjoint bits (this is not true if they're signed, since SHR
            //   fills the vacated space with the sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<usize as core::iter::range::Step>::backward fn backward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::backward_checked(start, n).is_none() {
let _ = Self::MIN - 1;
}
// Do wrapping math to allow e.g. `Step::backward(127i8, 255)`.
start.wrapping_sub(n as Self)
}
<usize as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<usize as core::iter::range::Step>::backward_unchecked unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
unsafe { start.unchecked_sub(n as Self) }
}
<usize as core::iter::range::Step>::forward fn forward(start: Self, n: usize) -> Self {
// In debug builds, trigger a panic on overflow.
// This should optimize completely out in release builds.
if Self::forward_checked(start, n).is_none() {
let _ = Self::MAX + 1;
}
// Do wrapping math to allow e.g. `Step::forward(-128i8, 255)`.
start.wrapping_add(n as Self)
}
<usize as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
<usize as core::iter::range::Step>::forward_unchecked unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
// SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
unsafe { start.unchecked_add(n as Self) }
}
<usize as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<usize as core::iter::traits::accum::Sum<&'a usize>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<usize as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<usize as core::ops::arith::Add<&usize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<usize as core::ops::arith::AddAssign<&usize>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<usize as core::ops::arith::Div<&usize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<usize as core::ops::arith::DivAssign<&usize>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<usize as core::ops::arith::Mul<&usize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<usize as core::ops::arith::MulAssign<&usize>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<usize as core::ops::arith::Rem<&usize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<usize as core::ops::arith::RemAssign<&usize>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<usize as core::ops::arith::Sub<&usize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<usize as core::ops::arith::SubAssign<&usize>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<usize as core::ops::bit::BitAnd<&usize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<usize as core::ops::bit::BitAndAssign<&usize>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<usize as core::ops::bit::BitOr<&usize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<usize as core::ops::bit::BitOrAssign<&usize>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<usize as core::ops::bit::BitXor<&usize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<usize as core::ops::bit::BitXorAssign<&usize>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<usize as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<usize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::pat::RangePattern>::sub_one fn sub_one(self) -> Self {
match self.checked_sub(1) {
Some(val) => val,
None => panic!("exclusive range end at minimum value of type")
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&T> {
if self < slice.len() {
// SAFETY: `self` is checked to be in bounds.
unsafe { Some(slice_get_unchecked(slice, self)) }
} else {
None
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut T> {
if self < slice.len() {
// SAFETY: `self` is checked to be in bounds.
unsafe { Some(slice_get_unchecked(slice, self)) }
} else {
None
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
assert_unsafe_precondition!(
check_language_ub, // okay because of the `assume` below
"slice::get_unchecked requires that the index is within the slice",
(this: usize = self, len: usize = slice.len()) => this < len
);
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
// Use intrinsics::assume instead of hint::assert_unchecked so that we don't check the
// precondition of this function twice.
crate::intrinsics::assume(self < slice.len());
slice_get_unchecked(slice, self)
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked_mut requires that the index is within the slice",
(this: usize = self, len: usize = slice.len()) => this < len
);
// SAFETY: see comments for `get_unchecked` above.
unsafe { slice_get_unchecked(slice, self) }
}
<usize as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &T {
// N.B., use intrinsic indexing
&(*slice)[self]
}
<usize as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut T {
// N.B., use intrinsic indexing
&mut (*slice)[self]
}
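In practice the difference between these entry points is failure behavior: `get` returns `None` past the end, while `index`, which backs the `[]` operator, panics:

fn main() {
    let v = [10, 20, 30];
    assert_eq!(v.get(1), Some(&20));
    assert_eq!(v.get(3), None); // checked access: no panic
    assert_eq!(v[2], 30);       // `[]` would panic on an out-of-bounds index
}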
core::alloc::layout::Layout::align pub const fn align(&self) -> usize {
self.align.as_usize()
}
core::alloc::layout::Layout::from_size_align_unchecked pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
assert_unsafe_precondition!(
check_library_ub,
"Layout::from_size_align_unchecked requires that align is a power of 2 \
and the rounded-up allocation size does not exceed isize::MAX",
(
size: usize = size,
align: usize = align,
) => Layout::is_size_align_valid(size, align)
);
// SAFETY: the caller is required to uphold the preconditions.
unsafe { Layout { size, align: mem::transmute(align) } }
}
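The safe constructor runs the same `is_size_align_valid` test but reports failure as an error value instead of a precondition violation:

use std::alloc::Layout;

fn main() {
    assert!(Layout::from_size_align(16, 8).is_ok());
    assert!(Layout::from_size_align(16, 3).is_err()); // 3 is not a power of two
}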
core::alloc::layout::Layout::from_size_alignment_unchecked pub const unsafe fn from_size_alignment_unchecked(size: usize, alignment: Alignment) -> Self {
assert_unsafe_precondition!(
check_library_ub,
"Layout::from_size_alignment_unchecked requires \
that the rounded-up allocation size does not exceed isize::MAX",
(
size: usize = size,
alignment: Alignment = alignment,
) => Layout::is_size_alignment_valid(size, alignment)
);
// SAFETY: the caller is required to uphold the preconditions.
Layout { size, align: alignment }
}
core::alloc::layout::Layout::is_size_align_valid const fn is_size_align_valid(size: usize, align: usize) -> bool {
let Some(alignment) = Alignment::new(align) else { return false };
Self::is_size_alignment_valid(size, alignment)
}
core::alloc::layout::Layout::is_size_alignment_valid const fn is_size_alignment_valid(size: usize, alignment: Alignment) -> bool {
size <= Self::max_size_for_alignment(alignment)
}
core::alloc::layout::Layout::max_size_for_alignment const fn max_size_for_alignment(alignment: Alignment) -> usize {
// (power-of-two implies align != 0.)
// Rounded up size is:
// size_rounded_up = (size + align - 1) & !(align - 1);
//
// We know from above that align != 0. If adding (align - 1)
// does not overflow, then rounding up will be fine.
//
// Conversely, &-masking with !(align - 1) will subtract off
// only low-order-bits. Thus if overflow occurs with the sum,
// the &-mask cannot subtract enough to undo that overflow.
//
// Above implies that checking for summation overflow is both
// necessary and sufficient.
// SAFETY: the maximum possible alignment is `isize::MAX + 1`,
// so the subtraction cannot overflow.
unsafe { unchecked_sub(isize::MAX as usize + 1, alignment.as_usize()) }
}
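Concretely, on a 64-bit target with alignment 8 the largest permitted size is 2^63 - 8, and rounding that size up to a multiple of 8 cannot overflow because `size + align - 1` still fits in `usize`:

fn main() {
    let align: usize = 8;
    let max_size = (isize::MAX as usize + 1) - align;
    let rounded = (max_size + align - 1) & !(align - 1); // no overflow here
    assert_eq!(rounded % align, 0);
    assert!(rounded >= max_size);
}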
core::alloc::layout::Layout::new pub const fn new<T>() -> Self {
<T as SizedTypeProperties>::LAYOUT
}
core::alloc::layout::Layout::size pub const fn size(&self) -> usize {
self.size
}
core::any::TypeId::as_u128 fn as_u128(self) -> u128 {
let mut bytes = [0; 16];
// This is a provenance-stripping memcpy.
for (i, chunk) in self.data.iter().copied().enumerate() {
let chunk = chunk.addr().to_ne_bytes();
let start = i * chunk.len();
bytes[start..(start + chunk.len())].copy_from_slice(&chunk);
}
u128::from_ne_bytes(bytes)
}
core::any::TypeId::of pub const fn of<T: ?Sized + 'static>() -> TypeId {
const { intrinsics::type_id::<T>() }
}
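`TypeId::of` is the public face of this machinery: ids are equal exactly when the `'static` types are, and even one level of reference changes the answer:

use std::any::TypeId;

fn main() {
    assert_eq!(TypeId::of::<u32>(), TypeId::of::<u32>());
    assert_ne!(TypeId::of::<u32>(), TypeId::of::<&u32>());
}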
core::any::type_name pub const fn type_name<T: ?Sized>() -> &'static str {
const { intrinsics::type_name::<T>() }
}
core::any::type_name_of_val pub const fn type_name_of_val<T: ?Sized>(_val: &T) -> &'static str {
type_name::<T>()
}
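The returned string is intended for diagnostics only and its exact rendering is not guaranteed, so tests should match loosely:

fn main() {
    let name = std::any::type_name::<Option<u8>>();
    assert!(name.contains("Option")); // the precise form may change between releases
}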
core::array::<impl [T; N]>::as_mut_slice pub const fn as_mut_slice(&mut self) -> &mut [T] {
self
}
core::array::<impl [T; N]>::as_slice pub const fn as_slice(&self) -> &[T] {
self
}
core::array::<impl [T; N]>::map pub const fn map<F, U>(self, f: F) -> [U; N]
where
F: [const] FnMut(T) -> U + [const] Destruct,
U: [const] Destruct,
T: [const] Destruct,
{
self.try_map(NeverShortCircuit::wrap_mut_1(f)).0
}
core::array::<impl [T; N]>::try_map pub const fn try_map<R>(
self,
mut f: impl [const] FnMut(T) -> R + [const] Destruct,
) -> ChangeOutputType<R, [R::Output; N]>
where
R: [const] Try<Residual: [const] Residual<[R::Output; N]>, Output: [const] Destruct>,
T: [const] Destruct,
{
let mut me = ManuallyDrop::new(self);
// SAFETY: try_from_fn calls `f` N times.
let mut f = unsafe { drain::Drain::new(&mut me, &mut f) };
try_from_fn(&mut f)
}
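`map` consumes the array by value and yields a new array of the same length; it is a thin wrapper over the short-circuiting `try_map` with a never-failing residual. Typical use:

fn main() {
    let squares = [1u32, 2, 3].map(|x| x * x);
    assert_eq!(squares, [1, 4, 9]);
}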
core::array::<impl core::clone::Clone for [T; N]>::clone fn clone(&self) -> Self {
SpecArrayClone::clone(self)
}
core::array::<impl core::convert::AsRef<[T]> for [T; N]>::as_ref fn as_ref(&self) -> &[T] {
&self[..]
}
core::array::<impl core::convert::TryFrom<&'a [T]> for &'a [T; N]>::try_from fn try_from(slice: &'a [T]) -> Result<&'a [T; N], TryFromSliceError> {
slice.as_array().ok_or(TryFromSliceError(()))
}
core::array::<impl core::convert::TryFrom<&'a mut [T]> for &'a mut [T; N]>::try_from fn try_from(slice: &'a mut [T]) -> Result<&'a mut [T; N], TryFromSliceError> {
slice.as_mut_array().ok_or(TryFromSliceError(()))
}
core::array::<impl core::convert::TryFrom<&[T]> for [T; N]>::try_from fn try_from(slice: &[T]) -> Result<[T; N], TryFromSliceError> {
<&Self>::try_from(slice).copied()
}
core::array::<impl core::convert::TryFrom<&mut [T]> for [T; N]>::try_from fn try_from(slice: &mut [T]) -> Result<[T; N], TryFromSliceError> {
<Self>::try_from(&*slice)
}
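These impls are what make `try_into` work from slices to fixed-size arrays, failing cleanly on a length mismatch:

fn main() {
    let v = [1u8, 2, 3, 4];
    let whole: [u8; 4] = v.as_slice().try_into().unwrap();
    assert_eq!(whole, [1, 2, 3, 4]);
    let bad: Result<[u8; 3], _> = v.as_slice().try_into();
    assert!(bad.is_err()); // wrong length is a TryFromSliceError
}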
core::array::<impl core::fmt::Debug for [T; N]>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&&self[..], f)
}
core::array::<impl core::iter::traits::collect::IntoIterator for &'a [T; N]>::into_iter fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
core::array::<impl core::iter::traits::collect::IntoIterator for &'a mut [T; N]>::into_iter fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
core::array::<impl core::ops::index::Index<I> for [T; N]>::index fn index(&self, index: I) -> &Self::Output {
Index::index(self as &[T], index)
}
core::array::<impl core::ops::index::IndexMut<I> for [T; N]>::index_mut fn index_mut(&mut self, index: I) -> &mut Self::Output {
IndexMut::index_mut(self as &mut [T], index)
}
core::array::Guard::<'_, T>::push_unchecked pub(crate) const unsafe fn push_unchecked(&mut self, item: T) {
// SAFETY: If `initialized` was correct before and the caller does not
// invoke this method more than N times, then writes will be in-bounds
// and slots will not be initialized more than once.
unsafe {
self.array_mut.get_unchecked_mut(self.initialized).write(item);
self.initialized = self.initialized.unchecked_add(1);
}
}
core::array::ascii::<impl [u8; N]>::as_ascii pub const fn as_ascii(&self) -> Option<&[ascii::Char; N]> {
if self.is_ascii() {
// SAFETY: Just checked that it's ASCII
Some(unsafe { self.as_ascii_unchecked() })
} else {
None
}
}
core::array::ascii::<impl [u8; N]>::as_ascii_unchecked pub const unsafe fn as_ascii_unchecked(&self) -> &[ascii::Char; N] {
let byte_ptr: *const [u8; N] = self;
let ascii_ptr = byte_ptr as *const [ascii::Char; N];
// SAFETY: The caller promised all the bytes are ASCII
unsafe { &*ascii_ptr }
}
core::array::drain::Drain::<'l, 'f, T, N, F>::new pub(super) const unsafe fn new(array: &'l mut ManuallyDrop<[T; N]>, f: &'f mut F) -> Self {
        // don't drop the array; "ownership" of the elements is transferred to Self
let ptr: NonNull<T> = NonNull::from_mut(array).cast();
// SAFETY:
// Adding `slice.len()` to the starting pointer gives a pointer
// at the end of `slice`. `end` will never be dereferenced, only checked
// for direct pointer equality with `ptr` to check if the drainer is done.
unsafe {
let end = if T::IS_ZST { null_mut() } else { ptr.as_ptr().add(N) };
Self { ptr, end, f, l: PhantomData }
}
}
core::array::equality::<impl core::cmp::PartialEq<&[U]> for [T; N]>::eq fn eq(&self, other: &&[U]) -> bool {
*self == **other
}
core::array::equality::<impl core::cmp::PartialEq<&[U]> for [T; N]>::ne fn ne(&self, other: &&[U]) -> bool {
*self != **other
}
core::array::equality::<impl core::cmp::PartialEq<&mut [U]> for [T; N]>::eq fn eq(&self, other: &&mut [U]) -> bool {
*self == **other
}
core::array::equality::<impl core::cmp::PartialEq<&mut [U]> for [T; N]>::ne fn ne(&self, other: &&mut [U]) -> bool {
*self != **other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &[T]>::eq fn eq(&self, other: &[U; N]) -> bool {
**self == *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &[T]>::ne fn ne(&self, other: &[U; N]) -> bool {
**self != *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &mut [T]>::eq fn eq(&self, other: &[U; N]) -> bool {
**self == *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &mut [T]>::ne fn ne(&self, other: &[U; N]) -> bool {
**self != *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T; N]>::eq fn eq(&self, other: &[U; N]) -> bool {
SpecArrayEq::spec_eq(self, other)
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T; N]>::ne fn ne(&self, other: &[U; N]) -> bool {
SpecArrayEq::spec_ne(self, other)
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T]>::eq fn eq(&self, other: &[U; N]) -> bool {
match self.as_array::<N>() {
Some(b) => *b == *other,
None => false,
}
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T]>::ne fn ne(&self, other: &[U; N]) -> bool {
match self.as_array::<N>() {
Some(b) => *b != *other,
None => true,
}
}
core::array::equality::<impl core::cmp::PartialEq<[U]> for [T; N]>::eq fn eq(&self, other: &[U]) -> bool {
match other.as_array::<N>() {
Some(b) => *self == *b,
None => false,
}
}
core::array::equality::<impl core::cmp::PartialEq<[U]> for [T; N]>::ne fn ne(&self, other: &[U]) -> bool {
match other.as_array::<N>() {
Some(b) => *self != *b,
None => true,
}
}
core::array::from_fn pub const fn from_fn<T: [const] Destruct, const N: usize, F>(f: F) -> [T; N]
where
F: [const] FnMut(usize) -> T + [const] Destruct,
{
try_from_fn(NeverShortCircuit::wrap_mut_1(f)).0
}
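`from_fn` drives the same `try_from_fn` plumbing with a closure that cannot fail; the callback sees each index exactly once, in order:

fn main() {
    let table: [usize; 5] = std::array::from_fn(|i| i * i);
    assert_eq!(table, [0, 1, 4, 9, 16]);
}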
core::array::from_mut pub const fn from_mut<T>(s: &mut T) -> &mut [T; 1] {
// SAFETY: Converting `&mut T` to `&mut [T; 1]` is sound.
unsafe { &mut *(s as *mut T).cast::<[T; 1]>() }
}
core::array::from_ref pub const fn from_ref<T>(s: &T) -> &[T; 1] {
// SAFETY: Converting `&T` to `&[T; 1]` is sound.
unsafe { &*(s as *const T).cast::<[T; 1]>() }
}
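Both casts are sound because a value and a one-element array of it have identical layout; the mutable variant makes a lone value usable wherever a length-1 array or slice is expected:

fn main() {
    let mut x = 7;
    std::array::from_mut(&mut x)[0] += 1;
    assert_eq!(x, 8);
}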
core::array::from_trusted_iterator fn from_trusted_iterator<T, const N: usize>(iter: impl UncheckedIterator<Item = T>) -> [T; N] {
try_from_trusted_iterator(iter.map(NeverShortCircuit)).0
}
core::array::iter::<impl core::iter::traits::collect::IntoIterator for [T; N]>::into_iter fn into_iter(self) -> Self::IntoIter {
// SAFETY: The transmute here is actually safe. The docs of `MaybeUninit`
// promise:
//
// > `MaybeUninit<T>` is guaranteed to have the same size and alignment
// > as `T`.
//
// The docs even show a transmute from an array of `MaybeUninit<T>` to
// an array of `T`.
//
// With that, this initialization satisfies the invariants.
//
// FIXME: If normal `transmute` ever gets smart enough to allow this
// directly, use it instead of `transmute_unchecked`.
let data: [MaybeUninit<T>; N] = unsafe { transmute_unchecked(self) };
// SAFETY: The original array was entirely initialized and the alive
// range we're passing here represents that fact.
let inner = unsafe { InnerSized::new_unchecked(IndexRange::zero_to(N), data) };
IntoIter { inner: ManuallyDrop::new(inner) }
}
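Since the 2021 edition this by-value impl is what `for x in arr` resolves to, so elements are moved out rather than borrowed:

fn main() {
    let words = [String::from("a"), String::from("b")];
    let mut owned = Vec::new();
    for w in words {
        owned.push(w); // `w` is an owned String moved out of the array
    }
    assert_eq!(owned.len(), 2);
}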
core::array::iter::IntoIter::<T, N>::unsize fn unsize(&self) -> &InnerUnsized<T> {
self.inner.deref()
}
core::array::iter::IntoIter::<T, N>::unsize_mut fn unsize_mut(&mut self) -> &mut InnerUnsized<T> {
self.inner.deref_mut()
}
core::array::iter::iter_inner::PolymorphicIter::<DATA>::len pub(super) const fn len(&self) -> usize {
self.alive.len()
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>; N]>::empty pub(super) const fn empty() -> Self {
Self { alive: IndexRange::zero_to(0), data: [const { MaybeUninit::uninit() }; N] }
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>; N]>::new_unchecked pub(super) const unsafe fn new_unchecked(alive: IndexRange, data: [MaybeUninit<T>; N]) -> Self {
Self { alive, data }
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::advance_back_by pub(super) fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
// This also moves the end, which marks them as conceptually "dropped",
// so if anything goes bad then our drop impl won't double-free them.
let range_to_drop = self.alive.take_suffix(n);
let remaining = n - range_to_drop.len();
// SAFETY: These elements are currently initialized, so it's fine to drop them.
unsafe {
let slice = self.data.get_unchecked_mut(range_to_drop);
slice.assume_init_drop();
}
NonZero::new(remaining).map_or(Ok(()), Err)
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::advance_by pub(super) fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
// This also moves the start, which marks them as conceptually "dropped",
// so if anything goes bad then our drop impl won't double-free them.
let range_to_drop = self.alive.take_prefix(n);
let remaining = n - range_to_drop.len();
// SAFETY: These elements are currently initialized, so it's fine to drop them.
unsafe {
let slice = self.data.get_unchecked_mut(range_to_drop);
slice.assume_init_drop();
}
NonZero::new(remaining).map_or(Ok(()), Err)
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::as_slice pub(super) fn as_slice(&self) -> &[T] {
// SAFETY: We know that all elements within `alive` are properly initialized.
unsafe {
let slice = self.data.get_unchecked(self.alive.clone());
slice.assume_init_ref()
}
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::fold pub(super) fn fold<B>(&mut self, init: B, f: impl FnMut(B, T) -> B) -> B {
self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::next pub(super) fn next(&mut self) -> Option<T> {
// Get the next index from the front.
//
// Increasing `alive.start` by 1 maintains the invariant regarding
// `alive`. However, due to this change, for a short time, the alive
// zone is not `data[alive]` anymore, but `data[idx..alive.end]`.
self.alive.next().map(|idx| {
// Read the element from the array.
// SAFETY: `idx` is an index into the former "alive" region of the
// array. Reading this element means that `data[idx]` is regarded as
// dead now (i.e. do not touch). As `idx` was the start of the
// alive-zone, the alive zone is now `data[alive]` again, restoring
// all invariants.
unsafe { self.data.get_unchecked(idx).assume_init_read() }
})
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::next_back pub(super) fn next_back(&mut self) -> Option<T> {
// Get the next index from the back.
//
// Decreasing `alive.end` by 1 maintains the invariant regarding
// `alive`. However, due to this change, for a short time, the alive
// zone is not `data[alive]` anymore, but `data[alive.start..=idx]`.
self.alive.next_back().map(|idx| {
// Read the element from the array.
// SAFETY: `idx` is an index into the former "alive" region of the
// array. Reading this element means that `data[idx]` is regarded as
// dead now (i.e. do not touch). As `idx` was the end of the
// alive-zone, the alive zone is now `data[alive]` again, restoring
// all invariants.
unsafe { self.data.get_unchecked(idx).assume_init_read() }
})
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::rfold pub(super) fn rfold<B>(&mut self, init: B, f: impl FnMut(B, T) -> B) -> B {
self.try_rfold(init, NeverShortCircuit::wrap_mut_2(f)).0
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::size_hint pub(super) fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::try_fold pub(super) fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, T) -> R,
R: Try<Output = B>,
{
// `alive` is an `IndexRange`, not an arbitrary iterator, so we can
// trust that its `try_fold` isn't going to do something weird like
// call the fold-er multiple times for the same index.
let data = &mut self.data;
self.alive.try_fold(init, move |accum, idx| {
// SAFETY: `idx` has been removed from the alive range, so we're not
// going to drop it (even if `f` panics) and thus it's ok to give
// out ownership of that item to `f` to handle.
let elem = unsafe { data.get_unchecked(idx).assume_init_read() };
f(accum, elem)
})
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::try_rfold pub(super) fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, T) -> R,
R: Try<Output = B>,
{
// `alive` is an `IndexRange`, not an arbitrary iterator, so we can
// trust that its `try_rfold` isn't going to do something weird like
// call the fold-er multiple times for the same index.
let data = &mut self.data;
self.alive.try_rfold(init, move |accum, idx| {
// SAFETY: `idx` has been removed from the alive range, so we're not
// going to drop it (even if `f` panics) and thus it's ok to give
// out ownership of that item to `f` to handle.
let elem = unsafe { data.get_unchecked(idx).assume_init_read() };
f(accum, elem)
})
}
core::array::try_from_fn pub const fn try_from_fn<R, const N: usize, F>(cb: F) -> ChangeOutputType<R, [R::Output; N]>
where
R: [const] Try<Residual: [const] Residual<[R::Output; N]>, Output: [const] Destruct>,
F: [const] FnMut(usize) -> R + [const] Destruct,
{
let mut array = [const { MaybeUninit::uninit() }; N];
match try_from_fn_erased(&mut array, cb) {
ControlFlow::Break(r) => FromResidual::from_residual(r),
ControlFlow::Continue(()) => {
// SAFETY: All elements of the array were populated.
try { unsafe { MaybeUninit::array_assume_init(array) } }
}
}
}
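A usage sketch for `try_from_fn`; this API is assumed here to be nightly-only behind the `array_try_from_fn` feature, so treat the attribute and exact path as assumptions rather than a stability claim:

#![feature(array_try_from_fn)]

fn main() {
    // Every index fits in a u8, so the whole array is produced.
    let ok: Result<[u8; 4], _> = std::array::try_from_fn(|i| u8::try_from(i));
    assert_eq!(ok.unwrap(), [0, 1, 2, 3]);

    // The generator fails at i = 256; the guard drops the elements
    // initialized so far and the error is returned.
    let err: Result<[u8; 300], _> = std::array::try_from_fn(|i| u8::try_from(i));
    assert!(err.is_err());
}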
core::array::try_from_fn_erased const fn try_from_fn_erased<R: [const] Try<Output: [const] Destruct>>(
buffer: &mut [MaybeUninit<R::Output>],
mut generator: impl [const] FnMut(usize) -> R + [const] Destruct,
) -> ControlFlow<R::Residual> {
let mut guard = Guard { array_mut: buffer, initialized: 0 };
while guard.initialized < guard.array_mut.len() {
let item = generator(guard.initialized).branch()?;
// SAFETY: The loop condition ensures we have space to push the item
unsafe { guard.push_unchecked(item) };
}
mem::forget(guard);
ControlFlow::Continue(())
}
core::array::try_from_trusted_iterator fn try_from_trusted_iterator<T, R, const N: usize>(
iter: impl UncheckedIterator<Item = R>,
) -> ChangeOutputType<R, [T; N]>
where
R: Try<Output = T>,
R::Residual: Residual<[T; N]>,
{
assert!(iter.size_hint().0 >= N);
fn next<T>(mut iter: impl UncheckedIterator<Item = T>) -> impl FnMut(usize) -> T {
move |_| {
// SAFETY: We know that `from_fn` will call this at most N times,
// and we checked to ensure that we have at least that many items.
unsafe { iter.next_unchecked() }
}
}
try_from_fn(next(iter))
}
core::array::try_from_trusted_iterator::next fn next<T>(mut iter: impl UncheckedIterator<Item = T>) -> impl FnMut(usize) -> T {
move |_| {
// SAFETY: We know that `from_fn` will call this at most N times,
// and we checked to ensure that we have at least that many items.
unsafe { iter.next_unchecked() }
}
}
core::ascii::EscapeDefault::empty pub(crate) fn empty() -> Self {
Self(EscapeIterInner::empty())
}
core::ascii::EscapeDefault::new pub(crate) const fn new(c: u8) -> Self {
Self(EscapeIterInner::ascii(c))
}
core::ascii::ascii_char::AsciiChar::as_str pub const fn as_str(&self) -> &str {
crate::slice::from_ref(self).as_str()
}
core::ascii::ascii_char::AsciiChar::from_u8_unchecked pub const unsafe fn from_u8_unchecked(b: u8) -> Self {
// SAFETY: Our safety precondition is that `b` is in-range.
unsafe { transmute(b) }
}
core::ascii::ascii_char::AsciiChar::to_u8 pub const fn to_u8(self) -> u8 {
self as u8
}
core::ascii::escape_default pub fn escape_default(c: u8) -> EscapeDefault {
EscapeDefault::new(c)
}
core::bool::<impl bool>::ok_or pub const fn ok_or<E: [const] Destruct>(self, err: E) -> Result<(), E> {
if self { Ok(()) } else { Err(err) }
}
core::bool::<impl bool>::ok_or_else pub const fn ok_or_else<E, F: [const] FnOnce() -> E + [const] Destruct>(
self,
f: F,
) -> Result<(), E> {
if self { Ok(()) } else { Err(f()) }
}
core::bool::<impl bool>::then pub const fn then<T, F: [const] FnOnce() -> T + [const] Destruct>(self, f: F) -> Option<T> {
if self { Some(f()) } else { None }
}
core::bool::<impl bool>::then_some pub const fn then_some<T: [const] Destruct>(self, t: T) -> Option<T> {
if self { Some(t) } else { None }
}
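The four adapters above differ in eager versus lazy evaluation: `then_some` and `ok_or` take an already-built value, while `then` and `ok_or_else` take a closure that only runs when needed. `then` and `then_some` are stable; the `ok_or` pair on `bool` is assumed to still be unstable. A quick illustration with the stable pair:

fn expensive() -> String {
    String::from("computed")
}

fn main() {
    let x = 5;
    // Eager: the value exists either way and is kept or discarded.
    assert_eq!((x > 3).then_some("big"), Some("big"));
    // Lazy: `expensive` is never called because the condition is false.
    assert_eq!((x > 9).then(expensive), None);
}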
core::bstr::ByteStr::from_bytes pub const fn from_bytes(slice: &[u8]) -> &Self {
// SAFETY: `ByteStr` is a transparent wrapper around `[u8]`, so we can turn a reference to
// the wrapped type into a reference to the wrapper type.
unsafe { &*(slice as *const [u8] as *const Self) }
}
core::cell::BorrowRef::<'b>::new const fn new(borrow: &'b Cell<BorrowCounter>) -> Option<BorrowRef<'b>> {
let b = borrow.get().wrapping_add(1);
if !is_reading(b) {
// Incrementing borrow can result in a non-reading value (<= 0) in these cases:
// 1. It was < 0, i.e. there are writing borrows, so we can't allow a read borrow
// due to Rust's reference aliasing rules
// 2. It was isize::MAX (the max amount of reading borrows) and it overflowed
// into isize::MIN (the max amount of writing borrows) so we can't allow
// an additional read borrow because isize can't represent so many read borrows
// (this can only happen if you mem::forget more than a small constant amount of
// `Ref`s, which is not good practice)
None
} else {
// Incrementing borrow can result in a reading value (> 0) in these cases:
// 1. It was = 0, i.e. it wasn't borrowed, and we are taking the first read borrow
// 2. It was > 0 and < isize::MAX, i.e. there were read borrows, and isize
// is large enough to represent having one more read borrow
borrow.replace(b);
Some(BorrowRef { borrow })
}
}
core::cell::BorrowRefMut::<'b>::new const fn new(borrow: &'b Cell<BorrowCounter>) -> Option<BorrowRefMut<'b>> {
// NOTE: Unlike BorrowRefMut::clone, new is called to create the initial
// mutable reference, and so there must currently be no existing
// references. Thus, while clone increments the mutable refcount, here
// we explicitly only allow going from UNUSED to UNUSED - 1.
match borrow.get() {
UNUSED => {
borrow.replace(UNUSED - 1);
Some(BorrowRefMut { borrow })
}
_ => None,
}
}
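The counter protocol implemented by these two constructors (positive counts are readers, negative counts are writers, UNUSED is zero) is visible through `RefCell`'s fallible borrow methods. A minimal sketch:

use std::cell::RefCell;

fn main() {
    let cell = RefCell::new(0);

    // Two shared borrows coexist: the counter goes 0 -> 1 -> 2.
    let a = cell.borrow();
    let b = cell.borrow();
    // A writing borrow is refused while readers are live.
    assert!(cell.try_borrow_mut().is_err());
    drop((a, b));

    // With the counter back at UNUSED, a unique borrow succeeds...
    let m = cell.borrow_mut();
    // ...and now even a shared borrow is refused.
    assert!(cell.try_borrow().is_err());
    drop(m);
}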
core::cell::Cell::<T>::get pub const fn get(&self) -> T {
// SAFETY: This can cause data races if called from a separate thread,
// but `Cell` is `!Sync` so this won't happen.
unsafe { *self.value.get() }
}
core::cell::Cell::<T>::new pub const fn new(value: T) -> Cell<T> {
Cell { value: UnsafeCell::new(value) }
}
core::cell::Cell::<T>::replace pub const fn replace(&self, val: T) -> T {
// SAFETY: This can cause data races if called from a separate thread,
// but `Cell` is `!Sync` so this won't happen.
mem::replace(unsafe { &mut *self.value.get() }, val)
}
core::cell::Cell::<T>::set pub const fn set(&self, val: T)
where
T: [const] Destruct,
{
self.replace(val);
}
core::cell::RefCell::<T>::borrow pub const fn borrow(&self) -> Ref<'_, T> {
match self.try_borrow() {
Ok(b) => b,
Err(err) => panic_already_mutably_borrowed(err),
}
}
core::cell::RefCell::<T>::borrow_mut pub const fn borrow_mut(&self) -> RefMut<'_, T> {
match self.try_borrow_mut() {
Ok(b) => b,
Err(err) => panic_already_borrowed(err),
}
}
core::cell::RefCell::<T>::new pub const fn new(value: T) -> RefCell<T> {
RefCell {
value: UnsafeCell::new(value),
borrow: Cell::new(UNUSED),
#[cfg(feature = "debug_refcell")]
borrowed_at: Cell::new(None),
}
}
core::cell::RefCell::<T>::replace pub const fn replace(&self, t: T) -> T {
mem::replace(&mut self.borrow_mut(), t)
}
core::cell::RefCell::<T>::replace_with pub fn replace_with<F: FnOnce(&mut T) -> T>(&self, f: F) -> T {
let mut_borrow = &mut *self.borrow_mut();
let replacement = f(mut_borrow);
mem::replace(mut_borrow, replacement)
}
core::cell::RefCell::<T>::take pub fn take(&self) -> T {
self.replace(Default::default())
}
core::cell::RefCell::<T>::try_borrow pub const fn try_borrow(&self) -> Result<Ref<'_, T>, BorrowError> {
match BorrowRef::new(&self.borrow) {
Some(b) => {
#[cfg(feature = "debug_refcell")]
{
// `borrowed_at` is always the *first* active borrow
if b.borrow.get() == 1 {
self.borrowed_at.replace(Some(crate::panic::Location::caller()));
}
}
// SAFETY: `BorrowRef` ensures that there is only immutable access
// to the value while borrowed.
let value = unsafe { NonNull::new_unchecked(self.value.get()) };
Ok(Ref { value, borrow: b })
}
None => Err(BorrowError {
// If a borrow occurred, then we must already have an outstanding borrow,
// so `borrowed_at` will be `Some`
#[cfg(feature = "debug_refcell")]
location: self.borrowed_at.get().unwrap(),
}),
}
}
core::cell::RefCell::<T>::try_borrow_mut pub const fn try_borrow_mut(&self) -> Result<RefMut<'_, T>, BorrowMutError> {
match BorrowRefMut::new(&self.borrow) {
Some(b) => {
#[cfg(feature = "debug_refcell")]
{
self.borrowed_at.replace(Some(crate::panic::Location::caller()));
}
// SAFETY: `BorrowRefMut` guarantees unique access.
let value = unsafe { NonNull::new_unchecked(self.value.get()) };
Ok(RefMut { value, borrow: b, marker: PhantomData })
}
None => Err(BorrowMutError {
// If a borrow occurred, then we must already have an outstanding borrow,
// so `borrowed_at` will be `Some`
#[cfg(feature = "debug_refcell")]
location: self.borrowed_at.get().unwrap(),
}),
}
}
core::cell::UnsafeCell::<T>::get pub const fn get(&self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
// #[repr(transparent)]. This exploits std's special status; there is
// no guarantee for user code that this will work in future versions of the compiler!
self as *const UnsafeCell<T> as *const T as *mut T
}
core::cell::UnsafeCell::<T>::get_mut pub const fn get_mut(&mut self) -> &mut T {
&mut self.value
}
core::cell::UnsafeCell::<T>::into_inner pub const fn into_inner(self) -> T {
self.value
}
core::cell::UnsafeCell::<T>::new pub const fn new(value: T) -> UnsafeCell<T> {
UnsafeCell { value }
}
core::cell::UnsafeCell::<T>::raw_get pub const fn raw_get(this: *const Self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
// #[repr(transparent)]. This exploits std's special status; there is
// no guarantee for user code that this will work in future versions of the compiler!
this as *const T as *mut T
}
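Both `get` and `raw_get` hand out a raw pointer without asserting uniqueness; soundness is entirely on the caller. A minimal sketch of correct use (the write is fine because no other reference to the contents exists at that point):

use std::cell::UnsafeCell;

fn main() {
    let cell = UnsafeCell::new(41);
    // SAFETY: we hold the only handle to `cell`, so nothing else can
    // observe or alias the contents during this write.
    unsafe { *cell.get() += 1 };
    assert_eq!(cell.into_inner(), 42);
}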
core::cell::is_reading const fn is_reading(x: BorrowCounter) -> bool {
x > UNUSED
}
core::cell::is_writing const fn is_writing(x: BorrowCounter) -> bool {
x < UNUSED
}
core::cell::panic_already_borrowed const fn panic_already_borrowed(err: BorrowMutError) -> ! {
const_panic!(
"RefCell already borrowed",
"{err}",
err: BorrowMutError = err,
)
}
core::cell::panic_already_borrowed::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::cell::panic_already_borrowed::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::cell::panic_already_mutably_borrowed const fn panic_already_mutably_borrowed(err: BorrowError) -> ! {
const_panic!(
"RefCell already mutably borrowed",
"{err}",
err: BorrowError = err,
)
}
core::cell::panic_already_mutably_borrowed::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::cell::panic_already_mutably_borrowed::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::char::EscapeDebug::backslash const fn backslash(c: ascii::Char) -> Self {
Self(EscapeIterInner::backslash(c))
}
core::char::EscapeDebug::printable const fn printable(chr: char) -> Self {
Self(EscapeIterInner::printable(chr))
}
core::char::EscapeDebug::unicode const fn unicode(c: char) -> Self {
Self(EscapeIterInner::unicode(c))
}
core::char::convert::<impl core::convert::From<u8> for char>::from fn from(i: u8) -> Self {
i as char
}
core::char::convert::char_try_from_u32 const fn char_try_from_u32(i: u32) -> Result<char, CharTryFromError> {
// This is an optimized version of the check
// (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF),
// which can also be written as
// i >= 0x110000 || (i >= 0xD800 && i < 0xE000).
//
// The XOR with 0xD800 permutes the ranges such that 0xD800..0xE000 is
// mapped to 0x0000..0x0800, while keeping all the high bits outside 0xFFFF the same.
// In particular, numbers >= 0x110000 stay in this range.
//
// Subtracting 0x800 causes 0x0000..0x0800 to wrap, meaning that a single
// unsigned comparison against 0x110000 - 0x800 will detect both the wrapped
// surrogate range and the values originally at or above 0x110000.
if (i ^ 0xD800).wrapping_sub(0x800) >= 0x110000 - 0x800 {
Err(CharTryFromError(()))
} else {
// SAFETY: checked that it's a legal unicode value
Ok(unsafe { transmute(i) })
}
}
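Working the XOR-and-subtract trick through concrete values: for i = 0xD800 the XOR yields 0, the wrapping subtraction produces a value far above 0x10F800, and the input is rejected; for i = 0x41 the intermediate 0xD041 stays below the threshold and the input is accepted. The same check is reachable on stable through `char::from_u32`:

fn main() {
    assert_eq!(char::from_u32(0x41), Some('A'));          // ordinary scalar value
    assert_eq!(char::from_u32(0xD800), None);             // surrogate: rejected
    assert_eq!(char::from_u32(0xE000), Some('\u{E000}')); // just past the surrogates
    assert_eq!(char::from_u32(0x11_0000), None);          // beyond char::MAX
}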
core::char::convert::from_u32_unchecked pub(super) const unsafe fn from_u32_unchecked(i: u32) -> char {
// SAFETY: the caller must guarantee that `i` is a valid char value.
unsafe {
assert_unsafe_precondition!(
check_language_ub,
"invalid value for `char`",
(i: u32 = i) => char_try_from_u32(i).is_ok()
);
transmute(i)
}
}
core::char::decode::DecodeUtf16Error::unpaired_surrogate pub fn unpaired_surrogate(&self) -> u16 {
self.code
}
core::char::decode::decode_utf16pub(super) fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::IntoIter> {
DecodeUtf16 { iter: iter.into_iter(), buf: None }
}
core::char::methods::<impl char>::decode_utf16 pub fn decode_utf16<I: IntoIterator<Item = u16>>(iter: I) -> DecodeUtf16<I::IntoIter> {
super::decode::decode_utf16(iter)
}
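A usage sketch of the stable front end: a surrogate pair decodes to a single scalar value, while an unpaired surrogate surfaces as a `DecodeUtf16Error` carrying the offending code unit:

fn main() {
    // U+1D11E MUSICAL SYMBOL G CLEF, encoded as the surrogate pair D834 DD1E.
    let s: String = char::decode_utf16([0xD834, 0xDD1E])
        .map(|r| r.unwrap())
        .collect();
    assert_eq!(s, "\u{1D11E}");

    // An unpaired surrogate is reported, not silently dropped.
    let err = char::decode_utf16([0xD834]).next().unwrap().unwrap_err();
    assert_eq!(err.unpaired_surrogate(), 0xD834);
}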
core::char::methods::<impl char>::encode_utf8 pub const fn encode_utf8(self, dst: &mut [u8]) -> &mut str {
// SAFETY: `char` is not a surrogate, so this is valid UTF-8.
unsafe { from_utf8_unchecked_mut(encode_utf8_raw(self as u32, dst)) }
}
core::char::methods::<impl char>::escape_debug pub fn escape_debug(self) -> EscapeDebug {
self.escape_debug_ext(EscapeDebugExtArgs::ESCAPE_ALL)
}
core::char::methods::<impl char>::escape_debug_ext pub(crate) fn escape_debug_ext(self, args: EscapeDebugExtArgs) -> EscapeDebug {
match self {
'\0' => EscapeDebug::backslash(ascii::Char::Digit0),
'\t' => EscapeDebug::backslash(ascii::Char::SmallT),
'\r' => EscapeDebug::backslash(ascii::Char::SmallR),
'\n' => EscapeDebug::backslash(ascii::Char::SmallN),
'\\' => EscapeDebug::backslash(ascii::Char::ReverseSolidus),
'\"' if args.escape_double_quote => EscapeDebug::backslash(ascii::Char::QuotationMark),
'\'' if args.escape_single_quote => EscapeDebug::backslash(ascii::Char::Apostrophe),
_ if args.escape_grapheme_extended && self.is_grapheme_extended() => {
EscapeDebug::unicode(self)
}
_ if is_printable(self) => EscapeDebug::printable(self),
_ => EscapeDebug::unicode(self),
}
}
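The dispatch above is easiest to see through the stable `char::escape_debug` front end: control characters get single-letter escapes, grapheme-extended characters (such as a combining accent) get a `\u{...}` escape, and ordinary printable characters pass through unchanged:

fn main() {
    assert_eq!('\n'.escape_debug().to_string(), "\\n");
    // U+0301 COMBINING ACUTE ACCENT is grapheme-extended.
    assert_eq!('\u{301}'.escape_debug().to_string(), "\\u{301}");
    assert_eq!('a'.escape_debug().to_string(), "a");
}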
core::char::methods::<impl char>::from_u32_unchecked pub const unsafe fn from_u32_unchecked(i: u32) -> char {
// SAFETY: the safety contract must be upheld by the caller.
unsafe { super::convert::from_u32_unchecked(i) }
}
core::char::methods::<impl char>::is_ascii pub const fn is_ascii(&self) -> bool {
*self as u32 <= 0x7F
}
core::char::methods::<impl char>::is_grapheme_extended pub(crate) fn is_grapheme_extended(self) -> bool {
!self.is_ascii() && unicode::Grapheme_Extend(self)
}
core::char::methods::<impl char>::len_utf8 pub const fn len_utf8(self) -> usize {
len_utf8(self as u32)
}
core::char::methods::<impl char>::to_digit pub const fn to_digit(self, radix: u32) -> Option<u32> {
assert!(
radix >= 2 && radix <= 36,
"to_digit: invalid radix -- radix must be in the range 2 to 36 inclusive"
);
// check radix to remove letter handling code when radix is a known constant
let value = if self > '9' && radix > 10 {
// mask to convert ASCII letters to uppercase
const TO_UPPERCASE_MASK: u32 = !0b0010_0000;
// Converts an ASCII letter to its corresponding integer value:
// A-Z => 10-35, a-z => 10-35. Other characters produce values >= 36.
//
// Addition overflow safety:
// By applying the mask after the subtraction, the first addend is
// constrained such that it never exceeds u32::MAX - 0x20.
((self as u32).wrapping_sub('A' as u32) & TO_UPPERCASE_MASK) + 10
} else {
// convert digit to value, non-digits wrap to values > 36
(self as u32).wrapping_sub('0' as u32)
};
// FIXME(const-hack): once then_some is const fn, use it here
if value < radix { Some(value) } else { None }
}
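The masked subtraction maps both letter cases into 10..=35 in one step, and anything else lands at 36 or above, so the final `value < radix` test covers every case. Quick checks on stable:

fn main() {
    assert_eq!('A'.to_digit(16), Some(10)); // letter path: masked to uppercase
    assert_eq!('z'.to_digit(36), Some(35)); // lowercase hits the same range
    assert_eq!('7'.to_digit(10), Some(7));  // digit path: plain subtraction
    assert_eq!('9'.to_digit(8), None);      // value 9 is not < radix 8
}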
core::char::methods::encode_utf8_raw pub const fn encode_utf8_raw(code: u32, dst: &mut [u8]) -> &mut [u8] {
let len = len_utf8(code);
if dst.len() < len {
const_panic!(
"encode_utf8: buffer does not have enough bytes to encode code point",
"encode_utf8: need {len} bytes to encode U+{code:04X} but buffer has just {dst_len}",
code: u32 = code,
len: usize = len,
dst_len: usize = dst.len(),
);
}
// SAFETY: `dst` is checked to be at least the length needed to encode the codepoint.
unsafe { encode_utf8_raw_unchecked(code, dst.as_mut_ptr()) };
// SAFETY: `<&mut [u8]>::as_mut_ptr` is guaranteed to return a valid pointer and `len` has been tested to be within bounds.
unsafe { slice::from_raw_parts_mut(dst.as_mut_ptr(), len) }
}
core::char::methods::encode_utf8_raw::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::char::methods::encode_utf8_raw::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::char::methods::encode_utf8_raw_unchecked pub const unsafe fn encode_utf8_raw_unchecked(code: u32, dst: *mut u8) {
let len = len_utf8(code);
// SAFETY: The caller must guarantee that the buffer pointed to by `dst`
// is at least `len` bytes long.
unsafe {
if len == 1 {
*dst = code as u8;
return;
}
let last1 = (code >> 0 & 0x3F) as u8 | TAG_CONT;
let last2 = (code >> 6 & 0x3F) as u8 | TAG_CONT;
let last3 = (code >> 12 & 0x3F) as u8 | TAG_CONT;
let last4 = (code >> 18 & 0x3F) as u8 | TAG_FOUR_B;
if len == 2 {
*dst = last2 | TAG_TWO_B;
*dst.add(1) = last1;
return;
}
if len == 3 {
*dst = last3 | TAG_THREE_B;
*dst.add(1) = last2;
*dst.add(2) = last1;
return;
}
*dst = last4;
*dst.add(1) = last3;
*dst.add(2) = last2;
*dst.add(3) = last1;
}
}
core::char::methods::len_utf8 const fn len_utf8(code: u32) -> usize {
match code {
..MAX_ONE_B => 1,
..MAX_TWO_B => 2,
..MAX_THREE_B => 3,
_ => 4,
}
}
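These length buckets line up with the tag bytes written by `encode_utf8_raw_unchecked` (a `TAG_TWO_B`/`TAG_THREE_B`/`TAG_FOUR_B` lead byte plus `TAG_CONT` continuations). A sketch against the stable surface:

fn main() {
    assert_eq!('A'.len_utf8(), 1);
    assert_eq!('ß'.len_utf8(), 2);
    assert_eq!('€'.len_utf8(), 3);
    assert_eq!('\u{1D11E}'.len_utf8(), 4);

    // encode_utf8 panics if the buffer is too small, per the check above.
    let mut buf = [0u8; 4];
    let s = '€'.encode_utf8(&mut buf);
    assert_eq!(s, "€");
    assert_eq!(&buf[..3], [0xE2, 0x82, 0xAC]);
}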
core::clone::Clone::clone_from fn clone_from(&mut self, source: &Self)
where
Self: [const] Destruct,
{
*self = source.clone()
}
core::clone::impls::<impl core::clone::Clone for &T>::clone fn clone(&self) -> Self {
self
}
core::clone::impls::<impl core::clone::Clone for bool>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for char>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f128>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f16>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f32>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f64>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i128>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i16>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i32>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i64>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i8>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for isize>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u128>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u16>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u32>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u64>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u8>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for usize>::clone fn clone(&self) -> Self {
*self
}
core::cmp::Ord::clamp fn clamp(self, min: Self, max: Self) -> Self
where
Self: Sized + [const] Destruct,
{
assert!(min <= max);
if self < min {
min
} else if self > max {
max
} else {
self
}
}
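A quick usage note: because of the `assert!(min <= max)` above, a reversed range panics rather than producing an arbitrary answer:

fn main() {
    assert_eq!(15_i32.clamp(0, 10), 10);
    assert_eq!((-3_i32).clamp(0, 10), 0);
    assert_eq!(5_i32.clamp(0, 10), 5);
    // 10_i32.clamp(10, 0) would panic: min must not exceed max.
}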
core::cmp::Ord::max fn max(self, other: Self) -> Self
where
Self: Sized + [const] Destruct,
{
if other < self { self } else { other }
}
core::cmp::Ord::min fn min(self, other: Self) -> Self
where
Self: Sized + [const] Destruct,
{
if other < self { other } else { self }
}
core::cmp::Ordering::as_raw const fn as_raw(self) -> i8 {
// FIXME(const-hack): just use `PartialOrd` against `Equal` once that's const
crate::intrinsics::discriminant_value(&self)
}
core::cmp::Ordering::is_eq pub const fn is_eq(self) -> bool {
// All the `is_*` methods are implemented as comparisons against zero
// to follow how clang's libcxx implements their equivalents in
// <https://github.com/llvm/llvm-project/blob/60486292b79885b7800b082754153202bef5b1f0/libcxx/include/__compare/is_eq.h#L23-L28>
self.as_raw() == 0
}
core::cmp::Ordering::is_ge pub const fn is_ge(self) -> bool {
self.as_raw() >= 0
}
core::cmp::Ordering::is_gt pub const fn is_gt(self) -> bool {
self.as_raw() > 0
}
core::cmp::Ordering::is_le pub const fn is_le(self) -> bool {
self.as_raw() <= 0
}
core::cmp::Ordering::is_lt pub const fn is_lt(self) -> bool {
self.as_raw() < 0
}
core::cmp::Ordering::is_ne pub const fn is_ne(self) -> bool {
self.as_raw() != 0
}
core::cmp::Ordering::reverse pub const fn reverse(self) -> Ordering {
match self {
Less => Greater,
Equal => Equal,
Greater => Less,
}
}
core::cmp::Ordering::then pub const fn then(self, other: Ordering) -> Ordering {
match self {
Equal => other,
_ => self,
}
}
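`then` makes lexicographic comparisons compose: the first non-`Equal` operand wins. Note the argument is evaluated eagerly; `then_with` is the lazy counterpart. A small sketch:

use std::cmp::Ordering;

fn main() {
    let a = ("Smith", 30);
    let b = ("Smith", 25);
    // Equal on the first key, so the second key decides.
    let ord = a.0.cmp(&b.0).then(a.1.cmp(&b.1));
    assert_eq!(ord, Ordering::Greater);
}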
core::cmp::PartialEq::ne fn ne(&self, other: &Rhs) -> bool {
!self.eq(other)
}
core::cmp::PartialOrd::__chaining_ge fn __chaining_ge(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_ge)
}
core::cmp::PartialOrd::__chaining_gt fn __chaining_gt(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_gt)
}
core::cmp::PartialOrd::__chaining_le fn __chaining_le(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_le)
}
core::cmp::PartialOrd::__chaining_lt fn __chaining_lt(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_lt)
}
core::cmp::PartialOrd::ge fn ge(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_ge)
}
core::cmp::PartialOrd::gt fn gt(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_gt)
}
core::cmp::PartialOrd::le fn le(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_le)
}
core::cmp::PartialOrd::lt fn lt(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_lt)
}
core::cmp::default_chaining_impl const fn default_chaining_impl<T, U>(
lhs: &T,
rhs: &U,
p: impl [const] FnOnce(Ordering) -> bool + [const] Destruct,
) -> ControlFlow<bool>
where
T: [const] PartialOrd<U> + PointeeSized,
U: PointeeSized,
{
// It's important that this only call `partial_cmp` once, not call `eq` then
// one of the relational operators. We don't want to `bcmp`-then-`memcmp` a
// `String`, for example, or similarly for other data structures (#108157).
match <T as PartialOrd<U>>::partial_cmp(lhs, rhs) {
Some(Equal) => ControlFlow::Continue(()),
Some(c) => ControlFlow::Break(p(c)),
None => ControlFlow::Break(false),
}
}
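The `None => Break(false)` arm means that once a chained comparison hits an incomparable pair, every relational operator answers false. Assuming tuple comparisons route through this chaining scheme (the asserts below hold on stable either way):

fn main() {
    let a = (1.0_f64, f64::NAN);
    let b = (1.0_f64, 2.0_f64);
    // First fields compare Equal; the second pair is incomparable,
    // so the chain breaks with `false` for every operator.
    assert!(!(a < b));
    assert!(!(a > b));
    assert!(!(a <= b));
    assert!(!(a >= b));
    assert_eq!(a.partial_cmp(&b), None);
}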
core::cmp::impls::<impl core::cmp::Ord for ()>::cmp fn cmp(&self, _other: &()) -> Ordering {
Equal
}
core::cmp::impls::<impl core::cmp::Ord for bool>::clamp fn clamp(self, min: bool, max: bool) -> bool {
assert!(min <= max);
self.max(min).min(max)
}
core::cmp::impls::<impl core::cmp::Ord for bool>::max fn max(self, other: bool) -> bool {
self | other
}
core::cmp::impls::<impl core::cmp::Ord for bool>::min fn min(self, other: bool) -> bool {
self & other
}
core::cmp::impls::<impl core::cmp::Ord for char>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i128>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i16>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i32>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i64>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i8>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for isize>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u128>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u16>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u32>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u64>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u8>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for usize>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq for ()>::eq fn eq(&self, _other: &()) -> bool {
true
}
core::cmp::impls::<impl core::cmp::PartialEq for ()>::ne fn ne(&self, _other: &()) -> bool {
false
}
core::cmp::impls::<impl core::cmp::PartialEq for bool>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for bool>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for char>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for char>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f128>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f128>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f16>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f16>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f32>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f32>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f64>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f64>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i128>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i128>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i16>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i16>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i32>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i32>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i64>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i64>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i8>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i8>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for isize>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for isize>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u128>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u128>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u16>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u16>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u32>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u32>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u64>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u64>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u8>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u8>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for usize>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &A>::eq fn eq(&self, other: &&B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &A>::ne fn ne(&self, other: &&B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &mut A>::eq fn eq(&self, other: &&B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &mut A>::ne fn ne(&self, other: &&B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &A>::eq fn eq(&self, other: &&mut B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &A>::ne fn ne(&self, other: &&mut B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &mut A>::eq fn eq(&self, other: &&mut B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &mut A>::ne fn ne(&self, other: &&mut B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd for ()>::partial_cmp fn partial_cmp(&self, _: &()) -> Option<Ordering> {
Some(Equal)
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::partial_cmp fn partial_cmp(&self, other: &bool) -> Option<Ordering> {
Some(self.cmp(other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
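The `(le, ge)` two-bool encoding yields exactly one of the four IEEE outcomes; a NaN operand makes both comparisons false, hence the `None` arm:

use std::cmp::Ordering;

fn main() {
    assert_eq!(1.0_f64.partial_cmp(&2.0), Some(Ordering::Less));
    assert_eq!(2.0_f64.partial_cmp(&2.0), Some(Ordering::Equal));
    assert_eq!(f64::NAN.partial_cmp(&2.0), None); // the (false, false) arm
}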
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
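These integer `PartialOrd` impls delegate to the `three_way_compare` intrinsic, so `partial_cmp` never returns `None`. A minimal usage sketch (editorial addition, not part of the generated listing):

use std::cmp::Ordering;

fn main() {
    // Integer orderings are total, so `partial_cmp` always yields `Some`.
    assert_eq!(3u32.partial_cmp(&5), Some(Ordering::Less));
    assert_eq!(5usize.partial_cmp(&5), Some(Ordering::Equal));
    assert_eq!(7u8.partial_cmp(&5), Some(Ordering::Greater));
}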
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::__chaining_ge fn __chaining_ge(&self, other: &&B) -> ControlFlow<bool> {
PartialOrd::__chaining_ge(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::__chaining_gt fn __chaining_gt(&self, other: &&B) -> ControlFlow<bool> {
PartialOrd::__chaining_gt(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::__chaining_le fn __chaining_le(&self, other: &&B) -> ControlFlow<bool> {
PartialOrd::__chaining_le(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::__chaining_lt fn __chaining_lt(&self, other: &&B) -> ControlFlow<bool> {
PartialOrd::__chaining_lt(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge fn ge(&self, other: &&B) -> bool {
PartialOrd::ge(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::gt fn gt(&self, other: &&B) -> bool {
PartialOrd::gt(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::le fn le(&self, other: &&B) -> bool {
PartialOrd::le(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::lt fn lt(&self, other: &&B) -> bool {
PartialOrd::lt(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::partial_cmp fn partial_cmp(&self, other: &&B) -> Option<Ordering> {
PartialOrd::partial_cmp(*self, *other)
}
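The hidden `__chaining_*` helpers let a lexicographic comparison stop at the first deciding field: `Continue(())` means "equal so far", `Break(b)` means the answer is already known. A sketch of the same pattern with stand-alone helpers (`chaining_lt` and `pair_lt` are illustrative names, not the real hidden API):

use std::ops::ControlFlow::{self, Break, Continue};

// Illustrative stand-in for the hidden `__chaining_lt` helper above.
fn chaining_lt<T: PartialOrd>(lhs: &T, rhs: &T) -> ControlFlow<bool> {
    if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}

// Lexicographic `<` over a pair, stopping at the first deciding field.
fn pair_lt(a: &(u32, u32), b: &(u32, u32)) -> bool {
    match chaining_lt(&a.0, &b.0) {
        Break(decided) => decided,
        Continue(()) => a.1 < b.1, // the last field decides directly
    }
}

fn main() {
    assert!(pair_lt(&(1, 9), &(2, 0)));
    assert!(!pair_lt(&(2, 0), &(1, 9)));
    assert!(pair_lt(&(1, 1), &(1, 2)));
}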
core::cmp::max pub const fn max<T: [const] Ord + [const] Destruct>(v1: T, v2: T) -> T {
v1.max(v2)
}
core::cmp::max_by pub const fn max_by<T: [const] Destruct, F: [const] FnOnce(&T, &T) -> Ordering>(
v1: T,
v2: T,
compare: F,
) -> T {
if compare(&v1, &v2).is_gt() { v1 } else { v2 }
}
core::cmp::min pub const fn min<T: [const] Ord + [const] Destruct>(v1: T, v2: T) -> T {
v1.min(v2)
}
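As the bodies above show, `max_by` keeps the second argument on ties (the `is_gt()` test), while `min`/`max` defer to `Ord`. A minimal usage sketch (editorial addition):

fn main() {
    // Compare by absolute value. `max_by` returns `v1` only when the
    // comparison is `Greater`, so ties pick `v2`.
    let by_abs = |a: &i32, b: &i32| a.abs().cmp(&b.abs());
    assert_eq!(std::cmp::max_by(-7, 5, by_abs), -7);
    assert_eq!(std::cmp::max(2, 2), 2);
    assert_eq!(std::cmp::min(1, 4), 1);
}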
core::convert::identity pub const fn identity<T>(x: T) -> T {
x
}
core::convert::num::<impl core::convert::From<bool> for i128>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for i16>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for i32>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for i64>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for i8>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for isize>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for u128>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for u16>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for u32>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for u64>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for u8>::from fn from(b: bool) -> Self {
b as Self
}
core::convert::num::<impl core::convert::From<bool> for usize>::from fn from(b: bool) -> Self {
b as Self
}
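`From<bool>` is defined for every integer type and always maps `false` to `0` and `true` to `1`. A minimal usage sketch (editorial addition):

fn main() {
    assert_eq!(u8::from(true), 1);
    assert_eq!(i128::from(false), 0);
    // Handy for branchless counting:
    let hits = [true, false, true].iter().map(|&b| u32::from(b)).sum::<u32>();
    assert_eq!(hits, 2);
}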
core::convert::num::<impl core::convert::From<i16> for i128>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i16> for i32>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i16> for i64>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i16> for isize>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i32> for i128>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i32> for i64>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i64> for i128>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i8> for i128>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i8> for i16>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i8> for i32>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i8> for i64>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<i8> for isize>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u16> for u128>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u16> for u32>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u16> for u64>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u16> for usize>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u32> for u128>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u32> for u64>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u64> for u128>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u8> for isize>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u8> for u128>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u8> for u16>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u8> for u32>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u8> for u64>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
core::convert::num::<impl core::convert::From<u8> for usize>::from fn from(small: $small) -> Self {
debug_assert!(<$large>::MIN as i128 <= <$small>::MIN as i128);
debug_assert!(<$small>::MAX as u128 <= <$large>::MAX as u128);
small as Self
}
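The widening `From` impls above come from one macro; the `debug_assert!`s are macro-generated sanity checks that `$large` really covers the range of `$small`, and the conversion itself is a plain lossless `as` cast. A minimal usage sketch (editorial addition):

fn main() {
    // Lossless widening conversions never fail:
    let x: i64 = i64::from(-300i16);
    assert_eq!(x, -300);
    let y: u128 = u128::from(u64::MAX);
    assert_eq!(y, 18_446_744_073_709_551_615);
}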
core::convert::num::<impl core::convert::TryFrom<i128> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
core::convert::num::<impl core::convert::TryFrom<i16> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
core::convert::num::<impl core::convert::TryFrom<i32> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
core::convert::num::<impl core::convert::TryFrom<i32> for u128>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u >= 0 {
Ok(u as Self)
} else {
Err(TryFromIntError(()))
}
}
core::convert::num::<impl core::convert::TryFrom<i32> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
let min = Self::MIN as $source;
let max = Self::MAX as $source;
if u < min || u > max {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<i32> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u >= 0 {
Ok(u as Self)
} else {
Err(TryFromIntError(()))
}
}
core::convert::num::<impl core::convert::TryFrom<i32> for u64>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u >= 0 {
Ok(u as Self)
} else {
Err(TryFromIntError(()))
}
}
core::convert::num::<impl core::convert::TryFrom<i32> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
let min = Self::MIN as $source;
let max = Self::MAX as $source;
if u < min || u > max {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<i64> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
core::convert::num::<impl core::convert::TryFrom<i8> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u64>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u16> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
core::convert::num::<impl core::convert::TryFrom<u16> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u32> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
core::convert::num::<impl core::convert::TryFrom<u32> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u32> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u8> for bool>::try_from fn try_from(i: $int) -> Result<Self, Self::Error> {
match i {
0 => Ok(false),
1 => Ok(true),
_ => Err(TryFromIntError(())),
}
}
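The `TryFrom<{integer}> for bool` impls shown in this listing accept only `0` and `1`; the signed-to-unsigned impls reject negative values, and the narrowing impls reject out-of-range values. A sketch of the observable behavior (editorial addition, assuming the `bool` impls above are available in the toolchain in use):

fn main() {
    assert_eq!(bool::try_from(1u32), Ok(true));
    assert!(bool::try_from(2u32).is_err());
    assert!(u32::try_from(-1i32).is_err()); // negative source rejected
    assert!(u8::try_from(300u16).is_err()); // exceeds u8::MAX
    assert_eq!(u8::try_from(255u16), Ok(255));
}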
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<i128> for usize>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
let min = Self::MIN as $source;
let max = Self::MAX as $source;
if u < min || u > max {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<i16> for usize>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u >= 0 {
Ok(u as Self)
} else {
Err(TryFromIntError(()))
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<i32> for usize>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u >= 0 {
Ok(u as Self)
} else {
Err(TryFromIntError(()))
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<i64> for usize>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u >= 0 {
Ok(u as Self)
} else {
Err(TryFromIntError(()))
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<i8> for usize>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u >= 0 {
Ok(u as Self)
} else {
Err(TryFromIntError(()))
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<u128> for usize>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<u32> for usize>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<u64> for usize>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u128>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u64>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
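The `ptr_try_from_impls` variants depend on the target pointer width: some conversions become unconditional `Ok(value as Self)` while others stay range-checked. A sketch assuming a 64-bit target (editorial addition):

fn main() {
    // On a 64-bit target these conversions cannot fail:
    assert_eq!(usize::try_from(5u32), Ok(5));
    assert_eq!(u64::try_from(5usize), Ok(5));
    // ...while narrowing to u16 is still range-checked:
    assert!(u16::try_from(70_000usize).is_err());
}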
core::escape::EscapeIterInner::<N, ESCAPING>::advance_by pub(crate) fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.alive.advance_by(n)
}
core::escape::EscapeIterInner::<N, ESCAPING>::ascii pub(crate) const fn ascii(c: u8) -> Self {
let (escape_seq, alive) = escape_ascii(c);
// SAFETY: `escape_seq` contains an escape sequence in the range given by `alive`.
unsafe { Self::new(MaybeEscapedCharacter { escape_seq }, alive) }
}
core::escape::EscapeIterInner::<N, ESCAPING>::backslash pub(crate) const fn backslash(c: ascii::Char) -> Self {
let (escape_seq, alive) = backslash(c);
// SAFETY: `escape_seq` contains an escape sequence in the range given by `alive`.
unsafe { Self::new(MaybeEscapedCharacter { escape_seq }, alive) }
}
core::escape::EscapeIterInner::<N, ESCAPING>::empty pub(crate) const fn empty() -> Self {
// SAFETY: `0..0` ensures an empty escape sequence.
unsafe { Self::new(MaybeEscapedCharacter { escape_seq: [ascii::Char::Null; N] }, 0..0) }
}
core::escape::EscapeIterInner::<N, ESCAPING>::len pub(crate) fn len(&self) -> usize {
usize::from(self.alive.end - self.alive.start)
}
core::escape::EscapeIterInner::<N, ESCAPING>::new const unsafe fn new(data: MaybeEscapedCharacter<N>, alive: Range<u8>) -> Self {
// Longer escape sequences are not useful given `alive.end` is at most
// `Self::LITERAL_ESCAPE_START`.
const { assert!(N < Self::LITERAL_ESCAPE_START as usize) };
// Check bounds, which implicitly also checks the invariant
// `alive.end <= Self::LITERAL_ESCAPE_START`.
debug_assert!(alive.end <= (N + 1) as u8);
Self { data, alive, escaping: PhantomData }
}
core::escape::EscapeIterInner::<N, ESCAPING>::to_char const fn to_char(&self) -> Option<char> {
if self.alive.end > Self::LITERAL_ESCAPE_START {
// SAFETY: We just checked that `self.data` contains a `char` in
// its `literal` variant.
return Some(unsafe { self.data.literal });
}
None
}
core::escape::EscapeIterInner::<N, ESCAPING>::to_str_unchecked unsafe fn to_str_unchecked(&self) -> &str {
debug_assert!(self.alive.end <= Self::LITERAL_ESCAPE_START);
// SAFETY: The caller guarantees `self.data` contains printable ASCII
// characters in its `escape_seq` variant, and `self.alive` is
// a valid range for `self.data.escape_seq`.
unsafe {
self.data
.escape_seq
.get_unchecked(usize::from(self.alive.start)..usize::from(self.alive.end))
.as_str()
}
}
core::escape::EscapeIterInner::<N, ESCAPING>::unicode pub(crate) const fn unicode(c: char) -> Self {
let (escape_seq, alive) = escape_unicode(c);
// SAFETY: `escape_seq` contains an escape sequence in the range given by `alive`.
unsafe { Self::new(MaybeEscapedCharacter { escape_seq }, alive) }
}
core::escape::EscapeIterInner::<N, core::escape::AlwaysEscaped>::next pub(crate) fn next(&mut self) -> Option<u8> {
let i = self.alive.next()?;
// SAFETY: The `AlwaysEscaped` marker guarantees that `self.data`
// contains printable ASCII characters in its `escape_seq`
// variant, and `i` is guaranteed to be a valid index for
// `self.data.escape_seq`.
unsafe { Some(self.data.escape_seq.get_unchecked(usize::from(i)).to_u8()) }
}
core::escape::EscapeIterInner::<N, core::escape::AlwaysEscaped>::next_back pub(crate) fn next_back(&mut self) -> Option<u8> {
let i = self.alive.next_back()?;
// SAFETY: The `AlwaysEscaped` marker guarantees that `self.data`
// contains printable ASCII characters in its `escape_seq`
// variant, and `i` is guaranteed to be a valid index for
// `self.data.escape_seq`.
unsafe { Some(self.data.escape_seq.get_unchecked(usize::from(i)).to_u8()) }
}
core::escape::EscapeIterInner::<N, core::escape::MaybeEscaped>::next pub(crate) fn next(&mut self) -> Option<char> {
let i = self.alive.next()?;
if let Some(c) = self.to_char() {
return Some(c);
}
// SAFETY: At this point, `self.data` must contain printable ASCII
// characters in its `escape_seq` variant, and `i` is
// guaranteed to be a valid index for `self.data.escape_seq`.
Some(char::from(unsafe { self.data.escape_seq.get_unchecked(usize::from(i)).to_u8() }))
}
core::escape::EscapeIterInner::<N, core::escape::MaybeEscaped>::printable pub(crate) const fn printable(c: char) -> Self {
Self {
data: MaybeEscapedCharacter { literal: c },
// Uphold the invariant `alive.end > Self::LITERAL_ESCAPE_START`, and ensure
// `len` behaves correctly for iterating through one character literal.
alive: Self::LITERAL_ESCAPE_START..(Self::LITERAL_ESCAPE_START + 1),
escaping: PhantomData,
}
}
core::escape::backslash const fn backslash<const N: usize>(a: ascii::Char) -> ([ascii::Char; N], Range<u8>) {
const { assert!(N >= 2) };
let mut output = [ascii::Char::Null; N];
output[0] = ascii::Char::ReverseSolidus;
output[1] = a;
(output, 0..2)
}
core::escape::escape_ascii const fn escape_ascii<const N: usize>(byte: u8) -> ([ascii::Char; N], Range<u8>) {
const { assert!(N >= 4) };
#[cfg(feature = "optimize_for_size")]
{
match byte {
b'\t' => backslash(ascii::Char::SmallT),
b'\r' => backslash(ascii::Char::SmallR),
b'\n' => backslash(ascii::Char::SmallN),
b'\\' => backslash(ascii::Char::ReverseSolidus),
b'\'' => backslash(ascii::Char::Apostrophe),
b'"' => backslash(ascii::Char::QuotationMark),
0x00..=0x1F | 0x7F => hex_escape(byte),
_ => match ascii::Char::from_u8(byte) {
Some(a) => verbatim(a),
None => hex_escape(byte),
},
}
}
#[cfg(not(feature = "optimize_for_size"))]
{
/// A lookup table helps us determine how to display a character.
///
/// Since ASCII characters will always be 7 bits, we can exploit this to store the 8th bit to
/// indicate whether the result is escaped or unescaped.
///
/// We additionally use 0x80 (escaped NUL character) to indicate hex-escaped bytes, since
/// escaped NUL will not occur.
const LOOKUP: [u8; 256] = {
let mut arr = [0; 256];
let mut idx = 0;
while idx <= 255 {
arr[idx] = match idx as u8 {
// use 8th bit to indicate escaped
b'\t' => 0x80 | b't',
b'\r' => 0x80 | b'r',
b'\n' => 0x80 | b'n',
b'\\' => 0x80 | b'\\',
b'\'' => 0x80 | b'\'',
b'"' => 0x80 | b'"',
// use NUL to indicate hex-escaped
0x00..=0x1F | 0x7F..=0xFF => 0x80 | b'\0',
idx => idx,
};
idx += 1;
}
arr
};
let lookup = LOOKUP[byte as usize];
// 8th bit indicates escape
let lookup_escaped = lookup & 0x80 != 0;
// SAFETY: We explicitly mask out the eighth bit to get a 7-bit ASCII character.
let lookup_ascii = unsafe { ascii::Char::from_u8_unchecked(lookup & 0x7F) };
if lookup_escaped {
// NUL indicates hex-escaped
if matches!(lookup_ascii, ascii::Char::Null) {
hex_escape(byte)
} else {
backslash(lookup_ascii)
}
} else {
verbatim(lookup_ascii)
}
}
}
core::escape::escape_unicode const fn escape_unicode<const N: usize>(c: char) -> ([ascii::Char; N], Range<u8>) {
const { assert!(N >= 10 && N < u8::MAX as usize) };
let c = c as u32;
// OR-ing `1` ensures that for `c == 0` the code computes that
// one digit should be printed.
let start = (c | 1).leading_zeros() as usize / 4 - 2;
let mut output = [ascii::Char::Null; N];
output[3] = HEX_DIGITS[((c >> 20) & 15) as usize];
output[4] = HEX_DIGITS[((c >> 16) & 15) as usize];
output[5] = HEX_DIGITS[((c >> 12) & 15) as usize];
output[6] = HEX_DIGITS[((c >> 8) & 15) as usize];
output[7] = HEX_DIGITS[((c >> 4) & 15) as usize];
output[8] = HEX_DIGITS[((c >> 0) & 15) as usize];
output[9] = ascii::Char::RightCurlyBracket;
output[start + 0] = ascii::Char::ReverseSolidus;
output[start + 1] = ascii::Char::SmallU;
output[start + 2] = ascii::Char::LeftCurlyBracket;
(output, (start as u8)..(N as u8))
}
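The `start` arithmetic above computes how many hex digits to print: with `N == 10` and `c = 0x1F600`, `(c | 1).leading_zeros() == 15`, so `start = 15 / 4 - 2 = 1` and indices `1..10` hold `\u{1f600}`; for `c == 0`, OR-ing `1` gives `leading_zeros() == 31` and `start = 31 / 4 - 2 = 5`, so exactly one digit is printed. Observable through the public API (editorial addition):

fn main() {
    assert_eq!('\u{1F600}'.escape_unicode().to_string(), r"\u{1f600}");
    assert_eq!('\0'.escape_unicode().to_string(), r"\u{0}");
}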
core::escape::hex_escape const fn hex_escape<const N: usize>(byte: u8) -> ([ascii::Char; N], Range<u8>) {
const { assert!(N >= 4) };
let mut output = [ascii::Char::Null; N];
let hi = HEX_DIGITS[(byte >> 4) as usize];
let lo = HEX_DIGITS[(byte & 0xf) as usize];
output[0] = ascii::Char::ReverseSolidus;
output[1] = ascii::Char::SmallX;
output[2] = hi;
output[3] = lo;
(output, 0..4)
}
core::escape::verbatim const fn verbatim<const N: usize>(a: ascii::Char) -> ([ascii::Char; N], Range<u8>) {
const { assert!(N >= 1) };
let mut output = [ascii::Char::Null; N];
output[0] = a;
(output, 0..1)
}
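These three helpers (`backslash`, `hex_escape`, `verbatim`) back the public byte-escaping API; each case is visible through `u8::escape_ascii` (editorial addition):

fn main() {
    assert_eq!(b'\n'.escape_ascii().to_string(), r"\n");      // backslash
    assert_eq!(b'\x7f'.escape_ascii().to_string(), r"\x7f");  // hex_escape
    assert_eq!(b'a'.escape_ascii().to_string(), "a");         // verbatim
}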
core::f16::<impl f16>::abs pub const fn abs(self) -> Self {
intrinsics::fabsf16(self)
}
core::f16::<impl f16>::classify pub const fn classify(self) -> FpCategory {
let b = self.to_bits();
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, Self::EXP_MASK) => FpCategory::Infinite,
(_, Self::EXP_MASK) => FpCategory::Nan,
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
}
}
core::f16::<impl f16>::from_bits pub const fn from_bits(v: u16) -> Self {
// It turns out the safety issues with sNaN were overblown! Hooray!
// SAFETY: `u16` is a plain old datatype so we can always transmute from it.
unsafe { mem::transmute(v) }
}
core::f16::<impl f16>::to_bits pub const fn to_bits(self) -> u16 {
// SAFETY: `u16` is a plain old datatype so we can always transmute to it.
unsafe { mem::transmute(self) }
}
core::f32::<impl f32>::abs pub const fn abs(self) -> f32 {
intrinsics::fabsf32(self)
}
core::f32::<impl f32>::classify pub const fn classify(self) -> FpCategory {
// We used to have complicated logic here that avoids the simple bit-based tests to work
// around buggy codegen for x87 targets (see
// https://github.com/rust-lang/rust/issues/114479). However, some LLVM versions later, none
// of our tests is able to find any difference between the complicated and the naive
// version, so now we are back to the naive version.
let b = self.to_bits();
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, Self::EXP_MASK) => FpCategory::Infinite,
(_, Self::EXP_MASK) => FpCategory::Nan,
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
}
}
core::f32::<impl f32>::copysign pub const fn copysign(self, sign: f32) -> f32 {
intrinsics::copysignf32(self, sign)
}
core::f32::<impl f32>::from_bits pub const fn from_bits(v: u32) -> Self {
// It turns out the safety issues with sNaN were overblown! Hooray!
// SAFETY: `u32` is a plain old datatype so we can always transmute from it.
unsafe { mem::transmute(v) }
}
core::f32::<impl f32>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; 4]) -> Self {
Self::from_bits(u32::from_le_bytes(bytes))
}
core::f32::<impl f32>::is_infinite pub const fn is_infinite(self) -> bool {
// Getting clever with transmutation can result in incorrect answers on some FPUs
// FIXME: alter the Rust <-> Rust calling convention to prevent this problem.
// See https://github.com/rust-lang/rust/issues/72327
(self == f32::INFINITY) | (self == f32::NEG_INFINITY)
}
core::f32::<impl f32>::is_nan pub const fn is_nan(self) -> bool {
self != self
}
core::f32::<impl f32>::is_sign_negative pub const fn is_sign_negative(self) -> bool {
// IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
// applies to zeros and NaNs as well.
self.to_bits() & 0x8000_0000 != 0
}
core::f32::<impl f32>::max pub const fn max(self, other: f32) -> f32 {
intrinsics::maxnumf32(self, other)
}
core::f32::<impl f32>::min pub const fn min(self, other: f32) -> f32 {
intrinsics::minnumf32(self, other)
}
core::f32::<impl f32>::signum pub const fn signum(self) -> f32 {
if self.is_nan() { Self::NAN } else { 1.0_f32.copysign(self) }
}
core::f32::<impl f32>::to_bits pub const fn to_bits(self) -> u32 {
// SAFETY: `u32` is a plain old datatype so we can always transmute to it.
unsafe { mem::transmute(self) }
}
core::f32::<impl f32>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; 4] {
self.to_bits().to_le_bytes()
}
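A minimal sketch of the bit-level behaviors above through the public `f32` API (editorial addition):

fn main() {
    // `to_bits`/`from_bits` are plain transmutes, so they round-trip exactly:
    let bits = 1.0f32.to_bits();
    assert_eq!(bits, 0x3F80_0000);
    assert_eq!(f32::from_bits(bits), 1.0);
    // `is_nan` relies on NaN being the only value unequal to itself:
    assert!(f32::NAN.is_nan());
    // `is_sign_negative` inspects only the sign bit, so it holds for -0.0 too:
    assert!((-0.0f32).is_sign_negative());
}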
core::f64::<impl f64>::abs pub const fn abs(self) -> f64 {
intrinsics::fabsf64(self)
}
core::f64::<impl f64>::classify pub const fn classify(self) -> FpCategory {
// We used to have complicated logic here that avoids the simple bit-based tests to work
// around buggy codegen for x87 targets (see
// https://github.com/rust-lang/rust/issues/114479). However, some LLVM versions later, none
// of our tests is able to find any difference between the complicated and the naive
// version, so now we are back to the naive version.
let b = self.to_bits();
match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
(0, Self::EXP_MASK) => FpCategory::Infinite,
(_, Self::EXP_MASK) => FpCategory::Nan,
(0, 0) => FpCategory::Zero,
(_, 0) => FpCategory::Subnormal,
_ => FpCategory::Normal,
}
}
core::f64::<impl f64>::copysign pub const fn copysign(self, sign: f64) -> f64 {
intrinsics::copysignf64(self, sign)
}
core::f64::<impl f64>::from_bits pub const fn from_bits(v: u64) -> Self {
// It turns out the safety issues with sNaN were overblown! Hooray!
// SAFETY: `u64` is a plain old datatype so we can always transmute from it.
unsafe { mem::transmute(v) }
}
core::f64::<impl f64>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; 8]) -> Self {
Self::from_bits(u64::from_le_bytes(bytes))
}
core::f64::<impl f64>::is_infinite pub const fn is_infinite(self) -> bool {
// Getting clever with transmutation can result in incorrect answers on some FPUs
// FIXME: alter the Rust <-> Rust calling convention to prevent this problem.
// See https://github.com/rust-lang/rust/issues/72327
(self == f64::INFINITY) | (self == f64::NEG_INFINITY)
}
core::f64::<impl f64>::is_nan pub const fn is_nan(self) -> bool {
self != self
}
core::f64::<impl f64>::is_negative pub fn is_negative(self) -> bool {
self.is_sign_negative()
}
core::f64::<impl f64>::is_positive pub fn is_positive(self) -> bool {
self.is_sign_positive()
}
core::f64::<impl f64>::is_sign_negative pub const fn is_sign_negative(self) -> bool {
// IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
// applies to zeros and NaNs as well.
self.to_bits() & Self::SIGN_MASK != 0
}
core::f64::<impl f64>::is_sign_positive pub const fn is_sign_positive(self) -> bool {
!self.is_sign_negative()
}
core::f64::<impl f64>::to_bits pub const fn to_bits(self) -> u64 {
// SAFETY: `u64` is a plain old datatype so we can always transmute to it.
unsafe { mem::transmute(self) }
}
core::f64::<impl f64>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; 8] {
self.to_bits().to_le_bytes()
}
core::ffi::c_str::CStr::from_bytes_with_nul pub const fn from_bytes_with_nul(bytes: &[u8]) -> Result<&Self, FromBytesWithNulError> {
let nul_pos = memchr::memchr(0, bytes);
match nul_pos {
Some(nul_pos) if nul_pos + 1 == bytes.len() => {
// SAFETY: We know there is only one nul byte, at the end
// of the byte slice.
Ok(unsafe { Self::from_bytes_with_nul_unchecked(bytes) })
}
Some(position) => Err(FromBytesWithNulError::InteriorNul { position }),
None => Err(FromBytesWithNulError::NotNulTerminated),
}
}
core::ffi::c_str::CStr::from_bytes_with_nul_unchecked pub const unsafe fn from_bytes_with_nul_unchecked(bytes: &[u8]) -> &CStr {
const_eval_select!(
@capture { bytes: &[u8] } -> &CStr:
if const {
// Saturating so that an empty slice panics in the assert with a good
// message, not here due to underflow.
let mut i = bytes.len().saturating_sub(1);
assert!(!bytes.is_empty() && bytes[i] == 0, "input was not nul-terminated");
// Ending nul byte exists, skip to the rest.
while i != 0 {
i -= 1;
let byte = bytes[i];
assert!(byte != 0, "input contained interior nul");
}
// SAFETY: See runtime cast comment below.
unsafe { &*(bytes as *const [u8] as *const CStr) }
} else {
// This gives us a chance to catch some UB at runtime in debug builds.
debug_assert!(!bytes.is_empty() && bytes[bytes.len() - 1] == 0);
// SAFETY: Casting to CStr is safe because its internal representation
// is a [u8] too (safe only inside std).
// Dereferencing the obtained pointer is safe because it comes from a
// reference. Making a reference is then safe because its lifetime
// is bound by the lifetime of the given `bytes`.
unsafe { &*(bytes as *const [u8] as *const CStr) }
}
)
}
core::ffi::c_str::CStr::from_bytes_with_nul_unchecked::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ffi::c_str::CStr::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *const c_char) -> &'a CStr {
// SAFETY: The caller has provided a pointer that points to a valid C
// string with a NUL terminator less than `isize::MAX` from `ptr`.
let len = unsafe { strlen(ptr) };
// SAFETY: The caller has provided a valid pointer with length less than
// `isize::MAX`, so `from_raw_parts` is safe. The content remains valid
// and doesn't change for the lifetime of the returned `CStr`. This
// means the call to `from_bytes_with_nul_unchecked` is correct.
//
// The cast from c_char to u8 is ok because a c_char is always one byte.
unsafe { Self::from_bytes_with_nul_unchecked(slice::from_raw_parts(ptr.cast(), len + 1)) }
}
core::ffi::c_str::CStr::to_bytes pub const fn to_bytes(&self) -> &[u8] {
let bytes = self.to_bytes_with_nul();
// FIXME(const-hack) replace with range index
// SAFETY: to_bytes_with_nul returns slice with length at least 1
unsafe { slice::from_raw_parts(bytes.as_ptr(), bytes.len() - 1) }
}
core::ffi::c_str::CStr::to_bytes_with_nul pub const fn to_bytes_with_nul(&self) -> &[u8] {
// SAFETY: Transmuting a slice of `c_char`s to a slice of `u8`s
// is safe on all supported targets.
unsafe { &*((&raw const self.inner) as *const [u8]) }
}
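A minimal usage sketch for the `CStr` construction and byte accessors above (editorial addition):

use std::ffi::CStr;

fn main() {
    let cstr = CStr::from_bytes_with_nul(b"hello\0").unwrap();
    assert_eq!(cstr.to_bytes(), b"hello");             // nul stripped
    assert_eq!(cstr.to_bytes_with_nul(), b"hello\0");  // nul kept
    // Interior or missing nul bytes are rejected:
    assert!(CStr::from_bytes_with_nul(b"he\0llo\0").is_err());
    assert!(CStr::from_bytes_with_nul(b"hello").is_err());
}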
core::ffi::c_str::strlen const unsafe fn strlen(ptr: *const c_char) -> usize {
const_eval_select!(
@capture { s: *const c_char = ptr } -> usize:
if const {
let mut len = 0;
// SAFETY: Outer caller has provided a pointer to a valid C string.
while unsafe { *s.add(len) } != 0 {
len += 1;
}
len
} else {
unsafe extern "C" {
/// Provided by libc or compiler_builtins.
fn strlen(s: *const c_char) -> usize;
}
// SAFETY: Outer caller has provided a pointer to a valid C string.
unsafe { strlen(s) }
}
)
}
core::ffi::c_str::strlen::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::fmt::Arguments::<'a>::as_statically_known_str pub fn as_statically_known_str(&self) -> Option<&'static str> {
let s = self.as_str();
if core::intrinsics::is_val_statically_known(s.is_some()) { s } else { None }
}
core::fmt::Arguments::<'a>::as_str pub const fn as_str(&self) -> Option<&'static str> {
// SAFETY: During const eval, `self.args` must have come from a usize,
// not a pointer, because that's the only way to create a fmt::Arguments in const.
// (I.e. only fmt::Arguments::from_str is const, fmt::Arguments::new is not.)
//
// Outside const eval, transmuting a pointer to a usize is fine.
let bits: usize = unsafe { mem::transmute(self.args) };
if bits & 1 == 1 {
// SAFETY: This fmt::Arguments stores a &'static str. See encoding documentation above.
Some(unsafe {
str::from_utf8_unchecked(crate::slice::from_raw_parts(
self.template.as_ptr(),
bits >> 1,
))
})
} else {
None
}
}
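The tagged encoding above is observable only through the public `Arguments::as_str`: a `format_args!` invocation with no placeholders yields `Some`, while one with arguments currently yields `None` (editorial addition):

fn main() {
    assert_eq!(format_args!("just a literal").as_str(), Some("just a literal"));
    let x = 1;
    assert_eq!(format_args!("value: {x}").as_str(), None);
}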
core::fmt::Arguments::<'a>::estimated_capacity pub fn estimated_capacity(&self) -> usize {
if let Some(s) = self.as_str() {
return s.len();
}
// Iterate over the template, counting the length of literal pieces.
let mut length = 0usize;
let mut starts_with_placeholder = false;
let mut template = self.template;
loop {
// SAFETY: We can assume the template is valid.
unsafe {
let n = template.read();
template = template.add(1);
if n == 0 {
// End of template.
break;
} else if n < 128 {
// Short literal string piece.
length += n as usize;
template = template.add(n as usize);
} else if n == 128 {
// Long literal string piece.
let len = usize::from(u16::from_le_bytes(template.cast_array().read()));
length += len;
template = template.add(2 + len);
} else {
assert_unchecked(n >= 0xC0);
// Placeholder piece.
if length == 0 {
starts_with_placeholder = true;
}
// Skip remainder of placeholder:
let skip = (n & 1 != 0) as usize * 4 // flags (32 bit)
+ (n & 2 != 0) as usize * 2 // width (16 bit)
+ (n & 4 != 0) as usize * 2 // precision (16 bit)
+ (n & 8 != 0) as usize * 2; // arg_index (16 bit)
template = template.add(skip as usize);
}
}
}
if starts_with_placeholder && length < 16 {
// If the format string starts with a placeholder,
// don't preallocate anything, unless the length
// of the literal pieces is significant.
0
} else {
// There are some placeholders, so any additional push
// will reallocate the string. To avoid that,
// we're "pre-doubling" the capacity here.
length.wrapping_mul(2)
}
}
core::fmt::Arguments::<'a>::from_str pub const fn from_str(s: &'static str) -> Arguments<'a> {
// SAFETY: This is the "static str" representation of fmt::Arguments; see above.
unsafe {
Arguments {
template: mem::transmute(s.as_ptr()),
args: mem::transmute(s.len() << 1 | 1),
}
}
}
core::fmt::Arguments::<'a>::from_str_nonconst pub fn from_str_nonconst(s: &'static str) -> Arguments<'a> {
Arguments::from_str(s)
}
core::fmt::Arguments::<'a>::new pub unsafe fn new<const N: usize, const M: usize>(
template: &'a [u8; N],
args: &'a [rt::Argument<'a>; M],
) -> Arguments<'a> {
// SAFETY: Responsibility of the caller.
unsafe { Arguments { template: mem::transmute(template), args: mem::transmute(args) } }
}
core::fmt::Formatter::<'a>::align pub fn align(&self) -> Option<Alignment> {
self.options.get_align()
}
core::fmt::Formatter::<'a>::alternate pub fn alternate(&self) -> bool {
self.options.flags & flags::ALTERNATE_FLAG != 0
}
core::fmt::Formatter::<'a>::debug_list pub fn debug_list<'b>(&'b mut self) -> DebugList<'b, 'a> {
builders::debug_list_new(self)
}
core::fmt::Formatter::<'a>::debug_lower_hex fn debug_lower_hex(&self) -> bool {
self.options.flags & flags::DEBUG_LOWER_HEX_FLAG != 0
}
core::fmt::Formatter::<'a>::debug_map pub fn debug_map<'b>(&'b mut self) -> DebugMap<'b, 'a> {
builders::debug_map_new(self)
}
core::fmt::Formatter::<'a>::debug_set pub fn debug_set<'b>(&'b mut self) -> DebugSet<'b, 'a> {
builders::debug_set_new(self)
}
core::fmt::Formatter::<'a>::debug_struct pub fn debug_struct<'b>(&'b mut self, name: &str) -> DebugStruct<'b, 'a> {
builders::debug_struct_new(self, name)
}
core::fmt::Formatter::<'a>::debug_struct_field1_finish pub fn debug_struct_field1_finish<'b>(
&'b mut self,
name: &str,
name1: &str,
value1: &dyn Debug,
) -> Result {
let mut builder = builders::debug_struct_new(self, name);
builder.field(name1, value1);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_struct_field2_finish pub fn debug_struct_field2_finish<'b>(
&'b mut self,
name: &str,
name1: &str,
value1: &dyn Debug,
name2: &str,
value2: &dyn Debug,
) -> Result {
let mut builder = builders::debug_struct_new(self, name);
builder.field(name1, value1);
builder.field(name2, value2);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_struct_field3_finish pub fn debug_struct_field3_finish<'b>(
&'b mut self,
name: &str,
name1: &str,
value1: &dyn Debug,
name2: &str,
value2: &dyn Debug,
name3: &str,
value3: &dyn Debug,
) -> Result {
let mut builder = builders::debug_struct_new(self, name);
builder.field(name1, value1);
builder.field(name2, value2);
builder.field(name3, value3);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_struct_field4_finish pub fn debug_struct_field4_finish<'b>(
&'b mut self,
name: &str,
name1: &str,
value1: &dyn Debug,
name2: &str,
value2: &dyn Debug,
name3: &str,
value3: &dyn Debug,
name4: &str,
value4: &dyn Debug,
) -> Result {
let mut builder = builders::debug_struct_new(self, name);
builder.field(name1, value1);
builder.field(name2, value2);
builder.field(name3, value3);
builder.field(name4, value4);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_struct_field5_finish pub fn debug_struct_field5_finish<'b>(
&'b mut self,
name: &str,
name1: &str,
value1: &dyn Debug,
name2: &str,
value2: &dyn Debug,
name3: &str,
value3: &dyn Debug,
name4: &str,
value4: &dyn Debug,
name5: &str,
value5: &dyn Debug,
) -> Result {
let mut builder = builders::debug_struct_new(self, name);
builder.field(name1, value1);
builder.field(name2, value2);
builder.field(name3, value3);
builder.field(name4, value4);
builder.field(name5, value5);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_struct_fields_finish pub fn debug_struct_fields_finish<'b>(
&'b mut self,
name: &str,
names: &[&str],
values: &[&dyn Debug],
) -> Result {
assert_eq!(names.len(), values.len());
let mut builder = builders::debug_struct_new(self, name);
for (name, value) in iter::zip(names, values) {
builder.field(name, value);
}
builder.finish()
}
core::fmt::Formatter::<'a>::debug_tuple pub fn debug_tuple<'b>(&'b mut self, name: &str) -> DebugTuple<'b, 'a> {
builders::debug_tuple_new(self, name)
}
core::fmt::Formatter::<'a>::debug_tuple_field1_finish pub fn debug_tuple_field1_finish<'b>(&'b mut self, name: &str, value1: &dyn Debug) -> Result {
let mut builder = builders::debug_tuple_new(self, name);
builder.field(value1);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_tuple_field2_finish pub fn debug_tuple_field2_finish<'b>(
&'b mut self,
name: &str,
value1: &dyn Debug,
value2: &dyn Debug,
) -> Result {
let mut builder = builders::debug_tuple_new(self, name);
builder.field(value1);
builder.field(value2);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_tuple_field3_finish pub fn debug_tuple_field3_finish<'b>(
&'b mut self,
name: &str,
value1: &dyn Debug,
value2: &dyn Debug,
value3: &dyn Debug,
) -> Result {
let mut builder = builders::debug_tuple_new(self, name);
builder.field(value1);
builder.field(value2);
builder.field(value3);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_tuple_field4_finish pub fn debug_tuple_field4_finish<'b>(
&'b mut self,
name: &str,
value1: &dyn Debug,
value2: &dyn Debug,
value3: &dyn Debug,
value4: &dyn Debug,
) -> Result {
let mut builder = builders::debug_tuple_new(self, name);
builder.field(value1);
builder.field(value2);
builder.field(value3);
builder.field(value4);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_tuple_field5_finish pub fn debug_tuple_field5_finish<'b>(
&'b mut self,
name: &str,
value1: &dyn Debug,
value2: &dyn Debug,
value3: &dyn Debug,
value4: &dyn Debug,
value5: &dyn Debug,
) -> Result {
let mut builder = builders::debug_tuple_new(self, name);
builder.field(value1);
builder.field(value2);
builder.field(value3);
builder.field(value4);
builder.field(value5);
builder.finish()
}
core::fmt::Formatter::<'a>::debug_tuple_fields_finish pub fn debug_tuple_fields_finish<'b>(
&'b mut self,
name: &str,
values: &[&dyn Debug],
) -> Result {
let mut builder = builders::debug_tuple_new(self, name);
for value in values {
builder.field(value);
}
builder.finish()
}
core::fmt::Formatter::<'a>::debug_upper_hex fn debug_upper_hex(&self) -> bool {
self.options.flags & flags::DEBUG_UPPER_HEX_FLAG != 0
}
core::fmt::Formatter::<'a>::fill pub fn fill(&self) -> char {
self.options.get_fill()
}
core::fmt::Formatter::<'a>::flags pub fn flags(&self) -> u32 {
// Extract the debug upper/lower hex, zero pad, alternate, and plus/minus flags
// to stay compatible with older versions of Rust.
self.options.flags >> 21 & 0x3F
}
core::fmt::Formatter::<'a>::new pub const fn new(write: &'a mut (dyn Write + 'a), options: FormattingOptions) -> Self {
Formatter { options, buf: write }
}
core::fmt::Formatter::<'a>::options pub const fn options(&self) -> FormattingOptions {
self.options
}
core::fmt::Formatter::<'a>::pad pub fn pad(&mut self, s: &str) -> Result {
// Make sure there's a fast path up front.
if self.options.flags & (flags::WIDTH_FLAG | flags::PRECISION_FLAG) == 0 {
return self.buf.write_str(s);
}
// The `precision` field can be interpreted as a maximum width for the
// string being formatted.
let (s, char_count) = if let Some(max_char_count) = self.options.get_precision() {
let mut iter = s.char_indices();
let remaining = match iter.advance_by(usize::from(max_char_count)) {
Ok(()) => 0,
Err(remaining) => remaining.get(),
};
// SAFETY: The offset of `.char_indices()` is guaranteed to be
// in-bounds and between character boundaries.
let truncated = unsafe { s.get_unchecked(..iter.offset()) };
(truncated, usize::from(max_char_count) - remaining)
} else {
// Use the optimized char counting algorithm for the full string.
(s, s.chars().count())
};
// The `width` field is more of a minimum width parameter at this point.
if char_count < usize::from(self.options.width) {
// If we're under the minimum width, then fill up the minimum width
// with the specified string + some alignment.
let post_padding =
self.padding(self.options.width - char_count as u16, Alignment::Left)?;
self.buf.write_str(s)?;
post_padding.write(self)
} else {
// If we're over the minimum width or there is no minimum width, we
// can just emit the string.
self.buf.write_str(s)
}
}
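`pad` is what string formatting with width/precision goes through: precision acts as a maximum character count, width as a minimum. A minimal sketch through `format!` (editorial addition):

fn main() {
    // precision truncates to 3 chars, width pads to 8, right-aligned:
    assert_eq!(format!("{:>8.3}", "rustacean"), "     rus");
    // with neither flag set, `pad` is a plain `write_str`:
    assert_eq!(format!("{}", "rustacean"), "rustacean");
}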
core::fmt::Formatter::<'a>::pad_formatted_parts unsafe fn pad_formatted_parts(&mut self, formatted: &numfmt::Formatted<'_>) -> Result {
if self.options.width == 0 {
// this is the common case and we take a shortcut
// SAFETY: Per the precondition.
unsafe { self.write_formatted_parts(formatted) }
} else {
// for the sign-aware zero padding, we render the sign first and
// behave as if we had no sign from the beginning.
let mut formatted = formatted.clone();
let mut width = self.options.width;
let old_options = self.options;
if self.sign_aware_zero_pad() {
// a sign always goes first
let sign = formatted.sign;
self.buf.write_str(sign)?;
// remove the sign from the formatted parts
formatted.sign = "";
width = width.saturating_sub(sign.len() as u16);
self.options.fill('0').align(Some(Alignment::Right));
}
// remaining parts go through the ordinary padding process.
let len = formatted.len();
let ret = if usize::from(width) <= len {
// no padding
// SAFETY: Per the precondition.
unsafe { self.write_formatted_parts(&formatted) }
} else {
let post_padding = self.padding(width - len as u16, Alignment::Right)?;
// SAFETY: Per the precondition.
unsafe {
self.write_formatted_parts(&formatted)?;
}
post_padding.write(self)
};
self.options = old_options;
ret
}
}
core::fmt::Formatter::<'a>::pad_integral pub fn pad_integral(&mut self, is_nonnegative: bool, prefix: &str, buf: &str) -> Result {
let mut width = buf.len();
let mut sign = None;
if !is_nonnegative {
sign = Some('-');
width += 1;
} else if self.sign_plus() {
sign = Some('+');
width += 1;
}
let prefix = if self.alternate() {
width += prefix.chars().count();
Some(prefix)
} else {
None
};
// Writes the sign if it exists, and then the prefix if it was requested
#[inline(never)]
fn write_prefix(f: &mut Formatter<'_>, sign: Option<char>, prefix: Option<&str>) -> Result {
if let Some(c) = sign {
f.buf.write_char(c)?;
}
if let Some(prefix) = prefix { f.buf.write_str(prefix) } else { Ok(()) }
}
// The `width` field is more of a `min-width` parameter at this point.
let min = self.options.width;
if width >= usize::from(min) {
// We're over the minimum width, so then we can just write the bytes.
write_prefix(self, sign, prefix)?;
self.buf.write_str(buf)
} else if self.sign_aware_zero_pad() {
// The sign and prefix go before the padding if the fill character
// is zero
let old_options = self.options;
self.options.fill('0').align(Some(Alignment::Right));
write_prefix(self, sign, prefix)?;
let post_padding = self.padding(min - width as u16, Alignment::Right)?;
self.buf.write_str(buf)?;
post_padding.write(self)?;
self.options = old_options;
Ok(())
} else {
// Otherwise, the sign and prefix go after the padding
let post_padding = self.padding(min - width as u16, Alignment::Right)?;
write_prefix(self, sign, prefix)?;
self.buf.write_str(buf)?;
post_padding.write(self)
}
}
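`pad_integral` is the integer-formatting path; the three branches above are visible through `format!` (editorial addition):

fn main() {
    // `#` requests the `0x` prefix, `0` requests sign-aware zero padding,
    // width 10: the prefix goes before the zeros.
    assert_eq!(format!("{:#010x}", 255), "0x000000ff");
    // The sign likewise goes before the zero padding:
    assert_eq!(format!("{:+06}", 42), "+00042");
}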
core::fmt::Formatter::<'a>::pad_integral::write_prefix fn write_prefix(f: &mut Formatter<'_>, sign: Option<char>, prefix: Option<&str>) -> Result {
if let Some(c) = sign {
f.buf.write_char(c)?;
}
if let Some(prefix) = prefix { f.buf.write_str(prefix) } else { Ok(()) }
}
core::fmt::Formatter::<'a>::padding pub(crate) fn padding(
&mut self,
padding: u16,
default: Alignment,
) -> result::Result<PostPadding, Error> {
let align = self.options.get_align().unwrap_or(default);
let fill = self.options.get_fill();
let padding_left = match align {
Alignment::Left => 0,
Alignment::Right => padding,
Alignment::Center => padding / 2,
};
for _ in 0..padding_left {
self.buf.write_char(fill)?;
}
Ok(PostPadding::new(fill, padding - padding_left))
}
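Per the `padding` body above, center alignment puts `padding / 2` fill characters on the left and the remainder on the right (editorial addition):

fn main() {
    // width 7, string length 2: padding 5 splits as 2 left, 3 right.
    assert_eq!(format!("{:*^7}", "ab"), "**ab***");
}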
core::fmt::Formatter::<'a>::precision pub fn precision(&self) -> Option<usize> {
if self.options.flags & flags::PRECISION_FLAG == 0 {
None
} else {
Some(self.options.precision as usize)
}
}
core::fmt::Formatter::<'a>::sign pub const fn sign(&self) -> Option<Sign> {
self.options.get_sign()
}
core::fmt::Formatter::<'a>::sign_aware_zero_pad pub fn sign_aware_zero_pad(&self) -> bool {
self.options.flags & flags::SIGN_AWARE_ZERO_PAD_FLAG != 0
}
core::fmt::Formatter::<'a>::sign_minus pub fn sign_minus(&self) -> bool {
self.options.flags & flags::SIGN_MINUS_FLAG != 0
}
core::fmt::Formatter::<'a>::sign_plus pub fn sign_plus(&self) -> bool {
self.options.flags & flags::SIGN_PLUS_FLAG != 0
}
core::fmt::Formatter::<'a>::width pub fn width(&self) -> Option<usize> {
if self.options.flags & flags::WIDTH_FLAG == 0 {
None
} else {
Some(self.options.width as usize)
}
}
core::fmt::Formatter::<'a>::with_options pub const fn with_options<'b>(&'b mut self, options: FormattingOptions) -> Formatter<'b> {
Formatter { options, buf: self.buf }
}
core::fmt::Formatter::<'a>::wrap_buf fn wrap_buf<'b, 'c, F>(&'b mut self, wrap: F) -> Formatter<'c>
where
'b: 'c,
F: FnOnce(&'b mut (dyn Write + 'b)) -> &'c mut (dyn Write + 'c),
{
Formatter {
// We want to change this
buf: wrap(self.buf),
// And preserve these
options: self.options,
}
}
core::fmt::Formatter::<'a>::write_fmt pub fn write_fmt(&mut self, fmt: Arguments<'_>) -> Result {
if let Some(s) = fmt.as_statically_known_str() {
self.buf.write_str(s)
} else {
write(self.buf, fmt)
}
}
core::fmt::Formatter::<'a>::write_formatted_parts unsafe fn write_formatted_parts(&mut self, formatted: &numfmt::Formatted<'_>) -> Result {
unsafe fn write_bytes(buf: &mut dyn Write, s: &[u8]) -> Result {
// SAFETY: This is used for `numfmt::Part::Num` and `numfmt::Part::Copy`.
// It's safe to use for `numfmt::Part::Num` since every char `c` is between
// `b'0'` and `b'9'`, which means `s` is valid UTF-8. It's safe to use for
// `numfmt::Part::Copy` due to this function's precondition.
buf.write_str(unsafe { str::from_utf8_unchecked(s) })
}
if !formatted.sign.is_empty() {
self.buf.write_str(formatted.sign)?;
}
for part in formatted.parts {
match *part {
numfmt::Part::Zero(mut nzeroes) => {
const ZEROES: &str = // 64 zeroes
"0000000000000000000000000000000000000000000000000000000000000000";
while nzeroes > ZEROES.len() {
self.buf.write_str(ZEROES)?;
nzeroes -= ZEROES.len();
}
if nzeroes > 0 {
self.buf.write_str(&ZEROES[..nzeroes])?;
}
}
numfmt::Part::Num(mut v) => {
let mut s = [0; 5];
let len = part.len();
for c in s[..len].iter_mut().rev() {
*c = b'0' + (v % 10) as u8;
v /= 10;
}
// SAFETY: Per the precondition.
unsafe {
write_bytes(self.buf, &s[..len])?;
}
}
// SAFETY: Per the precondition.
numfmt::Part::Copy(buf) => unsafe {
write_bytes(self.buf, buf)?;
},
}
}
Ok(())
}
core::fmt::Formatter::<'a>::write_formatted_parts::write_bytes unsafe fn write_bytes(buf: &mut dyn Write, s: &[u8]) -> Result {
// SAFETY: This is used for `numfmt::Part::Num` and `numfmt::Part::Copy`.
// It's safe to use for `numfmt::Part::Num` since every char `c` is between
// `b'0'` and `b'9'`, which means `s` is valid UTF-8. It's safe to use for
// `numfmt::Part::Copy` due to this function's precondition.
buf.write_str(unsafe { str::from_utf8_unchecked(s) })
}
core::fmt::Formatter::<'a>::write_str pub fn write_str(&mut self, data: &str) -> Result {
self.buf.write_str(data)
}
core::fmt::FormattingOptions::align pub const fn align(&mut self, align: Option<Alignment>) -> &mut Self {
let align: u32 = match align {
Some(Alignment::Left) => flags::ALIGN_LEFT,
Some(Alignment::Right) => flags::ALIGN_RIGHT,
Some(Alignment::Center) => flags::ALIGN_CENTER,
None => flags::ALIGN_UNKNOWN,
};
self.flags = self.flags & !flags::ALIGN_BITS | align;
self
}
core::fmt::FormattingOptions::alternate pub const fn alternate(&mut self, alternate: bool) -> &mut Self {
if alternate {
self.flags |= flags::ALTERNATE_FLAG;
} else {
self.flags &= !flags::ALTERNATE_FLAG;
}
self
}
core::fmt::FormattingOptions::create_formatter pub const fn create_formatter<'a>(self, write: &'a mut (dyn Write + 'a)) -> Formatter<'a> {
Formatter { options: self, buf: write }
}
core::fmt::FormattingOptions::debug_as_hex pub const fn debug_as_hex(&mut self, debug_as_hex: Option<DebugAsHex>) -> &mut Self {
let debug_as_hex = match debug_as_hex {
None => 0,
Some(DebugAsHex::Lower) => flags::DEBUG_LOWER_HEX_FLAG,
Some(DebugAsHex::Upper) => flags::DEBUG_UPPER_HEX_FLAG,
};
self.flags = self.flags & !(flags::DEBUG_LOWER_HEX_FLAG | flags::DEBUG_UPPER_HEX_FLAG)
| debug_as_hex;
self
}
core::fmt::FormattingOptions::fill pub const fn fill(&mut self, fill: char) -> &mut Self {
self.flags = self.flags & (u32::MAX << 21) | fill as u32;
self
}
core::fmt::FormattingOptions::get_align pub const fn get_align(&self) -> Option<Alignment> {
match self.flags & flags::ALIGN_BITS {
flags::ALIGN_LEFT => Some(Alignment::Left),
flags::ALIGN_RIGHT => Some(Alignment::Right),
flags::ALIGN_CENTER => Some(Alignment::Center),
_ => None,
}
}
core::fmt::FormattingOptions::get_alternate pub const fn get_alternate(&self) -> bool {
self.flags & flags::ALTERNATE_FLAG != 0
}
core::fmt::FormattingOptions::get_debug_as_hex pub const fn get_debug_as_hex(&self) -> Option<DebugAsHex> {
if self.flags & flags::DEBUG_LOWER_HEX_FLAG != 0 {
Some(DebugAsHex::Lower)
} else if self.flags & flags::DEBUG_UPPER_HEX_FLAG != 0 {
Some(DebugAsHex::Upper)
} else {
None
}
}
core::fmt::FormattingOptions::get_fill pub const fn get_fill(&self) -> char {
// SAFETY: We only ever put a valid `char` in the lower 21 bits of the flags field.
unsafe { char::from_u32_unchecked(self.flags & 0x1FFFFF) }
}
core::fmt::FormattingOptions::get_precision pub const fn get_precision(&self) -> Option<u16> {
if self.flags & flags::PRECISION_FLAG != 0 { Some(self.precision) } else { None }
}
core::fmt::FormattingOptions::get_sign pub const fn get_sign(&self) -> Option<Sign> {
if self.flags & flags::SIGN_PLUS_FLAG != 0 {
Some(Sign::Plus)
} else if self.flags & flags::SIGN_MINUS_FLAG != 0 {
Some(Sign::Minus)
} else {
None
}
}
core::fmt::FormattingOptions::get_sign_aware_zero_pad pub const fn get_sign_aware_zero_pad(&self) -> bool {
self.flags & flags::SIGN_AWARE_ZERO_PAD_FLAG != 0
}
core::fmt::FormattingOptions::get_width pub const fn get_width(&self) -> Option<u16> {
if self.flags & flags::WIDTH_FLAG != 0 { Some(self.width) } else { None }
}
core::fmt::FormattingOptions::new pub const fn new() -> Self {
Self { flags: ' ' as u32 | flags::ALIGN_UNKNOWN, width: 0, precision: 0 }
}
core::fmt::FormattingOptions::precision pub const fn precision(&mut self, precision: Option<u16>) -> &mut Self {
if let Some(precision) = precision {
self.flags |= flags::PRECISION_FLAG;
self.precision = precision;
} else {
self.flags &= !flags::PRECISION_FLAG;
self.precision = 0;
}
self
}
core::fmt::FormattingOptions::sign pub const fn sign(&mut self, sign: Option<Sign>) -> &mut Self {
let sign = match sign {
None => 0,
Some(Sign::Plus) => flags::SIGN_PLUS_FLAG,
Some(Sign::Minus) => flags::SIGN_MINUS_FLAG,
};
self.flags = self.flags & !(flags::SIGN_PLUS_FLAG | flags::SIGN_MINUS_FLAG) | sign;
self
}
core::fmt::FormattingOptions::sign_aware_zero_pad pub const fn sign_aware_zero_pad(&mut self, sign_aware_zero_pad: bool) -> &mut Self {
if sign_aware_zero_pad {
self.flags |= flags::SIGN_AWARE_ZERO_PAD_FLAG;
} else {
self.flags &= !flags::SIGN_AWARE_ZERO_PAD_FLAG;
}
self
}
core::fmt::FormattingOptions::width pub const fn width(&mut self, width: Option<u16>) -> &mut Self {
if let Some(width) = width {
self.flags |= flags::WIDTH_FLAG;
self.width = width;
} else {
self.flags &= !flags::WIDTH_FLAG;
self.width = 0;
}
self
}
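These builder methods all return `&mut Self`, so a full format spec can be assembled by chaining. A sketch, assuming a nightly toolchain where the unstable `formatting_options` feature is available:

#![feature(formatting_options)] // nightly-only at the time of writing
use core::fmt::{Alignment, FormattingOptions, Sign};

fn main() {
    let mut opts = FormattingOptions::new();
    opts.align(Some(Alignment::Right))
        .sign(Some(Sign::Plus))
        .width(Some(8));
    assert_eq!(opts.get_align(), Some(Alignment::Right));
    assert_eq!(opts.get_width(), Some(8));
}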
core::fmt::PostPadding::new fn new(fill: char, padding: u16) -> PostPadding {
PostPadding { fill, padding }
}
core::fmt::PostPadding::write pub(crate) fn write(self, f: &mut Formatter<'_>) -> Result {
for _ in 0..self.padding {
f.buf.write_char(self.fill)?;
}
Ok(())
}
core::fmt::Write::write_char fn write_char(&mut self, c: char) -> Result {
self.write_str(c.encode_utf8(&mut [0; char::MAX_LEN_UTF8]))
}
core::fmt::Write::write_fmt fn write_fmt(&mut self, args: Arguments<'_>) -> Result {
// We use a specialization for `Sized` types to avoid an indirection
// through `&mut self`
trait SpecWriteFmt {
fn spec_write_fmt(self, args: Arguments<'_>) -> Result;
}
impl<W: Write + ?Sized> SpecWriteFmt for &mut W {
#[inline]
default fn spec_write_fmt(mut self, args: Arguments<'_>) -> Result {
if let Some(s) = args.as_statically_known_str() {
self.write_str(s)
} else {
write(&mut self, args)
}
}
}
impl<W: Write> SpecWriteFmt for &mut W {
#[inline]
fn spec_write_fmt(self, args: Arguments<'_>) -> Result {
if let Some(s) = args.as_statically_known_str() {
self.write_str(s)
} else {
write(self, args)
}
}
}
self.spec_write_fmt(args)
}
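`write_fmt` is what the `write!` macro expands to, so any type that can supply `write_str` gets full formatted output for free. A self-contained sketch of a fixed-capacity sink (the `FixedBuf` type is illustrative):

use core::fmt::{self, Write};

struct FixedBuf<const N: usize> {
    buf: [u8; N],
    len: usize,
}

impl<const N: usize> fmt::Write for FixedBuf<N> {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        let bytes = s.as_bytes();
        // Formatting reports `fmt::Error` when the buffer is full.
        if self.len + bytes.len() > N {
            return Err(fmt::Error);
        }
        self.buf[self.len..self.len + bytes.len()].copy_from_slice(bytes);
        self.len += bytes.len();
        Ok(())
    }
}

fn main() {
    let mut out = FixedBuf::<32> { buf: [0; 32], len: 0 };
    write!(out, "{} + {} = {}", 2, 2, 4).unwrap();
    assert_eq!(&out.buf[..out.len], b"2 + 2 = 4");
}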
core::fmt::builders::DebugInner::<'a, 'b>::entry_with fn entry_with<F>(&mut self, entry_fmt: F)
where
F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
{
self.result = self.result.and_then(|_| {
if self.is_pretty() {
if !self.has_fields {
self.fmt.write_str("\n")?;
}
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
entry_fmt(&mut writer)?;
writer.write_str(",\n")
} else {
if self.has_fields {
self.fmt.write_str(", ")?
}
entry_fmt(self.fmt)
}
});
self.has_fields = true;
}
core::fmt::builders::DebugInner::<'a, 'b>::is_pretty fn is_pretty(&self) -> bool {
self.fmt.alternate()
}
core::fmt::builders::DebugList::<'a, 'b>::entries pub fn entries<D, I>(&mut self, entries: I) -> &mut Self
where
D: fmt::Debug,
I: IntoIterator<Item = D>,
{
for entry in entries {
self.entry(&entry);
}
self
}
core::fmt::builders::DebugList::<'a, 'b>::entry pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut Self {
self.inner.entry_with(|f| entry.fmt(f));
self
}
core::fmt::builders::DebugList::<'a, 'b>::entry_with pub fn entry_with<F>(&mut self, entry_fmt: F) -> &mut Self
where
F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
{
self.inner.entry_with(entry_fmt);
self
}
core::fmt::builders::DebugList::<'a, 'b>::finish pub fn finish(&mut self) -> fmt::Result {
self.inner.result = self.inner.result.and_then(|_| self.inner.fmt.write_str("]"));
self.inner.result
}
core::fmt::builders::DebugList::<'a, 'b>::finish_non_exhaustive pub fn finish_non_exhaustive(&mut self) -> fmt::Result {
self.inner.result.and_then(|_| {
if self.inner.has_fields {
if self.inner.is_pretty() {
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.inner.fmt, &mut slot, &mut state);
writer.write_str("..\n")?;
self.inner.fmt.write_str("]")
} else {
self.inner.fmt.write_str(", ..]")
}
} else {
self.inner.fmt.write_str("..]")
}
})
}
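For illustration, these builders are normally reached through `Formatter::debug_list`; a sketch of a `Debug` impl that truncates long slices (the `First3` type is illustrative):

use core::fmt;

struct First3<'a>(&'a [i32]);

impl fmt::Debug for First3<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut list = f.debug_list();
        list.entries(self.0.iter().take(3));
        if self.0.len() > 3 {
            list.finish_non_exhaustive() // renders as `[a, b, c, ..]`
        } else {
            list.finish()
        }
    }
}

// assert_eq!(format!("{:?}", First3(&[1, 2, 3, 4])), "[1, 2, 3, ..]");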
core::fmt::builders::DebugMap::<'a, 'b>::entries pub fn entries<K, V, I>(&mut self, entries: I) -> &mut Self
where
K: fmt::Debug,
V: fmt::Debug,
I: IntoIterator<Item = (K, V)>,
{
for (k, v) in entries {
self.entry(&k, &v);
}
self
}
core::fmt::builders::DebugMap::<'a, 'b>::entry pub fn entry(&mut self, key: &dyn fmt::Debug, value: &dyn fmt::Debug) -> &mut Self {
self.key(key).value(value)
}
core::fmt::builders::DebugMap::<'a, 'b>::finish pub fn finish(&mut self) -> fmt::Result {
self.result = self.result.and_then(|_| {
assert!(!self.has_key, "attempted to finish a map with a partial entry");
self.fmt.write_str("}")
});
self.result
}
core::fmt::builders::DebugMap::<'a, 'b>::finish_non_exhaustive pub fn finish_non_exhaustive(&mut self) -> fmt::Result {
self.result = self.result.and_then(|_| {
assert!(!self.has_key, "attempted to finish a map with a partial entry");
if self.has_fields {
if self.is_pretty() {
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
writer.write_str("..\n")?;
self.fmt.write_str("}")
} else {
self.fmt.write_str(", ..}")
}
} else {
self.fmt.write_str("..}")
}
});
self.result
}
core::fmt::builders::DebugMap::<'a, 'b>::is_pretty fn is_pretty(&self) -> bool {
self.fmt.alternate()
}
core::fmt::builders::DebugMap::<'a, 'b>::key pub fn key(&mut self, key: &dyn fmt::Debug) -> &mut Self {
self.key_with(|f| key.fmt(f))
}
core::fmt::builders::DebugMap::<'a, 'b>::key_with pub fn key_with<F>(&mut self, key_fmt: F) -> &mut Self
where
F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
{
self.result = self.result.and_then(|_| {
assert!(
!self.has_key,
"attempted to begin a new map entry \
without completing the previous one"
);
if self.is_pretty() {
if !self.has_fields {
self.fmt.write_str("\n")?;
}
let mut slot = None;
self.state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut self.state);
key_fmt(&mut writer)?;
writer.write_str(": ")?;
} else {
if self.has_fields {
self.fmt.write_str(", ")?
}
key_fmt(self.fmt)?;
self.fmt.write_str(": ")?;
}
self.has_key = true;
Ok(())
});
self
}
core::fmt::builders::DebugMap::<'a, 'b>::value pub fn value(&mut self, value: &dyn fmt::Debug) -> &mut Self {
self.value_with(|f| value.fmt(f))
}
core::fmt::builders::DebugMap::<'a, 'b>::value_with pub fn value_with<F>(&mut self, value_fmt: F) -> &mut Self
where
F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
{
self.result = self.result.and_then(|_| {
assert!(self.has_key, "attempted to format a map value before its key");
if self.is_pretty() {
let mut slot = None;
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut self.state);
value_fmt(&mut writer)?;
writer.write_str(",\n")?;
} else {
value_fmt(self.fmt)?;
}
self.has_key = false;
Ok(())
});
self.has_fields = true;
self
}
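The `has_key` bookkeeping above is what enforces that every `key` is paired with a `value`; `entry` does both at once. A sketch using `Formatter::debug_map` (the `Pairs` type is illustrative):

use core::fmt;

struct Pairs<'a>(&'a [(&'a str, i32)]);

impl fmt::Debug for Pairs<'_> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // A `key` without a matching `value` makes `finish` panic with
        // "partial entry", so `entries`/`entry` are the safe path.
        f.debug_map().entries(self.0.iter().map(|&(k, v)| (k, v))).finish()
    }
}

// assert_eq!(format!("{:?}", Pairs(&[("a", 1)])), "{\"a\": 1}");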
core::fmt::builders::DebugSet::<'a, 'b>::entries pub fn entries<D, I>(&mut self, entries: I) -> &mut Self
where
D: fmt::Debug,
I: IntoIterator<Item = D>,
{
for entry in entries {
self.entry(&entry);
}
self
}
core::fmt::builders::DebugSet::<'a, 'b>::entry pub fn entry(&mut self, entry: &dyn fmt::Debug) -> &mut Self {
self.inner.entry_with(|f| entry.fmt(f));
self
}
core::fmt::builders::DebugSet::<'a, 'b>::entry_with pub fn entry_with<F>(&mut self, entry_fmt: F) -> &mut Self
where
F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
{
self.inner.entry_with(entry_fmt);
self
}
core::fmt::builders::DebugSet::<'a, 'b>::finish pub fn finish(&mut self) -> fmt::Result {
self.inner.result = self.inner.result.and_then(|_| self.inner.fmt.write_str("}"));
self.inner.result
}
core::fmt::builders::DebugSet::<'a, 'b>::finish_non_exhaustive pub fn finish_non_exhaustive(&mut self) -> fmt::Result {
self.inner.result = self.inner.result.and_then(|_| {
if self.inner.has_fields {
if self.inner.is_pretty() {
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.inner.fmt, &mut slot, &mut state);
writer.write_str("..\n")?;
self.inner.fmt.write_str("}")
} else {
self.inner.fmt.write_str(", ..}")
}
} else {
self.inner.fmt.write_str("..}")
}
});
self.inner.result
}
core::fmt::builders::DebugStruct::<'a, 'b>::field pub fn field(&mut self, name: &str, value: &dyn fmt::Debug) -> &mut Self {
self.field_with(name, |f| value.fmt(f))
}
core::fmt::builders::DebugStruct::<'a, 'b>::field_with pub fn field_with<F>(&mut self, name: &str, value_fmt: F) -> &mut Self
where
F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
{
self.result = self.result.and_then(|_| {
if self.is_pretty() {
if !self.has_fields {
self.fmt.write_str(" {\n")?;
}
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
writer.write_str(name)?;
writer.write_str(": ")?;
value_fmt(&mut writer)?;
writer.write_str(",\n")
} else {
let prefix = if self.has_fields { ", " } else { " { " };
self.fmt.write_str(prefix)?;
self.fmt.write_str(name)?;
self.fmt.write_str(": ")?;
value_fmt(self.fmt)
}
});
self.has_fields = true;
self
}
core::fmt::builders::DebugStruct::<'a, 'b>::finish pub fn finish(&mut self) -> fmt::Result {
if self.has_fields {
self.result = self.result.and_then(|_| {
if self.is_pretty() { self.fmt.write_str("}") } else { self.fmt.write_str(" }") }
});
}
self.result
}
core::fmt::builders::DebugStruct::<'a, 'b>::finish_non_exhaustive pub fn finish_non_exhaustive(&mut self) -> fmt::Result {
self.result = self.result.and_then(|_| {
if self.has_fields {
if self.is_pretty() {
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
writer.write_str("..\n")?;
self.fmt.write_str("}")
} else {
self.fmt.write_str(", .. }")
}
} else {
self.fmt.write_str(" { .. }")
}
});
self.result
}
core::fmt::builders::DebugStruct::<'a, 'b>::is_pretty fn is_pretty(&self) -> bool {
self.fmt.alternate()
}
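A sketch of how these pieces are reached via `Formatter::debug_struct`, hiding one field behind `finish_non_exhaustive` (the `Point` type is illustrative):

use core::fmt;

struct Point {
    x: i32,
    y: i32,
    secret: u64,
}

impl fmt::Debug for Point {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Renders as `Point { x: 1, y: 2, .. }`, leaving `secret` out.
        f.debug_struct("Point")
            .field("x", &self.x)
            .field("y", &self.y)
            .finish_non_exhaustive()
    }
}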
core::fmt::builders::DebugTuple::<'a, 'b>::field pub fn field(&mut self, value: &dyn fmt::Debug) -> &mut Self {
self.field_with(|f| value.fmt(f))
}
core::fmt::builders::DebugTuple::<'a, 'b>::field_with pub fn field_with<F>(&mut self, value_fmt: F) -> &mut Self
where
F: FnOnce(&mut fmt::Formatter<'_>) -> fmt::Result,
{
self.result = self.result.and_then(|_| {
if self.is_pretty() {
if self.fields == 0 {
self.fmt.write_str("(\n")?;
}
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
value_fmt(&mut writer)?;
writer.write_str(",\n")
} else {
let prefix = if self.fields == 0 { "(" } else { ", " };
self.fmt.write_str(prefix)?;
value_fmt(self.fmt)
}
});
self.fields += 1;
self
}
core::fmt::builders::DebugTuple::<'a, 'b>::finish pub fn finish(&mut self) -> fmt::Result {
if self.fields > 0 {
self.result = self.result.and_then(|_| {
if self.fields == 1 && self.empty_name && !self.is_pretty() {
self.fmt.write_str(",")?;
}
self.fmt.write_str(")")
});
}
self.result
}
core::fmt::builders::DebugTuple::<'a, 'b>::finish_non_exhaustive pub fn finish_non_exhaustive(&mut self) -> fmt::Result {
self.result = self.result.and_then(|_| {
if self.fields > 0 {
if self.is_pretty() {
let mut slot = None;
let mut state = Default::default();
let mut writer = PadAdapter::wrap(self.fmt, &mut slot, &mut state);
writer.write_str("..\n")?;
self.fmt.write_str(")")
} else {
self.fmt.write_str(", ..)")
}
} else {
self.fmt.write_str("(..)")
}
});
self.result
}
core::fmt::builders::DebugTuple::<'a, 'b>::is_pretty fn is_pretty(&self) -> bool {
self.fmt.alternate()
}
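The `empty_name` special case in `finish` above exists so a bare 1-tuple prints as `(1,)` rather than `(1)`; named tuple structs take the ordinary path. A sketch via `Formatter::debug_tuple` (the `Wrapper` type is illustrative):

use core::fmt;

struct Wrapper(i32, &'static str);

impl fmt::Debug for Wrapper {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("Wrapper").field(&self.0).field(&self.1).finish()
    }
}

// assert_eq!(format!("{:?}", Wrapper(7, "x")), "Wrapper(7, \"x\")");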
core::fmt::builders::PadAdapter::<'buf, 'state>::wrap fn wrap<'slot, 'fmt: 'buf + 'slot>(
fmt: &'fmt mut fmt::Formatter<'_>,
slot: &'slot mut Option<Self>,
state: &'state mut PadAdapterState,
) -> fmt::Formatter<'slot> {
fmt.wrap_buf(move |buf| slot.insert(PadAdapter { buf, state }))
}
core::fmt::builders::debug_list_new pub(super) fn debug_list_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugList<'a, 'b> {
let result = fmt.write_str("[");
DebugList { inner: DebugInner { fmt, result, has_fields: false } }
}
core::fmt::builders::debug_map_new pub(super) fn debug_map_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugMap<'a, 'b> {
let result = fmt.write_str("{");
DebugMap { fmt, result, has_fields: false, has_key: false, state: Default::default() }
}
core::fmt::builders::debug_set_new pub(super) fn debug_set_new<'a, 'b>(fmt: &'a mut fmt::Formatter<'b>) -> DebugSet<'a, 'b> {
let result = fmt.write_str("{");
DebugSet { inner: DebugInner { fmt, result, has_fields: false } }
}
core::fmt::builders::debug_struct_new pub(super) fn debug_struct_new<'a, 'b>(
fmt: &'a mut fmt::Formatter<'b>,
name: &str,
) -> DebugStruct<'a, 'b> {
let result = fmt.write_str(name);
DebugStruct { fmt, result, has_fields: false }
}
core::fmt::builders::debug_tuple_new pub(super) fn debug_tuple_new<'a, 'b>(
fmt: &'a mut fmt::Formatter<'b>,
name: &str,
) -> DebugTuple<'a, 'b> {
let result = fmt.write_str(name);
DebugTuple { fmt, result, fields: 0, empty_name: name.is_empty() }
}
core::fmt::builders::from_fn pub const fn from_fn<F: Fn(&mut fmt::Formatter<'_>) -> fmt::Result>(f: F) -> FromFn<F> {
FromFn(f)
}
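A sketch of `from_fn` in use, assuming a nightly toolchain with the unstable `debug_closure_helpers` feature; the returned `FromFn` forwards the formatting traits to the closure:

#![feature(debug_closure_helpers)] // nightly-only at the time of writing
use core::fmt;

fn main() {
    let hello = fmt::from_fn(|f| write!(f, "hello from a closure"));
    assert_eq!(format!("{hello}"), "hello from a closure");
}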
core::fmt::float::<impl core::fmt::Debug for f16>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_general_debug(fmt, self)
}
core::fmt::float::<impl core::fmt::Debug for f32>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_general_debug(fmt, self)
}
core::fmt::float::<impl core::fmt::Debug for f64>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_general_debug(fmt, self)
}
core::fmt::float::<impl core::fmt::Display for f16>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_decimal_display(fmt, self)
}
core::fmt::float::<impl core::fmt::Display for f32>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_decimal_display(fmt, self)
}
core::fmt::float::<impl core::fmt::Display for f64>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_decimal_display(fmt, self)
}
core::fmt::float::<impl core::fmt::LowerExp for f16>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_exponential_common(fmt, self, false)
}
core::fmt::float::<impl core::fmt::LowerExp for f32>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_exponential_common(fmt, self, false)
}
core::fmt::float::<impl core::fmt::LowerExp for f64>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_exponential_common(fmt, self, false)
}
core::fmt::float::<impl core::fmt::UpperExp for f16>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_exponential_common(fmt, self, true)
}
core::fmt::float::<impl core::fmt::UpperExp for f32>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_exponential_common(fmt, self, true)
}
core::fmt::float::<impl core::fmt::UpperExp for f64>::fmt fn fmt(&self, fmt: &mut Formatter<'_>) -> Result {
float_to_exponential_common(fmt, self, true)
}
core::fmt::float::float_to_decimal_common_exact fn float_to_decimal_common_exact<T>(
fmt: &mut Formatter<'_>,
num: &T,
sign: flt2dec::Sign,
precision: u16,
) -> Result
where
T: flt2dec::DecodableFloat,
{
let mut buf: [MaybeUninit<u8>; 1024] = [MaybeUninit::uninit(); 1024]; // enough for f32 and f64
let mut parts: [MaybeUninit<numfmt::Part<'_>>; 4] = [MaybeUninit::uninit(); 4];
let formatted = flt2dec::to_exact_fixed_str(
flt2dec::strategy::grisu::format_exact,
*num,
sign,
precision.into(),
&mut buf,
&mut parts,
);
// SAFETY: `to_exact_fixed_str` and `format_exact` produce only ASCII characters.
unsafe { fmt.pad_formatted_parts(&formatted) }
}
core::fmt::float::float_to_decimal_common_shortest fn float_to_decimal_common_shortest<T>(
fmt: &mut Formatter<'_>,
num: &T,
sign: flt2dec::Sign,
precision: u16,
) -> Result
where
T: flt2dec::DecodableFloat,
{
// enough for f32 and f64
let mut buf: [MaybeUninit<u8>; flt2dec::MAX_SIG_DIGITS] =
[MaybeUninit::uninit(); flt2dec::MAX_SIG_DIGITS];
let mut parts: [MaybeUninit<numfmt::Part<'_>>; 4] = [MaybeUninit::uninit(); 4];
let formatted = flt2dec::to_shortest_str(
flt2dec::strategy::grisu::format_shortest,
*num,
sign,
precision.into(),
&mut buf,
&mut parts,
);
// SAFETY: `to_shortest_str` and `format_shortest` produce only ASCII characters.
unsafe { fmt.pad_formatted_parts(&formatted) }
}
core::fmt::float::float_to_decimal_display fn float_to_decimal_display<T>(fmt: &mut Formatter<'_>, num: &T) -> Result
where
T: flt2dec::DecodableFloat,
{
let force_sign = fmt.sign_plus();
let sign = match force_sign {
false => flt2dec::Sign::Minus,
true => flt2dec::Sign::MinusPlus,
};
if let Some(precision) = fmt.options.get_precision() {
float_to_decimal_common_exact(fmt, num, sign, precision)
} else {
let min_precision = 0;
float_to_decimal_common_shortest(fmt, num, sign, min_precision)
}
}
core::fmt::float::float_to_exponential_common fn float_to_exponential_common<T>(fmt: &mut Formatter<'_>, num: &T, upper: bool) -> Result
where
T: flt2dec::DecodableFloat,
{
let force_sign = fmt.sign_plus();
let sign = match force_sign {
false => flt2dec::Sign::Minus,
true => flt2dec::Sign::MinusPlus,
};
if let Some(precision) = fmt.options.get_precision() {
// 1 integral digit + `precision` fractional digits = `precision + 1` total digits
float_to_exponential_common_exact(fmt, num, sign, precision + 1, upper)
} else {
float_to_exponential_common_shortest(fmt, num, sign, upper)
}
}
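The `precision + 1` adjustment is visible from the caller's side: `{:.2e}` asks for two fractional digits, so three significant digits in total. For illustration:

fn main() {
    // 1 integral digit + 2 fractional digits = 3 significant digits.
    assert_eq!(format!("{:.2e}", 1234.5f64), "1.23e3");
    // Without a precision, the shortest round-trip representation is used.
    assert_eq!(format!("{:e}", 1234.5f64), "1.2345e3");
}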
core::fmt::float::float_to_exponential_common_exact fn float_to_exponential_common_exact<T>(
fmt: &mut Formatter<'_>,
num: &T,
sign: flt2dec::Sign,
precision: u16,
upper: bool,
) -> Result
where
T: flt2dec::DecodableFloat,
{
let mut buf: [MaybeUninit<u8>; 1024] = [MaybeUninit::uninit(); 1024]; // enough for f32 and f64
let mut parts: [MaybeUninit<numfmt::Part<'_>>; 6] = [MaybeUninit::uninit(); 6];
let formatted = flt2dec::to_exact_exp_str(
flt2dec::strategy::grisu::format_exact,
*num,
sign,
precision.into(),
upper,
&mut buf,
&mut parts,
);
// SAFETY: `to_exact_exp_str` and `format_exact` produce only ASCII characters.
unsafe { fmt.pad_formatted_parts(&formatted) }
}
core::fmt::float::float_to_exponential_common_shortest fn float_to_exponential_common_shortest<T>(
fmt: &mut Formatter<'_>,
num: &T,
sign: flt2dec::Sign,
upper: bool,
) -> Result
where
T: flt2dec::DecodableFloat,
{
// enough for f32 and f64
let mut buf: [MaybeUninit<u8>; flt2dec::MAX_SIG_DIGITS] =
[MaybeUninit::uninit(); flt2dec::MAX_SIG_DIGITS];
let mut parts: [MaybeUninit<numfmt::Part<'_>>; 6] = [MaybeUninit::uninit(); 6];
let formatted = flt2dec::to_shortest_exp_str(
flt2dec::strategy::grisu::format_shortest,
*num,
sign,
(0, 0),
upper,
&mut buf,
&mut parts,
);
// SAFETY: `to_shortest_exp_str` and `format_shortest` produce only ASCII characters.
unsafe { fmt.pad_formatted_parts(&formatted) }
}
core::fmt::float::float_to_general_debug fn float_to_general_debug<T>(fmt: &mut Formatter<'_>, num: &T) -> Result
where
T: flt2dec::DecodableFloat + GeneralFormat,
{
let force_sign = fmt.sign_plus();
let sign = match force_sign {
false => flt2dec::Sign::Minus,
true => flt2dec::Sign::MinusPlus,
};
if let Some(precision) = fmt.options.get_precision() {
// this behavior of {:.PREC?} predates exponential formatting for {:?}
float_to_decimal_common_exact(fmt, num, sign, precision)
} else {
// since there is no precision, there will be no rounding
if num.already_rounded_value_should_use_exponential() {
let upper = false;
float_to_exponential_common_shortest(fmt, num, sign, upper)
} else {
let min_precision = 1;
float_to_decimal_common_shortest(fmt, num, sign, min_precision)
}
}
}
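For illustration of the three branches above from the caller's side:

fn main() {
    // No precision: decimal with at least one fractional digit...
    assert_eq!(format!("{:?}", 10.0f64), "10.0");
    // ...unless the already-rounded value prefers exponential form.
    assert_eq!(format!("{:?}", 1e16f64), "1e16");
    // With a precision, `{:.PREC?}` always takes the decimal path.
    assert_eq!(format!("{:.3?}", 1e16f64), "10000000000000000.000");
}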
core::fmt::num::<impl core::fmt::Binary for i128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Binary for i16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Binary for i32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Binary for i64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Binary for i8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Binary for isize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Binary for u128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
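The loop above is the generic radix engine shared by the `Binary`, `Octal`, and hex impls; digits are produced least significant first into the tail of the buffer. The same technique as safe standalone code, without the `MaybeUninit` buffer (names are illustrative):

// Format `v` in the base implied by the digit table's length.
fn to_radix(mut v: u32, dig_tab: &[u8]) -> String {
    let base = dig_tab.len() as u32;
    let mut buf = [0u8; 32]; // 32 digits suffice for u32 even in base 2
    let mut offset = buf.len();
    loop {
        offset -= 1;
        buf[offset] = dig_tab[(v % base) as usize];
        v /= base;
        if v == 0 {
            break;
        }
    }
    // The digit table is ASCII, so the initialized suffix is valid UTF-8.
    String::from_utf8(buf[offset..].to_vec()).unwrap()
}

fn main() {
    assert_eq!(to_radix(10, b"01"), "1010");
    assert_eq!(to_radix(255, b"0123456789abcdef"), "ff");
}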
core::fmt::num::<impl core::fmt::Binary for u16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Binary for u32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Binary for u64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Binary for u8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Binary for usize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Debug for i128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for i16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for i32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for i64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for i8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for isize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for u128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for u16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for u32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for u64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for u8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Debug for usize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if f.debug_lower_hex() {
fmt::LowerHex::fmt(self, f)
} else if f.debug_upper_hex() {
fmt::UpperHex::fmt(self, f)
} else {
fmt::Display::fmt(self, f)
}
}
core::fmt::num::<impl core::fmt::Display for i128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// This is not a typo: `i128` is formatted via its unsigned absolute value, so the
// buffer needs the maximum number of digits of `u128`, hence `U128_MAX_DEC_N`.
let mut buf = [MaybeUninit::<u8>::uninit(); U128_MAX_DEC_N];
let is_nonnegative = *self >= 0;
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(is_nonnegative, "", self.unsigned_abs()._fmt(&mut buf)) }
}
core::fmt::num::<impl core::fmt::Display for u128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut buf = [MaybeUninit::<u8>::uninit(); U128_MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(true, "", self._fmt(&mut buf)) }
}
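`pad_integral` is also where sign-aware zero padding (`{:08}`) is applied, which is why `fmt` hands over only the digits plus an `is_nonnegative` flag rather than a pre-signed string. For illustration:

fn main() {
    // The sign stays in front of the zero fill because `pad_integral`
    // inserts the padding between the sign/prefix and the digits.
    assert_eq!(format!("{:08}", -42i128), "-0000042");
    assert_eq!(format!("{:+08}", 42u128), "+0000042");
}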
core::fmt::num::<impl core::fmt::LowerExp for i128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'e')
}
core::fmt::num::<impl core::fmt::LowerExp for u128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'e')
}
core::fmt::num::<impl core::fmt::LowerHex for i128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::LowerHex for i16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::LowerHex for i32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::LowerHex for i64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::LowerHex for i8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::LowerHex for isize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::LowerHex for u128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::LowerHex for u16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::LowerHex for u32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::LowerHex for u64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::LowerHex for u8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::LowerHex for usize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Octal for i128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Octal for i16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Octal for i32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Octal for i64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Octal for i8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Octal for isize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::Octal for u128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Octal for u16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Octal for u32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Octal for u64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Octal for u8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::Octal for usize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::UpperExp for i128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'E')
}
core::fmt::num::<impl core::fmt::UpperExp for u128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'E')
}
core::fmt::num::<impl core::fmt::UpperHex for i128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::UpperHex for i16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::UpperHex for i32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::UpperHex for i64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::UpperHex for i8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::UpperHex for isize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::$Trait::fmt(&self.cast_unsigned(), f)
}
core::fmt::num::<impl core::fmt::UpperHex for u128>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::UpperHex for u16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::UpperHex for u32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::UpperHex for u64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::UpperHex for u8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl core::fmt::UpperHex for usize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Check macro arguments at compile time.
const {
assert!($Unsigned::MIN == 0, "need unsigned");
assert!($dig_tab.is_ascii(), "need single-byte entries");
}
// ASCII digits in ascending order are used as a lookup table.
const DIG_TAB: &[u8] = $dig_tab;
const BASE: $Unsigned = DIG_TAB.len() as $Unsigned;
const MAX_DIG_N: usize = $Unsigned::MAX.ilog(BASE) as usize + 1;
// Buffer digits of self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DIG_N];
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Accumulate each digit of the number from the least
// significant to the most significant figure.
let mut remain = *self;
loop {
let digit = remain % BASE;
remain /= BASE;
offset -= 1;
// SAFETY: `remain` will reach 0 and we will break before `offset` wraps
unsafe { core::hint::assert_unchecked(offset < buf.len()) }
buf[offset].write(DIG_TAB[digit as usize]);
if remain == 0 {
break;
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
let digits = unsafe { slice_buffer_to_str(&buf, offset) };
f.pad_integral(true, $prefix, digits)
}
core::fmt::num::<impl i128>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let diff = buf.capacity() - U128_MAX_DEC_N;
// FIXME: Once const generics are better, use `NumberBufferTrait::BUF_SIZE` as generic const
// for `fmt_u128_inner`.
//
// In the meantime, we format into the slice starting at `diff` and add `diff` back to
// the returned offset so the digits still end up at the end of the buffer.
let mut offset =
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe { self.unsigned_abs()._fmt_inner(buf.buf.get_unchecked_mut(diff..)) };
// We put back the offset at the right position.
offset += diff;
// The only difference between signed and unsigned is these 4 lines.
if self < 0 {
offset -= 1;
// SAFETY: `buf` will always be big enough to contain all digits plus the minus sign.
unsafe {
buf.buf.get_unchecked_mut(offset).write(b'-');
}
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
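Usage sketch for `format_into`; `NumBuffer` and this method are unstable, and both the `int_format_into` feature name and the `core::fmt::NumBuffer` path are assumptions here, so this is a nightly-only sketch.
#![feature(int_format_into)]
use core::fmt::NumBuffer;

fn main() {
    // The buffer is sized for the longest digit string plus the minus sign.
    let mut buf = NumBuffer::new();
    let s: &str = i128::MIN.format_into(&mut buf);
    assert_eq!(s, "-170141183460469231731687303715884105728");
}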
core::fmt::num::<impl u128>::_fmt pub unsafe fn _fmt<'a>(self, buf: &'a mut [MaybeUninit<u8>]) -> &'a str {
// SAFETY: `buf` will always be big enough to contain all digits.
let offset = unsafe { self._fmt_inner(buf) };
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(buf, offset) }
}
core::fmt::num::<impl u128>::_fmt_inner unsafe fn _fmt_inner(self, buf: &mut [MaybeUninit<u8>]) -> usize {
// Optimize common-case zero, which would also need special treatment due to
// its "leading" zero.
if self == 0 {
let offset = buf.len() - 1;
buf[offset].write(b'0');
return offset;
}
// Take the 16 least-significant decimals.
let (quot_1e16, mod_1e16) = div_rem_1e16(self);
let (mut remain, mut offset) = if quot_1e16 == 0 {
(mod_1e16, U128_MAX_DEC_N)
} else {
// Write digits at buf[23..39].
enc_16lsd::<{ U128_MAX_DEC_N - 16 }>(buf, mod_1e16);
// Take another 16 decimals.
let (quot2, mod2) = div_rem_1e16(quot_1e16);
if quot2 == 0 {
(mod2, U128_MAX_DEC_N - 16)
} else {
// Write digits at buf[7..23].
enc_16lsd::<{ U128_MAX_DEC_N - 32 }>(buf, mod2);
// Quot2 has at most 7 decimals remaining after two 1e16 divisions.
(quot2 as u64, U128_MAX_DEC_N - 32)
}
};
// Format per four digits from the lookup table.
while remain > 999 {
// SAFETY: All of the decimals fit in buf due to U128_MAX_DEC_N
// and the while condition ensures at least 4 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 4) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 4;
// pull two pairs
let quad = remain % 1_00_00;
remain /= 1_00_00;
let pair1 = (quad / 100) as usize;
let pair2 = (quad % 100) as usize;
buf[offset + 0].write(DECIMAL_PAIRS[pair1 * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair1 * 2 + 1]);
buf[offset + 2].write(DECIMAL_PAIRS[pair2 * 2 + 0]);
buf[offset + 3].write(DECIMAL_PAIRS[pair2 * 2 + 1]);
}
// Format per two digits from the lookup table.
if remain > 9 {
// SAFETY: All of the decimals fit in buf due to U128_MAX_DEC_N
// and the if condition ensures at least 2 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 2) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 2;
let pair = (remain % 100) as usize;
remain /= 100;
buf[offset + 0].write(DECIMAL_PAIRS[pair * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair * 2 + 1]);
}
// Format the last remaining digit, if any.
if remain != 0 {
// SAFETY: All of the decimals fit in buf due to U128_MAX_DEC_N
// and the if condition ensures (at least) 1 more decimal.
unsafe { core::hint::assert_unchecked(offset >= 1) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 1;
// The `& 15` mask is a no-op if the compiler already sees that
// remain < 10; otherwise it avoids a bounds check on the lookup below.
let last = (remain & 15) as usize;
buf[offset].write(DECIMAL_PAIRS[last * 2 + 1]);
// not used: remain = 0;
}
offset
}
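Worked check of the chunking above, with plain `/` and `%` standing in for div_rem_1e16: the up-to-39 decimals of a u128 split into two full 16-digit chunks plus at most 7 leading digits.
fn main() {
    const D: u128 = 1_0000_0000_0000_0000; // 1e16
    let n = u128::MAX; // 39 decimal digits
    let (q1, r1) = (n / D, n % D); // r1: the 16 least-significant decimals
    let (q2, r2) = (q1 / D, q1 % D); // r2: the next 16 decimals
    assert!(q2 < 10_000_000); // at most 7 decimals remain
    assert_eq!(q2 * D * D + r2 * D + r1, n); // the three pieces reassemble n
}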
core::fmt::num::<impl u128>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let diff = buf.capacity() - U128_MAX_DEC_N;
// FIXME: Once const generics are better, use `NumberBufferTrait::BUF_SIZE` as generic const
// for `fmt_u128_inner`.
//
// In the meantime, we format into the trailing bytes of the buffer, starting at `diff`,
// so the number ends at the end of the buffer.
// SAFETY: `diff` never exceeds `buf`'s length, so `diff..` is in bounds.
unsafe { self._fmt(buf.buf.get_unchecked_mut(diff..)) }
}
core::fmt::num::div_rem_1e16 fn div_rem_1e16(n: u128) -> (u128, u64) {
const D: u128 = 1_0000_0000_0000_0000;
// The check inlines well with the caller flow.
if n < D {
return (0, n as u64);
}
// These constant values are computed with the CHOOSE_MULTIPLIER procedure
// from the Granlund & Montgomery paper, using N=128, prec=128 and d=1E16.
const M_HIGH: u128 = 76624777043294442917917351357515459181;
const SH_POST: u8 = 51;
let quot = n.widening_mul(M_HIGH).1 >> SH_POST;
let rem = n - quot * D;
(quot, rem as u64)
}
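Self-contained sketch checking the multiply-shift against plain division. Since `u128::widening_mul` is unstable, the high half of the 128x128-bit product is computed here from four 64x64 partial products; `mul_hi` is an invented helper, the constants are the ones above.
const D: u128 = 1_0000_0000_0000_0000;
const M_HIGH: u128 = 76624777043294442917917351357515459181;
const SH_POST: u8 = 51;

// Upper 128 bits of a 128x128-bit product.
fn mul_hi(a: u128, b: u128) -> u128 {
    const MASK: u128 = (1 << 64) - 1;
    let (al, ah) = (a & MASK, a >> 64);
    let (bl, bh) = (b & MASK, b >> 64);
    let cross = ah * bl + ((al * bl) >> 64);
    let cross2 = al * bh + (cross & MASK);
    ah * bh + (cross >> 64) + (cross2 >> 64)
}

fn main() {
    // Values at and above D, the range where the multiply-shift is used.
    for n in [D, D + 1, 123_456_789_012_345_678_901_234_567_890, u128::MAX] {
        assert_eq!(mul_hi(n, M_HIGH) >> SH_POST, n / D);
    }
}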
core::fmt::num::enc_16lsd fn enc_16lsd<const OFFSET: usize>(buf: &mut [MaybeUninit<u8>], n: u64) {
// Consume the least-significant decimals from a working copy.
let mut remain = n;
// Format per four digits from the lookup table.
for quad_index in (0..4).rev() {
// pull two pairs
let quad = remain % 1_00_00;
remain /= 1_00_00;
let pair1 = (quad / 100) as usize;
let pair2 = (quad % 100) as usize;
buf[quad_index * 4 + OFFSET + 0].write(DECIMAL_PAIRS[pair1 * 2 + 0]);
buf[quad_index * 4 + OFFSET + 1].write(DECIMAL_PAIRS[pair1 * 2 + 1]);
buf[quad_index * 4 + OFFSET + 2].write(DECIMAL_PAIRS[pair2 * 2 + 0]);
buf[quad_index * 4 + OFFSET + 3].write(DECIMAL_PAIRS[pair2 * 2 + 1]);
}
}
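Minimal sketch of the two-digits-per-lookup idea, assuming DECIMAL_PAIRS is the usual 200-byte table of consecutive ASCII pairs "00" through "99" (the table contents are an assumption; PAIRS and two_digits are invented names).
const PAIRS: &[u8; 200] = b"0001020304050607080910111213141516171819\
    2021222324252627282930313233343536373839\
    4041424344454647484950515253545556575859\
    6061626364656667686970717273747576777879\
    8081828384858687888990919293949596979899";

fn two_digits(n: u8) -> [u8; 2] {
    assert!(n < 100);
    // One table access yields both ASCII digits of `n`.
    [PAIRS[n as usize * 2], PAIRS[n as usize * 2 + 1]]
}

fn main() {
    assert_eq!(&two_digits(7), b"07");
    assert_eq!(&two_digits(42), b"42");
}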
core::fmt::num::imp::<impl core::fmt::Display for i16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(*self >= 0, "", self.unsigned_abs()._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
return ${concat($fmt_fn, _small)}(self.unsigned_abs() as $T, *self >= 0, f);
}
}
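Observable effect of pad_integral's sign handling (stable formatting, nothing assumed):
fn main() {
    // Zero padding is sign-aware: the fill goes after the minus sign.
    assert_eq!(format!("{:05}", -42i16), "-0042");
    // Plain width padding fills with spaces on the left for numbers.
    assert_eq!(format!("{:6}", -42i16), "   -42");
}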
core::fmt::num::imp::<impl core::fmt::Display for i32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(*self >= 0, "", self.unsigned_abs()._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
return ${concat($fmt_fn, _small)}(self.unsigned_abs() as $T, *self >= 0, f);
}
}
core::fmt::num::imp::<impl core::fmt::Display for i64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(*self >= 0, "", self.unsigned_abs()._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
return ${concat($fmt_fn, _small)}(self.unsigned_abs() as $T, *self >= 0, f);
}
}
core::fmt::num::imp::<impl core::fmt::Display for i8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(*self >= 0, "", self.unsigned_abs()._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
return ${concat($fmt_fn, _small)}(self.unsigned_abs() as $T, *self >= 0, f);
}
}
core::fmt::num::imp::<impl core::fmt::Display for isize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(*self >= 0, "", self.unsigned_abs()._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
return ${concat($fmt_fn, _small)}(self.unsigned_abs() as $T, *self >= 0, f);
}
}
core::fmt::num::imp::<impl core::fmt::Display for u16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(true, "", self._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
${concat($fmt_fn, _small)}(*self as $T, true, f)
}
}
core::fmt::num::imp::<impl core::fmt::Display for u32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(true, "", self._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
${concat($fmt_fn, _small)}(*self as $T, true, f)
}
}
core::fmt::num::imp::<impl core::fmt::Display for u64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(true, "", self._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
${concat($fmt_fn, _small)}(*self as $T, true, f)
}
}
core::fmt::num::imp::<impl core::fmt::Display for u8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(true, "", self._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
${concat($fmt_fn, _small)}(*self as $T, true, f)
}
}
core::fmt::num::imp::<impl core::fmt::Display for usize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
#[cfg(not(feature = "optimize_for_size"))]
{
const MAX_DEC_N: usize = $Unsigned::MAX.ilog10() as usize + 1;
// Buffer decimals for self with right alignment.
let mut buf = [MaybeUninit::<u8>::uninit(); MAX_DEC_N];
// SAFETY: `buf` is always big enough to contain all the digits.
unsafe { f.pad_integral(true, "", self._fmt(&mut buf)) }
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
${concat($fmt_fn, _small)}(*self as $T, true, f)
}
}
core::fmt::num::imp::<impl core::fmt::LowerExp for i16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'e')
}
core::fmt::num::imp::<impl core::fmt::LowerExp for i32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'e')
}
core::fmt::num::imp::<impl core::fmt::LowerExp for i64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'e')
}
core::fmt::num::imp::<impl core::fmt::LowerExp for i8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'e')
}
core::fmt::num::imp::<impl core::fmt::LowerExp for isize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'e')
}
core::fmt::num::imp::<impl core::fmt::LowerExp for u16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'e')
}
core::fmt::num::imp::<impl core::fmt::LowerExp for u32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'e')
}
core::fmt::num::imp::<impl core::fmt::LowerExp for u64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'e')
}
core::fmt::num::imp::<impl core::fmt::LowerExp for u8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'e')
}
core::fmt::num::imp::<impl core::fmt::LowerExp for usize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'e')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for i16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'E')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for i32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'E')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for i64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'E')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for i8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'E')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for isize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, self.unsigned_abs() as $T, *self >= 0, b'E')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for u16>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'E')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for u32>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'E')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for u64>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'E')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for u8>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'E')
}
core::fmt::num::imp::<impl core::fmt::UpperExp for usize>::fmt fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
$fmt_fn(f, *self as $T, true, b'E')
}
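The only thing the LowerExp/UpperExp expansions vary is the exponent marker byte handed to the shared formatter, which shows up directly in the output:
fn main() {
    assert_eq!(format!("{:e}", 1234u32), "1.234e3");
    assert_eq!(format!("{:E}", -1234i32), "-1.234E3");
}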
core::fmt::num::imp::<impl i16>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let mut offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self.unsigned_abs()._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self.unsigned_abs() as $T, &mut buf.buf);
}
// The only difference between signed and unsigned is these 4 lines.
if self < 0 {
offset -= 1;
buf.buf[offset].write(b'-');
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::imp::<impl i32>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let mut offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self.unsigned_abs()._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self.unsigned_abs() as $T, &mut buf.buf);
}
// The only difference between signed and unsigned is these 4 lines.
if self < 0 {
offset -= 1;
buf.buf[offset].write(b'-');
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::imp::<impl i64>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let mut offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self.unsigned_abs()._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self.unsigned_abs() as $T, &mut buf.buf);
}
// The only difference between signed and unsigned is these 4 lines.
if self < 0 {
offset -= 1;
buf.buf[offset].write(b'-');
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::imp::<impl i8>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let mut offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self.unsigned_abs()._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self.unsigned_abs() as $T, &mut buf.buf);
}
// The only difference between signed and unsigned is these 4 lines.
if self < 0 {
offset -= 1;
buf.buf[offset].write(b'-');
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::imp::<impl isize>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let mut offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self.unsigned_abs()._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self.unsigned_abs() as $T, &mut buf.buf);
}
// The only difference between signed and unsigned is these 4 lines.
if self < 0 {
offset -= 1;
buf.buf[offset].write(b'-');
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::imp::<impl u16>::_fmt pub unsafe fn _fmt<'a>(self, buf: &'a mut [MaybeUninit::<u8>]) -> &'a str {
// SAFETY: `buf` will always be big enough to contain all digits.
let offset = unsafe { self._fmt_inner(buf) };
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(buf, offset) }
}
core::fmt::num::imp::<impl u16>::_fmt_inner unsafe fn _fmt_inner(self, buf: &mut [MaybeUninit::<u8>]) -> usize {
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Consume the least-significant decimals from a working copy.
let mut remain = self;
// Format per four digits from the lookup table.
// Four digits need a 16-bit $Unsigned or wider.
while size_of::<Self>() > 1 && remain > 999.try_into().expect("branch is not hit for types that cannot fit 999 (u8)") {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the while condition ensures at least 4 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 4) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 4;
// pull two pairs
let scale: Self = 1_00_00.try_into().expect("branch is not hit for types that cannot fit 1E4 (u8)");
let quad = remain % scale;
remain /= scale;
let pair1 = (quad / 100) as usize;
let pair2 = (quad % 100) as usize;
buf[offset + 0].write(DECIMAL_PAIRS[pair1 * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair1 * 2 + 1]);
buf[offset + 2].write(DECIMAL_PAIRS[pair2 * 2 + 0]);
buf[offset + 3].write(DECIMAL_PAIRS[pair2 * 2 + 1]);
}
// Format per two digits from the lookup table.
if remain > 9 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures at least 2 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 2) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 2;
let pair = (remain % 100) as usize;
remain /= 100;
buf[offset + 0].write(DECIMAL_PAIRS[pair * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair * 2 + 1]);
}
// Format the last remaining digit, if any.
if remain != 0 || self == 0 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures (at least) 1 more decimal.
unsafe { core::hint::assert_unchecked(offset >= 1) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 1;
// The `& 15` mask is a no-op if the compiler already sees that
// remain < 10; otherwise it avoids a bounds check on the lookup below.
let last = (remain & 15) as usize;
buf[offset].write(DECIMAL_PAIRS[last * 2 + 1]);
// not used: remain = 0;
}
offset
}
core::fmt::num::imp::<impl u16>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self as $T, &mut buf.buf);
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::imp::<impl u32>::_fmt pub unsafe fn _fmt<'a>(self, buf: &'a mut [MaybeUninit::<u8>]) -> &'a str {
// SAFETY: `buf` will always be big enough to contain all digits.
let offset = unsafe { self._fmt_inner(buf) };
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(buf, offset) }
}
core::fmt::num::imp::<impl u32>::_fmt_inner unsafe fn _fmt_inner(self, buf: &mut [MaybeUninit::<u8>]) -> usize {
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Consume the least-significant decimals from a working copy.
let mut remain = self;
// Format per four digits from the lookup table.
// Four digits need a 16-bit $Unsigned or wider.
while size_of::<Self>() > 1 && remain > 999.try_into().expect("branch is not hit for types that cannot fit 999 (u8)") {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the while condition ensures at least 4 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 4) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 4;
// pull two pairs
let scale: Self = 1_00_00.try_into().expect("branch is not hit for types that cannot fit 1E4 (u8)");
let quad = remain % scale;
remain /= scale;
let pair1 = (quad / 100) as usize;
let pair2 = (quad % 100) as usize;
buf[offset + 0].write(DECIMAL_PAIRS[pair1 * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair1 * 2 + 1]);
buf[offset + 2].write(DECIMAL_PAIRS[pair2 * 2 + 0]);
buf[offset + 3].write(DECIMAL_PAIRS[pair2 * 2 + 1]);
}
// Format per two digits from the lookup table.
if remain > 9 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures at least 2 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 2) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 2;
let pair = (remain % 100) as usize;
remain /= 100;
buf[offset + 0].write(DECIMAL_PAIRS[pair * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair * 2 + 1]);
}
// Format the last remaining digit, if any.
if remain != 0 || self == 0 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures (at least) 1 more decimal.
unsafe { core::hint::assert_unchecked(offset >= 1) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 1;
// The `& 15` mask is a no-op if the compiler already sees that
// remain < 10; otherwise it avoids a bounds check on the lookup below.
let last = (remain & 15) as usize;
buf[offset].write(DECIMAL_PAIRS[last * 2 + 1]);
// not used: remain = 0;
}
offset
}
core::fmt::num::imp::<impl u32>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self as $T, &mut buf.buf);
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::imp::<impl u64>::_fmt pub unsafe fn _fmt<'a>(self, buf: &'a mut [MaybeUninit::<u8>]) -> &'a str {
// SAFETY: `buf` will always be big enough to contain all digits.
let offset = unsafe { self._fmt_inner(buf) };
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(buf, offset) }
}
core::fmt::num::imp::<impl u64>::_fmt_inner unsafe fn _fmt_inner(self, buf: &mut [MaybeUninit::<u8>]) -> usize {
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Consume the least-significant decimals from a working copy.
let mut remain = self;
// Format per four digits from the lookup table.
// Four digits need a 16-bit $Unsigned or wider.
while size_of::<Self>() > 1 && remain > 999.try_into().expect("branch is not hit for types that cannot fit 999 (u8)") {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the while condition ensures at least 4 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 4) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 4;
// pull two pairs
let scale: Self = 1_00_00.try_into().expect("branch is not hit for types that cannot fit 1E4 (u8)");
let quad = remain % scale;
remain /= scale;
let pair1 = (quad / 100) as usize;
let pair2 = (quad % 100) as usize;
buf[offset + 0].write(DECIMAL_PAIRS[pair1 * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair1 * 2 + 1]);
buf[offset + 2].write(DECIMAL_PAIRS[pair2 * 2 + 0]);
buf[offset + 3].write(DECIMAL_PAIRS[pair2 * 2 + 1]);
}
// Format per two digits from the lookup table.
if remain > 9 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures at least 2 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 2) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 2;
let pair = (remain % 100) as usize;
remain /= 100;
buf[offset + 0].write(DECIMAL_PAIRS[pair * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair * 2 + 1]);
}
// Format the last remaining digit, if any.
if remain != 0 || self == 0 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures (at least) 1 more decimal.
unsafe { core::hint::assert_unchecked(offset >= 1) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 1;
// The `& 15` mask is a no-op if the compiler already sees that
// remain < 10; otherwise it avoids a bounds check on the lookup below.
let last = (remain & 15) as usize;
buf[offset].write(DECIMAL_PAIRS[last * 2 + 1]);
// not used: remain = 0;
}
offset
}
core::fmt::num::imp::<impl u64>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self as $T, &mut buf.buf);
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::imp::<impl u8>::_fmt pub unsafe fn _fmt<'a>(self, buf: &'a mut [MaybeUninit::<u8>]) -> &'a str {
// SAFETY: `buf` will always be big enough to contain all digits.
let offset = unsafe { self._fmt_inner(buf) };
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(buf, offset) }
}
core::fmt::num::imp::<impl u8>::_fmt_inner unsafe fn _fmt_inner(self, buf: &mut [MaybeUninit::<u8>]) -> usize {
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Consume the least-significant decimals from a working copy.
let mut remain = self;
// Format per four digits from the lookup table.
// Four digits need a 16-bit $Unsigned or wider.
while size_of::<Self>() > 1 && remain > 999.try_into().expect("branch is not hit for types that cannot fit 999 (u8)") {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the while condition ensures at least 4 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 4) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 4;
// pull two pairs
let scale: Self = 1_00_00.try_into().expect("branch is not hit for types that cannot fit 1E4 (u8)");
let quad = remain % scale;
remain /= scale;
let pair1 = (quad / 100) as usize;
let pair2 = (quad % 100) as usize;
buf[offset + 0].write(DECIMAL_PAIRS[pair1 * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair1 * 2 + 1]);
buf[offset + 2].write(DECIMAL_PAIRS[pair2 * 2 + 0]);
buf[offset + 3].write(DECIMAL_PAIRS[pair2 * 2 + 1]);
}
// Format per two digits from the lookup table.
if remain > 9 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures at least 2 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 2) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 2;
let pair = (remain % 100) as usize;
remain /= 100;
buf[offset + 0].write(DECIMAL_PAIRS[pair * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair * 2 + 1]);
}
// Format the last remaining digit, if any.
if remain != 0 || self == 0 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures (at least) 1 more decimal.
unsafe { core::hint::assert_unchecked(offset >= 1) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 1;
// The `& 15` mask is a no-op if the compiler already sees that
// remain < 10; otherwise it avoids a bounds check on the lookup below.
let last = (remain & 15) as usize;
buf[offset].write(DECIMAL_PAIRS[last * 2 + 1]);
// not used: remain = 0;
}
offset
}
core::fmt::num::imp::<impl u8>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self as $T, &mut buf.buf);
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::imp::<impl usize>::_fmt pub unsafe fn _fmt<'a>(self, buf: &'a mut [MaybeUninit::<u8>]) -> &'a str {
// SAFETY: `buf` will always be big enough to contain all digits.
let offset = unsafe { self._fmt_inner(buf) };
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(buf, offset) }
}
core::fmt::num::imp::<impl usize>::_fmt_inner unsafe fn _fmt_inner(self, buf: &mut [MaybeUninit::<u8>]) -> usize {
// Count the number of bytes in buf that are not initialized.
let mut offset = buf.len();
// Consume the least-significant decimals from a working copy.
let mut remain = self;
// Format per four digits from the lookup table.
// Four digits need a 16-bit $Unsigned or wider.
while size_of::<Self>() > 1 && remain > 999.try_into().expect("branch is not hit for types that cannot fit 999 (u8)") {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the while condition ensures at least 4 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 4) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 4;
// pull two pairs
let scale: Self = 1_00_00.try_into().expect("branch is not hit for types that cannot fit 1E4 (u8)");
let quad = remain % scale;
remain /= scale;
let pair1 = (quad / 100) as usize;
let pair2 = (quad % 100) as usize;
buf[offset + 0].write(DECIMAL_PAIRS[pair1 * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair1 * 2 + 1]);
buf[offset + 2].write(DECIMAL_PAIRS[pair2 * 2 + 0]);
buf[offset + 3].write(DECIMAL_PAIRS[pair2 * 2 + 1]);
}
// Format per two digits from the lookup table.
if remain > 9 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures at least 2 more decimals.
unsafe { core::hint::assert_unchecked(offset >= 2) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 2;
let pair = (remain % 100) as usize;
remain /= 100;
buf[offset + 0].write(DECIMAL_PAIRS[pair * 2 + 0]);
buf[offset + 1].write(DECIMAL_PAIRS[pair * 2 + 1]);
}
// Format the last remaining digit, if any.
if remain != 0 || self == 0 {
// SAFETY: All of the decimals fit in buf due to MAX_DEC_N
// and the if condition ensures (at least) 1 more decimal.
unsafe { core::hint::assert_unchecked(offset >= 1) }
// SAFETY: The offset counts down from its initial buf.len()
// without underflow due to the previous precondition.
unsafe { core::hint::assert_unchecked(offset <= buf.len()) }
offset -= 1;
// The `& 15` mask is a no-op if the compiler already sees that
// remain < 10; otherwise it avoids a bounds check on the lookup below.
let last = (remain & 15) as usize;
buf[offset].write(DECIMAL_PAIRS[last * 2 + 1]);
// not used: remain = 0;
}
offset
}
core::fmt::num::imp::<impl usize>::format_into pub fn format_into(self, buf: &mut NumBuffer<Self>) -> &str {
let offset;
#[cfg(not(feature = "optimize_for_size"))]
// SAFETY: `buf` will always be big enough to contain all digits.
unsafe {
offset = self._fmt_inner(&mut buf.buf);
}
#[cfg(feature = "optimize_for_size")]
{
// Lossless conversion (with as) is asserted at the top of
// this macro.
offset = ${concat($fmt_fn, _in_buf_small)}(self as $T, &mut buf.buf);
}
// SAFETY: Starting from `offset`, all elements of the slice have been set.
unsafe { slice_buffer_to_str(&buf.buf, offset) }
}
core::fmt::num::slice_buffer_to_str unsafe fn slice_buffer_to_str(buf: &[MaybeUninit<u8>], offset: usize) -> &str {
// SAFETY: `offset` is always between 0 and `buf`'s length (inclusive).
let written = unsafe { buf.get_unchecked(offset..) };
// SAFETY: (`assume_init_ref`) every byte of `buf` from `offset` onward has been set.
// SAFETY: (`from_utf8_unchecked`) the writes only use ASCII from the lookup tables.
unsafe { str::from_utf8_unchecked(written.assume_init_ref()) }
}
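A stable-Rust sketch of the same tail-of-buffer conversion (the slice assume_init_ref used above is unstable); tail_to_str is an invented name.
use core::mem::MaybeUninit;

/// # Safety
/// Every element of `buf[offset..]` must have been initialized with an ASCII byte.
unsafe fn tail_to_str(buf: &[MaybeUninit<u8>], offset: usize) -> &str {
    let written = &buf[offset..];
    // SAFETY: the caller guarantees these bytes are initialized.
    let bytes = unsafe { core::slice::from_raw_parts(written.as_ptr().cast::<u8>(), written.len()) };
    // SAFETY: the caller guarantees ASCII, which is valid UTF-8.
    unsafe { core::str::from_utf8_unchecked(bytes) }
}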
core::fmt::num_buffer::NumBuffer::<T>::capacity pub const fn capacity(&self) -> usize {
self.buf.len()
}
core::fmt::num_buffer::NumBuffer::<T>::new pub const fn new() -> Self {
// FIXME: Once the const generics feature is working, use `T::BUF_SIZE` instead of 40.
NumBuffer { buf: [MaybeUninit::<u8>::uninit(); 40], phantom: core::marker::PhantomData }
}
core::fmt::pointer_fmt_inner pub(crate) fn pointer_fmt_inner(ptr_addr: usize, f: &mut Formatter<'_>) -> Result {
let old_options = f.options;
// The alternate flag is already treated by LowerHex as being special:
// it denotes whether to prefix with 0x. We use it to work out whether
// or not to zero extend, and then unconditionally set it to get the
// prefix.
if f.options.get_alternate() {
f.options.sign_aware_zero_pad(true);
if f.options.get_width().is_none() {
f.options.width(Some((usize::BITS / 4) as u16 + 2));
}
}
f.options.alternate(true);
let ret = LowerHex::fmt(&ptr_addr, f);
f.options = old_options;
ret
}
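Observable effect of the flag juggling above (stable formatting):
fn main() {
    let x = 0u8;
    // Pointers are always 0x-prefixed...
    assert!(format!("{:p}", &x).starts_with("0x"));
    // ...and `{:#p}` additionally zero-extends to the full pointer width.
    assert_eq!(format!("{:#p}", &x).len(), (usize::BITS / 4) as usize + 2);
}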
core::fmt::rt::Argument::<'_>::as_u16 pub(super) const fn as_u16(&self) -> Option<u16> {
match self.ty {
ArgumentType::Count(count) => Some(count),
ArgumentType::Placeholder { .. } => None,
}
}
core::fmt::rt::Argument::<'_>::from_usize pub const fn from_usize(x: &usize) -> Argument<'_> {
if *x > u16::MAX as usize {
panic!("Formatting argument out of range");
}
Argument { ty: ArgumentType::Count(*x as u16) }
}
core::fmt::rt::Argument::<'_>::new_binary pub const fn new_binary<T: Binary>(x: &T) -> Argument<'_> {
argument_new!(T, x, <T as Binary>::fmt)
}
core::fmt::rt::Argument::<'_>::new_debug pub const fn new_debug<T: Debug>(x: &T) -> Argument<'_> {
argument_new!(T, x, <T as Debug>::fmt)
}
core::fmt::rt::Argument::<'_>::new_display pub const fn new_display<T: Display>(x: &T) -> Argument<'_> {
argument_new!(T, x, <T as Display>::fmt)
}
core::fmt::rt::Argument::<'_>::new_lower_exp pub const fn new_lower_exp<T: LowerExp>(x: &T) -> Argument<'_> {
argument_new!(T, x, <T as LowerExp>::fmt)
}
core::fmt::rt::Argument::<'_>::new_lower_hex pub const fn new_lower_hex<T: LowerHex>(x: &T) -> Argument<'_> {
argument_new!(T, x, <T as LowerHex>::fmt)
}
core::fmt::rt::Argument::<'_>::new_octal pub const fn new_octal<T: Octal>(x: &T) -> Argument<'_> {
argument_new!(T, x, <T as Octal>::fmt)
}
core::fmt::rt::Argument::<'_>::new_pointer pub const fn new_pointer<T: Pointer>(x: &T) -> Argument<'_> {
argument_new!(T, x, <T as Pointer>::fmt)
}
core::fmt::rt::Argument::<'_>::new_upper_exp pub const fn new_upper_exp<T: UpperExp>(x: &T) -> Argument<'_> {
argument_new!(T, x, <T as UpperExp>::fmt)
}
core::fmt::rt::Argument::<'_>::new_upper_hex pub const fn new_upper_hex<T: UpperHex>(x: &T) -> Argument<'_> {
argument_new!(T, x, <T as UpperHex>::fmt)
}
core::fmt::write pub fn write(output: &mut dyn Write, fmt: Arguments<'_>) -> Result {
if let Some(s) = fmt.as_str() {
return output.write_str(s);
}
let mut template = fmt.template;
let args = fmt.args;
let mut arg_index = 0;
// See comment on `fmt::Arguments` for the details of how the template is encoded.
// This must match the encoding from `expand_format_args` in
// compiler/rustc_ast_lowering/src/format.rs.
loop {
// SAFETY: We can assume the template is valid.
let n = unsafe {
let n = template.read();
template = template.add(1);
n
};
if n == 0 {
// End of template.
return Ok(());
} else if n < 0x80 {
// Literal string piece of length `n`.
// SAFETY: We can assume the strings in the template are valid.
let s = unsafe {
let s = crate::str::from_raw_parts(template.as_ptr(), n as usize);
template = template.add(n as usize);
s
};
output.write_str(s)?;
} else if n == 0x80 {
// Literal string piece with a 16-bit length.
// SAFETY: We can assume the strings in the template are valid.
let s = unsafe {
let len = usize::from(u16::from_le_bytes(template.cast_array().read()));
template = template.add(2);
let s = crate::str::from_raw_parts(template.as_ptr(), len);
template = template.add(len);
s
};
output.write_str(s)?;
} else if n == 0xC0 {
// Placeholder for next argument with default options.
//
// Having this as a separate case improves performance for the common case.
// SAFETY: We can assume the template only refers to arguments that exist.
unsafe {
args.add(arg_index)
.as_ref()
.fmt(&mut Formatter::new(output, FormattingOptions::new()))?;
}
arg_index += 1;
} else {
// SAFETY: We can assume the template is valid.
unsafe { assert_unchecked(n > 0xC0) };
// Placeholder with custom options.
let mut opt = FormattingOptions::new();
// SAFETY: We can assume the template is valid.
unsafe {
if n & 1 != 0 {
opt.flags = u32::from_le_bytes(template.cast_array().read());
template = template.add(4);
}
if n & 2 != 0 {
opt.width = u16::from_le_bytes(template.cast_array().read());
template = template.add(2);
}
if n & 4 != 0 {
opt.precision = u16::from_le_bytes(template.cast_array().read());
template = template.add(2);
}
if n & 8 != 0 {
arg_index = usize::from(u16::from_le_bytes(template.cast_array().read()));
template = template.add(2);
}
}
if n & 16 != 0 {
// Dynamic width from a usize argument.
// SAFETY: We can assume the template only refers to arguments that exist.
unsafe {
opt.width = args.add(opt.width as usize).as_ref().as_u16().unwrap_unchecked();
}
}
if n & 32 != 0 {
// Dynamic precision from a usize argument.
// SAFETY: We can assume the template only refers to arguments that exist.
unsafe {
opt.precision =
args.add(opt.precision as usize).as_ref().as_u16().unwrap_unchecked();
}
}
// SAFETY: We can assume the template only refers to arguments that exist.
unsafe {
args.add(arg_index).as_ref().fmt(&mut Formatter::new(output, opt))?;
}
arg_index += 1;
}
}
}
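How this interpreter is normally reached: format_args! builds the template and fmt::write drives it into any fmt::Write sink (all stable API; Sink is an invented type).
use core::fmt;

struct Sink(String);

impl fmt::Write for Sink {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        self.0.push_str(s);
        Ok(())
    }
}

fn main() {
    let mut sink = Sink(String::new());
    fmt::write(&mut sink, format_args!("{} + {} = {:04}", 1, 2, 1 + 2)).unwrap();
    assert_eq!(sink.0, "1 + 2 = 0003");
}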
core::hash::Hash::hash_slice fn hash_slice<H: Hasher>(data: &[Self], state: &mut H)
where
Self: Sized,
{
for piece in data {
piece.hash(state)
}
}
core::hash::Hasher::write_i128 fn write_i128(&mut self, i: i128) {
self.write_u128(i as u128)
}
core::hash::Hasher::write_i16 fn write_i16(&mut self, i: i16) {
self.write_u16(i as u16)
}
core::hash::Hasher::write_i32 fn write_i32(&mut self, i: i32) {
self.write_u32(i as u32)
}
core::hash::Hasher::write_i64 fn write_i64(&mut self, i: i64) {
self.write_u64(i as u64)
}
core::hash::Hasher::write_i8 fn write_i8(&mut self, i: i8) {
self.write_u8(i as u8)
}
core::hash::Hasher::write_isize fn write_isize(&mut self, i: isize) {
self.write_usize(i as usize)
}
core::hash::Hasher::write_length_prefix fn write_length_prefix(&mut self, len: usize) {
self.write_usize(len);
}
core::hash::Hasher::write_str fn write_str(&mut self, s: &str) {
self.write(s.as_bytes());
self.write_u8(0xff);
}
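The trailing 0xff, a byte that never occurs in UTF-8, keeps string hashing prefix-free; without a separator, ("ab", "c") and ("a", "bc") would feed identical byte streams. A small demonstration (hash_of is an invented helper):
use std::hash::{DefaultHasher, Hash, Hasher};

fn hash_of(v: impl Hash) -> u64 {
    let mut h = DefaultHasher::new();
    v.hash(&mut h);
    h.finish()
}

fn main() {
    assert_ne!(hash_of(("ab", "c")), hash_of(("a", "bc")));
}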
core::hash::Hasher::write_u128 fn write_u128(&mut self, i: u128) {
self.write(&i.to_ne_bytes())
}
core::hash::Hasher::write_u16 fn write_u16(&mut self, i: u16) {
self.write(&i.to_ne_bytes())
}
core::hash::Hasher::write_u32 fn write_u32(&mut self, i: u32) {
self.write(&i.to_ne_bytes())
}
core::hash::Hasher::write_u64 fn write_u64(&mut self, i: u64) {
self.write(&i.to_ne_bytes())
}
core::hash::Hasher::write_u8 fn write_u8(&mut self, i: u8) {
self.write(&[i])
}
core::hash::Hasher::write_usize fn write_usize(&mut self, i: usize) {
self.write(&i.to_ne_bytes())
}
core::hash::impls::<impl core::hash::Hash for &T>::hash fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
core::hash::impls::<impl core::hash::Hash for &mut T>::hash fn hash<H: Hasher>(&self, state: &mut H) {
(**self).hash(state);
}
core::hash::impls::<impl core::hash::Hash for ()>::hash fn hash<H: Hasher>(&self, _state: &mut H) {}
core::hash::impls::<impl core::hash::Hash for (T, B)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C, D)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C, D, E)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C, D, E, F)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C, D, E, F, G)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C, D, E, F, G, H)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C, D, E, F, G, H, I)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C, D, E, F, G, H, I, J)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C, D, E, F, G, H, I, J, K)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T, B, C, D, E, F, G, H, I, J, K, L)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for (T,)>::hash fn hash<S: Hasher>(&self, state: &mut S) {
let ($(ref $name,)+) = *self;
$($name.hash(state);)+
}
core::hash::impls::<impl core::hash::Hash for *const T>::hash fn hash<H: Hasher>(&self, state: &mut H) {
let (address, metadata) = self.to_raw_parts();
state.write_usize(address.addr());
metadata.hash(state);
}
core::hash::impls::<impl core::hash::Hash for *mut T>::hash fn hash<H: Hasher>(&self, state: &mut H) {
let (address, metadata) = self.to_raw_parts();
state.write_usize(address.addr());
metadata.hash(state);
}
core::hash::impls::<impl core::hash::Hash for [T]>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.write_length_prefix(self.len());
Hash::hash_slice(self, state)
}
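Same idea for slices via write_length_prefix: moving an element across the boundary of adjacent slices changes the hashed stream (hash_of as in the previous sketch).
use std::hash::{DefaultHasher, Hash, Hasher};

fn hash_of(v: impl Hash) -> u64 {
    let mut h = DefaultHasher::new();
    v.hash(&mut h);
    h.finish()
}

fn main() {
    let (a, b): (&[u8], &[u8]) = (&[1, 2], &[3]);
    let (c, d): (&[u8], &[u8]) = (&[1], &[2, 3]);
    assert_ne!(hash_of((a, b)), hash_of((c, d)));
}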
core::hash::impls::<impl core::hash::Hash for bool>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.write_u8(*self as u8)
}
core::hash::impls::<impl core::hash::Hash for char>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.write_u32(*self as u32)
}
core::hash::impls::<impl core::hash::Hash for i128>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for i128>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
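Standalone sketch of the reinterpretation in the macro body, restricted to a concrete padding-free type so the safety argument stays local (u16_slice_bytes is an invented name).
fn u16_slice_bytes(data: &[u16]) -> &[u8] {
    // SAFETY: `u16` has no padding, the byte view spans exactly the same
    // memory as `data`, and its size cannot exceed `isize::MAX`.
    unsafe { core::slice::from_raw_parts(data.as_ptr().cast::<u8>(), size_of_val(data)) }
}

fn main() {
    assert_eq!(u16_slice_bytes(&[0x0102, 0x0304]).len(), 4);
}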
core::hash::impls::<impl core::hash::Hash for i16>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for i16>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for i32>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for i32>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for i64>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for i64>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for i8>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for i8>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for isize>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for isize>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for str>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.write_str(self);
}
core::hash::impls::<impl core::hash::Hash for u128>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for u128>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for u16>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for u16>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for u32>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for u32>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for u64>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for u64>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for u8>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for u8>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hash::impls::<impl core::hash::Hash for usize>::hash fn hash<H: Hasher>(&self, state: &mut H) {
state.$meth(*self)
}
core::hash::impls::<impl core::hash::Hash for usize>::hash_slice fn hash_slice<H: Hasher>(data: &[$ty], state: &mut H) {
let newlen = size_of_val(data);
let ptr = data.as_ptr() as *const u8;
// SAFETY: `ptr` is valid and aligned, as this macro is only used
// for numeric primitives which have no padding. The new slice only
// spans across `data` and is never mutated, and its total size is the
// same as the original `data` so it can't be over `isize::MAX`.
state.write(unsafe { slice::from_raw_parts(ptr, newlen) })
}
core::hint::assert_unchecked pub const unsafe fn assert_unchecked(cond: bool) {
// SAFETY: The caller promised `cond` is true.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"hint::assert_unchecked must never be called when the condition is false",
(cond: bool = cond) => cond,
);
crate::intrinsics::assume(cond);
}
}
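Illustrative sketch (not part of the listed source; `first_half` is a hypothetical caller): the caller must itself establish the condition before issuing the hint, since a false condition is immediate undefined behavior.
fn first_half(v: &[u8], idx: usize) -> u8 {
    assert!(idx < v.len() / 2);
    // SAFETY: the assert above established `idx < v.len() / 2 <= v.len()`.
    unsafe { core::hint::assert_unchecked(idx < v.len()) };
    v[idx] // the optimizer may now elide this bounds check
}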
core::intrinsics::fallback::wide_mul_u128 const fn wide_mul_u128(a: u128, b: u128) -> (u128, u128) {
#[inline]
const fn to_low_high(x: u128) -> [u128; 2] {
const MASK: u128 = u64::MAX as _;
[x & MASK, x >> 64]
}
#[inline]
const fn from_low_high(x: [u128; 2]) -> u128 {
x[0] | (x[1] << 64)
}
#[inline]
const fn scalar_mul(low_high: [u128; 2], k: u128) -> [u128; 3] {
let [x, c] = to_low_high(k * low_high[0]);
let [y, z] = to_low_high(k * low_high[1] + c);
[x, y, z]
}
let a = to_low_high(a);
let b = to_low_high(b);
let low = scalar_mul(a, b[0]);
let high = scalar_mul(a, b[1]);
let r0 = low[0];
let [r1, c] = to_low_high(low[1] + high[0]);
let [r2, c] = to_low_high(low[2] + high[1] + c);
let r3 = high[2] + c;
(from_low_high([r0, r1]), from_low_high([r2, r3]))
}
core::intrinsics::fallback::wide_mul_u128::from_low_high const fn from_low_high(x: [u128; 2]) -> u128 {
x[0] | (x[1] << 64)
}
core::intrinsics::fallback::wide_mul_u128::scalar_mul const fn scalar_mul(low_high: [u128; 2], k: u128) -> [u128; 3] {
let [x, c] = to_low_high(k * low_high[0]);
let [y, z] = to_low_high(k * low_high[1] + c);
[x, y, z]
}
core::intrinsics::fallback::wide_mul_u128::to_low_high const fn to_low_high(x: u128) -> [u128; 2] {
const MASK: u128 = u64::MAX as _;
[x & MASK, x >> 64]
}
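Illustrative sketch (assuming the crate-private `wide_mul_u128` were callable): a product that fits in 128 bits must land entirely in the low half returned first, which checks the limb bookkeeping above.
fn check(a: u64, b: u64) {
    let (low, high) = wide_mul_u128(a as u128, b as u128);
    assert_eq!(low, (a as u128) * (b as u128)); // fits in the low half
    assert_eq!(high, 0);
}
check(u64::MAX, u64::MAX); // (2^64 - 1)^2 < 2^128, so `high` stays 0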
core::intrinsics::likely pub const fn likely(b: bool) -> bool {
if b {
true
} else {
cold_path();
false
}
}
core::intrinsics::ptr_guaranteed_cmp pub const fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8 {
(ptr == other) as u8
}
core::intrinsics::rotate_left pub const fn rotate_left<T: [const] fallback::FunnelShift>(x: T, shift: u32) -> T {
// Make sure to call the intrinsic for `funnel_shl`, not the fallback impl.
// SAFETY: we modulo `shift` so that the result is definitely less than the size of
// `T` in bits.
unsafe { unchecked_funnel_shl(x, x, shift % (mem::size_of::<T>() as u32 * 8)) }
}
core::intrinsics::rotate_right pub const fn rotate_right<T: [const] fallback::FunnelShift>(x: T, shift: u32) -> T {
// Make sure to call the intrinsic for `funnel_shr`, not the fallback impl.
// SAFETY: we modulo `shift` so that the result is definitely less than the size of
// `T` in bits.
unsafe { unchecked_funnel_shr(x, x, shift % (mem::size_of::<T>() as u32 * 8)) }
}
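For reference, the modulo above makes out-of-range rotation amounts wrap, as the stable per-type wrappers demonstrate:
assert_eq!(0b1000_0001u8.rotate_left(1), 0b0000_0011);
assert_eq!(0b1000_0001u8.rotate_left(9), 0b0000_0011); // 9 % 8 == 1
assert_eq!(0b0000_0011u8.rotate_right(1), 0b1000_0001);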
core::intrinsics::typed_swap_nonoverlapping pub const unsafe fn typed_swap_nonoverlapping<T>(x: *mut T, y: *mut T) {
// SAFETY: The caller provided single non-overlapping items behind
// pointers, so swapping them with `count: 1` is fine.
unsafe { ptr::swap_nonoverlapping(x, y, 1) };
}
core::intrinsics::unlikely pub const fn unlikely(b: bool) -> bool {
if b {
cold_path();
true
} else {
false
}
}
core::iter::adapters::chain::Chain::<A, B>::new pub(in super::super) fn new(a: A, b: B) -> Chain<A, B> {
Chain { a: Some(a), b: Some(b) }
}
core::iter::adapters::chain::and_then_or_clear fn and_then_or_clear<T, U>(opt: &mut Option<T>, f: impl FnOnce(&mut T) -> Option<U>) -> Option<U> {
let x = f(opt.as_mut()?);
if x.is_none() {
*opt = None;
}
x
}
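Behavioral sketch (assuming the private helper were callable): a `None` result clears the slot so `Chain` never polls the exhausted side again.
let arr = [1];
let mut inner = Some(arr.iter());
assert_eq!(and_then_or_clear(&mut inner, |it| it.next()), Some(&1));
assert_eq!(and_then_or_clear(&mut inner, |it| it.next()), None);
assert!(inner.is_none()); // cleared on the `None` result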
core::iter::adapters::cloned::Cloned::<I>::new pub(in crate::iter) fn new(it: I) -> Cloned<I> {
Cloned { it }
}
core::iter::adapters::cloned::clone_try_fold fn clone_try_fold<T: Clone, Acc, R>(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R {
move |acc, elt| f(acc, elt.clone())
}
core::iter::adapters::copied::Copied::<I>::new pub(in crate::iter) fn new(it: I) -> Copied<I> {
Copied { it }
}
core::iter::adapters::copied::copy_fold fn copy_fold<T: Copy, Acc>(mut f: impl FnMut(Acc, T) -> Acc) -> impl FnMut(Acc, &T) -> Acc {
move |acc, &elt| f(acc, elt)
}
core::iter::adapters::copied::copy_try_fold fn copy_try_fold<T: Copy, Acc, R>(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R {
move |acc, &elt| f(acc, elt)
}
core::iter::adapters::enumerate::Enumerate::<I>::new pub(in crate::iter) fn new(iter: I) -> Enumerate<I> {
Enumerate { iter, count: 0 }
}
core::iter::adapters::filter::Filter::<I, P>::new pub(in crate::iter) fn new(iter: I, predicate: P) -> Filter<I, P> {
Filter { iter, predicate }
}
core::iter::adapters::filter::filter_fold fn filter_fold<T, Acc>(
mut predicate: impl FnMut(&T) -> bool,
mut fold: impl FnMut(Acc, T) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
move |acc, item| if predicate(&item) { fold(acc, item) } else { acc }
}
core::iter::adapters::filter::filter_try_fold fn filter_try_fold<'a, T, Acc, R: Try<Output = Acc>>(
predicate: &'a mut impl FnMut(&T) -> bool,
mut fold: impl FnMut(Acc, T) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
move |acc, item| if predicate(&item) { fold(acc, item) } else { try { acc } }
}
core::iter::adapters::flatten::FlatMap::<I, U, F>::into_parts pub(crate) fn into_parts(self) -> (Option<U::IntoIter>, Option<I>, Option<U::IntoIter>) {
(
self.inner.frontiter,
self.inner.iter.into_inner().map(Map::into_inner),
self.inner.backiter,
)
}
core::iter::adapters::flatten::FlatMap::<I, U, F>::new pub(in crate::iter) fn new(iter: I, f: F) -> FlatMap<I, U, F> {
FlatMap { inner: FlattenCompat::new(iter.map(f)) }
}
core::iter::adapters::flatten::FlattenCompat::<I, U>::new fn new(iter: I) -> FlattenCompat<I, U> {
FlattenCompat { iter: iter.fuse(), frontiter: None, backiter: None }
}
core::iter::adapters::fuse::Fuse::<I>::into_inner pub(crate) fn into_inner(self) -> Option<I> {
self.iter
}
core::iter::adapters::fuse::Fuse::<I>::new pub(in crate::iter) fn new(iter: I) -> Fuse<I> {
Fuse { iter: Some(iter) }
}
core::iter::adapters::map::Map::<I, F>::into_inner pub(crate) fn into_inner(self) -> I {
self.iter
}
core::iter::adapters::map::Map::<I, F>::new pub(in crate::iter) fn new(iter: I, f: F) -> Map<I, F> {
Map { iter, f }
}
core::iter::adapters::map::map_fold fn map_fold<T, B, Acc>(
mut f: impl FnMut(T) -> B,
mut g: impl FnMut(Acc, B) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
move |acc, elt| g(acc, f(elt))
}
core::iter::adapters::map::map_try_fold fn map_try_fold<'a, T, B, Acc, R>(
f: &'a mut impl FnMut(T) -> B,
mut g: impl FnMut(Acc, B) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
move |acc, elt| g(acc, f(elt))
}
core::iter::adapters::rev::Rev::<T>::new pub(in crate::iter) fn new(iter: T) -> Rev<T> {
Rev { iter }
}
core::iter::adapters::skip::Skip::<I>::new pub(in crate::iter) fn new(iter: I, n: usize) -> Skip<I> {
Skip { iter, n }
}
core::iter::adapters::step_by::StepBy::<I>::new pub(in crate::iter) fn new(iter: I, step: usize) -> StepBy<I> {
assert!(step != 0);
let iter = <I as SpecRangeSetup<I>>::setup(iter, step);
StepBy { iter, step_minus_one: step - 1, first_take: true }
}
core::iter::adapters::step_by::StepBy::<I>::original_step fn original_step(&self) -> NonZero<usize> {
// SAFETY: By type invariant, `step_minus_one` cannot be `MAX`, which
// means the addition cannot overflow and the result cannot be zero.
unsafe { NonZero::new_unchecked(intrinsics::unchecked_add(self.step_minus_one, 1)) }
}
core::iter::adapters::take::Take::<I>::new pub(in crate::iter) fn new(iter: I, n: usize) -> Take<I> {
Take { iter, n }
}
core::iter::adapters::take_while::TakeWhile::<I, P>::new pub(in crate::iter) fn new(iter: I, predicate: P) -> TakeWhile<I, P> {
TakeWhile { iter, flag: false, predicate }
}
core::iter::adapters::zip::TrustedRandomAccessNoCoerce::size fn size(&self) -> usize
where
Self: Iterator,
{
self.size_hint().0
}
core::iter::adapters::zip::Zip::<A, B>::new pub(in crate::iter) fn new(a: A, b: B) -> Zip<A, B> {
ZipImpl::new(a, b)
}
core::iter::adapters::zip::Zip::<A, B>::super_nth fn super_nth(&mut self, mut n: usize) -> Option<(A::Item, B::Item)> {
while let Some(x) = Iterator::next(self) {
if n == 0 {
return Some(x);
}
n -= 1;
}
None
}
core::iter::adapters::zip::zip pub fn zip<A, B>(a: A, b: B) -> Zip<A::IntoIter, B::IntoIter>
where
A: IntoIterator,
B: IntoIterator,
{
ZipImpl::new(a.into_iter(), b.into_iter())
}
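Usage note: `core::iter::zip` is a free-function convenience over `a.into_iter().zip(b)` and stops at the shorter input.
let pairs: Vec<(i32, char)> = core::iter::zip([1, 2, 3], ['a', 'b']).collect();
assert_eq!(pairs, [(1, 'a'), (2, 'b')]);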
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::Range<A>>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.spec_advance_back_by(n)
}
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::Range<A>>::next_back fn next_back(&mut self) -> Option<A> {
self.spec_next_back()
}
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::Range<A>>::nth_back fn nth_back(&mut self, n: usize) -> Option<A> {
self.spec_nth_back(n)
}
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::RangeInclusive<A>>::next_back fn next_back(&mut self) -> Option<A> {
self.spec_next_back()
}
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::RangeInclusive<A>>::nth_back fn nth_back(&mut self, n: usize) -> Option<A> {
if self.is_empty() {
return None;
}
if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
use crate::cmp::Ordering::*;
match minus_n.partial_cmp(&self.start) {
Some(Greater) => {
self.end = Step::backward(minus_n.clone(), 1);
return Some(minus_n);
}
Some(Equal) => {
self.end = minus_n.clone();
self.exhausted = true;
return Some(minus_n);
}
_ => {}
}
}
self.end = self.start.clone();
self.exhausted = true;
None
}
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::RangeInclusive<A>>::rfold fn $fold<AAA, FFF>(mut self, init: AAA, fold: FFF) -> AAA
where
FFF: FnMut(AAA, Self::Item) -> AAA,
{
use crate::ops::NeverShortCircuit;
self.$try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0
}
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::RangeInclusive<A>>::try_rfold fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.spec_try_rfold(init, f)
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.spec_advance_by(n)
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::count fn count(self) -> usize {
if self.start < self.end {
Step::steps_between(&self.start, &self.end).1.expect("count overflowed usize")
} else {
0
}
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::last fn last(mut self) -> Option<A> {
self.next_back()
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next fn next(&mut self) -> Option<A> {
self.spec_next()
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::nth fn nth(&mut self, n: usize) -> Option<A> {
self.spec_nth(n)
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.start < self.end {
Step::steps_between(&self.start, &self.end)
} else {
(0, Some(0))
}
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::RangeInclusive<A>>::count fn count(self) -> usize {
if self.is_empty() {
return 0;
}
Step::steps_between(&self.start, &self.end)
.1
.and_then(|steps| steps.checked_add(1))
.expect("count overflowed usize")
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::RangeInclusive<A>>::fold fn $fold<AAA, FFF>(mut self, init: AAA, fold: FFF) -> AAA
where
FFF: FnMut(AAA, Self::Item) -> AAA,
{
use crate::ops::NeverShortCircuit;
self.$try_fold(init, NeverShortCircuit::wrap_mut_2(fold)).0
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::RangeInclusive<A>>::last fn last(mut self) -> Option<A> {
self.next_back()
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::RangeInclusive<A>>::next fn next(&mut self) -> Option<A> {
self.spec_next()
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::RangeInclusive<A>>::nth fn nth(&mut self, n: usize) -> Option<A> {
if self.is_empty() {
return None;
}
if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
use crate::cmp::Ordering::*;
match plus_n.partial_cmp(&self.end) {
Some(Less) => {
self.start = Step::forward(plus_n.clone(), 1);
return Some(plus_n);
}
Some(Equal) => {
self.start = plus_n.clone();
self.exhausted = true;
return Some(plus_n);
}
_ => {}
}
}
self.start = self.end.clone();
self.exhausted = true;
None
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::RangeInclusive<A>>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.is_empty() {
return (0, Some(0));
}
let hint = Step::steps_between(&self.start, &self.end);
(hint.0.saturating_add(1), hint.1.and_then(|steps| steps.checked_add(1)))
}
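Worked instance: the `+1` terms account for the inclusive upper bound, and the `is_empty` check keeps exhausted ranges at zero.
assert_eq!((1..=3).size_hint(), (3, Some(3))); // steps_between gives 2, plus 1
assert_eq!((3..=1).size_hint(), (0, Some(0))); // empty range short-circuits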
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::RangeInclusive<A>>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.spec_try_fold(init, f)
}
core::iter::range::Step::backward fn backward(start: Self, count: usize) -> Self {
Step::backward_checked(start, count).expect("overflow in `Step::backward`")
}
core::iter::range::Step::backward_unchecked unsafe fn backward_unchecked(start: Self, count: usize) -> Self {
Step::backward(start, count)
}
core::iter::range::Step::forward fn forward(start: Self, count: usize) -> Self {
Step::forward_checked(start, count).expect("overflow in `Step::forward`")
}
core::iter::range::Step::forward_unchecked unsafe fn forward_unchecked(start: Self, count: usize) -> Self {
Step::forward(start, count)
}
core::iter::sources::from_fn::from_fn pub fn from_fn<T, F>(f: F) -> FromFn<F>
where
F: FnMut() -> Option<T>,
{
FromFn(f)
}
core::iter::traits::collect::Extend::extend_one fn extend_one(&mut self, item: A) {
self.extend(Some(item));
}
core::iter::traits::collect::Extend::extend_one_unchecked unsafe fn extend_one_unchecked(&mut self, item: A)
where
Self: Sized,
{
self.extend_one(item);
}
core::iter::traits::collect::Extend::extend_reserve fn extend_reserve(&mut self, additional: usize) {
let _ = additional;
}
core::iter::traits::double_ended::DoubleEndedIterator::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
for i in 0..n {
if self.next_back().is_none() {
// SAFETY: `i` is always less than `n`.
return Err(unsafe { NonZero::new_unchecked(n - i) });
}
}
Ok(())
}
core::iter::traits::double_ended::DoubleEndedIterator::nth_back fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
if self.advance_back_by(n).is_err() {
return None;
}
self.next_back()
}
core::iter::traits::double_ended::DoubleEndedIterator::rfind fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
#[inline]
fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
move |(), x| {
if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
}
}
self.try_rfold((), check(predicate)).break_value()
}
core::iter::traits::double_ended::DoubleEndedIterator::rfind::check fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
move |(), x| {
if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
}
}
core::iter::traits::double_ended::DoubleEndedIterator::rfold fn rfold<B, F>(mut self, init: B, mut f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
let mut accum = init;
while let Some(x) = self.next_back() {
accum = f(accum, x);
}
accum
}
core::iter::traits::double_ended::DoubleEndedIterator::try_rfold fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let mut accum = init;
while let Some(x) = self.next_back() {
accum = f(accum, x)?;
}
try { accum }
}
core::iter::traits::exact_size::ExactSizeIterator::is_empty fn is_empty(&self) -> bool {
self.len() == 0
}
core::iter::traits::exact_size::ExactSizeIterator::len fn len(&self) -> usize {
let (lower, upper) = self.size_hint();
// Note: This assertion is overly defensive, but it checks the invariant
// guaranteed by the trait. If this trait were rust-internal,
// we could use debug_assert!; assert_eq! will check all Rust user
// implementations too.
assert_eq!(upper, Some(lower));
lower
}
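Usage note: because of the invariant asserted above, `len` always equals the exact number of remaining elements.
let mut r = 0..10;
assert_eq!(r.len(), 10);
r.next();
assert_eq!(r.len(), 9); // shrinks as elements are consumed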
core::iter::traits::iterator::Iterator::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
/// Helper trait to specialize `advance_by` via `try_fold` for `Sized` iterators.
trait SpecAdvanceBy {
fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>>;
}
impl<I: Iterator + ?Sized> SpecAdvanceBy for I {
default fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
for i in 0..n {
if self.next().is_none() {
// SAFETY: `i` is always less than `n`.
return Err(unsafe { NonZero::new_unchecked(n - i) });
}
}
Ok(())
}
}
impl<I: Iterator> SpecAdvanceBy for I {
fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let Some(n) = NonZero::new(n) else {
return Ok(());
};
let res = self.try_fold(n, |n, _| NonZero::new(n.get() - 1));
match res {
None => Ok(()),
Some(n) => Err(n),
}
}
}
self.spec_advance_by(n)
}
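Behavioral sketch (`advance_by` is unstable at the time of writing, so this assumes a nightly toolchain): on success the iterator has moved exactly `n` steps; on failure the error carries how many steps were still missing.
let mut it = [0, 1, 2, 3].into_iter();
assert_eq!(it.advance_by(2), Ok(()));
assert_eq!(it.next(), Some(2));
assert_eq!(it.advance_by(100).unwrap_err().get(), 99); // one element was left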
core::iter::traits::iterator::Iterator::all fn all<F>(&mut self, f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
#[inline]
fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
move |(), x| {
if f(x) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) }
}
}
self.try_fold((), check(f)) == ControlFlow::Continue(())
}
core::iter::traits::iterator::Iterator::all::check fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
move |(), x| {
if f(x) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) }
}
}
core::iter::traits::iterator::Iterator::any fn any<F>(&mut self, f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
#[inline]
fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
move |(), x| {
if f(x) { ControlFlow::Break(()) } else { ControlFlow::Continue(()) }
}
}
self.try_fold((), check(f)) == ControlFlow::Break(())
}
core::iter::traits::iterator::Iterator::any::check fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
move |(), x| {
if f(x) { ControlFlow::Break(()) } else { ControlFlow::Continue(()) }
}
}
core::iter::traits::iterator::Iterator::by_ref fn by_ref(&mut self) -> &mut Self
where
Self: Sized,
{
self
}
core::iter::traits::iterator::Iterator::chain fn chain<U>(self, other: U) -> Chain<Self, U::IntoIter>
where
Self: Sized,
U: IntoIterator<Item = Self::Item>,
{
Chain::new(self, other.into_iter())
}
core::iter::traits::iterator::Iterator::cloned fn cloned<'a, T>(self) -> Cloned<Self>
where
T: Clone + 'a,
Self: Sized + Iterator<Item = &'a T>,
{
Cloned::new(self)
}
core::iter::traits::iterator::Iterator::cmp fn cmp<I>(self, other: I) -> Ordering
where
I: IntoIterator<Item = Self::Item>,
Self::Item: Ord,
Self: Sized,
{
self.cmp_by(other, |x, y| x.cmp(&y))
}
core::iter::traits::iterator::Iterator::cmp_by fn cmp_by<I, F>(self, other: I, cmp: F) -> Ordering
where
Self: Sized,
I: IntoIterator,
F: FnMut(Self::Item, I::Item) -> Ordering,
{
#[inline]
fn compare<X, Y, F>(mut cmp: F) -> impl FnMut(X, Y) -> ControlFlow<Ordering>
where
F: FnMut(X, Y) -> Ordering,
{
move |x, y| match cmp(x, y) {
Ordering::Equal => ControlFlow::Continue(()),
non_eq => ControlFlow::Break(non_eq),
}
}
match iter_compare(self, other.into_iter(), compare(cmp)) {
ControlFlow::Continue(ord) => ord,
ControlFlow::Break(ord) => ord,
}
}
core::iter::traits::iterator::Iterator::cmp_by::compare fn compare<X, Y, F>(mut cmp: F) -> impl FnMut(X, Y) -> ControlFlow<Ordering>
where
F: FnMut(X, Y) -> Ordering,
{
move |x, y| match cmp(x, y) {
Ordering::Equal => ControlFlow::Continue(()),
non_eq => ControlFlow::Break(non_eq),
}
}
core::iter::traits::iterator::Iterator::copied fn copied<'a, T>(self) -> Copied<Self>
where
T: Copy + 'a,
Self: Sized + Iterator<Item = &'a T>,
{
Copied::new(self)
}
core::iter::traits::iterator::Iterator::count fn count(self) -> usize
where
Self: Sized,
{
self.fold(
0,
#[rustc_inherit_overflow_checks]
|count, _| count + 1,
)
}
core::iter::traits::iterator::Iterator::enumerate fn enumerate(self) -> Enumerate<Self>
where
Self: Sized,
{
Enumerate::new(self)
}
core::iter::traits::iterator::Iterator::eq fn eq<I>(self, other: I) -> bool
where
I: IntoIterator,
Self::Item: PartialEq<I::Item>,
Self: Sized,
{
self.eq_by(other, |x, y| x == y)
}
core::iter::traits::iterator::Iterator::eq_by fn eq_by<I, F>(self, other: I, eq: F) -> bool
where
Self: Sized,
I: IntoIterator,
F: FnMut(Self::Item, I::Item) -> bool,
{
#[inline]
fn compare<X, Y, F>(mut eq: F) -> impl FnMut(X, Y) -> ControlFlow<()>
where
F: FnMut(X, Y) -> bool,
{
move |x, y| {
if eq(x, y) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) }
}
}
SpecIterEq::spec_iter_eq(self, other.into_iter(), compare(eq))
}
core::iter::traits::iterator::Iterator::eq_by::compare fn compare<X, Y, F>(mut eq: F) -> impl FnMut(X, Y) -> ControlFlow<()>
where
F: FnMut(X, Y) -> bool,
{
move |x, y| {
if eq(x, y) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) }
}
}
core::iter::traits::iterator::Iterator::filter fn filter<P>(self, predicate: P) -> Filter<Self, P>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
Filter::new(self, predicate)
}
core::iter::traits::iterator::Iterator::find fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
#[inline]
fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
move |(), x| {
if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
}
}
self.try_fold((), check(predicate)).break_value()
}
core::iter::traits::iterator::Iterator::find::check fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
move |(), x| {
if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
}
}
core::iter::traits::iterator::Iterator::flat_map fn flat_map<U, F>(self, f: F) -> FlatMap<Self, U, F>
where
Self: Sized,
U: IntoIterator,
F: FnMut(Self::Item) -> U,
{
FlatMap::new(self, f)
}
core::iter::traits::iterator::Iterator::fold fn fold<B, F>(mut self, init: B, mut f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x);
}
accum
}
core::iter::traits::iterator::Iterator::for_each fn for_each<F>(self, f: F)
where
Self: Sized,
F: FnMut(Self::Item),
{
#[inline]
fn call<T>(mut f: impl FnMut(T)) -> impl FnMut((), T) {
move |(), item| f(item)
}
self.fold((), call(f));
}
core::iter::traits::iterator::Iterator::for_each::call fn call<T>(mut f: impl FnMut(T)) -> impl FnMut((), T) {
move |(), item| f(item)
}
core::iter::traits::iterator::Iterator::fuse fn fuse(self) -> Fuse<Self>
where
Self: Sized,
{
Fuse::new(self)
}
core::iter::traits::iterator::Iterator::last fn last(self) -> Option<Self::Item>
where
Self: Sized,
{
#[inline]
fn some<T>(_: Option<T>, x: T) -> Option<T> {
Some(x)
}
self.fold(None, some)
}
core::iter::traits::iterator::Iterator::last::some fn some<T>(_: Option<T>, x: T) -> Option<T> {
Some(x)
}
core::iter::traits::iterator::Iterator::map fn map<B, F>(self, f: F) -> Map<Self, F>
where
Self: Sized,
F: FnMut(Self::Item) -> B,
{
Map::new(self, f)
}
core::iter::traits::iterator::Iterator::max_by fn max_by<F>(self, compare: F) -> Option<Self::Item>
where
Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Ordering,
{
#[inline]
fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
move |x, y| cmp::max_by(x, y, &mut compare)
}
self.reduce(fold(compare))
}
core::iter::traits::iterator::Iterator::max_by::fold fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
move |x, y| cmp::max_by(x, y, &mut compare)
}
core::iter::traits::iterator::Iterator::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
core::iter::traits::iterator::Iterator::position fn position<P>(&mut self, predicate: P) -> Option<usize>
where
Self: Sized,
P: FnMut(Self::Item) -> bool,
{
#[inline]
fn check<'a, T>(
mut predicate: impl FnMut(T) -> bool + 'a,
acc: &'a mut usize,
) -> impl FnMut((), T) -> ControlFlow<usize, ()> + 'a {
#[rustc_inherit_overflow_checks]
move |_, x| {
if predicate(x) {
ControlFlow::Break(*acc)
} else {
*acc += 1;
ControlFlow::Continue(())
}
}
}
let mut acc = 0;
self.try_fold((), check(predicate, &mut acc)).break_value()
}
core::iter::traits::iterator::Iterator::position::check fn check<'a, T>(
mut predicate: impl FnMut(T) -> bool + 'a,
acc: &'a mut usize,
) -> impl FnMut((), T) -> ControlFlow<usize, ()> + 'a {
#[rustc_inherit_overflow_checks]
move |_, x| {
if predicate(x) {
ControlFlow::Break(*acc)
} else {
*acc += 1;
ControlFlow::Continue(())
}
}
}
core::iter::traits::iterator::Iterator::reduce fn reduce<F>(mut self, f: F) -> Option<Self::Item>
where
Self: Sized,
F: FnMut(Self::Item, Self::Item) -> Self::Item,
{
let first = self.next()?;
Some(self.fold(first, f))
}
core::iter::traits::iterator::Iterator::rev fn rev(self) -> Rev<Self>
where
Self: Sized + DoubleEndedIterator,
{
Rev::new(self)
}
core::iter::traits::iterator::Iterator::rposition fn rposition<P>(&mut self, predicate: P) -> Option<usize>
where
P: FnMut(Self::Item) -> bool,
Self: Sized + ExactSizeIterator + DoubleEndedIterator,
{
// No need for an overflow check here, because `ExactSizeIterator`
// implies that the number of elements fits into a `usize`.
#[inline]
fn check<T>(
mut predicate: impl FnMut(T) -> bool,
) -> impl FnMut(usize, T) -> ControlFlow<usize, usize> {
move |i, x| {
let i = i - 1;
if predicate(x) { ControlFlow::Break(i) } else { ControlFlow::Continue(i) }
}
}
let n = self.len();
self.try_rfold(n, check(predicate)).break_value()
}
core::iter::traits::iterator::Iterator::rposition::check fn check<T>(
mut predicate: impl FnMut(T) -> bool,
) -> impl FnMut(usize, T) -> ControlFlow<usize, usize> {
move |i, x| {
let i = i - 1;
if predicate(x) { ControlFlow::Break(i) } else { ControlFlow::Continue(i) }
}
}
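Worked instance: the accumulator starts at `len()` and is decremented before each test, so a hit reports the element's front-based index without any overflow check.
let a = [1, 2, 3, 2];
assert_eq!(a.iter().rposition(|&x| x == 2), Some(3)); // rightmost match
assert_eq!(a.iter().rposition(|&x| x == 9), None);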
core::iter::traits::iterator::Iterator::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
(0, None)
}
core::iter::traits::iterator::Iterator::skip fn skip(self, n: usize) -> Skip<Self>
where
Self: Sized,
{
Skip::new(self, n)
}
core::iter::traits::iterator::Iterator::step_by fn step_by(self, step: usize) -> StepBy<Self>
where
Self: Sized,
{
StepBy::new(self, step)
}
core::iter::traits::iterator::Iterator::sum fn sum<S>(self) -> S
where
Self: Sized,
S: Sum<Self::Item>,
{
Sum::sum(self)
}
core::iter::traits::iterator::Iterator::take fn take(self, n: usize) -> Take<Self>
where
Self: Sized,
{
Take::new(self, n)
}
core::iter::traits::iterator::Iterator::take_while fn take_while<P>(self, predicate: P) -> TakeWhile<Self, P>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
TakeWhile::new(self, predicate)
}
core::iter::traits::iterator::Iterator::try_fold fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
core::iter::traits::iterator::Iterator::try_for_each fn try_for_each<F, R>(&mut self, f: F) -> R
where
Self: Sized,
F: FnMut(Self::Item) -> R,
R: Try<Output = ()>,
{
#[inline]
fn call<T, R>(mut f: impl FnMut(T) -> R) -> impl FnMut((), T) -> R {
move |(), x| f(x)
}
self.try_fold((), call(f))
}
core::iter::traits::iterator::Iterator::try_for_each::call fn call<T, R>(mut f: impl FnMut(T) -> R) -> impl FnMut((), T) -> R {
move |(), x| f(x)
}
core::iter::traits::iterator::Iterator::zip fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter>
where
Self: Sized,
U: IntoIterator,
{
Zip::new(self, other.into_iter())
}
core::iter::traits::iterator::iter_compare fn iter_compare<A, B, F, T>(mut a: A, mut b: B, f: F) -> ControlFlow<T, Ordering>
where
A: Iterator,
B: Iterator,
F: FnMut(A::Item, B::Item) -> ControlFlow<T>,
{
#[inline]
fn compare<'a, B, X, T>(
b: &'a mut B,
mut f: impl FnMut(X, B::Item) -> ControlFlow<T> + 'a,
) -> impl FnMut(X) -> ControlFlow<ControlFlow<T, Ordering>> + 'a
where
B: Iterator,
{
move |x| match b.next() {
None => ControlFlow::Break(ControlFlow::Continue(Ordering::Greater)),
Some(y) => f(x, y).map_break(ControlFlow::Break),
}
}
match a.try_for_each(compare(&mut b, f)) {
ControlFlow::Continue(()) => ControlFlow::Continue(match b.next() {
None => Ordering::Equal,
Some(_) => Ordering::Less,
}),
ControlFlow::Break(x) => x,
}
}
core::iter::traits::iterator::iter_compare::compare fn compare<'a, B, X, T>(
b: &'a mut B,
mut f: impl FnMut(X, B::Item) -> ControlFlow<T> + 'a,
) -> impl FnMut(X) -> ControlFlow<ControlFlow<T, Ordering>> + 'a
where
B: Iterator,
{
move |x| match b.next() {
None => ControlFlow::Break(ControlFlow::Continue(Ordering::Greater)),
Some(y) => f(x, y).map_break(ControlFlow::Break),
}
}
core::iter::traits::iterator::iter_eq fn iter_eq<A, B, F>(a: A, b: B, f: F) -> bool
where
A: Iterator,
B: Iterator,
F: FnMut(A::Item, B::Item) -> ControlFlow<()>,
{
iter_compare(a, b, f).continue_value().is_some_and(|ord| ord == Ordering::Equal)
}
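Observable behavior of `iter_compare` through the public `Iterator::cmp`: a shorter prefix orders below, and exhausting both sides together yields `Equal`.
use core::cmp::Ordering;
assert_eq!([1, 2].iter().cmp([1, 2].iter()), Ordering::Equal);
assert_eq!([1, 2, 3].iter().cmp([1, 2].iter()), Ordering::Greater);
assert_eq!([1, 2].iter().cmp([1, 3].iter()), Ordering::Less);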
core::mem::align_of pub const fn align_of<T>() -> usize {
<T as SizedTypeProperties>::ALIGN
}
core::mem::align_of_val pub const fn align_of_val<T: ?Sized>(val: &T) -> usize {
// SAFETY: val is a reference, so it's a valid raw pointer
unsafe { intrinsics::align_of_val(val) }
}
core::mem::conjure_zst::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::mem::conjure_zst::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::mem::discriminant pub const fn discriminant<T>(v: &T) -> Discriminant<T> {
Discriminant(intrinsics::discriminant_value(v))
}
core::mem::drop pub const fn drop<T>(_x: T)
where
T: [const] Destruct,
{
}
core::mem::forget pub const fn forget<T>(t: T) {
let _ = ManuallyDrop::new(t);
}
core::mem::manually_drop::ManuallyDrop::<T>::drop pub const unsafe fn drop(slot: &mut ManuallyDrop<T>)
where
T: [const] Destruct,
{
// SAFETY: we are dropping the value pointed to by a mutable reference
// which is guaranteed to be valid for writes.
// It is up to the caller to make sure that `slot` isn't dropped again.
unsafe { ptr::drop_in_place(slot.value.as_mut()) }
}
core::mem::manually_drop::ManuallyDrop::<T>::into_inner pub const fn into_inner(slot: ManuallyDrop<T>) -> T {
// Cannot use `MaybeDangling::into_inner` as that does not yet have the desired semantics.
// SAFETY: We know this is a valid `T`. `slot` will not be dropped.
unsafe { (&raw const slot).cast::<T>().read() }
}
core::mem::manually_drop::ManuallyDrop::<T>::new pub const fn new(value: T) -> ManuallyDrop<T> {
ManuallyDrop { value: MaybeDangling::new(value) }
}
core::mem::manually_drop::ManuallyDrop::<T>::take pub const unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
// SAFETY: we are reading from a reference, which is guaranteed
// to be valid for reads.
unsafe { ptr::read(slot.value.as_ref()) }
}
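Illustrative sketch: `ManuallyDrop` suppresses the destructor until the value is explicitly released, here via `into_inner` (`String` is used purely for illustration).
let slot = core::mem::ManuallyDrop::new(String::from("hi"));
let s = core::mem::ManuallyDrop::into_inner(slot); // ownership moves back out
assert_eq!(s, "hi"); // `s` now drops normally at end of scope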
core::mem::maybe_dangling::MaybeDangling::<P>::as_mut pub const fn as_mut(&mut self) -> &mut P {
&mut self.0
}
core::mem::maybe_dangling::MaybeDangling::<P>::as_ref pub const fn as_ref(&self) -> &P {
&self.0
}
core::mem::maybe_dangling::MaybeDangling::<P>::into_inner pub const fn into_inner(self) -> P
where
P: Sized,
{
// FIXME: replace this with `self.0` when const checker can figure out that `self` isn't actually dropped
// SAFETY: this is equivalent to `self.0`
let x = unsafe { ptr::read(&self.0) };
mem::forget(self);
x
}
core::mem::maybe_dangling::MaybeDangling::<P>::new pub const fn new(x: P) -> Self
where
P: Sized,
{
MaybeDangling(x)
}
core::mem::maybe_uninit::MaybeUninit::<T>::array_assume_init pub const unsafe fn array_assume_init<const N: usize>(array: [Self; N]) -> [T; N] {
// SAFETY:
// * The caller guarantees that all elements of the array are initialized
// * `MaybeUninit<T>` and T are guaranteed to have the same layout
// * `MaybeUninit` does not drop, so there are no double-frees
// And thus the conversion is safe
unsafe {
intrinsics::assert_inhabited::<[T; N]>();
intrinsics::transmute_unchecked(array)
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_bytes pub const fn as_bytes(&self) -> &[MaybeUninit<u8>] {
// SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
unsafe {
slice::from_raw_parts(self.as_ptr().cast::<MaybeUninit<u8>>(), super::size_of::<T>())
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_bytes_mut pub const fn as_bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
// SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
unsafe {
slice::from_raw_parts_mut(
self.as_mut_ptr().cast::<MaybeUninit<u8>>(),
super::size_of::<T>(),
)
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_mut_ptr pub const fn as_mut_ptr(&mut self) -> *mut T {
// `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
self as *mut _ as *mut T
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_ptr pub const fn as_ptr(&self) -> *const T {
// `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
self as *const _ as *const T
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init pub const unsafe fn assume_init(self) -> T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
// We do this via a raw ptr read instead of `ManuallyDrop::into_inner` so that there's
// no trace of `ManuallyDrop` in Miri's error messages here.
(&raw const self.value).cast::<T>().read()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_drop pub const unsafe fn assume_init_drop(&mut self)
where
T: [const] Destruct,
{
// SAFETY: the caller must guarantee that `self` is initialized and
// satisfies all invariants of `T`.
// Dropping the value in place is safe if that is the case.
unsafe { ptr::drop_in_place(self.as_mut_ptr()) }
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_mut pub const unsafe fn assume_init_mut(&mut self) -> &mut T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
&mut *self.as_mut_ptr()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_read pub const unsafe fn assume_init_read(&self) -> T {
// SAFETY: the caller must guarantee that `self` is initialized.
// Reading from `self.as_ptr()` is safe since `self` should be initialized.
unsafe {
intrinsics::assert_inhabited::<T>();
self.as_ptr().read()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_ref pub const unsafe fn assume_init_ref(&self) -> &T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
&*self.as_ptr()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::new pub const fn new(val: T) -> MaybeUninit<T> {
MaybeUninit { value: ManuallyDrop::new(val) }
}
core::mem::maybe_uninit::MaybeUninit::<T>::uninit pub const fn uninit() -> MaybeUninit<T> {
MaybeUninit { uninit: () }
}
core::mem::maybe_uninit::MaybeUninit::<T>::write pub const fn write(&mut self, val: T) -> &mut T {
*self = MaybeUninit::new(val);
// SAFETY: We just initialized this value.
unsafe { self.assume_init_mut() }
}
core::mem::maybe_uninit::MaybeUninit::<T>::zeroed pub const fn zeroed() -> MaybeUninit<T> {
let mut u = MaybeUninit::<T>::uninit();
// SAFETY: `u.as_mut_ptr()` points to allocated memory.
unsafe { u.as_mut_ptr().write_bytes(0u8, 1) };
u
}
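Typical write-then-read flow for the constructors above: initialize first, and only then call `assume_init`.
let mut slot = core::mem::MaybeUninit::<u32>::uninit();
slot.write(7);
// SAFETY: `write` above initialized the slot.
assert_eq!(unsafe { slot.assume_init() }, 7);
// SAFETY: all-zero bytes are a valid `u32`.
assert_eq!(unsafe { core::mem::MaybeUninit::<u32>::zeroed().assume_init() }, 0);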
core::mem::maybe_uninit::MaybeUninit::<[T; N]>::transpose pub const fn transpose(self) -> [MaybeUninit<T>; N] {
// SAFETY: T and MaybeUninit<T> have the same layout
unsafe { intrinsics::transmute_unchecked(self) }
}
core::mem::needs_drop pub const fn needs_drop<T: ?Sized>() -> bool {
const { intrinsics::needs_drop::<T>() }
}
core::mem::replace pub const fn replace<T>(dest: &mut T, src: T) -> T {
// It may be tempting to use `swap` to avoid `unsafe` here. Don't!
// The compiler optimizes the implementation below to two `memcpy`s
// while `swap` would require at least three. See PR#83022 for details.
// SAFETY: We read from `dest` but directly write `src` into it afterwards,
// such that the old value is not duplicated. Nothing is dropped and
// nothing here can panic.
unsafe {
// Ideally we wouldn't use the intrinsics here, but going through the
// `ptr` methods introduces two unnecessary UbChecks, so until we can
// remove those for pointers that come from references, this uses the
// intrinsics instead so this stays very cheap in MIR (and debug).
let result = crate::intrinsics::read_via_copy(dest);
crate::intrinsics::write_via_move(dest, src);
result
}
}
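Usage note: `replace` moves `src` in and hands the previous value back out in one step.
let mut slot = 5;
let old = core::mem::replace(&mut slot, 42);
assert_eq!((old, slot), (5, 42));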
core::mem::size_of pub const fn size_of<T>() -> usize {
<T as SizedTypeProperties>::SIZE
}
core::mem::size_of_val pub const fn size_of_val<T: ?Sized>(val: &T) -> usize {
// SAFETY: `val` is a reference, so it's a valid raw pointer
unsafe { intrinsics::size_of_val(val) }
}
core::mem::size_of_val_raw pub const unsafe fn size_of_val_raw<T: ?Sized>(val: *const T) -> usize {
// SAFETY: the caller must provide a valid raw pointer
unsafe { intrinsics::size_of_val(val) }
}
core::mem::swap pub const fn swap<T>(x: &mut T, y: &mut T) {
// SAFETY: `&mut` guarantees these are typed readable and writable
// as well as non-overlapping.
unsafe { intrinsics::typed_swap_nonoverlapping(x, y) }
}
core::mem::take pub const fn take<T: [const] Default>(dest: &mut T) -> T {
replace(dest, T::default())
}
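Usage note: `take` is `replace` with the type's default value left behind.
let mut v = vec![1, 2];
let stolen = core::mem::take(&mut v);
assert_eq!(stolen, [1, 2]);
assert!(v.is_empty()); // `Vec::default()` remains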
core::mem::transmute_copy pub const unsafe fn transmute_copy<Src, Dst>(src: &Src) -> Dst {
assert!(
size_of::<Src>() >= size_of::<Dst>(),
"cannot transmute_copy if Dst is larger than Src"
);
// If Dst has a higher alignment requirement, src might not be suitably aligned.
if align_of::<Dst>() > align_of::<Src>() {
// SAFETY: `src` is a reference which is guaranteed to be valid for reads.
// The caller must guarantee that the actual transmutation is safe.
unsafe { ptr::read_unaligned(src as *const Src as *const Dst) }
} else {
// SAFETY: `src` is a reference which is guaranteed to be valid for reads.
// We just checked that `src as *const Dst` was properly aligned.
// The caller must guarantee that the actual transmutation is safe.
unsafe { ptr::read(src as *const Src as *const Dst) }
}
}
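Illustrative sketch of the alignment-aware read above: extracting a smaller `Dst` out of a larger `Src` passes the size assert, and arrays guarantee their first element sits at offset 0.
let words: [u32; 2] = [1, 2];
// SAFETY: the first `size_of::<u32>()` bytes of `words` are the value `1u32`.
let first: u32 = unsafe { core::mem::transmute_copy(&words) };
assert_eq!(first, 1);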
core::mem::zeroed pub const unsafe fn zeroed<T>() -> T {
// SAFETY: the caller must guarantee that an all-zero value is valid for `T`.
unsafe {
intrinsics::assert_zero_valid::<T>();
MaybeUninit::zeroed().assume_init()
}
}
core::num::<impl i128>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i128>::abs_diff pub const fn abs_diff(self, other: Self) -> $UnsignedT {
if self < other {
// Converting a non-negative x from signed to unsigned by using
// `x as U` is left unchanged, but a negative x is converted
// to value x + 2^N. Thus if `s` and `o` are binary variables
// respectively indicating whether `self` and `other` are
// negative, we are computing the mathematical value:
//
// (other + o*2^N) - (self + s*2^N) mod 2^N
// other - self + (o-s)*2^N mod 2^N
// other - self mod 2^N
//
// Finally, taking the mod 2^N of the mathematical value of
// `other - self` does not change it as it already is
// in the range [0, 2^N).
(other as $UnsignedT).wrapping_sub(self as $UnsignedT)
} else {
(self as $UnsignedT).wrapping_sub(other as $UnsignedT)
}
}
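Worked instance of the derivation above at N = 8: self = -3 (s = 1), other = 5 (o = 0), so the computation is (5 - 253) mod 256 = 8, which equals other - self.
assert_eq!((-3i8).abs_diff(5), 8u8);
assert_eq!(i8::MIN.abs_diff(i8::MAX), 255u8); // spans the full range without overflow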
core::num::<impl i128>::cast_unsigned pub const fn cast_unsigned(self) -> $UnsignedT {
self as $UnsignedT
}
core::num::<impl i128>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i128>::checked_add_unsigned pub const fn checked_add_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_add_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i128>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
if self <= 0 || base <= 1 {
None
} else {
// Delegate to the unsigned implementation.
// The condition makes sure that both casts are exact.
(self as $UnsignedT).checked_ilog(base as $UnsignedT)
}
}
core::num::<impl i128>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
if self <= 0 {
None
} else {
// SAFETY: We just checked that this number is positive
let log = (Self::BITS - 1) - unsafe { intrinsics::ctlz_nonzero(self) as u32 };
Some(log)
}
}
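Observable behavior: for positive inputs the result is `BITS - 1 - leading_zeros`, rounded down; zero and negative inputs yield `None` (shown on `i32` for brevity).
assert_eq!(8i32.checked_ilog2(), Some(3));
assert_eq!(9i32.checked_ilog2(), Some(3)); // rounds down
assert_eq!(0i32.checked_ilog2(), None);
assert_eq!((-1i32).checked_ilog2(), None);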
core::num::<impl i128>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i128>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i128>::checked_sub_unsigned pub const fn checked_sub_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_sub_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i128>::count_ones pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
core::num::<impl i128>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl i128>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i128>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i128>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i128>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i128>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i128>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i128>::leading_zeros pub const fn leading_zeros(self) -> u32 {
(self as $UnsignedT).leading_zeros()
}
core::num::<impl i128>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i128>::overflowing_add_unsigned pub const fn overflowing_add_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_add(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i128>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i128>::overflowing_neg pub const fn overflowing_neg(self) -> (Self, bool) {
if intrinsics::unlikely(self == Self::MIN) {
(Self::MIN, true)
} else {
(-self, false)
}
}
core::num::<impl i128>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i128>::overflowing_sub_unsigned pub const fn overflowing_sub_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_sub(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i128>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
(self as $UnsignedT).rotate_left(n) as Self
}
core::num::<impl i128>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i128>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i128>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i128>::unchecked_neg pub const unsafe fn unchecked_neg(self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_neg cannot overflow"),
(
lhs: $SelfT = self,
) => !lhs.overflowing_neg().1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(0, self)
}
}
core::num::<impl i128>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl i128>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl i128>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i128>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i128>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i128>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl i128>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i128>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl i128>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl i128>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i16>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i16>::abs_diff pub const fn abs_diff(self, other: Self) -> $UnsignedT {
if self < other {
// Converting a non-negative x from signed to unsigned by using
// `x as U` is left unchanged, but a negative x is converted
// to value x + 2^N. Thus if `s` and `o` are binary variables
// respectively indicating whether `self` and `other` are
// negative, we are computing the mathematical value:
//
// (other + o*2^N) - (self + s*2^N) mod 2^N
// other - self + (o-s)*2^N mod 2^N
// other - self mod 2^N
//
// Finally, taking the mod 2^N of the mathematical value of
// `other - self` does not change it as it already is
// in the range [0, 2^N).
(other as $UnsignedT).wrapping_sub(self as $UnsignedT)
} else {
(self as $UnsignedT).wrapping_sub(other as $UnsignedT)
}
}
core::num::<impl i16>::cast_unsigned pub const fn cast_unsigned(self) -> $UnsignedT {
self as $UnsignedT
}
core::num::<impl i16>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i16>::checked_add_unsigned pub const fn checked_add_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_add_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i16>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
if self <= 0 || base <= 1 {
None
} else {
// Delegate to the unsigned implementation.
// The condition makes sure that both casts are exact.
(self as $UnsignedT).checked_ilog(base as $UnsignedT)
}
}
core::num::<impl i16>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
if self <= 0 {
None
} else {
// SAFETY: We just checked that this number is positive
let log = (Self::BITS - 1) - unsafe { intrinsics::ctlz_nonzero(self) as u32 };
Some(log)
}
}
core::num::<impl i16>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i16>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i16>::checked_sub_unsigned pub const fn checked_sub_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_sub_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i16>::count_ones pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
core::num::<impl i16>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl i16>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i16>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i16>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i16>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i16>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i16>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i16>::leading_zeros pub const fn leading_zeros(self) -> u32 {
(self as $UnsignedT).leading_zeros()
}
core::num::<impl i16>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i16>::overflowing_add_unsigned pub const fn overflowing_add_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_add(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i16>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i16>::overflowing_neg pub const fn overflowing_neg(self) -> (Self, bool) {
if intrinsics::unlikely(self == Self::MIN) {
(Self::MIN, true)
} else {
(-self, false)
}
}
core::num::<impl i16>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i16>::overflowing_sub_unsigned pub const fn overflowing_sub_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_sub(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i16>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
(self as $UnsignedT).rotate_left(n) as Self
}
core::num::<impl i16>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i16>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i16>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i16>::unchecked_neg pub const unsafe fn unchecked_neg(self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_neg cannot overflow"),
(
lhs: $SelfT = self,
) => !lhs.overflowing_neg().1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(0, self)
}
}
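The precondition `!lhs.overflowing_neg().1` excludes exactly one input, `i16::MIN`, whose negation (32768) is not representable. In safe code the same predicate is what `checked_neg` reports; a quick check, illustrative only:

fn main() {
    assert_eq!(32_767i16.checked_neg(), Some(-32_767));
    // i16::MIN is the single value the unsafe precondition rules out.
    assert_eq!(i16::MIN.checked_neg(), None);
    assert!(i16::MIN.overflowing_neg().1);
}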
core::num::<impl i16>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl i16>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl i16>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i16>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i16>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i16>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl i16>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i16>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
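Masking with `Self::BITS - 1` keeps the shift amount in `0..16`, so only the low four bits of `rhs` matter; this relies on `BITS` being a power of two. A small demonstration, illustrative only:

fn main() {
    // 20 & 15 == 4, so shifting by 20 behaves like shifting by 4.
    assert_eq!(1i16.wrapping_shl(20), 1i16 << 4);
    // A multiple of the width masks to zero, leaving the value unchanged.
    assert_eq!(0x1234i16.wrapping_shl(16), 0x1234);
}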
core::num::<impl i16>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl i16>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i32>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i32>::abs_diff pub const fn abs_diff(self, other: Self) -> $UnsignedT {
if self < other {
// A non-negative x is unchanged when converted from signed to
// unsigned via `x as U`, but a negative x is converted to the
// value x + 2^N. Thus if `s` and `o` are binary variables
// respectively indicating whether `self` and `other` are
// negative, we are computing the mathematical value:
//
// (other + o*2^N) - (self + s*2^N) mod 2^N
// other - self + (o-s)*2^N mod 2^N
// other - self mod 2^N
//
// Finally, taking the mod 2^N of the mathematical value of
// `other - self` does not change it as it already is
// in the range [0, 2^N).
(other as $UnsignedT).wrapping_sub(self as $UnsignedT)
} else {
(self as $UnsignedT).wrapping_sub(other as $UnsignedT)
}
}
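The modular argument in the comment can be checked on a concrete pair: with N = 32, self = -3 and other = 5, the mathematical value other - self = 8 is exactly what the wrapping subtraction of the bit-reinterpreted operands produces. Illustrative only:

fn main() {
    let (a, b) = (-3i32, 5i32);
    // Bit-reinterpret and wrapping-subtract, as in the body above.
    assert_eq!((b as u32).wrapping_sub(a as u32), 8);
    assert_eq!(a.abs_diff(b), 8);
}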
core::num::<impl i32>::cast_unsigned pub const fn cast_unsigned(self) -> $UnsignedT {
self as $UnsignedT
}
core::num::<impl i32>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i32>::checked_add_unsigned pub const fn checked_add_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_add_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i32>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
if self <= 0 || base <= 1 {
None
} else {
// Delegate to the unsigned implementation.
// The condition makes sure that both casts are exact.
(self as $UnsignedT).checked_ilog(base as $UnsignedT)
}
}
core::num::<impl i32>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
if self <= 0 {
None
} else {
// SAFETY: We just checked that this number is positive
let log = (Self::BITS - 1) - unsafe { intrinsics::ctlz_nonzero(self) as u32 };
Some(log)
}
}
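`(Self::BITS - 1) - ctlz(self)` is the index of the highest set bit; for positive inputs the stable `leading_zeros` computes the same count as the `ctlz_nonzero` intrinsic. A sketch under that assumption, illustrative only:

fn main() {
    let x = 1000i32; // 2^9 < 1000 < 2^10
    assert_eq!((i32::BITS - 1) - x.leading_zeros(), 9);
    assert_eq!(x.checked_ilog2(), Some(9));
    // Non-positive inputs have no base-2 logarithm.
    assert_eq!(0i32.checked_ilog2(), None);
    assert_eq!((-8i32).checked_ilog2(), None);
}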
core::num::<impl i32>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i32>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i32>::checked_sub_unsigned pub const fn checked_sub_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_sub_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i32>::count_ones pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
core::num::<impl i32>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl i32>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i32>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i32>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i32>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i32>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i32>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i32>::leading_zeros pub const fn leading_zeros(self) -> u32 {
(self as $UnsignedT).leading_zeros()
}
core::num::<impl i32>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i32>::overflowing_add_unsigned pub const fn overflowing_add_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_add(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i32>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i32>::overflowing_neg pub const fn overflowing_neg(self) -> (Self, bool) {
if intrinsics::unlikely(self == Self::MIN) {
(Self::MIN, true)
} else {
(-self, false)
}
}
core::num::<impl i32>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i32>::overflowing_sub_unsigned pub const fn overflowing_sub_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_sub(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i32>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
(self as $UnsignedT).rotate_left(n) as Self
}
core::num::<impl i32>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i32>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i32>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i32>::unchecked_neg pub const unsafe fn unchecked_neg(self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_neg cannot overflow"),
(
lhs: $SelfT = self,
) => !lhs.overflowing_neg().1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(0, self)
}
}
core::num::<impl i32>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl i32>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl i32>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i32>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i32>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i32>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl i32>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i32>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl i32>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl i32>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i64>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i64>::abs_diff pub const fn abs_diff(self, other: Self) -> $UnsignedT {
if self < other {
// A non-negative x is unchanged when converted from signed to
// unsigned via `x as U`, but a negative x is converted to the
// value x + 2^N. Thus if `s` and `o` are binary variables
// respectively indicating whether `self` and `other` are
// negative, we are computing the mathematical value:
//
// (other + o*2^N) - (self + s*2^N) mod 2^N
// other - self + (o-s)*2^N mod 2^N
// other - self mod 2^N
//
// Finally, taking the mod 2^N of the mathematical value of
// `other - self` does not change it as it already is
// in the range [0, 2^N).
(other as $UnsignedT).wrapping_sub(self as $UnsignedT)
} else {
(self as $UnsignedT).wrapping_sub(other as $UnsignedT)
}
}
core::num::<impl i64>::cast_unsigned pub const fn cast_unsigned(self) -> $UnsignedT {
self as $UnsignedT
}
core::num::<impl i64>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i64>::checked_add_unsigned pub const fn checked_add_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_add_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i64>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
if self <= 0 || base <= 1 {
None
} else {
// Delegate to the unsigned implementation.
// The condition makes sure that both casts are exact.
(self as $UnsignedT).checked_ilog(base as $UnsignedT)
}
}
core::num::<impl i64>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
if self <= 0 {
None
} else {
// SAFETY: We just checked that this number is positive
let log = (Self::BITS - 1) - unsafe { intrinsics::ctlz_nonzero(self) as u32 };
Some(log)
}
}
core::num::<impl i64>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i64>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i64>::checked_sub_unsigned pub const fn checked_sub_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_sub_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i64>::count_ones pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
core::num::<impl i64>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl i64>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i64>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i64>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i64>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i64>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i64>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i64>::leading_zeros pub const fn leading_zeros(self) -> u32 {
(self as $UnsignedT).leading_zeros()
}
core::num::<impl i64>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i64>::overflowing_add_unsigned pub const fn overflowing_add_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_add(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i64>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i64>::overflowing_neg pub const fn overflowing_neg(self) -> (Self, bool) {
if intrinsics::unlikely(self == Self::MIN) {
(Self::MIN, true)
} else {
(-self, false)
}
}
core::num::<impl i64>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i64>::overflowing_sub_unsigned pub const fn overflowing_sub_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_sub(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i64>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
(self as $UnsignedT).rotate_left(n) as Self
}
core::num::<impl i64>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i64>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i64>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i64>::unchecked_neg pub const unsafe fn unchecked_neg(self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_neg cannot overflow"),
(
lhs: $SelfT = self,
) => !lhs.overflowing_neg().1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(0, self)
}
}
core::num::<impl i64>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl i64>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl i64>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i64>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i64>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i64>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl i64>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i64>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl i64>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl i64>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i8>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i8>::abs_diff pub const fn abs_diff(self, other: Self) -> $UnsignedT {
if self < other {
// A non-negative x is unchanged when converted from signed to
// unsigned via `x as U`, but a negative x is converted to the
// value x + 2^N. Thus if `s` and `o` are binary variables
// respectively indicating whether `self` and `other` are
// negative, we are computing the mathematical value:
//
// (other + o*2^N) - (self + s*2^N) mod 2^N
// other - self + (o-s)*2^N mod 2^N
// other - self mod 2^N
//
// Finally, taking the mod 2^N of the mathematical value of
// `other - self` does not change it as it already is
// in the range [0, 2^N).
(other as $UnsignedT).wrapping_sub(self as $UnsignedT)
} else {
(self as $UnsignedT).wrapping_sub(other as $UnsignedT)
}
}
core::num::<impl i8>::cast_unsigned pub const fn cast_unsigned(self) -> $UnsignedT {
self as $UnsignedT
}
core::num::<impl i8>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i8>::checked_add_unsigned pub const fn checked_add_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_add_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i8>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
if self <= 0 || base <= 1 {
None
} else {
// Delegate to the unsigned implementation.
// The condition makes sure that both casts are exact.
(self as $UnsignedT).checked_ilog(base as $UnsignedT)
}
}
core::num::<impl i8>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
if self <= 0 {
None
} else {
// SAFETY: We just checked that this number is positive
let log = (Self::BITS - 1) - unsafe { intrinsics::ctlz_nonzero(self) as u32 };
Some(log)
}
}
core::num::<impl i8>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i8>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i8>::checked_sub_unsigned pub const fn checked_sub_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_sub_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i8>::count_ones pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
core::num::<impl i8>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl i8>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i8>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i8>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i8>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i8>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl i8>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i8>::leading_zeros pub const fn leading_zeros(self) -> u32 {
(self as $UnsignedT).leading_zeros()
}
core::num::<impl i8>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i8>::overflowing_add_unsigned pub const fn overflowing_add_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_add(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i8>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i8>::overflowing_neg pub const fn overflowing_neg(self) -> (Self, bool) {
if intrinsics::unlikely(self == Self::MIN) {
(Self::MIN, true)
} else {
(-self, false)
}
}
core::num::<impl i8>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i8>::overflowing_sub_unsigned pub const fn overflowing_sub_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_sub(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl i8>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
(self as $UnsignedT).rotate_left(n) as Self
}
core::num::<impl i8>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i8>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i8>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i8>::unchecked_neg pub const unsafe fn unchecked_neg(self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_neg cannot overflow"),
(
lhs: $SelfT = self,
) => !lhs.overflowing_neg().1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(0, self)
}
}
core::num::<impl i8>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl i8>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl i8>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i8>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i8>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i8>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl i8>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i8>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl i8>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl i8>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl isize>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl isize>::abs_diff pub const fn abs_diff(self, other: Self) -> $UnsignedT {
if self < other {
// A non-negative x is unchanged when converted from signed to
// unsigned via `x as U`, but a negative x is converted to the
// value x + 2^N. Thus if `s` and `o` are binary variables
// respectively indicating whether `self` and `other` are
// negative, we are computing the mathematical value:
//
// (other + o*2^N) - (self + s*2^N) mod 2^N
// other - self + (o-s)*2^N mod 2^N
// other - self mod 2^N
//
// Finally, taking the mod 2^N of the mathematical value of
// `other - self` does not change it as it already is
// in the range [0, 2^N).
(other as $UnsignedT).wrapping_sub(self as $UnsignedT)
} else {
(self as $UnsignedT).wrapping_sub(other as $UnsignedT)
}
}
core::num::<impl isize>::cast_unsigned pub const fn cast_unsigned(self) -> $UnsignedT {
self as $UnsignedT
}
core::num::<impl isize>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl isize>::checked_add_unsigned pub const fn checked_add_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_add_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl isize>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
if self <= 0 || base <= 1 {
None
} else {
// Delegate to the unsigned implementation.
// The condition makes sure that both casts are exact.
(self as $UnsignedT).checked_ilog(base as $UnsignedT)
}
}
core::num::<impl isize>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
if self <= 0 {
None
} else {
// SAFETY: We just checked that this number is positive
let log = (Self::BITS - 1) - unsafe { intrinsics::ctlz_nonzero(self) as u32 };
Some(log)
}
}
core::num::<impl isize>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl isize>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl isize>::checked_sub_unsigned pub const fn checked_sub_unsigned(self, rhs: $UnsignedT) -> Option<Self> {
let (a, b) = self.overflowing_sub_unsigned(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl isize>::count_ones pub const fn count_ones(self) -> u32 { (self as $UnsignedT).count_ones() }
core::num::<impl isize>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl isize>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl isize>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl isize>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl isize>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl isize>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl isize>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl isize>::leading_zeros pub const fn leading_zeros(self) -> u32 {
(self as $UnsignedT).leading_zeros()
}
core::num::<impl isize>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl isize>::overflowing_add_unsigned pub const fn overflowing_add_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_add(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl isize>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl isize>::overflowing_neg pub const fn overflowing_neg(self) -> (Self, bool) {
if intrinsics::unlikely(self == Self::MIN) {
(Self::MIN, true)
} else {
(-self, false)
}
}
core::num::<impl isize>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl isize>::overflowing_sub_unsigned pub const fn overflowing_sub_unsigned(self, rhs: $UnsignedT) -> (Self, bool) {
let rhs = rhs as Self;
let (res, overflowed) = self.overflowing_sub(rhs);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl isize>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
(self as $UnsignedT).rotate_left(n) as Self
}
core::num::<impl isize>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl isize>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl isize>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl isize>::unchecked_neg pub const unsafe fn unchecked_neg(self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_neg cannot overflow"),
(
lhs: $SelfT = self,
) => !lhs.overflowing_neg().1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(0, self)
}
}
core::num::<impl isize>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl isize>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl isize>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl isize>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl isize>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl isize>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl isize>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl isize>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl isize>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl isize>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u128>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u128>::carrying_add pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) {
// note: longer-term this should be done via an intrinsic, but LLVM
// doesn't have an equivalent one yet, and this formulation has been
// shown to generate optimal code for now
let (a, c1) = self.overflowing_add(rhs);
let (b, c2) = a.overflowing_add(carry as $SelfT);
// Ideally LLVM would know this is disjoint without us telling it,
// but it doesn't <https://github.com/llvm/llvm-project/issues/118162>
// SAFETY: Only one of `c1` and `c2` can be set.
// For c1 to be set we need to have overflowed, but if we did then
// `a` is at most `MAX-1`, which means that `c2` cannot possibly
// overflow because it's adding at most `1` (since it came from `bool`)
(b, unsafe { intrinsics::disjoint_bitor(c1, c2) })
}
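`carrying_add` is still unstable (`bigint_helper_methods`), but the disjointness argument replays with the stable `overflowing_add`: after a carry, the intermediate sum is at most `MAX - 1`, so adding the 0-or-1 carry cannot carry again. A two-limb (256-bit) addition sketch under that assumption; the helper name `add256` is local to this example:

fn add256(a: (u128, u128), b: (u128, u128)) -> ((u128, u128), bool) {
    let (lo, c1) = a.0.overflowing_add(b.0);
    let (hi, c2) = a.1.overflowing_add(b.1);
    // If c2 is set, `hi` is at most MAX - 1, so this add cannot
    // overflow as well: c2 and c3 are disjoint, as in the listing.
    let (hi, c3) = hi.overflowing_add(c1 as u128);
    ((lo, hi), c2 | c3)
}

fn main() {
    // (MAX, 0) + (1, 0): the low-limb carry propagates into the high limb.
    assert_eq!(add256((u128::MAX, 0), (1, 0)), ((0, 1), false));
}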
core::num::<impl u128>::carrying_mul pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, carry, 0)
}
core::num::<impl u128>::carrying_mul_add pub const fn carrying_mul_add(self, rhs: Self, carry: Self, add: Self) -> (Self, Self) {
intrinsics::carrying_mul_add(self, rhs, carry, add)
}
core::num::<impl u128>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and a `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
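The effect the comment describes is easiest to see at the boundary: `checked_add(1)` reduces to a single compare against `MAX`. Illustrative only:

fn main() {
    assert_eq!(u128::MAX.checked_add(1), None);
    assert_eq!((u128::MAX - 1).checked_add(1), Some(u128::MAX));
}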
core::num::<impl u128>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
// Inform compiler of optimizations when the base is known at
// compile time and there's a cheaper method available.
//
// Note: Like all optimizations, this is not guaranteed to be
// applied by the compiler. If you want those specific bases,
// use `.checked_ilog2()` or `.checked_ilog10()` directly.
if core::intrinsics::is_val_statically_known(base) {
if base == 2 {
return self.checked_ilog2();
} else if base == 10 {
return self.checked_ilog10();
}
}
if self <= 0 || base <= 1 {
None
} else if self < base {
Some(0)
} else {
// Since self >= base, n >= 1
let mut n = 1;
let mut r = base;
// Optimization for 128 bit wide integers.
if Self::BITS == 128 {
// The following is a correct lower bound for ⌊log(base,self)⌋ because
//
// log(base,self) = log(2,self) / log(2,base)
// ≥ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1)
//
// hence
//
// ⌊log(base,self)⌋ ≥ ⌊ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1) ⌋ .
n = self.ilog2() / (base.ilog2() + 1);
r = base.pow(n);
}
while r <= self / base {
n += 1;
r *= base;
}
Some(n)
}
}
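The 128-bit fast path's lower bound can be checked numerically: for self = 2^100 and base = 10, `ilog2(self) / (ilog2(base) + 1)` is 100 / 4 = 25, below the true answer 30, so the trailing loop only runs a handful of extra iterations. Illustrative only:

fn main() {
    let x: u128 = 1 << 100;
    let lower = x.ilog2() / (10u128.ilog2() + 1);
    assert_eq!((lower, x.ilog(10)), (25, 30));
}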
core::num::<impl u128>::checked_ilog10 pub const fn checked_ilog10(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog10()),
None => None,
}
}
core::num::<impl u128>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog2()),
None => None,
}
}
core::num::<impl u128>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u128>::checked_pow pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
if exp == 0 {
return Some(1);
}
let mut base = self;
let mut acc: Self = 1;
loop {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
// since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return Some(acc);
}
}
exp /= 2;
base = try_opt!(base.checked_mul(base));
}
}
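The loop is binary (square-and-multiply) exponentiation: each halving of `exp` squares the base, and each set bit multiplies into the accumulator, so only O(log exp) checked multiplications run. A standalone sketch of the same scheme, using `checked_mul` and `?` in place of the `try_opt!` macro (the free function here is local to the example):

fn checked_pow(mut base: u128, mut exp: u32) -> Option<u128> {
    let mut acc: u128 = 1;
    while exp > 0 {
        if exp & 1 == 1 {
            acc = acc.checked_mul(base)?;
        }
        exp /= 2;
        if exp > 0 {
            // Only square while bits remain, mirroring the early
            // return at exp == 1 in the listing above.
            base = base.checked_mul(base)?;
        }
    }
    Some(acc)
}

fn main() {
    assert_eq!(checked_pow(3, 5), Some(243));
    assert_eq!(checked_pow(2, 127), Some(1u128 << 127));
    assert_eq!(checked_pow(2, 128), None);
}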
core::num::<impl u128>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub`, which produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
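Unsigned subtraction overflows exactly when `self < rhs`, so the manual compare both decides the `Option` and justifies the `unchecked_sub`. Illustrative only:

fn main() {
    assert_eq!(5u128.checked_sub(3), Some(2));
    assert_eq!(3u128.checked_sub(5), None);
}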
core::num::<impl u128>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u128>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl u128>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
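Rounding up via the remainder costs one compare but, unlike the classic `(self + rhs - 1) / rhs`, cannot overflow near `MAX`. Illustrative only:

fn main() {
    assert_eq!(7u128.div_ceil(2), 4);
    assert_eq!(8u128.div_ceil(2), 4);
    // The add-then-divide formulation would overflow here; this cannot.
    assert_eq!(u128::MAX.div_ceil(2), 1u128 << 127);
}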
core::num::<impl u128>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the length of the string is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`, but the
// condition above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations,
// the latency of the multiplication can be hidden by issuing it
// before its result is needed: multiplication here is slower than
// the other instructions, so on a modern out-of-order CPU we start
// the multiply first, let the CPU spend the intervening cycles on
// other computation, and pick up the product once it is ready.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
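Callers normally reach this through `from_str_radix` (below), which forwards the string's bytes. The sign and digit handling above can be exercised from stable code; illustrative only:

fn main() {
    assert_eq!(u128::from_str_radix("ff", 16), Ok(255));
    assert_eq!(u128::from_str_radix("+101", 2), Ok(5));
    // A lone sign is InvalidDigit, and `-` is rejected for unsigned types.
    assert!(u128::from_str_radix("+", 16).is_err());
    assert!(u128::from_str_radix("-1", 16).is_err());
}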
core::num::<impl u128>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u128>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u128>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u128>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u128>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u128>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u128>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u128>::ilog10 pub const fn ilog10(self) -> u32 {
if let Some(log) = self.checked_ilog10() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u128>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u128>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
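The explicit `0` arm avoids the division-by-zero panic of `self % rhs` and encodes the convention that 0 is the only multiple of 0. (`is_multiple_of` is a recent stabilization; older toolchains may not have it.) Illustrative only:

fn main() {
    assert!(12u128.is_multiple_of(4));
    assert!(!12u128.is_multiple_of(5));
    assert!(0u128.is_multiple_of(0)); // no panic, by the explicit arm
    assert!(!7u128.is_multiple_of(0));
}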
core::num::<impl u128>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u128>::leading_ones pub const fn leading_ones(self) -> u32 {
(!self).leading_zeros()
}
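`leading_ones` is just `leading_zeros` of the complement. Quick check, illustrative only:

fn main() {
    let x: u128 = !0 << 120; // top eight bits set
    assert_eq!(x.leading_ones(), 8);
    assert_eq!((!x).leading_zeros(), 8);
}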
core::num::<impl u128>::leading_zeros pub const fn leading_zeros(self) -> u32 {
return intrinsics::ctlz(self as $ActualT);
}
core::num::<impl u128>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u128>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl u128>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u128>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u128>::pow pub const fn pow(self, mut exp: u32) -> Self {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc = 1;
if intrinsics::is_val_statically_known(exp) {
while exp > 1 {
if (exp & 1) == 1 {
acc = acc * base;
}
exp /= 2;
base = base * base;
}
// since exp != 0, the loop above exits with exp == 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc * base
} else {
// This is faster than the above when the exponent is not known
// at compile time. We can't use the same code for the constant
// exponent case because LLVM is currently unable to unroll
// this loop.
loop {
if (exp & 1) == 1 {
acc = acc * base;
// since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return acc;
}
}
exp /= 2;
base = base * base;
}
}
}
core::num::<impl u128>::reverse_bits pub const fn reverse_bits(self) -> Self {
intrinsics::bitreverse(self as $ActualT) as Self
}
core::num::<impl u128>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u128>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl u128>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u128>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u128>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl u128>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u128>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u128>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u128>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u128>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u128>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
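The be/le pairs above compose the native-endianness transmute with `swap_bytes`, so the byte-array conversions round-trip on every target. Quick check, illustrative only:

fn main() {
    let x: u128 = 0x0102_0304;
    let le = x.to_le_bytes();
    // Least-significant byte first, regardless of host endianness.
    assert_eq!(le[..4], [0x04, 0x03, 0x02, 0x01]);
    assert_eq!(u128::from_le_bytes(le), x);
}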
core::num::<impl u128>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl u128>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u128>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl u128>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl u128>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u128>::widening_mul pub const fn widening_mul(self, rhs: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, 0, 0)
}
core::num::<impl u128>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u128>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl u128>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl u128>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl u128>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl u128>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl u128>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u16>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u16>::carrying_add pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) {
// note: longer-term this should be done via an intrinsic, but LLVM
// doesn't have an equivalent one yet, and this formulation has been
// shown to generate optimal code for now
let (a, c1) = self.overflowing_add(rhs);
let (b, c2) = a.overflowing_add(carry as $SelfT);
// Ideally LLVM would know this is disjoint without us telling it,
// but it doesn't <https://github.com/llvm/llvm-project/issues/118162>
// SAFETY: Only one of `c1` and `c2` can be set.
// For c1 to be set we need to have overflowed, but if we did then
// `a` is at most `MAX-1`, which means that `c2` cannot possibly
// overflow because it's adding at most `1` (since it came from `bool`)
(b, unsafe { intrinsics::disjoint_bitor(c1, c2) })
}
core::num::<impl u16>::carrying_mul pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, carry, 0)
}
core::num::<impl u16>::carrying_mul_add pub const fn carrying_mul_add(self, rhs: Self, carry: Self, add: Self) -> (Self, Self) {
intrinsics::carrying_mul_add(self, rhs, carry, add)
}
core::num::<impl u16>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and a `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u16>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
// Inform compiler of optimizations when the base is known at
// compile time and there's a cheaper method available.
//
// Note: Like all optimizations, this is not guaranteed to be
// applied by the compiler. If you want those specific bases,
// use `.checked_ilog2()` or `.checked_ilog10()` directly.
if core::intrinsics::is_val_statically_known(base) {
if base == 2 {
return self.checked_ilog2();
} else if base == 10 {
return self.checked_ilog10();
}
}
if self <= 0 || base <= 1 {
None
} else if self < base {
Some(0)
} else {
// Since self >= base, n >= 1
let mut n = 1;
let mut r = base;
// Optimization for 128 bit wide integers.
if Self::BITS == 128 {
// The following is a correct lower bound for ⌊log(base,self)⌋ because
//
// log(base,self) = log(2,self) / log(2,base)
// ≥ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1)
//
// hence
//
// ⌊log(base,self)⌋ ≥ ⌊ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1) ⌋ .
n = self.ilog2() / (base.ilog2() + 1);
r = base.pow(n);
}
while r <= self / base {
n += 1;
r *= base;
}
Some(n)
}
}
core::num::<impl u16>::checked_ilog10 pub const fn checked_ilog10(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog10()),
None => None,
}
}
core::num::<impl u16>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog2()),
None => None,
}
}
core::num::<impl u16>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u16>::checked_pow pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
if exp == 0 {
return Some(1);
}
let mut base = self;
let mut acc: Self = 1;
loop {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
// since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return Some(acc);
}
}
exp /= 2;
base = try_opt!(base.checked_mul(base));
}
}
core::num::<impl u16>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub`, which produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u16>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u16>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl u16>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
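A quick numeric check of the rounding-up behaviour (illustrative only):

fn demo_div_ceil() {
    assert_eq!(7u16.div_ceil(3), 3); // 7 / 3 = 2 remainder 1, so 2 + 1
    assert_eq!(6u16.div_ceil(3), 2); // exact division is unchanged
}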
core::num::<impl u16>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`, but the
// condition above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations,
// the latency of the multiplication can be hidden by issuing it
// before its result is needed: on modern out-of-order CPUs the
// multiplication is slower than the surrounding instructions, so
// starting it early lets the CPU spend the waiting cycles on
// other computation and pick up the product once it is ready.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
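A hedged usage sketch of this parser through the public `from_str_radix` entry point (listed below), which forwards here; the error cases match the branches above:

fn demo_parse() {
    assert_eq!(u16::from_str_radix("ff", 16), Ok(255));
    assert_eq!(u16::from_str_radix("+101", 2), Ok(5));
    assert!(u16::from_str_radix("", 10).is_err());      // Empty
    assert!(u16::from_str_radix("70000", 10).is_err()); // PosOverflow
}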
core::num::<impl u16>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u16>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u16>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u16>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u16>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
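A round-trip sketch tying the byte-order conversions together (the `to_*_bytes` counterparts appear later in this listing):

fn demo_endianness() {
    let n: u16 = 0x1234;
    assert_eq!(n.to_be_bytes(), [0x12, 0x34]);
    assert_eq!(n.to_le_bytes(), [0x34, 0x12]);
    assert_eq!(u16::from_be_bytes([0x12, 0x34]), n);
    assert_eq!(u16::from_le_bytes([0x34, 0x12]), n);
}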
core::num::<impl u16>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u16>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u16>::ilog10 pub const fn ilog10(self) -> u32 {
if let Some(log) = self.checked_ilog10() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u16>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u16>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
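Note the zero divisor gets a defined answer rather than a panic; a short illustration:

fn demo_is_multiple_of() {
    assert!(6u16.is_multiple_of(3));
    assert!(!7u16.is_multiple_of(3));
    assert!(0u16.is_multiple_of(0));  // only 0 is a multiple of 0
    assert!(!5u16.is_multiple_of(0));
}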
core::num::<impl u16>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u16>::is_utf16_surrogate pub const fn is_utf16_surrogate(self) -> bool {
matches!(self, 0xD800..=0xDFFF)
}
core::num::<impl u16>::leading_ones pub const fn leading_ones(self) -> u32 {
(!self).leading_zeros()
}
core::num::<impl u16>::leading_zeros pub const fn leading_zeros(self) -> u32 {
return intrinsics::ctlz(self as $ActualT);
}
core::num::<impl u16>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u16>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
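The `overflowed ^ (rhs < 0)` trick works because casting a negative `rhs` to `Self` adds exactly 2^BITS, which the wrapping unsigned add then cancels; for a negative `rhs` it is the *absence* of unsigned overflow that signals an out-of-range result. A sketch with illustrative values:

fn demo_overflowing_add_signed() {
    // -3 as u16 is 65533; 10 + 65533 wraps to 7, the intended sum.
    assert_eq!(10u16.overflowing_add_signed(-3), (7, false));
    // 3 - 10 underflows: the unsigned add does *not* wrap, so the flag is set.
    assert_eq!(3u16.overflowing_add_signed(-10), (65529, true));
    assert_eq!(u16::MAX.overflowing_add_signed(1), (0, true));
}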
core::num::<impl u16>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u16>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u16>::pow pub const fn pow(self, mut exp: u32) -> Self {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc = 1;
if intrinsics::is_val_statically_known(exp) {
while exp > 1 {
if (exp & 1) == 1 {
acc = acc * base;
}
exp /= 2;
base = base * base;
}
// Since exp != 0, exp is now exactly 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc * base
} else {
// This is faster than the above when the exponent is not known
// at compile time. We can't use the same code for the constant
// exponent case because LLVM is currently unable to unroll
// this loop.
loop {
if (exp & 1) == 1 {
acc = acc * base;
// Since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return acc;
}
}
exp /= 2;
base = base * base;
}
}
}
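Square-and-multiply picks out the set bits of the exponent; a small trace (illustrative):

fn demo_pow() {
    // 7 = 0b111, so 3^7 multiplies in base^1, base^2 and base^4:
    // 3 * 9 * 81 = 2187.
    assert_eq!(3u16.pow(7), 2187);
}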
core::num::<impl u16>::reverse_bits pub const fn reverse_bits(self) -> Self {
intrinsics::bitreverse(self as $ActualT) as Self
}
core::num::<impl u16>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u16>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl u16>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u16>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u16>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl u16>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u16>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u16>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u16>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u16>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u16>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u16>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl u16>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
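A sketch of how a caller might discharge the precondition before the unsafe call, mirroring what `checked_add` above does internally (the wrapper name is ours):

fn add_if_fits(a: u16, b: u16) -> Option<u16> {
    if a > u16::MAX - b {
        None
    } else {
        // SAFETY: a + b <= u16::MAX, so the addition cannot overflow.
        Some(unsafe { a.unchecked_add(b) })
    }
}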
core::num::<impl u16>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl u16>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl u16>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u16>::widening_mul pub const fn widening_mul(self, rhs: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, 0, 0)
}
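The returned pair is (low half, high half) of the double-width product; a minimal sketch, assuming a toolchain where this unstable helper (`bigint_helper_methods`) is available:

fn demo_widening_mul() {
    let (low, high) = 1000u16.widening_mul(1000);
    // 1_000_000 = 15 * 65536 + 16960
    assert_eq!((low, high), (16960, 15));
}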
core::num::<impl u16>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u16>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl u16>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl u16>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl u16>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl u16>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
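Masking the shift amount makes over-wide shifts wrap instead of being undefined; for example:

fn demo_wrapping_shifts() {
    assert_eq!(1u16.wrapping_shl(4), 16);
    assert_eq!(1u16.wrapping_shl(16), 1);  // 16 & 15 == 0
    assert_eq!(1u16.wrapping_shl(20), 16); // 20 & 15 == 4
}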
core::num::<impl u16>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u32>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8s.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u32>::carrying_add pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) {
// note: longer-term this should be done via an intrinsic, but LLVM doesn't
// have an equivalent one yet, and this phrasing has been shown to generate optimal code for now
let (a, c1) = self.overflowing_add(rhs);
let (b, c2) = a.overflowing_add(carry as $SelfT);
// Ideally LLVM would know this is disjoint without us telling it,
// but it doesn't <https://github.com/llvm/llvm-project/issues/118162>
// SAFETY: Only one of `c1` and `c2` can be set.
// For c1 to be set we need to have overflowed, but if we did then
// `a` is at most `MAX-1`, which means that `c2` cannot possibly
// overflow because it's adding at most `1` (since it came from `bool`)
(b, unsafe { intrinsics::disjoint_bitor(c1, c2) })
}
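The intended use is rippling a carry across the limbs of a wider integer; a minimal sketch, assuming the unstable `bigint_helper_methods` feature:

fn add_two_limbs(a: [u32; 2], b: [u32; 2]) -> ([u32; 2], bool) {
    let (lo, carry) = a[0].carrying_add(b[0], false);
    let (hi, carry) = a[1].carrying_add(b[1], carry);
    ([lo, hi], carry) // the final carry reports 64-bit overflow
}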
core::num::<impl u32>::carrying_mul pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, carry, 0)
}
core::num::<impl u32>::carrying_mul_add pub const fn carrying_mul_add(self, rhs: Self, carry: Self, add: Self) -> (Self, Self) {
intrinsics::carrying_mul_add(self, rhs, carry, add)
}
core::num::<impl u32>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u32>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
// Inform compiler of optimizations when the base is known at
// compile time and there's a cheaper method available.
//
// Note: Like all optimizations, this is not guaranteed to be
// applied by the compiler. If you want those specific bases,
// use `.checked_ilog2()` or `.checked_ilog10()` directly.
if core::intrinsics::is_val_statically_known(base) {
if base == 2 {
return self.checked_ilog2();
} else if base == 10 {
return self.checked_ilog10();
}
}
if self <= 0 || base <= 1 {
None
} else if self < base {
Some(0)
} else {
// Since self >= base, n >= 1
let mut n = 1;
let mut r = base;
// Optimization for 128-bit wide integers.
if Self::BITS == 128 {
// The following is a correct lower bound for ⌊log(base,self)⌋ because
//
// log(base,self) = log(2,self) / log(2,base)
// ≥ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1)
//
// hence
//
// ⌊log(base,self)⌋ ≥ ⌊ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1) ⌋ .
n = self.ilog2() / (base.ilog2() + 1);
r = base.pow(n);
}
while r <= self / base {
n += 1;
r *= base;
}
Some(n)
}
}
core::num::<impl u32>::checked_ilog10 pub const fn checked_ilog10(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog10()),
None => None,
}
}
core::num::<impl u32>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog2()),
None => None,
}
}
core::num::<impl u32>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u32>::checked_pow pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
if exp == 0 {
return Some(1);
}
let mut base = self;
let mut acc: Self = 1;
loop {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
// Since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return Some(acc);
}
}
exp /= 2;
base = try_opt!(base.checked_mul(base));
}
}
core::num::<impl u32>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub`, which produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u32>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u32>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl u32>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u32>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`, but the
// condition above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations,
// the latency of the multiplication can be hidden by issuing it
// before its result is needed: on modern out-of-order CPUs the
// multiplication is slower than the surrounding instructions, so
// starting it early lets the CPU spend the waiting cycles on
// other computation and pick up the product once it is ready.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
core::num::<impl u32>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u32>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u32>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u32>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u32>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u32>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u32>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u32>::ilog10 pub const fn ilog10(self) -> u32 {
if let Some(log) = self.checked_ilog10() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u32>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u32>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u32>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u32>::leading_ones pub const fn leading_ones(self) -> u32 {
(!self).leading_zeros()
}
core::num::<impl u32>::leading_zeros pub const fn leading_zeros(self) -> u32 {
return intrinsics::ctlz(self as $ActualT);
}
core::num::<impl u32>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u32>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl u32>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u32>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u32>::pow pub const fn pow(self, mut exp: u32) -> Self {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc = 1;
if intrinsics::is_val_statically_known(exp) {
while exp > 1 {
if (exp & 1) == 1 {
acc = acc * base;
}
exp /= 2;
base = base * base;
}
// Since exp != 0, exp is now exactly 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc * base
} else {
// This is faster than the above when the exponent is not known
// at compile time. We can't use the same code for the constant
// exponent case because LLVM is currently unable to unroll
// this loop.
loop {
if (exp & 1) == 1 {
acc = acc * base;
// Since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return acc;
}
}
exp /= 2;
base = base * base;
}
}
}
core::num::<impl u32>::reverse_bits pub const fn reverse_bits(self) -> Self {
intrinsics::bitreverse(self as $ActualT) as Self
}
core::num::<impl u32>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u32>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl u32>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u32>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u32>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl u32>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u32>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u32>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u32>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u32>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u32>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u32>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl u32>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u32>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl u32>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl u32>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u32>::widening_mul pub const fn widening_mul(self, rhs: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, 0, 0)
}
core::num::<impl u32>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u32>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl u32>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl u32>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl u32>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl u32>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl u32>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u64>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8s.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u64>::carrying_add pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) {
// note: longer-term this should be done via an intrinsic, but LLVM doesn't
// have an equivalent one yet, and this phrasing has been shown to generate optimal code for now
let (a, c1) = self.overflowing_add(rhs);
let (b, c2) = a.overflowing_add(carry as $SelfT);
// Ideally LLVM would know this is disjoint without us telling it,
// but it doesn't <https://github.com/llvm/llvm-project/issues/118162>
// SAFETY: Only one of `c1` and `c2` can be set.
// For c1 to be set we need to have overflowed, but if we did then
// `a` is at most `MAX-1`, which means that `c2` cannot possibly
// overflow because it's adding at most `1` (since it came from `bool`)
(b, unsafe { intrinsics::disjoint_bitor(c1, c2) })
}
core::num::<impl u64>::carrying_mul pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, carry, 0)
}
core::num::<impl u64>::carrying_mul_add pub const fn carrying_mul_add(self, rhs: Self, carry: Self, add: Self) -> (Self, Self) {
intrinsics::carrying_mul_add(self, rhs, carry, add)
}
core::num::<impl u64>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u64>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
// Inform compiler of optimizations when the base is known at
// compile time and there's a cheaper method available.
//
// Note: Like all optimizations, this is not guaranteed to be
// applied by the compiler. If you want those specific bases,
// use `.checked_ilog2()` or `.checked_ilog10()` directly.
if core::intrinsics::is_val_statically_known(base) {
if base == 2 {
return self.checked_ilog2();
} else if base == 10 {
return self.checked_ilog10();
}
}
if self <= 0 || base <= 1 {
None
} else if self < base {
Some(0)
} else {
// Since self >= base, n >= 1
let mut n = 1;
let mut r = base;
// Optimization for 128-bit wide integers.
if Self::BITS == 128 {
// The following is a correct lower bound for ⌊log(base,self)⌋ because
//
// log(base,self) = log(2,self) / log(2,base)
// ≥ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1)
//
// hence
//
// ⌊log(base,self)⌋ ≥ ⌊ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1) ⌋ .
n = self.ilog2() / (base.ilog2() + 1);
r = base.pow(n);
}
while r <= self / base {
n += 1;
r *= base;
}
Some(n)
}
}
core::num::<impl u64>::checked_ilog10 pub const fn checked_ilog10(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog10()),
None => None,
}
}
core::num::<impl u64>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog2()),
None => None,
}
}
core::num::<impl u64>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u64>::checked_pow pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
if exp == 0 {
return Some(1);
}
let mut base = self;
let mut acc: Self = 1;
loop {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
// Since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return Some(acc);
}
}
exp /= 2;
base = try_opt!(base.checked_mul(base));
}
}
core::num::<impl u64>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub`, which produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u64>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u64>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl u64>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u64>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`, but the
// condition above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations,
// the latency of the multiplication can be hidden by issuing it
// before its result is needed: on modern out-of-order CPUs the
// multiplication is slower than the surrounding instructions, so
// starting it early lets the CPU spend the waiting cycles on
// other computation and pick up the product once it is ready.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
core::num::<impl u64>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u64>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u64>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u64>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u64>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u64>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u64>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u64>::ilog10 pub const fn ilog10(self) -> u32 {
if let Some(log) = self.checked_ilog10() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u64>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u64>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u64>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u64>::leading_ones pub const fn leading_ones(self) -> u32 {
(!self).leading_zeros()
}
core::num::<impl u64>::leading_zeros pub const fn leading_zeros(self) -> u32 {
return intrinsics::ctlz(self as $ActualT);
}
core::num::<impl u64>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u64>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl u64>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u64>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u64>::pow pub const fn pow(self, mut exp: u32) -> Self {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc = 1;
if intrinsics::is_val_statically_known(exp) {
while exp > 1 {
if (exp & 1) == 1 {
acc = acc * base;
}
exp /= 2;
base = base * base;
}
// Since exp != 0, exp is now exactly 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc * base
} else {
// This is faster than the above when the exponent is not known
// at compile time. We can't use the same code for the constant
// exponent case because LLVM is currently unable to unroll
// this loop.
loop {
if (exp & 1) == 1 {
acc = acc * base;
// Since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return acc;
}
}
exp /= 2;
base = base * base;
}
}
}
core::num::<impl u64>::reverse_bits pub const fn reverse_bits(self) -> Self {
intrinsics::bitreverse(self as $ActualT) as Self
}
core::num::<impl u64>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u64>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl u64>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u64>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u64>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl u64>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u64>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u64>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u64>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u64>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u64>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u64>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl u64>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u64>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl u64>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl u64>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u64>::widening_mul pub const fn widening_mul(self, rhs: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, 0, 0)
}
core::num::<impl u64>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u64>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl u64>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl u64>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl u64>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl u64>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl u64>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u8>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8s.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
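For `u8` (the only width where the branch above is live) the `i32` detour is exact, since any difference of two bytes fits in `i32`:

fn demo_abs_diff() {
    assert_eq!(10u8.abs_diff(250), 240);
    assert_eq!(250u8.abs_diff(10), 240); // symmetric by construction
}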
core::num::<impl u8>::carrying_add pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) {
// note: longer-term this should be done via an intrinsic, but LLVM doesn't
// have an equivalent one yet, and this phrasing has been shown to generate optimal code for now
let (a, c1) = self.overflowing_add(rhs);
let (b, c2) = a.overflowing_add(carry as $SelfT);
// Ideally LLVM would know this is disjoint without us telling it,
// but it doesn't <https://github.com/llvm/llvm-project/issues/118162>
// SAFETY: Only one of `c1` and `c2` can be set.
// For c1 to be set we need to have overflowed, but if we did then
// `a` is at most `MAX-1`, which means that `c2` cannot possibly
// overflow because it's adding at most `1` (since it came from `bool`)
(b, unsafe { intrinsics::disjoint_bitor(c1, c2) })
}
core::num::<impl u8>::carrying_mul pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, carry, 0)
}
core::num::<impl u8>::carrying_mul_add pub const fn carrying_mul_add(self, rhs: Self, carry: Self, add: Self) -> (Self, Self) {
intrinsics::carrying_mul_add(self, rhs, carry, add)
}
core::num::<impl u8>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u8>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
// Inform compiler of optimizations when the base is known at
// compile time and there's a cheaper method available.
//
// Note: Like all optimizations, this is not guaranteed to be
// applied by the compiler. If you want those specific bases,
// use `.checked_ilog2()` or `.checked_ilog10()` directly.
if core::intrinsics::is_val_statically_known(base) {
if base == 2 {
return self.checked_ilog2();
} else if base == 10 {
return self.checked_ilog10();
}
}
if self <= 0 || base <= 1 {
None
} else if self < base {
Some(0)
} else {
// Since self >= base, n >= 1
let mut n = 1;
let mut r = base;
// Optimization for 128-bit wide integers.
if Self::BITS == 128 {
// The following is a correct lower bound for ⌊log(base,self)⌋ because
//
// log(base,self) = log(2,self) / log(2,base)
// ≥ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1)
//
// hence
//
// ⌊log(base,self)⌋ ≥ ⌊ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1) ⌋ .
n = self.ilog2() / (base.ilog2() + 1);
r = base.pow(n);
}
while r <= self / base {
n += 1;
r *= base;
}
Some(n)
}
}
core::num::<impl u8>::checked_ilog10 pub const fn checked_ilog10(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog10()),
None => None,
}
}
core::num::<impl u8>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog2()),
None => None,
}
}
core::num::<impl u8>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u8>::checked_pow pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
if exp == 0 {
return Some(1);
}
let mut base = self;
let mut acc: Self = 1;
loop {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
// Since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return Some(acc);
}
}
exp /= 2;
base = try_opt!(base.checked_mul(base));
}
}
core::num::<impl u8>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub`, which produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u8>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u8>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl u8>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u8>::eq_ignore_ascii_case pub const fn eq_ignore_ascii_case(&self, other: &u8) -> bool {
self.to_ascii_lowercase() == other.to_ascii_lowercase()
}
core::num::<impl u8>::escape_ascii pub fn escape_ascii(self) -> ascii::EscapeDefault {
ascii::escape_default(self)
}
core::num::<impl u8>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`, but the
// condition above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations,
// the latency of the multiplication can be hidden by issuing it
// before its result is needed: on modern out-of-order CPUs the
// multiplication is slower than the surrounding instructions, so
// starting it early lets the CPU spend the waiting cycles on
// other computation and pick up the product once it is ready.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
core::num::<impl u8>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u8>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u8>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u8>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u8>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u8>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u8>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u8>::ilog10 pub const fn ilog10(self) -> u32 {
if let Some(log) = self.checked_ilog10() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u8>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl u8>::is_ascii pub const fn is_ascii(&self) -> bool {
*self <= 127
}
core::num::<impl u8>::is_ascii_alphabetic pub const fn is_ascii_alphabetic(&self) -> bool {
matches!(*self, b'A'..=b'Z' | b'a'..=b'z')
}
core::num::<impl u8>::is_ascii_control pub const fn is_ascii_control(&self) -> bool {
matches!(*self, b'\0'..=b'\x1F' | b'\x7F')
}
core::num::<impl u8>::is_ascii_uppercase pub const fn is_ascii_uppercase(&self) -> bool {
matches!(*self, b'A'..=b'Z')
}
core::num::<impl u8>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u8>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u8>::is_utf8_char_boundary pub(crate) const fn is_utf8_char_boundary(self) -> bool {
// This is bit magic equivalent to: b < 128 || b >= 192
(self as i8) >= -0x40
}
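The signed reinterpretation maps UTF-8 continuation bytes 0x80..=0xBF to -128..=-65, and every other byte to a value >= -0x40. This method is pub(crate), so the sketch below restates the trick rather than calling it:

fn demo_utf8_boundary() {
    let boundary = |b: u8| (b as i8) >= -0x40;
    assert!(boundary(0x41));  // ASCII 'A'
    assert!(boundary(0xEA));  // leading byte of a 3-byte sequence
    assert!(!boundary(0x81)); // continuation byte: -127 < -0x40
}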
core::num::<impl u8>::leading_ones pub const fn leading_ones(self) -> u32 {
(!self).leading_zeros()
}
core::num::<impl u8>::leading_zeros pub const fn leading_zeros(self) -> u32 {
return intrinsics::ctlz(self as $ActualT);
}
core::num::<impl u8>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u8>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl u8>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u8>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u8>::pow pub const fn pow(self, mut exp: u32) -> Self {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc = 1;
if intrinsics::is_val_statically_known(exp) {
while exp > 1 {
if (exp & 1) == 1 {
acc = acc * base;
}
exp /= 2;
base = base * base;
}
// Since exp != 0, exp is now exactly 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc * base
} else {
// This is faster than the above when the exponent is not known
// at compile time. We can't use the same code for the constant
// exponent case because LLVM is currently unable to unroll
// this loop.
loop {
if (exp & 1) == 1 {
acc = acc * base;
// Since exp != 0, exp must eventually reach 1 here.
if exp == 1 {
return acc;
}
}
exp /= 2;
base = base * base;
}
}
}
core::num::<impl u8>::reverse_bits pub const fn reverse_bits(self) -> Self {
intrinsics::bitreverse(self as $ActualT) as Self
}
core::num::<impl u8>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u8>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl u8>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u8>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u8>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl u8>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u8>::to_ascii_lowercase pub const fn to_ascii_lowercase(&self) -> u8 {
// Set the 6th bit if this is an uppercase letter
*self | (self.is_ascii_uppercase() as u8 * ASCII_CASE_MASK)
}
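// Editorial example (not part of the library listing): `ASCII_CASE_MASK` is
// assumed here to be the 0x20 bit that separates b'A' (0x41) from b'a' (0x61).
// The branchless multiply yields 0 for non-uppercase bytes, so only b'A'..=b'Z'
// are changed:
fn main() {
    const ASCII_CASE_MASK: u8 = 0x20; // assumption: the constant used above
    let lower = |b: u8| b | (b.is_ascii_uppercase() as u8 * ASCII_CASE_MASK);
    assert_eq!(lower(b'G'), b'g');
    assert_eq!(lower(b'g'), b'g'); // already lowercase: unchanged
    assert_eq!(lower(b'7'), b'7'); // non-letter: unchanged
}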
core::num::<impl u8>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u8>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u8>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u8>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u8>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u8>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl u8>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u8>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl u8>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl u8>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u8>::widening_mul pub const fn widening_mul(self, rhs: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, 0, 0)
}
core::num::<impl u8>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u8>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl u8>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl u8>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl u8>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
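// Editorial example (not part of the library listing): the `rhs & (Self::BITS - 1)`
// mask makes the shift amount wrap modulo the bit width, which is exactly the
// `unchecked_shl` precondition. Observable through the stable API:
fn main() {
    assert_eq!(1u8.wrapping_shl(8), 1); // 8 & 7 == 0: no shift
    assert_eq!(1u8.wrapping_shl(9), 2); // 9 & 7 == 1: shift by one
}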
core::num::<impl u8>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl u8>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl usize>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
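// Editorial example (not part of the library listing): the byte-sized branch above
// widens to i32 so the subtraction can never wrap, then takes the absolute value;
// for u8-sized inputs it agrees with the branchy form in the other arm:
fn main() {
    for a in 0..=255u8 {
        for b in 0..=255u8 {
            let widened = (a as i32).wrapping_sub(b as i32).unsigned_abs() as u8;
            let branchy = if a < b { b - a } else { a - b };
            assert_eq!(widened, branchy);
        }
    }
}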
core::num::<impl usize>::carrying_add pub const fn carrying_add(self, rhs: Self, carry: bool) -> (Self, bool) {
// note: longer-term this should be done via an intrinsic, but this has been shown
// to generate optimal code for now, and LLVM doesn't have an equivalent intrinsic
let (a, c1) = self.overflowing_add(rhs);
let (b, c2) = a.overflowing_add(carry as $SelfT);
// Ideally LLVM would know this is disjoint without us telling it,
// but it doesn't <https://github.com/llvm/llvm-project/issues/118162>
// SAFETY: Only one of `c1` and `c2` can be set.
// For c1 to be set we need to have overflowed, but if we did then
// `a` is at most `MAX-1`, which means that `c2` cannot possibly
// overflow because it's adding at most `1` (since it came from `bool`)
(b, unsafe { intrinsics::disjoint_bitor(c1, c2) })
}
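// Editorial example (not part of the library listing): chaining the carry is how
// the bignum code later in this listing adds limb arrays. A safe sketch of the
// same two-step addition, with `||` standing in for the disjoint-or intrinsic:
fn carrying_add(a: usize, b: usize, carry: bool) -> (usize, bool) {
    let (x, c1) = a.overflowing_add(b);
    let (y, c2) = x.overflowing_add(carry as usize);
    (y, c1 || c2) // at most one of c1, c2 can be set
}
fn main() {
    // [MAX, 0] + [1, 0]: the low limb wraps to 0 and carries into the high limb.
    let (lo, c) = carrying_add(usize::MAX, 1, false);
    let (hi, c) = carrying_add(0, 0, c);
    assert_eq!((lo, hi, c), (0, 1, false));
}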
core::num::<impl usize>::carrying_mul pub const fn carrying_mul(self, rhs: Self, carry: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, carry, 0)
}
core::num::<impl usize>::carrying_mul_add pub const fn carrying_mul_add(self, rhs: Self, carry: Self, add: Self) -> (Self, Self) {
intrinsics::carrying_mul_add(self, rhs, carry, add)
}
core::num::<impl usize>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and a `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl usize>::checked_ilog pub const fn checked_ilog(self, base: Self) -> Option<u32> {
// Inform compiler of optimizations when the base is known at
// compile time and there's a cheaper method available.
//
// Note: Like all optimizations, this is not guaranteed to be
// applied by the compiler. If you want those specific bases,
// use `.checked_ilog2()` or `.checked_ilog10()` directly.
if core::intrinsics::is_val_statically_known(base) {
if base == 2 {
return self.checked_ilog2();
} else if base == 10 {
return self.checked_ilog10();
}
}
if self <= 0 || base <= 1 {
None
} else if self < base {
Some(0)
} else {
// Since self >= base, n >= 1
let mut n = 1;
let mut r = base;
// Optimization for 128 bit wide integers.
if Self::BITS == 128 {
// The following is a correct lower bound for ⌊log(base,self)⌋ because
//
// log(base,self) = log(2,self) / log(2,base)
// ≥ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1)
//
// hence
//
// ⌊log(base,self)⌋ ≥ ⌊ ⌊log(2,self)⌋ / (⌊log(2,base)⌋ + 1) ⌋ .
n = self.ilog2() / (base.ilog2() + 1);
r = base.pow(n);
}
while r <= self / base {
n += 1;
r *= base;
}
Some(n)
}
}
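// Editorial example (not part of the library listing): the three early cases and
// the multiply loop above, exercised through the stable API:
fn main() {
    assert_eq!(0usize.checked_ilog(10), None);      // self == 0
    assert_eq!(5usize.checked_ilog(1), None);       // base <= 1
    assert_eq!(5usize.checked_ilog(10), Some(0));   // self < base
    assert_eq!(99usize.checked_ilog(10), Some(1));  // r == 10 already exceeds 99 / 10
    assert_eq!(100usize.checked_ilog(10), Some(2)); // r grows 10 -> 100, n == 2
}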
core::num::<impl usize>::checked_ilog10 pub const fn checked_ilog10(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog10()),
None => None,
}
}
core::num::<impl usize>::checked_ilog2 pub const fn checked_ilog2(self) -> Option<u32> {
match NonZero::new(self) {
Some(x) => Some(x.ilog2()),
None => None,
}
}
core::num::<impl usize>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl usize>::checked_pow pub const fn checked_pow(self, mut exp: u32) -> Option<Self> {
if exp == 0 {
return Some(1);
}
let mut base = self;
let mut acc: Self = 1;
loop {
if (exp & 1) == 1 {
acc = try_opt!(acc.checked_mul(base));
// since exp != 0, exp must eventually reach 1.
if exp == 1 {
return Some(acc);
}
}
exp /= 2;
base = try_opt!(base.checked_mul(base));
}
}
core::num::<impl usize>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl usize>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl usize>::count_zeros pub const fn count_zeros(self) -> u32 {
(!self).count_ones()
}
core::num::<impl usize>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl usize>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`, but the condition
// above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use those optimisations, the latency of the
// multiplication can be hidden by issuing it before its result is needed:
// on a modern out-of-order CPU the multiplication is slower than the other
// instructions here, so starting it first lets the CPU spend the intervening
// cycles on other computation and pick the product up later.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
core::num::<impl usize>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl usize>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl usize>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl usize>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl usize>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl usize>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
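// Editorial example (not part of the library listing): `from_str_radix` is the
// stable entry point over the ASCII parser above; the failures below correspond
// to the `Empty` and `InvalidDigit` error kinds used there:
fn main() {
    assert_eq!(usize::from_str_radix("ff", 16), Ok(255));
    assert!(usize::from_str_radix("", 10).is_err());   // Empty
    assert!(usize::from_str_radix("-1", 10).is_err()); // '-' is invalid for unsigned
}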
core::num::<impl usize>::ilog pub const fn ilog(self, base: Self) -> u32 {
assert!(base >= 2, "base of integer logarithm must be at least 2");
if let Some(log) = self.checked_ilog(base) {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl usize>::ilog10 pub const fn ilog10(self) -> u32 {
if let Some(log) = self.checked_ilog10() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl usize>::ilog2 pub const fn ilog2(self) -> u32 {
if let Some(log) = self.checked_ilog2() {
log
} else {
int_log10::panic_for_nonpositive_argument()
}
}
core::num::<impl usize>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl usize>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl usize>::leading_ones pub const fn leading_ones(self) -> u32 {
(!self).leading_zeros()
}
core::num::<impl usize>::leading_zeros pub const fn leading_zeros(self) -> u32 {
return intrinsics::ctlz(self as $ActualT);
}
core::num::<impl usize>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl usize>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl usize>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl usize>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl usize>::pow pub const fn pow(self, mut exp: u32) -> Self {
if exp == 0 {
return 1;
}
let mut base = self;
let mut acc = 1;
if intrinsics::is_val_statically_known(exp) {
while exp > 1 {
if (exp & 1) == 1 {
acc = acc * base;
}
exp /= 2;
base = base * base;
}
// since exp != 0, the loop above exits with exp == 1.
// Deal with the final bit of the exponent separately, since
// squaring the base afterwards is not necessary and may cause a
// needless overflow.
acc * base
} else {
// This is faster than the above when the exponent is not known
// at compile time. We can't use the same code for the constant
// exponent case because LLVM is currently unable to unroll
// this loop.
loop {
if (exp & 1) == 1 {
acc = acc * base;
// since exp != 0, exp must eventually reach 1.
if exp == 1 {
return acc;
}
}
exp /= 2;
base = base * base;
}
}
}
core::num::<impl usize>::repeat_u8 pub(crate) const fn repeat_u8(x: u8) -> usize {
usize::from_ne_bytes([x; size_of::<usize>()])
}
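// Editorial example (not part of the library listing): `repeat_u8` broadcasts one
// byte into every byte lane of a usize, a building block for SWAR-style byte
// searches elsewhere in core. A standalone equivalent:
use std::mem::size_of;
fn main() {
    let word = usize::from_ne_bytes([0x7f; size_of::<usize>()]);
    assert_eq!(word & 0xff, 0x7f);                            // lowest lane
    assert_eq!(word >> ((size_of::<usize>() - 1) * 8), 0x7f); // highest lane
}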
core::num::<impl usize>::reverse_bits pub const fn reverse_bits(self) -> Self {
intrinsics::bitreverse(self as $ActualT) as Self
}
core::num::<impl usize>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl usize>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl usize>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl usize>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl usize>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl usize>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl usize>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl usize>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl usize>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl usize>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl usize>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl usize>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl usize>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl usize>::unchecked_shl pub const unsafe fn unchecked_shl(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shl cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shl(self, rhs)
}
}
core::num::<impl usize>::unchecked_shr pub const unsafe fn unchecked_shr(self, rhs: u32) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_shr cannot overflow"),
(
rhs: u32 = rhs,
) => rhs < <$ActualT>::BITS,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_shr(self, rhs)
}
}
core::num::<impl usize>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl usize>::widening_mul pub const fn widening_mul(self, rhs: Self) -> (Self, Self) {
Self::carrying_mul_add(self, rhs, 0, 0)
}
core::num::<impl usize>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl usize>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl usize>::wrapping_mul pub const fn wrapping_mul(self, rhs: Self) -> Self {
intrinsics::wrapping_mul(self, rhs)
}
core::num::<impl usize>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl usize>::wrapping_shl pub const fn wrapping_shl(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shl(rhs & (Self::BITS - 1))
}
}
core::num::<impl usize>::wrapping_shr pub const fn wrapping_shr(self, rhs: u32) -> Self {
// SAFETY: the masking by the bitsize of the type ensures that we do not shift
// out of bounds
unsafe {
self.unchecked_shr(rhs & (Self::BITS - 1))
}
}
core::num::<impl usize>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::bignum::Big32x40::add pub fn add<'a>(&'a mut self, other: &$name) -> &'a mut $name {
use crate::{cmp, iter};
let mut sz = cmp::max(self.size, other.size);
let mut carry = false;
for (a, b) in iter::zip(&mut self.base[..sz], &other.base[..sz]) {
let (v, c) = (*a).carrying_add(*b, carry);
*a = v;
carry = c;
}
if carry {
self.base[sz] = 1;
sz += 1;
}
self.size = sz;
self
}
core::num::bignum::Big32x40::digits pub fn digits(&self) -> &[$ty] {
&self.base[..self.size]
}
core::num::bignum::Big32x40::div_rem_small pub fn div_rem_small(&mut self, other: $ty) -> (&mut $name, $ty) {
use crate::num::bignum::FullOps;
assert!(other > 0);
let sz = self.size;
let mut borrow = 0;
for a in self.base[..sz].iter_mut().rev() {
let (q, r) = (*a).full_div_rem(other, borrow);
*a = q;
borrow = r;
}
(self, borrow)
}
core::num::bignum::Big32x40::from_small pub fn from_small(v: $ty) -> $name {
let mut base = [0; $n];
base[0] = v;
$name { size: 1, base }
}
core::num::bignum::Big32x40::from_u64 pub fn from_u64(mut v: u64) -> $name {
let mut base = [0; $n];
let mut sz = 0;
while v > 0 {
base[sz] = v as $ty;
v >>= <$ty>::BITS;
sz += 1;
}
$name { size: sz, base }
}
core::num::bignum::Big32x40::is_zero pub fn is_zero(&self) -> bool {
self.digits().iter().all(|&v| v == 0)
}
core::num::bignum::Big32x40::mul_digits pub fn mul_digits<'a>(&'a mut self, other: &[$ty]) -> &'a mut $name {
// the internal routine. works best when aa.len() <= bb.len().
fn mul_inner(ret: &mut [$ty; $n], aa: &[$ty], bb: &[$ty]) -> usize {
use crate::num::bignum::FullOps;
let mut retsz = 0;
for (i, &a) in aa.iter().enumerate() {
if a == 0 {
continue;
}
let mut sz = bb.len();
let mut carry = 0;
for (j, &b) in bb.iter().enumerate() {
let (c, v) = a.full_mul_add(b, ret[i + j], carry);
ret[i + j] = v;
carry = c;
}
if carry > 0 {
ret[i + sz] = carry;
sz += 1;
}
if retsz < i + sz {
retsz = i + sz;
}
}
retsz
}
let mut ret = [0; $n];
let retsz = if self.size < other.len() {
mul_inner(&mut ret, &self.digits(), other)
} else {
mul_inner(&mut ret, other, &self.digits())
};
self.base = ret;
self.size = retsz;
self
}
core::num::bignum::Big32x40::mul_digits::mul_inner fn mul_inner(ret: &mut [$ty; $n], aa: &[$ty], bb: &[$ty]) -> usize {
use crate::num::bignum::FullOps;
let mut retsz = 0;
for (i, &a) in aa.iter().enumerate() {
if a == 0 {
continue;
}
let mut sz = bb.len();
let mut carry = 0;
for (j, &b) in bb.iter().enumerate() {
let (c, v) = a.full_mul_add(b, ret[i + j], carry);
ret[i + j] = v;
carry = c;
}
if carry > 0 {
ret[i + sz] = carry;
sz += 1;
}
if retsz < i + sz {
retsz = i + sz;
}
}
retsz
}
core::num::bignum::Big32x40::mul_pow2 pub fn mul_pow2(&mut self, bits: usize) -> &mut $name {
let digitbits = <$ty>::BITS as usize;
let digits = bits / digitbits;
let bits = bits % digitbits;
assert!(digits < $n);
debug_assert!(self.base[$n - digits..].iter().all(|&v| v == 0));
debug_assert!(bits == 0 || (self.base[$n - digits - 1] >> (digitbits - bits)) == 0);
// shift by `digits * digitbits` bits
for i in (0..self.size).rev() {
self.base[i + digits] = self.base[i];
}
for i in 0..digits {
self.base[i] = 0;
}
// shift by `bits` bits
let mut sz = self.size + digits;
if bits > 0 {
let last = sz;
let overflow = self.base[last - 1] >> (digitbits - bits);
if overflow > 0 {
self.base[last] = overflow;
sz += 1;
}
for i in (digits + 1..last).rev() {
self.base[i] =
(self.base[i] << bits) | (self.base[i - 1] >> (digitbits - bits));
}
self.base[digits] <<= bits;
// self.base[..digits] is zero, no need to shift
}
self.size = sz;
self
}
core::num::bignum::Big32x40::mul_small pub fn mul_small(&mut self, other: $ty) -> &mut $name {
let mut sz = self.size;
let mut carry = 0;
for a in &mut self.base[..sz] {
let (v, c) = (*a).carrying_mul(other, carry);
*a = v;
carry = c;
}
if carry > 0 {
self.base[sz] = carry;
sz += 1;
}
self.size = sz;
self
}
core::num::bignum::Big32x40::sub pub fn sub<'a>(&'a mut self, other: &$name) -> &'a mut $name {
use crate::{cmp, iter};
let sz = cmp::max(self.size, other.size);
let mut noborrow = true;
for (a, b) in iter::zip(&mut self.base[..sz], &other.base[..sz]) {
let (v, c) = (*a).carrying_add(!*b, noborrow);
*a = v;
noborrow = c;
}
assert!(noborrow);
self.size = sz;
self
}
core::num::can_not_overflow pub const fn can_not_overflow<T>(radix: u32, is_signed_ty: bool, digits: &[u8]) -> bool {
radix <= 16 && digits.len() <= size_of::<T>() * 2 - is_signed_ty as usize
}
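// Editorial example (not part of the library listing): a worked instance of the
// bound. For radix 16, `size_of::<T>() * 2` digits is exactly the hex width of T,
// and the `is_signed_ty` subtraction reserves one digit for the sign bit:
use std::mem::size_of;
fn can_not_overflow<T>(radix: u32, is_signed_ty: bool, digits: &[u8]) -> bool {
    radix <= 16 && digits.len() <= size_of::<T>() * 2 - is_signed_ty as usize
}
fn main() {
    assert!(can_not_overflow::<u8>(16, false, b"ff")); // 0xff == u8::MAX: still safe
    assert!(!can_not_overflow::<i8>(16, true, b"7f")); // 2 hex digits: not provably safe
    assert!(can_not_overflow::<i8>(16, true, b"7"));   // 1 hex digit: always fits
}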
core::num::dec2flt::float::RawFloat::integer_decode fn integer_decode(self) -> (u64, i16, i8) {
let bits = self.to_bits();
let sign: i8 = if bits >> (Self::BITS - 1) == Self::Int::ZERO { 1 } else { -1 };
let mut exponent: i16 = ((bits & Self::EXP_MASK) >> Self::SIG_BITS).cast();
let mantissa = if exponent == 0 {
(bits & Self::SIG_MASK) << 1
} else {
(bits & Self::SIG_MASK) | (Self::Int::ONE << Self::SIG_BITS)
};
// Exponent bias + mantissa shift
exponent -= (Self::EXP_BIAS + Self::SIG_BITS) as i16;
(mantissa.into(), exponent, sign)
}
core::num::diy_float::Fp::mul pub fn mul(self, other: Self) -> Self {
let (lo, hi) = self.f.widening_mul(other.f);
let f = hi + (lo >> 63) /* round */;
let e = self.e + other.e + 64;
Self { f, e }
}
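// Editorial example (not part of the library listing): `widening_mul` yields the
// full 128-bit product; keeping the high half and adding `lo >> 63` rounds to
// nearest. A u128-based sketch of the same operation (illustrative name):
fn fp_mul(f1: u64, e1: i16, f2: u64, e2: i16) -> (u64, i16) {
    let wide = (f1 as u128) * (f2 as u128);
    let (hi, lo) = ((wide >> 64) as u64, wide as u64);
    (hi + (lo >> 63), e1 + e2 + 64) // round on the top discarded bit
}
fn main() {
    // 1.0 * 1.0, with f = 2^63 and e = -63 encoding 1.0: the product comes out
    // as f = 2^62, e = -62, still 1.0 (normalization is a separate step).
    assert_eq!(fp_mul(1 << 63, -63, 1 << 63, -63), (1 << 62, -62));
}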
core::num::diy_float::Fp::normalize pub fn normalize(self) -> Self {
let lz = self.f.leading_zeros();
let f = self.f << lz;
let e = self.e - lz as i16;
debug_assert!(f >= (1 << 63));
Self { f, e }
}
core::num::diy_float::Fp::normalize_to pub fn normalize_to(self, e: i16) -> Self {
let edelta = self.e - e;
assert!(edelta >= 0);
let edelta = edelta as usize;
assert_eq!(self.f << edelta >> edelta, self.f);
Self { f: self.f << edelta, e }
}
core::num::error::ParseIntError::kind pub const fn kind(&self) -> &IntErrorKind {
&self.kind
}
core::num::flt2dec::decoder::decode pub fn decode<T: DecodableFloat>(v: T) -> (/*negative?*/ bool, FullDecoded) {
let (mant, exp, sign) = v.integer_decode();
let even = (mant & 1) == 0;
let decoded = match v.classify() {
FpCategory::Nan => FullDecoded::Nan,
FpCategory::Infinite => FullDecoded::Infinite,
FpCategory::Zero => FullDecoded::Zero,
FpCategory::Subnormal => {
// neighbors: (mant - 2, exp) -- (mant, exp) -- (mant + 2, exp)
// Float::integer_decode always preserves the exponent,
// so the mantissa is scaled for subnormals.
FullDecoded::Finite(Decoded { mant, minus: 1, plus: 1, exp, inclusive: even })
}
FpCategory::Normal => {
let minnorm = <T as DecodableFloat>::min_pos_norm_value().integer_decode();
if mant == minnorm.0 {
// neighbors: (maxmant, exp - 1) -- (minnormmant, exp) -- (minnormmant + 1, exp)
// where maxmant = minnormmant * 2 - 1
FullDecoded::Finite(Decoded {
mant: mant << 2,
minus: 1,
plus: 2,
exp: exp - 2,
inclusive: even,
})
} else {
// neighbors: (mant - 1, exp) -- (mant, exp) -- (mant + 1, exp)
FullDecoded::Finite(Decoded {
mant: mant << 1,
minus: 1,
plus: 1,
exp: exp - 1,
inclusive: even,
})
}
}
};
(sign < 0, decoded)
}
core::num::flt2dec::determine_sign fn determine_sign(sign: Sign, decoded: &FullDecoded, negative: bool) -> &'static str {
match (*decoded, sign) {
(FullDecoded::Nan, _) => "",
(_, Sign::Minus) => {
if negative {
"-"
} else {
""
}
}
(_, Sign::MinusPlus) => {
if negative {
"-"
} else {
"+"
}
}
}
}
core::num::flt2dec::digits_to_dec_str fn digits_to_dec_str<'a>(
buf: &'a [u8],
exp: i16,
frac_digits: usize,
parts: &'a mut [MaybeUninit<Part<'a>>],
) -> &'a [Part<'a>] {
assert!(!buf.is_empty());
assert!(buf[0] > b'0');
assert!(parts.len() >= 4);
// if there is a restriction on the last digit position, `buf` is assumed to be
// left-padded with virtual zeroes. the number of virtual zeroes, `nzeroes`,
// equals `max(0, exp + frac_digits - buf.len())`, so that the position of
// the last digit `exp - buf.len() - nzeroes` is no more than `-frac_digits`:
//
// |<-virtual->|
// |<---- buf ---->| zeroes | exp
// 0. 1 2 3 4 5 6 7 8 9 _ _ _ _ _ _ x 10
// | | |
// 10^exp 10^(exp-buf.len()) 10^(exp-buf.len()-nzeroes)
//
// `nzeroes` is individually calculated for each case in order to avoid overflow.
if exp <= 0 {
// the decimal point is before rendered digits: [0.][000...000][1234][____]
let minus_exp = -(exp as i32) as usize;
parts[0] = MaybeUninit::new(Part::Copy(b"0."));
parts[1] = MaybeUninit::new(Part::Zero(minus_exp));
parts[2] = MaybeUninit::new(Part::Copy(buf));
if frac_digits > buf.len() && frac_digits - buf.len() > minus_exp {
parts[3] = MaybeUninit::new(Part::Zero((frac_digits - buf.len()) - minus_exp));
// SAFETY: we just initialized the elements `..4`.
unsafe { parts[..4].assume_init_ref() }
} else {
// SAFETY: we just initialized the elements `..3`.
unsafe { parts[..3].assume_init_ref() }
}
} else {
let exp = exp as usize;
if exp < buf.len() {
// the decimal point is inside rendered digits: [12][.][34][____]
parts[0] = MaybeUninit::new(Part::Copy(&buf[..exp]));
parts[1] = MaybeUninit::new(Part::Copy(b"."));
parts[2] = MaybeUninit::new(Part::Copy(&buf[exp..]));
if frac_digits > buf.len() - exp {
parts[3] = MaybeUninit::new(Part::Zero(frac_digits - (buf.len() - exp)));
// SAFETY: we just initialized the elements `..4`.
unsafe { parts[..4].assume_init_ref() }
} else {
// SAFETY: we just initialized the elements `..3`.
unsafe { parts[..3].assume_init_ref() }
}
} else {
// the decimal point is after rendered digits: [1234][____0000] or [1234][__][.][__].
parts[0] = MaybeUninit::new(Part::Copy(buf));
parts[1] = MaybeUninit::new(Part::Zero(exp - buf.len()));
if frac_digits > 0 {
parts[2] = MaybeUninit::new(Part::Copy(b"."));
parts[3] = MaybeUninit::new(Part::Zero(frac_digits));
// SAFETY: we just initialized the elements `..4`.
unsafe { parts[..4].assume_init_ref() }
} else {
// SAFETY: we just initialized the elements `..2`.
unsafe { parts[..2].assume_init_ref() }
}
}
}
}
core::num::flt2dec::digits_to_exp_str fn digits_to_exp_str<'a>(
buf: &'a [u8],
exp: i16,
min_ndigits: usize,
upper: bool,
parts: &'a mut [MaybeUninit<Part<'a>>],
) -> &'a [Part<'a>] {
assert!(!buf.is_empty());
assert!(buf[0] > b'0');
assert!(parts.len() >= 6);
let mut n = 0;
parts[n] = MaybeUninit::new(Part::Copy(&buf[..1]));
n += 1;
if buf.len() > 1 || min_ndigits > 1 {
parts[n] = MaybeUninit::new(Part::Copy(b"."));
parts[n + 1] = MaybeUninit::new(Part::Copy(&buf[1..]));
n += 2;
if min_ndigits > buf.len() {
parts[n] = MaybeUninit::new(Part::Zero(min_ndigits - buf.len()));
n += 1;
}
}
// 0.1234 x 10^exp = 1.234 x 10^(exp-1)
let exp = exp as i32 - 1; // avoid underflow when exp is i16::MIN
if exp < 0 {
parts[n] = MaybeUninit::new(Part::Copy(if upper { b"E-" } else { b"e-" }));
parts[n + 1] = MaybeUninit::new(Part::Num(-exp as u16));
} else {
parts[n] = MaybeUninit::new(Part::Copy(if upper { b"E" } else { b"e" }));
parts[n + 1] = MaybeUninit::new(Part::Num(exp as u16));
}
// SAFETY: we just initialized the elements `..n + 2`.
unsafe { parts[..n + 2].assume_init_ref() }
}
core::num::flt2dec::estimate_max_buf_len fn estimate_max_buf_len(exp: i16) -> usize {
21 + ((if exp < 0 { -12 } else { 5 } * exp as i32) as usize >> 4)
}
core::num::flt2dec::estimator::estimate_scaling_factor pub fn estimate_scaling_factor(mant: u64, exp: i16) -> i16 {
// 2^(nbits-1) < mant <= 2^nbits if mant > 0
let nbits = 64 - (mant - 1).leading_zeros() as i64;
// 1292913986 = floor(2^32 * log_10 2)
// therefore this always underestimates (or is exact), but not much.
(((nbits + exp as i64) * 1292913986) >> 32) as i16
}
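// Editorial example (not part of the library listing): 1292913986 / 2^32 is
// log10(2) rounded down, so the expression computes floor((nbits + exp) * log10(2))
// in pure integer arithmetic. Valid only for mant > 0, as `mant - 1` would wrap:
fn estimate(mant: u64, exp: i16) -> i16 {
    let nbits = 64 - (mant - 1).leading_zeros() as i64;
    (((nbits + exp as i64) * 1292913986) >> 32) as i16
}
fn main() {
    // v = 1 * 2^10 = 1024; log10(1024) is about 3.01 and the estimate is 3.
    assert_eq!(estimate(1, 10), 3);
}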
core::num::flt2dec::round_up pub fn round_up(d: &mut [u8]) -> Option<u8> {
match d.iter().rposition(|&c| c != b'9') {
Some(i) => {
// d[i+1..n] is all nines
d[i] += 1;
d[i + 1..].fill(b'0');
None
}
None if d.is_empty() => {
// an empty buffer rounds up (a bit strange but reasonable)
Some(b'1')
}
None => {
// 999..999 rounds to 1000..000 with an increased exponent
d[0] = b'1';
d[1..].fill(b'0');
Some(b'0')
}
}
}
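// Editorial example (not part of the library listing): the three cases above,
// exercised on a local copy of the routine. A `Some(_)` return means the digit
// string grew one place, so the caller must bump the exponent:
fn round_up(d: &mut [u8]) -> Option<u8> {
    match d.iter().rposition(|&c| c != b'9') {
        Some(i) => { d[i] += 1; d[i + 1..].fill(b'0'); None }
        None if d.is_empty() => Some(b'1'),
        None => { d[0] = b'1'; d[1..].fill(b'0'); Some(b'0') }
    }
}
fn main() {
    let mut a = *b"1299";
    assert_eq!(round_up(&mut a), None); // plain increment, trailing nines reset
    assert_eq!(&a, b"1300");
    let mut b = *b"999";
    assert_eq!(round_up(&mut b), Some(b'0')); // all nines: digits become "100" + '0'
    assert_eq!(&b, b"100");
}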
core::num::flt2dec::strategy::dragon::div_2pow10 fn div_2pow10(x: &mut Big, mut n: usize) -> &mut Big {
let largest = POW10.len() - 1;
while n > largest {
x.div_rem_small(POW10[largest]);
n -= largest;
}
x.div_rem_small(POW10[n] << 1);
x
}
core::num::flt2dec::strategy::dragon::div_rem_upto_16 fn div_rem_upto_16<'a>(
x: &'a mut Big,
scale: &Big,
scale2: &Big,
scale4: &Big,
scale8: &Big,
) -> (u8, &'a mut Big) {
let mut d = 0;
if *x >= *scale8 {
x.sub(scale8);
d += 8;
}
if *x >= *scale4 {
x.sub(scale4);
d += 4;
}
if *x >= *scale2 {
x.sub(scale2);
d += 2;
}
if *x >= *scale {
x.sub(scale);
d += 1;
}
debug_assert!(*x < *scale);
(d, x)
}
core::num::flt2dec::strategy::dragon::format_exact pub fn format_exact<'a>(
d: &Decoded,
buf: &'a mut [MaybeUninit<u8>],
limit: i16,
) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
assert!(d.mant > 0);
assert!(d.minus > 0);
assert!(d.plus > 0);
assert!(d.mant.checked_add(d.plus).is_some());
assert!(d.mant.checked_sub(d.minus).is_some());
// estimate `k_0` from original inputs satisfying `10^(k_0-1) < v <= 10^(k_0+1)`.
let mut k = estimate_scaling_factor(d.mant, d.exp);
// `v = mant / scale`.
let mut mant = Big::from_u64(d.mant);
let mut scale = Big::from_small(1);
if d.exp < 0 {
scale.mul_pow2(-d.exp as usize);
} else {
mant.mul_pow2(d.exp as usize);
}
// divide `mant` by `10^k`. now `scale / 10 < mant <= scale * 10`.
if k >= 0 {
mul_pow10(&mut scale, k as usize);
} else {
mul_pow10(&mut mant, -k as usize);
}
// fixup when `mant + plus >= scale`, where `plus / scale = 10^-buf.len() / 2`.
// in order to keep the fixed-size bignum, we actually use `mant + floor(plus) >= scale`.
// we are not actually modifying `scale`, since we can skip the initial multiplication instead.
// again with the shortest algorithm, `d[0]` can be zero but will be eventually rounded up.
if *div_2pow10(&mut scale.clone(), buf.len()).add(&mant) >= scale {
// equivalent to scaling `scale` by 10
k += 1;
} else {
mant.mul_small(10);
}
// if we are working with the last-digit limitation, we need to shorten the buffer
// before the actual rendering in order to avoid double rounding.
// note that we have to enlarge the buffer again when rounding up happens!
let mut len = if k < limit {
// oops, we cannot even produce *one* digit.
// this is possible when, say, we've got something like 9.5 and it's being rounded to 10.
// we return an empty buffer, with an exception of the later rounding-up case
// which occurs when `k == limit` and has to produce exactly one digit.
0
} else if ((k as i32 - limit as i32) as usize) < buf.len() {
(k - limit) as usize
} else {
buf.len()
};
if len > 0 {
// cache `(2, 4, 8) * scale` for digit generation.
// (this can be expensive, so do not calculate them when the buffer is empty.)
let mut scale2 = scale.clone();
scale2.mul_pow2(1);
let mut scale4 = scale.clone();
scale4.mul_pow2(2);
let mut scale8 = scale.clone();
scale8.mul_pow2(3);
for i in 0..len {
if mant.is_zero() {
// following digits are all zeroes, we stop here
// do *not* try to perform rounding! rather, fill remaining digits.
for c in &mut buf[i..len] {
*c = MaybeUninit::new(b'0');
}
// SAFETY: we initialized that memory above.
return (unsafe { buf[..len].assume_init_ref() }, k);
}
let mut d = 0;
if mant >= scale8 {
mant.sub(&scale8);
d += 8;
}
if mant >= scale4 {
mant.sub(&scale4);
d += 4;
}
if mant >= scale2 {
mant.sub(&scale2);
d += 2;
}
if mant >= scale {
mant.sub(&scale);
d += 1;
}
debug_assert!(mant < scale);
debug_assert!(d < 10);
buf[i] = MaybeUninit::new(b'0' + d);
mant.mul_small(10);
}
}
// rounding up if we stop in the middle of digits
// if the following digits are exactly 5000..., check the prior digit and try to
// round to even (i.e., avoid rounding up when the prior digit is even).
let order = mant.cmp(scale.mul_small(5));
if order == Ordering::Greater
|| (order == Ordering::Equal
// SAFETY: `buf[len-1]` is initialized.
&& len > 0 && unsafe { buf[len - 1].assume_init() } & 1 == 1)
{
// if rounding up changes the length, the exponent should also change.
// but we've been requested a fixed number of digits, so do not alter the buffer...
// SAFETY: we initialized that memory above.
if let Some(c) = round_up(unsafe { buf[..len].assume_init_mut() }) {
// ...unless a fixed precision was requested instead.
// we also need to check that, if the original buffer was empty,
// the additional digit can only be added when `k == limit` (edge case).
k += 1;
if k > limit && len < buf.len() {
buf[len] = MaybeUninit::new(c);
len += 1;
}
}
}
// SAFETY: we initialized that memory above.
(unsafe { buf[..len].assume_init_ref() }, k)
}
core::num::flt2dec::strategy::dragon::mul_pow10 pub fn mul_pow10(x: &mut Big, n: usize) -> &mut Big {
debug_assert!(n < 512);
// Save ourselves the left shift for the smallest cases.
if n < 8 {
return x.mul_small(POW10[n & 7]);
}
// Multiply by the powers of 5 and shift the 2s in at the end.
// This keeps the intermediate products smaller and faster.
if n & 7 != 0 {
x.mul_small(POW10[n & 7] >> (n & 7));
}
if n & 8 != 0 {
x.mul_small(POW10[8] >> 8);
}
if n & 16 != 0 {
x.mul_digits(&POW5TO16);
}
if n & 32 != 0 {
x.mul_digits(&POW5TO32);
}
if n & 64 != 0 {
x.mul_digits(&POW5TO64);
}
if n & 128 != 0 {
x.mul_digits(&POW5TO128);
}
if n & 256 != 0 {
x.mul_digits(&POW5TO256);
}
x.mul_pow2(n)
}
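// Editorial example (not part of the library listing): the decomposition works
// because 10^n = 2^n * 5^n, so `POW10[k] >> k` is exactly 5^k; all the factors of
// two are re-applied by the single `mul_pow2(n)` at the end:
fn main() {
    for k in 0..9u32 {
        assert_eq!(10u64.pow(k) >> k, 5u64.pow(k));
    }
}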
core::num::flt2dec::strategy::grisu::cached_power pub fn cached_power(alpha: i16, gamma: i16) -> (i16, Fp) {
let offset = CACHED_POW10_FIRST_E as i32;
let range = (CACHED_POW10.len() as i32) - 1;
let domain = (CACHED_POW10_LAST_E - CACHED_POW10_FIRST_E) as i32;
let idx = ((gamma as i32) - offset) * range / domain;
let (f, e, k) = CACHED_POW10[idx as usize];
debug_assert!(alpha <= e && e <= gamma);
(k, Fp { f, e })
}
core::num::flt2dec::strategy::grisu::format_exact pub fn format_exact<'a>(
d: &Decoded,
buf: &'a mut [MaybeUninit<u8>],
limit: i16,
) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
use crate::num::flt2dec::strategy::dragon::format_exact as fallback;
// SAFETY: The borrow checker is not smart enough to let us use `buf`
// in the second branch, so we launder the lifetime here. But we only re-use
// `buf` if `format_exact_opt` returned `None` so this is okay.
match format_exact_opt(d, unsafe { &mut *(buf as *mut _) }, limit) {
Some(ret) => ret,
None => fallback(d, buf, limit),
}
}
core::num::flt2dec::strategy::grisu::format_exact_opt pub fn format_exact_opt<'a>(
d: &Decoded,
buf: &'a mut [MaybeUninit<u8>],
limit: i16,
) -> Option<(/*digits*/ &'a [u8], /*exp*/ i16)> {
assert!(d.mant > 0);
assert!(d.mant < (1 << 61)); // we need at least three bits of additional precision
assert!(!buf.is_empty());
// normalize and scale `v`.
let v = Fp { f: d.mant, e: d.exp }.normalize();
let (minusk, cached) = cached_power(ALPHA - v.e - 64, GAMMA - v.e - 64);
let v = v.mul(cached);
// divide `v` into integral and fractional parts.
let e = -v.e as usize;
let vint = (v.f >> e) as u32;
let vfrac = v.f & ((1 << e) - 1);
let requested_digits = buf.len();
const POW10_UP_TO_9: [u32; 10] =
[1, 10, 100, 1000, 10_000, 100_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000];
// We deviate from the original algorithm here and do some early checks to determine if we can satisfy requested_digits.
// If we determine that we can't, we exit early and avoid most of the heavy lifting that the algorithm otherwise does.
//
// When vfrac is zero, we can easily determine if vint can satisfy requested digits:
// If requested_digits >= 11, vint is not able to exhaust the count by itself, since 10^(11-1) > u32::MAX >= vint.
// If vint < 10^(requested_digits - 1), vint cannot exhaust the count.
// Otherwise, vint might be able to exhaust the count and we need to execute the rest of the code.
if (vfrac == 0) && ((requested_digits >= 11) || (vint < POW10_UP_TO_9[requested_digits - 1])) {
return None;
}
// both the old `v` and the new `v` (scaled by `10^-k`) have an error of < 1 ulp (Theorem 5.1).
// as we don't know whether the error is positive or negative, we use two approximations
// spaced equally and have the maximal error of 2 ulps (same as the shortest case).
//
// the goal is to find the exactly rounded series of digits that are common to
// both `v - 1 ulp` and `v + 1 ulp`, so that we are maximally confident.
// if this is not possible, we don't know which one is the correct output for `v`,
// so we give up and fall back.
//
// `err` is defined as `1 ulp * 2^e` here (same to the ulp in `vfrac`),
// and we will scale it whenever `v` gets scaled.
let mut err = 1;
// calculate the largest `10^max_kappa` no more than `v` (thus `v < 10^(max_kappa+1)`).
// this is an upper bound of `kappa` below.
let (max_kappa, max_ten_kappa) = max_pow10_no_more_than(vint);
let mut i = 0;
let exp = max_kappa as i16 - minusk + 1;
// if we are working with the last-digit limitation, we need to shorten the buffer
// before the actual rendering in order to avoid double rounding.
// note that we have to enlarge the buffer again when rounding up happens!
let len = if exp <= limit {
// oops, we cannot even produce *one* digit.
// this is possible when, say, we've got something like 9.5 and it's being rounded to 10.
//
// in principle we can immediately call `possibly_round` with an empty buffer,
// but scaling `max_ten_kappa << e` by 10 can result in overflow.
// thus we are being sloppy here and widen the error range by a factor of 10.
// this will increase the false negative rate, but only very, *very* slightly;
// it can only matter noticeably when the mantissa is bigger than 60 bits.
//
// SAFETY: `len=0`, so the obligation of having initialized this memory is trivial.
return unsafe {
possibly_round(buf, 0, exp, limit, v.f / 10, (max_ten_kappa as u64) << e, err << e)
};
} else if ((exp as i32 - limit as i32) as usize) < buf.len() {
(exp - limit) as usize
} else {
buf.len()
};
debug_assert!(len > 0);
// render integral parts.
// the error is entirely fractional, so we don't need to check it in this part.
let mut kappa = max_kappa as i16;
let mut ten_kappa = max_ten_kappa; // 10^kappa
let mut remainder = vint; // digits yet to be rendered
loop {
// we always have at least one digit to render
// invariants:
// - `remainder < 10^(kappa+1)`
// - `vint = d[0..n-1] * 10^(kappa+1) + remainder`
// (it follows that `remainder = vint % 10^(kappa+1)`)
// divide `remainder` by `10^kappa`. both are scaled by `2^-e`.
let q = remainder / ten_kappa;
let r = remainder % ten_kappa;
debug_assert!(q < 10);
buf[i] = MaybeUninit::new(b'0' + q as u8);
i += 1;
// is the buffer full? run the rounding pass with the remainder.
if i == len {
let vrem = ((r as u64) << e) + vfrac; // == (v % 10^kappa) * 2^e
// SAFETY: we have initialized `len` many bytes.
return unsafe {
possibly_round(buf, len, exp, limit, vrem, (ten_kappa as u64) << e, err << e)
};
}
// break the loop when we have rendered all integral digits.
// the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`.
if i > max_kappa as usize {
debug_assert_eq!(ten_kappa, 1);
debug_assert_eq!(kappa, 0);
break;
}
// restore invariants
kappa -= 1;
ten_kappa /= 10;
remainder = r;
}
// render fractional parts.
//
// in principle we can continue to the last available digit and check for the accuracy.
// unfortunately we are working with the finite-sized integers, so we need some criterion
// to detect the overflow. V8 uses `remainder > err`, which becomes false when
// the first `i` significant digits of `v - 1 ulp` and `v` differ. however this rejects
// too many otherwise valid inputs.
//
// since the later phase has a correct overflow detection, we instead use a tighter criterion:
// we continue until `err` exceeds `10^kappa / 2`, so that the range between `v - 1 ulp` and
// `v + 1 ulp` definitely contains two or more rounded representations. this is the same as
// the first two comparisons from `possibly_round`, for reference.
let mut remainder = vfrac;
let maxerr = 1 << (e - 1);
while err < maxerr {
// invariants, where `m = max_kappa + 1` (# of digits in the integral part):
// - `remainder < 2^e`
// - `vfrac * 10^(n-m) = d[m..n-1] * 2^e + remainder`
// - `err = 10^(n-m)`
remainder *= 10; // won't overflow, `2^e * 10 < 2^64`
err *= 10; // won't overflow, `err * 10 < 2^e * 5 < 2^64`
// divide `remainder` by `10^kappa`.
// both are scaled by `2^e / 10^kappa`, so the latter is implicit here.
let q = remainder >> e;
let r = remainder & ((1 << e) - 1);
debug_assert!(q < 10);
buf[i] = MaybeUninit::new(b'0' + q as u8);
i += 1;
// is the buffer full? run the rounding pass with the remainder.
if i == len {
// SAFETY: we have initialized `len` many bytes.
return unsafe { possibly_round(buf, len, exp, limit, r, 1 << e, err) };
}
// restore invariants
remainder = r;
}
// further calculation is useless (`possibly_round` definitely fails), so we give up.
return None;
// we've generated all requested digits of `v`, which should also be the same as the
// corresponding digits of `v - 1 ulp`. now we check if there is a unique representation
// shared by both `v - 1 ulp` and `v + 1 ulp`; this can be either the generated digits or
// the rounded-up version of those digits. if the range contains multiple representations
// of the same length, we cannot be sure and should return `None` instead.
//
// all arguments here are scaled by the common (but implicit) value `k`, so that:
// - `remainder = (v % 10^kappa) * k`
// - `ten_kappa = 10^kappa * k`
// - `ulp = 2^-e * k`
//
// SAFETY: the first `len` bytes of `buf` must be initialized.
unsafe fn possibly_round(
buf: &mut [MaybeUninit<u8>],
mut len: usize,
mut exp: i16,
limit: i16,
remainder: u64,
ten_kappa: u64,
ulp: u64,
) -> Option<(&[u8], i16)> {
debug_assert!(remainder < ten_kappa);
// 10^kappa
// : : :<->: :
// : : : : :
// :|1 ulp|1 ulp| :
// :|<--->|<--->| :
// ----|-----|-----|----
// | v |
// v - 1 ulp v + 1 ulp
//
// (for the reference, the dotted line indicates the exact value for
// possible representations in given number of digits.)
//
// the error is so large that there are at least three possible representations
// between `v - 1 ulp` and `v + 1 ulp`. we cannot determine which one is correct.
if ulp >= ten_kappa {
return None;
}
// 10^kappa
// :<------->:
// : :
// : |1 ulp|1 ulp|
// : |<--->|<--->|
// ----|-----|-----|----
// | v |
// v - 1 ulp v + 1 ulp
//
// in fact, 1/2 ulp is enough to introduce two possible representations.
// (remember that we need a unique representation for both `v - 1 ulp` and `v + 1 ulp`.)
// this won't overflow, as `ulp < ten_kappa` from the first check.
if ten_kappa - ulp <= ulp {
return None;
}
// remainder
// :<->| :
// : | :
// :<--------- 10^kappa ---------->:
// | : | :
// |1 ulp|1 ulp| :
// |<--->|<--->| :
// ----|-----|-----|------------------------
// | v |
// v - 1 ulp v + 1 ulp
//
// if `v + 1 ulp` is closer to the rounded-down representation (which is already in `buf`),
// then we can safely return. note that `v - 1 ulp` *can* be less than the current
// representation, but as `1 ulp < 10^kappa / 2`, this condition is enough:
// the distance between `v - 1 ulp` and the current representation
// cannot exceed `10^kappa / 2`.
//
// the condition is equivalent to `remainder + ulp < 10^kappa / 2`.
// since this can easily overflow, first check if `remainder < 10^kappa / 2`.
// we've already verified that `ulp < 10^kappa / 2`, so as long as
// `10^kappa` did not overflow after all, the second check is fine.
if ten_kappa - remainder > remainder && ten_kappa - 2 * remainder >= 2 * ulp {
// SAFETY: our caller initialized that memory.
return Some((unsafe { buf[..len].assume_init_ref() }, exp));
}
// :<------- remainder ------>| :
// : | :
// :<--------- 10^kappa --------->:
// : | | : |
// : |1 ulp|1 ulp|
// : |<--->|<--->|
// -----------------------|-----|-----|-----
// | v |
// v - 1 ulp v + 1 ulp
//
// on the other hand, if `v - 1 ulp` is closer to the rounded-up representation,
// we should round up and return. for the same reason we don't need to check `v + 1 ulp`.
//
// the condition is equivalent to `remainder - ulp >= 10^kappa / 2`.
// again we first check if `remainder > ulp` (note that this is not `remainder >= ulp`,
// as `10^kappa` is never zero). also note that `remainder - ulp <= 10^kappa`,
// so the second check does not overflow.
if remainder > ulp && ten_kappa - (remainder - ulp) <= remainder - ulp {
if let Some(c) =
// SAFETY: our caller must have initialized that memory.
round_up(unsafe { buf[..len].assume_init_mut() })
{
// only add an additional digit when a fixed precision has been requested.
// we also need to check that, if the original buffer was empty,
// the additional digit can only be added when `exp == limit` (edge case).
exp += 1;
if exp > limit && len < buf.len() {
buf[len] = MaybeUninit::new(c);
len += 1;
}
}
// SAFETY: we and our caller initialized that memory.
return Some((unsafe { buf[..len].assume_init_ref() }, exp));
}
// otherwise we are doomed (i.e., some values between `v - 1 ulp` and `v + 1 ulp` are
// rounding down and others are rounding up) and give up.
None
}
}
core::num::flt2dec::strategy::grisu::format_exact_opt::possibly_round unsafe fn possibly_round(
buf: &mut [MaybeUninit<u8>],
mut len: usize,
mut exp: i16,
limit: i16,
remainder: u64,
ten_kappa: u64,
ulp: u64,
) -> Option<(&[u8], i16)> {
debug_assert!(remainder < ten_kappa);
// 10^kappa
// : : :<->: :
// : : : : :
// :|1 ulp|1 ulp| :
// :|<--->|<--->| :
// ----|-----|-----|----
// | v |
// v - 1 ulp v + 1 ulp
//
// (for the reference, the dotted line indicates the exact value for
// possible representations in given number of digits.)
//
// the error is so large that there are at least three possible representations
// between `v - 1 ulp` and `v + 1 ulp`. we cannot determine which one is correct.
if ulp >= ten_kappa {
return None;
}
// 10^kappa
// :<------->:
// : :
// : |1 ulp|1 ulp|
// : |<--->|<--->|
// ----|-----|-----|----
// | v |
// v - 1 ulp v + 1 ulp
//
// in fact, 1/2 ulp is enough to introduce two possible representations.
// (remember that we need a unique representation for both `v - 1 ulp` and `v + 1 ulp`.)
// this won't overflow, as `ulp < ten_kappa` from the first check.
if ten_kappa - ulp <= ulp {
return None;
}
// remainder
// :<->| :
// : | :
// :<--------- 10^kappa ---------->:
// | : | :
// |1 ulp|1 ulp| :
// |<--->|<--->| :
// ----|-----|-----|------------------------
// | v |
// v - 1 ulp v + 1 ulp
//
// if `v + 1 ulp` is closer to the rounded-down representation (which is already in `buf`),
// then we can safely return. note that `v - 1 ulp` *can* be less than the current
// representation, but as `1 ulp < 10^kappa / 2`, this condition is enough:
// the distance between `v - 1 ulp` and the current representation
// cannot exceed `10^kappa / 2`.
//
// the condition is equivalent to `remainder + ulp < 10^kappa / 2`.
// since this can easily overflow, first check if `remainder < 10^kappa / 2`.
// we've already verified that `ulp < 10^kappa / 2`, so as long as
// `10^kappa` did not overflow after all, the second check is fine.
if ten_kappa - remainder > remainder && ten_kappa - 2 * remainder >= 2 * ulp {
// SAFETY: our caller initialized that memory.
return Some((unsafe { buf[..len].assume_init_ref() }, exp));
}
// :<------- remainder ------>| :
// : | :
// :<--------- 10^kappa --------->:
// : | | : |
// : |1 ulp|1 ulp|
// : |<--->|<--->|
// -----------------------|-----|-----|-----
// | v |
// v - 1 ulp v + 1 ulp
//
// on the other hand, if `v - 1 ulp` is closer to the rounded-up representation,
// we should round up and return. for the same reason we don't need to check `v + 1 ulp`.
//
// the condition is equivalent to `remainder - ulp >= 10^kappa / 2`.
// again we first check if `remainder > ulp` (note that this is not `remainder >= ulp`,
// as `10^kappa` is never zero). also note that `remainder - ulp <= 10^kappa`,
// so the second check does not overflow.
if remainder > ulp && ten_kappa - (remainder - ulp) <= remainder - ulp {
if let Some(c) =
// SAFETY: our caller must have initialized that memory.
round_up(unsafe { buf[..len].assume_init_mut() })
{
// only add an additional digit when a fixed precision has been requested.
// we also need to check that, if the original buffer was empty,
// the additional digit can only be added when `exp == limit` (edge case).
exp += 1;
if exp > limit && len < buf.len() {
buf[len] = MaybeUninit::new(c);
len += 1;
}
}
// SAFETY: we and our caller initialized that memory.
return Some((unsafe { buf[..len].assume_init_ref() }, exp));
}
// otherwise we are doomed (i.e., some values between `v - 1 ulp` and `v + 1 ulp` are
// rounding down and others are rounding up) and give up.
None
}
core::num::flt2dec::strategy::grisu::format_shortest pub fn format_shortest<'a>(
d: &Decoded,
buf: &'a mut [MaybeUninit<u8>],
) -> (/*digits*/ &'a [u8], /*exp*/ i16) {
use crate::num::flt2dec::strategy::dragon::format_shortest as fallback;
// SAFETY: The borrow checker is not smart enough to let us use `buf`
// in the second branch, so we launder the lifetime here. But we only re-use
// `buf` if `format_shortest_opt` returned `None` so this is okay.
match format_shortest_opt(d, unsafe { &mut *(buf as *mut _) }) {
Some(ret) => ret,
None => fallback(d, buf),
}
}
core::num::flt2dec::strategy::grisu::format_shortest_opt pub fn format_shortest_opt<'a>(
d: &Decoded,
buf: &'a mut [MaybeUninit<u8>],
) -> Option<(/*digits*/ &'a [u8], /*exp*/ i16)> {
assert!(d.mant > 0);
assert!(d.minus > 0);
assert!(d.plus > 0);
assert!(d.mant.checked_add(d.plus).is_some());
assert!(d.mant.checked_sub(d.minus).is_some());
assert!(buf.len() >= MAX_SIG_DIGITS);
assert!(d.mant + d.plus < (1 << 61)); // we need at least three bits of additional precision
// start with the normalized values with the shared exponent
let plus = Fp { f: d.mant + d.plus, e: d.exp }.normalize();
let minus = Fp { f: d.mant - d.minus, e: d.exp }.normalize_to(plus.e);
let v = Fp { f: d.mant, e: d.exp }.normalize_to(plus.e);
// find any `cached = 10^minusk` such that `ALPHA <= minusk + plus.e + 64 <= GAMMA`.
// since `plus` is normalized, this means `2^(62 + ALPHA) <= plus * cached < 2^(64 + GAMMA)`;
// given our choices of `ALPHA` and `GAMMA`, this puts `plus * cached` into `[4, 2^32)`.
//
// it is obviously desirable to maximize `GAMMA - ALPHA`,
// so that we don't need many cached powers of 10, but there are some considerations:
//
// 1. we want to keep `floor(plus * cached)` within `u32` since it needs a costly division.
// (this is not really avoidable, remainder is required for accuracy estimation.)
// 2. the remainder of `floor(plus * cached)` repeatedly gets multiplied by 10,
// and it should not overflow.
//
// the first gives `64 + GAMMA <= 32`, while the second gives `10 * 2^-ALPHA <= 2^64`;
// -60 and -32 are the maximal range with this constraint, and V8 also uses them.
let (minusk, cached) = cached_power(ALPHA - plus.e - 64, GAMMA - plus.e - 64);
// scale fps. this gives the maximal error of 1 ulp (proved from Theorem 5.1).
let plus = plus.mul(cached);
let minus = minus.mul(cached);
let v = v.mul(cached);
debug_assert_eq!(plus.e, minus.e);
debug_assert_eq!(plus.e, v.e);
// +- actual range of minus
// | <---|---------------------- unsafe region --------------------------> |
// | | |
// | |<--->| | <--------------- safe region ---------------> | |
// | | | | | |
// |1 ulp|1 ulp| |1 ulp|1 ulp| |1 ulp|1 ulp|
// |<--->|<--->| |<--->|<--->| |<--->|<--->|
// |-----|-----|-------...-------|-----|-----|-------...-------|-----|-----|
// | minus | | v | | plus |
// minus1 minus0 v - 1 ulp v + 1 ulp plus0 plus1
//
// above `minus`, `v` and `plus` are *quantized* approximations (error < 1 ulp).
// as we don't know whether the error is positive or negative, we use two approximations spaced equally
// and have the maximal error of 2 ulps.
//
// the "unsafe region" is a liberal interval which we initially generate.
// the "safe region" is a conservative interval which we only accept.
// we start with the correct repr within the unsafe region, and try to find the closest repr
// to `v` which is also within the safe region. if we can't, we give up.
let plus1 = plus.f + 1;
// let plus0 = plus.f - 1; // only for explanation
// let minus0 = minus.f + 1; // only for explanation
let minus1 = minus.f - 1;
let e = -plus.e as usize; // shared exponent
// divide `plus1` into integral and fractional parts.
// integral parts are guaranteed to fit in u32, since cached power guarantees `plus < 2^32`
// and normalized `plus.f` is always less than `2^64 - 2^4` due to the precision requirement.
let plus1int = (plus1 >> e) as u32;
let plus1frac = plus1 & ((1 << e) - 1);
// calculate the largest `10^max_kappa` no more than `plus1` (thus `plus1 < 10^(max_kappa+1)`).
// this is an upper bound of `kappa` below.
let (max_kappa, max_ten_kappa) = max_pow10_no_more_than(plus1int);
let mut i = 0;
let exp = max_kappa as i16 - minusk + 1;
// Theorem 6.2: if `k` is the greatest integer s.t. `0 <= y mod 10^k <= y - x`,
// then `V = floor(y / 10^k) * 10^k` is in `[x, y]` and one of the shortest
// representations (with the minimal number of significant digits) in that range.
//
// find the digit length `kappa` between `(minus1, plus1)` as per Theorem 6.2.
// Theorem 6.2 can be adapted to exclude `x` by requiring `y mod 10^k < y - x` instead.
// (e.g., `x` = 32000, `y` = 32777; `kappa` = 2 since `y mod 10^3 = 777` is not strictly less than `y - x = 777`.)
// the algorithm relies on the later verification phase to exclude `y`.
let delta1 = plus1 - minus1;
// let delta1int = (delta1 >> e) as usize; // only for explanation
let delta1frac = delta1 & ((1 << e) - 1);
// render integral parts, while checking for the accuracy at each step.
let mut ten_kappa = max_ten_kappa; // 10^kappa
let mut remainder = plus1int; // digits yet to be rendered
loop {
// we always have at least one digit to render, as `plus1 >= 10^kappa`
// invariants:
// - `delta1int <= remainder < 10^(kappa+1)`
// - `plus1int = d[0..n-1] * 10^(kappa+1) + remainder`
// (it follows that `remainder = plus1int % 10^(kappa+1)`)
// divide `remainder` by `10^kappa`. both are scaled by `2^-e`.
let q = remainder / ten_kappa;
let r = remainder % ten_kappa;
debug_assert!(q < 10);
buf[i] = MaybeUninit::new(b'0' + q as u8);
i += 1;
let plus1rem = ((r as u64) << e) + plus1frac; // == (plus1 % 10^kappa) * 2^e
if plus1rem < delta1 {
// `plus1 % 10^kappa < delta1 = plus1 - minus1`; we've found the correct `kappa`.
let ten_kappa = (ten_kappa as u64) << e; // scale 10^kappa back to the shared exponent
return round_and_weed(
// SAFETY: we initialized that memory above.
unsafe { buf[..i].assume_init_mut() },
exp,
plus1rem,
delta1,
plus1 - v.f,
ten_kappa,
1,
);
}
// break the loop when we have rendered all integral digits.
// the exact number of digits is `max_kappa + 1` as `plus1 < 10^(max_kappa+1)`.
if i > max_kappa as usize {
debug_assert_eq!(ten_kappa, 1);
break;
}
// restore invariants
ten_kappa /= 10;
remainder = r;
}
// render fractional parts, while checking for the accuracy at each step.
// this time we rely on repeated multiplications, as division would lose precision.
let mut remainder = plus1frac;
let mut threshold = delta1frac;
let mut ulp = 1;
loop {
// the next digit should be significant as we've tested that before breaking out
// invariants, where `m = max_kappa + 1` (# of digits in the integral part):
// - `remainder < 2^e`
// - `plus1frac * 10^(n-m) = d[m..n-1] * 2^e + remainder`
remainder *= 10; // won't overflow, `2^e * 10 < 2^64`
threshold *= 10;
ulp *= 10;
// divide `remainder` by `10^kappa`.
// both are scaled by `2^e / 10^kappa`, so the latter is implicit here.
let q = remainder >> e;
let r = remainder & ((1 << e) - 1);
debug_assert!(q < 10);
buf[i] = MaybeUninit::new(b'0' + q as u8);
i += 1;
if r < threshold {
let ten_kappa = 1 << e; // implicit divisor
return round_and_weed(
// SAFETY: we initialized that memory above.
unsafe { buf[..i].assume_init_mut() },
exp,
r,
threshold,
(plus1 - v.f) * ulp,
ten_kappa,
ulp,
);
}
// restore invariants
remainder = r;
}
// we've generated all significant digits of `plus1`, but not sure if it's the optimal one.
// for example, if `minus1` is 3.14153... and `plus1` is 3.14158..., there are 5 different
// shortest representations from 3.14154 to 3.14158, but we only have the greatest one.
// we have to successively decrease the last digit and check if this is the optimal repr.
// there are at most 9 candidates (..1 to ..9), so this is fairly quick. ("rounding" phase)
//
// the function checks if this "optimal" repr is actually within the ulp ranges,
// and also, it is possible that the "second-to-optimal" repr can actually be optimal
// due to the rounding error. in either case this returns `None`. ("weeding" phase)
//
// all arguments here are scaled by the common (but implicit) value `k`, so that:
// - `remainder = (plus1 % 10^kappa) * k`
// - `threshold = (plus1 - minus1) * k` (and also, `remainder < threshold`)
// - `plus1v = (plus1 - v) * k` (and also, `threshold > plus1v` from prior invariants)
// - `ten_kappa = 10^kappa * k`
// - `ulp = 2^-e * k`
fn round_and_weed(
buf: &mut [u8],
exp: i16,
remainder: u64,
threshold: u64,
plus1v: u64,
ten_kappa: u64,
ulp: u64,
) -> Option<(&[u8], i16)> {
assert!(!buf.is_empty());
// produce two approximations to `v` (actually `plus1 - v`) within 1.5 ulps.
// the resulting representation should be the closest representation to both.
//
// here `plus1 - v` is used since calculations are done with respect to `plus1`
// in order to avoid overflow/underflow (hence the seemingly swapped names).
let plus1v_down = plus1v + ulp; // plus1 - (v - 1 ulp)
let plus1v_up = plus1v - ulp; // plus1 - (v + 1 ulp)
// decrease the last digit and stop at the closest representation to `v + 1 ulp`.
let mut plus1w = remainder; // plus1w(n) = plus1 - w(n)
{
let last = buf.last_mut().unwrap();
// we work with the approximated digits `w(n)`, which is initially equal to `plus1 -
// plus1 % 10^kappa`. after running the loop body `n` times, `w(n) = plus1 -
// plus1 % 10^kappa - n * 10^kappa`. we set `plus1w(n) = plus1 - w(n) =
// plus1 % 10^kappa + n * 10^kappa` (thus `remainder = plus1w(0)`) to simplify checks.
// note that `plus1w(n)` is always increasing.
//
// we have three conditions to terminate. any of them will make the loop unable to
// proceed, but we then have at least one valid representation known to be closest to
// `v + 1 ulp` anyway. we will denote them as TC1 through TC3 for brevity.
//
// TC1: `w(n) <= v + 1 ulp`, i.e., this is the last repr that can be the closest one.
// this is equivalent to `plus1 - w(n) = plus1w(n) >= plus1 - (v + 1 ulp) = plus1v_up`.
// combined with TC2 (which checks if `w(n+1)` is valid), this prevents the possible
// overflow on the calculation of `plus1w(n)`.
//
// TC2: `w(n+1) < minus1`, i.e., the next repr definitely does not round to `v`.
// this is equivalent to `plus1 - w(n) + 10^kappa = plus1w(n) + 10^kappa >
// plus1 - minus1 = threshold`. the left hand side can overflow, but we know
// `threshold > plus1v`, so if TC1 is false, `threshold - plus1w(n) >
// threshold - (plus1v - 1 ulp) > 1 ulp` and we can safely test if
// `threshold - plus1w(n) < 10^kappa` instead.
//
// TC3: `abs(w(n) - (v + 1 ulp)) <= abs(w(n+1) - (v + 1 ulp))`, i.e., the next repr is
// no closer to `v + 1 ulp` than the current repr. given `z(n) = plus1v_up - plus1w(n)`,
// this becomes `abs(z(n)) <= abs(z(n+1))`. again assuming that TC1 is false, we have
// `z(n) > 0`. we have two cases to consider:
//
// - when `z(n+1) >= 0`: TC3 becomes `z(n) <= z(n+1)`. as `plus1w(n)` is increasing,
// `z(n)` should be decreasing and this is clearly false.
// - when `z(n+1) < 0`:
// - TC3a: the precondition is `plus1v_up < plus1w(n) + 10^kappa`. assuming TC2 is
// false, `threshold >= plus1w(n) + 10^kappa` so it cannot overflow.
// - TC3b: TC3 becomes `z(n) <= -z(n+1)`, i.e., `plus1v_up - plus1w(n) >=
// plus1w(n+1) - plus1v_up = plus1w(n) + 10^kappa - plus1v_up`. the negated TC1
// gives `plus1v_up > plus1w(n)`, so it cannot overflow or underflow when
// combined with TC3a.
//
// consequently, we should stop when `TC1 || TC2 || (TC3a && TC3b)`. the following is
// equal to its inverse, `!TC1 && !TC2 && (!TC3a || !TC3b)`.
while plus1w < plus1v_up
&& threshold - plus1w >= ten_kappa
&& (plus1w + ten_kappa < plus1v_up
|| plus1v_up - plus1w >= plus1w + ten_kappa - plus1v_up)
{
*last -= 1;
debug_assert!(*last > b'0'); // the shortest repr cannot end with `0`
plus1w += ten_kappa;
}
}
// check if this representation is also the closest representation to `v - 1 ulp`.
//
// this is simply the same as the terminating conditions for `v + 1 ulp`, with all `plus1v_up`
// replaced by `plus1v_down` instead. the overflow analysis holds equally.
if plus1w < plus1v_down
&& threshold - plus1w >= ten_kappa
&& (plus1w + ten_kappa < plus1v_down
|| plus1v_down - plus1w >= plus1w + ten_kappa - plus1v_down)
{
return None;
}
// now we have the closest representation to `v` between `plus1` and `minus1`.
// this is too liberal, though, so we reject any `w(n)` not between `plus0` and `minus0`,
// i.e., `plus1 - plus1w(n) <= minus0` or `plus1 - plus1w(n) >= plus0`. we utilize the facts
// that `threshold = plus1 - minus1` and `plus1 - plus0 = minus0 - minus1 = 2 ulp`.
if 2 * ulp <= plus1w && plus1w <= threshold - 4 * ulp { Some((buf, exp)) } else { None }
}
}
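A minimal standalone sketch of the integral-digit loop above: `render_integral` is a hypothetical helper mirroring the repeated division by a shrinking `ten_kappa`; the real loop additionally tracks the fractional part and the `delta1` accuracy bound.

fn render_integral(mut remainder: u32, mut ten_kappa: u32, buf: &mut Vec<u8>) {
    loop {
        let q = remainder / ten_kappa; // next decimal digit
        let r = remainder % ten_kappa; // digits yet to be rendered
        debug_assert!(q < 10);
        buf.push(b'0' + q as u8);
        if ten_kappa == 1 {
            break;
        }
        ten_kappa /= 10;
        remainder = r;
    }
}

fn main() {
    let mut buf = Vec::new();
    render_integral(32777, 10_000, &mut buf); // 10^4 <= 32777 < 10^5
    assert_eq!(buf.as_slice(), b"32777");
}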
core::num::flt2dec::strategy::grisu::format_shortest_opt::round_and_weed fn round_and_weed(
buf: &mut [u8],
exp: i16,
remainder: u64,
threshold: u64,
plus1v: u64,
ten_kappa: u64,
ulp: u64,
) -> Option<(&[u8], i16)> {
assert!(!buf.is_empty());
// produce two approximations to `v` (actually `plus1 - v`) within 1.5 ulps.
// the resulting representation should be the closest representation to both.
//
// here `plus1 - v` is used since calculations are done with respect to `plus1`
// in order to avoid overflow/underflow (hence the seemingly swapped names).
let plus1v_down = plus1v + ulp; // plus1 - (v - 1 ulp)
let plus1v_up = plus1v - ulp; // plus1 - (v + 1 ulp)
// decrease the last digit and stop at the closest representation to `v + 1 ulp`.
let mut plus1w = remainder; // plus1w(n) = plus1 - w(n)
{
let last = buf.last_mut().unwrap();
// we work with the approximated digits `w(n)`, which is initially equal to `plus1 -
// plus1 % 10^kappa`. after running the loop body `n` times, `w(n) = plus1 -
// plus1 % 10^kappa - n * 10^kappa`. we set `plus1w(n) = plus1 - w(n) =
// plus1 % 10^kappa + n * 10^kappa` (thus `remainder = plus1w(0)`) to simplify checks.
// note that `plus1w(n)` is always increasing.
//
// we have three conditions to terminate. any of them will make the loop unable to
// proceed, but we then have at least one valid representation known to be closest to
// `v + 1 ulp` anyway. we will denote them as TC1 through TC3 for brevity.
//
// TC1: `w(n) <= v + 1 ulp`, i.e., this is the last repr that can be the closest one.
// this is equivalent to `plus1 - w(n) = plus1w(n) >= plus1 - (v + 1 ulp) = plus1v_up`.
// combined with TC2 (which checks if `w(n+1)` is valid), this prevents the possible
// overflow on the calculation of `plus1w(n)`.
//
// TC2: `w(n+1) < minus1`, i.e., the next repr definitely does not round to `v`.
// this is equivalent to `plus1 - w(n) + 10^kappa = plus1w(n) + 10^kappa >
// plus1 - minus1 = threshold`. the left hand side can overflow, but we know
// `threshold > plus1v`, so if TC1 is false, `threshold - plus1w(n) >
// threshold - (plus1v - 1 ulp) > 1 ulp` and we can safely test if
// `threshold - plus1w(n) < 10^kappa` instead.
//
// TC3: `abs(w(n) - (v + 1 ulp)) <= abs(w(n+1) - (v + 1 ulp))`, i.e., the next repr is
// no closer to `v + 1 ulp` than the current repr. given `z(n) = plus1v_up - plus1w(n)`,
// this becomes `abs(z(n)) <= abs(z(n+1))`. again assuming that TC1 is false, we have
// `z(n) > 0`. we have two cases to consider:
//
// - when `z(n+1) >= 0`: TC3 becomes `z(n) <= z(n+1)`. as `plus1w(n)` is increasing,
// `z(n)` should be decreasing and this is clearly false.
// - when `z(n+1) < 0`:
// - TC3a: the precondition is `plus1v_up < plus1w(n) + 10^kappa`. assuming TC2 is
// false, `threshold >= plus1w(n) + 10^kappa` so it cannot overflow.
// - TC3b: TC3 becomes `z(n) <= -z(n+1)`, i.e., `plus1v_up - plus1w(n) >=
// plus1w(n+1) - plus1v_up = plus1w(n) + 10^kappa - plus1v_up`. the negated TC1
// gives `plus1v_up > plus1w(n)`, so it cannot overflow or underflow when
// combined with TC3a.
//
// consequently, we should stop when `TC1 || TC2 || (TC3a && TC3b)`. the following is
// equal to its inverse, `!TC1 && !TC2 && (!TC3a || !TC3b)`.
while plus1w < plus1v_up
&& threshold - plus1w >= ten_kappa
&& (plus1w + ten_kappa < plus1v_up
|| plus1v_up - plus1w >= plus1w + ten_kappa - plus1v_up)
{
*last -= 1;
debug_assert!(*last > b'0'); // the shortest repr cannot end with `0`
plus1w += ten_kappa;
}
}
// check if this representation is also the closest representation to `v - 1 ulp`.
//
// this is simply the same as the terminating conditions for `v + 1 ulp`, with all `plus1v_up`
// replaced by `plus1v_down` instead. the overflow analysis holds equally.
if plus1w < plus1v_down
&& threshold - plus1w >= ten_kappa
&& (plus1w + ten_kappa < plus1v_down
|| plus1v_down - plus1w >= plus1w + ten_kappa - plus1v_down)
{
return None;
}
// now we have the closest representation to `v` between `plus1` and `minus1`.
// this is too liberal, though, so we reject any `w(n)` not between `plus0` and `minus0`,
// i.e., `plus1 - plus1w(n) <= minus0` or `plus1 - plus1w(n) >= plus0`. we utilize the facts
// that `threshold = plus1 - minus1` and `plus1 - plus0 = minus0 - minus1 = 2 ulp`.
if 2 * ulp <= plus1w && plus1w <= threshold - 4 * ulp { Some((buf, exp)) } else { None }
}
core::num::flt2dec::strategy::grisu::max_pow10_no_more_than pub fn max_pow10_no_more_than(x: u32) -> (u8, u32) {
debug_assert!(x > 0);
const X9: u32 = 10_0000_0000;
const X8: u32 = 1_0000_0000;
const X7: u32 = 1000_0000;
const X6: u32 = 100_0000;
const X5: u32 = 10_0000;
const X4: u32 = 1_0000;
const X3: u32 = 1000;
const X2: u32 = 100;
const X1: u32 = 10;
if x < X4 {
if x < X2 {
if x < X1 { (0, 1) } else { (1, X1) }
} else {
if x < X3 { (2, X2) } else { (3, X3) }
}
} else {
if x < X6 {
if x < X5 { (4, X4) } else { (5, X5) }
} else if x < X8 {
if x < X7 { (6, X6) } else { (7, X7) }
} else {
if x < X9 { (8, X8) } else { (9, X9) }
}
}
}
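A standalone sanity sketch (an assumed equivalence, not the library's test suite): for `x > 0` the branch tree above returns `(k, 10^k)` with `k = x.ilog10()`, i.e. `10^k <= x < 10^(k+1)`.

fn main() {
    for &x in &[1u32, 9, 10, 99, 777, 9_999, 1_000_000_000, u32::MAX] {
        let k = x.ilog10();   // plays the role of `max_kappa`
        let p = 10u32.pow(k); // plays the role of `max_ten_kappa`
        assert!(p <= x && x / 10 < p); // 10^k <= x < 10^(k+1), overflow-free
    }
}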
core::num::flt2dec::to_exact_exp_str pub fn to_exact_exp_str<'a, T, F>(
mut format_exact: F,
v: T,
sign: Sign,
ndigits: usize,
upper: bool,
buf: &'a mut [MaybeUninit<u8>],
parts: &'a mut [MaybeUninit<Part<'a>>],
) -> Formatted<'a>
where
T: DecodableFloat,
F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
{
assert!(parts.len() >= 6);
assert!(ndigits > 0);
let (negative, full_decoded) = decode(v);
let sign = determine_sign(sign, &full_decoded, negative);
match full_decoded {
FullDecoded::Nan => {
parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
// SAFETY: we just initialized the elements `..1`.
Formatted { sign, parts: unsafe { parts[..1].assume_init_ref() } }
}
FullDecoded::Infinite => {
parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
// SAFETY: we just initialized the elements `..1`.
Formatted { sign, parts: unsafe { parts[..1].assume_init_ref() } }
}
FullDecoded::Zero => {
if ndigits > 1 {
// [0.][0000][e0]
parts[0] = MaybeUninit::new(Part::Copy(b"0."));
parts[1] = MaybeUninit::new(Part::Zero(ndigits - 1));
parts[2] = MaybeUninit::new(Part::Copy(if upper { b"E0" } else { b"e0" }));
Formatted {
sign,
// SAFETY: we just initialized the elements `..3`.
parts: unsafe { parts[..3].assume_init_ref() },
}
} else {
parts[0] = MaybeUninit::new(Part::Copy(if upper { b"0E0" } else { b"0e0" }));
Formatted {
sign,
// SAFETY: we just initialized the elements `..1`.
parts: unsafe { parts[..1].assume_init_ref() },
}
}
}
FullDecoded::Finite(ref decoded) => {
let maxlen = estimate_max_buf_len(decoded.exp);
assert!(buf.len() >= ndigits || buf.len() >= maxlen);
let trunc = if ndigits < maxlen { ndigits } else { maxlen };
let (buf, exp) = format_exact(decoded, &mut buf[..trunc], i16::MIN);
Formatted { sign, parts: digits_to_exp_str(buf, exp, ndigits, upper, parts) }
}
}
}
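Hedged illustration through the public API (the route from `format!` down to `to_exact_exp_str` is an implementation detail): `{:.Ne}` requests `N + 1` significant digits, and zero takes the `FullDecoded::Zero` arm above.

fn main() {
    assert_eq!(format!("{:.3e}", 1234.5678_f64), "1.235e3"); // 4 significant digits
    assert_eq!(format!("{:.0e}", 0.0_f64), "0e0");           // the `ndigits == 1` zero case
}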
core::num::flt2dec::to_exact_fixed_str pub fn to_exact_fixed_str<'a, T, F>(
mut format_exact: F,
v: T,
sign: Sign,
frac_digits: usize,
buf: &'a mut [MaybeUninit<u8>],
parts: &'a mut [MaybeUninit<Part<'a>>],
) -> Formatted<'a>
where
T: DecodableFloat,
F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>], i16) -> (&'a [u8], i16),
{
assert!(parts.len() >= 4);
let (negative, full_decoded) = decode(v);
let sign = determine_sign(sign, &full_decoded, negative);
match full_decoded {
FullDecoded::Nan => {
parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
// SAFETY: we just initialized the elements `..1`.
Formatted { sign, parts: unsafe { parts[..1].assume_init_ref() } }
}
FullDecoded::Infinite => {
parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
// SAFETY: we just initialized the elements `..1`.
Formatted { sign, parts: unsafe { parts[..1].assume_init_ref() } }
}
FullDecoded::Zero => {
if frac_digits > 0 {
// [0.][0000]
parts[0] = MaybeUninit::new(Part::Copy(b"0."));
parts[1] = MaybeUninit::new(Part::Zero(frac_digits));
Formatted {
sign,
// SAFETY: we just initialized the elements `..2`.
parts: unsafe { parts[..2].assume_init_ref() },
}
} else {
parts[0] = MaybeUninit::new(Part::Copy(b"0"));
Formatted {
sign,
// SAFETY: we just initialized the elements `..1`.
parts: unsafe { parts[..1].assume_init_ref() },
}
}
}
FullDecoded::Finite(ref decoded) => {
let maxlen = estimate_max_buf_len(decoded.exp);
assert!(buf.len() >= maxlen);
// it *is* possible that `frac_digits` is ridiculously large.
// `format_exact` will end rendering digits much earlier in this case,
// because we are strictly limited by `maxlen`.
let limit = if frac_digits < 0x8000 { -(frac_digits as i16) } else { i16::MIN };
let (buf, exp) = format_exact(decoded, &mut buf[..maxlen], limit);
if exp <= limit {
// the restriction couldn't be met, so this should render like zero no matter
// what `exp` was. this does not include the case where the restriction has been met
// only after the final rounding-up; it's a regular case with `exp = limit + 1`.
debug_assert_eq!(buf.len(), 0);
if frac_digits > 0 {
// [0.][0000]
parts[0] = MaybeUninit::new(Part::Copy(b"0."));
parts[1] = MaybeUninit::new(Part::Zero(frac_digits));
Formatted {
sign,
// SAFETY: we just initialized the elements `..2`.
parts: unsafe { parts[..2].assume_init_ref() },
}
} else {
parts[0] = MaybeUninit::new(Part::Copy(b"0"));
Formatted {
sign,
// SAFETY: we just initialized the elements `..1`.
parts: unsafe { parts[..1].assume_init_ref() },
}
}
} else {
Formatted { sign, parts: digits_to_dec_str(buf, exp, frac_digits, parts) }
}
}
}
}
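Hedged illustration through the public API: `{:.N}` fixes `N` fractional digits, and a value too small for the requested precision takes the `exp <= limit` zero path above.

fn main() {
    assert_eq!(format!("{:.3}", 1234.5678_f64), "1234.568");
    assert_eq!(format!("{:.2}", 0.0001_f64), "0.00"); // rendered like zero
}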
core::num::flt2dec::to_shortest_exp_str pub fn to_shortest_exp_str<'a, T, F>(
mut format_shortest: F,
v: T,
sign: Sign,
dec_bounds: (i16, i16),
upper: bool,
buf: &'a mut [MaybeUninit<u8>],
parts: &'a mut [MaybeUninit<Part<'a>>],
) -> Formatted<'a>
where
T: DecodableFloat,
F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
{
assert!(parts.len() >= 6);
assert!(buf.len() >= MAX_SIG_DIGITS);
assert!(dec_bounds.0 <= dec_bounds.1);
let (negative, full_decoded) = decode(v);
let sign = determine_sign(sign, &full_decoded, negative);
match full_decoded {
FullDecoded::Nan => {
parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
// SAFETY: we just initialized the elements `..1`.
Formatted { sign, parts: unsafe { parts[..1].assume_init_ref() } }
}
FullDecoded::Infinite => {
parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
// SAFETY: we just initialized the elements `..1`.
Formatted { sign, parts: unsafe { parts[..1].assume_init_ref() } }
}
FullDecoded::Zero => {
parts[0] = if dec_bounds.0 <= 0 && 0 < dec_bounds.1 {
MaybeUninit::new(Part::Copy(b"0"))
} else {
MaybeUninit::new(Part::Copy(if upper { b"0E0" } else { b"0e0" }))
};
// SAFETY: we just initialized the elements `..1`.
Formatted { sign, parts: unsafe { parts[..1].assume_init_ref() } }
}
FullDecoded::Finite(ref decoded) => {
let (buf, exp) = format_shortest(decoded, buf);
let vis_exp = exp as i32 - 1;
let parts = if dec_bounds.0 as i32 <= vis_exp && vis_exp < dec_bounds.1 as i32 {
digits_to_dec_str(buf, exp, 0, parts)
} else {
digits_to_exp_str(buf, exp, 0, upper, parts)
};
Formatted { sign, parts }
}
}
}
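Hedged illustration: `Debug` formatting of floats switches between decimal and exponential notation depending on the decimal exponent, which is what the `dec_bounds` check above decides.

fn main() {
    assert_eq!(format!("{:?}", 1.5e3_f64), "1500.0");
    assert_eq!(format!("{:?}", 1.5e300_f64), "1.5e300");
}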
core::num::flt2dec::to_shortest_str pub fn to_shortest_str<'a, T, F>(
mut format_shortest: F,
v: T,
sign: Sign,
frac_digits: usize,
buf: &'a mut [MaybeUninit<u8>],
parts: &'a mut [MaybeUninit<Part<'a>>],
) -> Formatted<'a>
where
T: DecodableFloat,
F: FnMut(&Decoded, &'a mut [MaybeUninit<u8>]) -> (&'a [u8], i16),
{
assert!(parts.len() >= 4);
assert!(buf.len() >= MAX_SIG_DIGITS);
let (negative, full_decoded) = decode(v);
let sign = determine_sign(sign, &full_decoded, negative);
match full_decoded {
FullDecoded::Nan => {
parts[0] = MaybeUninit::new(Part::Copy(b"NaN"));
// SAFETY: we just initialized the elements `..1`.
Formatted { sign, parts: unsafe { parts[..1].assume_init_ref() } }
}
FullDecoded::Infinite => {
parts[0] = MaybeUninit::new(Part::Copy(b"inf"));
// SAFETY: we just initialized the elements `..1`.
Formatted { sign, parts: unsafe { parts[..1].assume_init_ref() } }
}
FullDecoded::Zero => {
if frac_digits > 0 {
// [0.][0000]
parts[0] = MaybeUninit::new(Part::Copy(b"0."));
parts[1] = MaybeUninit::new(Part::Zero(frac_digits));
Formatted {
sign,
// SAFETY: we just initialized the elements `..2`.
parts: unsafe { parts[..2].assume_init_ref() },
}
} else {
parts[0] = MaybeUninit::new(Part::Copy(b"0"));
Formatted {
sign,
// SAFETY: we just initialized the elements `..1`.
parts: unsafe { parts[..1].assume_init_ref() },
}
}
}
FullDecoded::Finite(ref decoded) => {
let (buf, exp) = format_shortest(decoded, buf);
Formatted { sign, parts: digits_to_dec_str(buf, exp, frac_digits, parts) }
}
}
}
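Hedged illustration: `Display` for floats goes through the shortest-digit path, printing the fewest digits that round-trip; with no requested fraction, a whole number keeps no fractional part.

fn main() {
    assert_eq!(format!("{}", 0.3_f64), "0.3"); // not 0.299999999999999988897769...
    assert_eq!(format!("{}", 1.0_f64), "1");   // bare `{}` requests no minimum fraction
}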
core::num::fmt::Formatted::<'a>::len pub fn len(&self) -> usize {
self.sign.len() + self.parts.iter().map(|part| part.len()).sum::<usize>()
}
core::num::fmt::Part::<'a>::len pub fn len(&self) -> usize {
match *self {
Part::Zero(nzeroes) => nzeroes,
Part::Num(v) => v.checked_ilog10().unwrap_or_default() as usize + 1,
Part::Copy(buf) => buf.len(),
}
}
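A standalone sketch of the `Part::Num` length rule above; `digit_len` is a hypothetical mirror of `checked_ilog10().unwrap_or_default() + 1`.

fn digit_len(v: u16) -> usize {
    v.checked_ilog10().unwrap_or_default() as usize + 1
}

fn main() {
    assert_eq!(digit_len(0), 1);    // `checked_ilog10` is None for 0: default 0, plus 1
    assert_eq!(digit_len(9), 1);
    assert_eq!(digit_len(1234), 4);
}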
core::num::from_ascii_radix_panic const fn from_ascii_radix_panic(radix: u32) -> ! {
const_panic!(
"from_ascii_radix: radix must lie in the range `[2, 36]`",
"from_ascii_radix: radix must lie in the range `[2, 36]` - found {radix}",
radix: u32 = radix,
)
}
core::num::from_ascii_radix_panic::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::num::from_ascii_radix_panic::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::num::int_log10::less_than_5 const fn less_than_5(val: u32) -> u32 {
// Similar to u8, when adding one of these constants to val,
// we get two possible bit patterns above the low 17 bits,
// depending on whether val is below or above the threshold.
const C1: u32 = 0b011_00000000000000000 - 10; // 393206
const C2: u32 = 0b100_00000000000000000 - 100; // 524188
const C3: u32 = 0b111_00000000000000000 - 1000; // 916504
const C4: u32 = 0b100_00000000000000000 - 10000; // 514288
// Value of top bits:
// +c1 +c2 1&2 +c3 +c4 3&4 ^
// 0..=9 010 011 010 110 011 010 000 = 0
// 10..=99 011 011 011 110 011 010 001 = 1
// 100..=999 011 100 000 110 011 010 010 = 2
// 1000..=9999 011 100 000 111 011 011 011 = 3
// 10000..=99999 011 100 000 111 100 100 100 = 4
(((val + C1) & (val + C2)) ^ ((val + C3) & (val + C4))) >> 17
}
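An exhaustive sanity sketch for the bit trick above (an assumption worth checking, not the library's test: callers only pass values below 100_000, where the result matches `ilog10`).

fn less_than_5(val: u32) -> u32 {
    const C1: u32 = 0b011_00000000000000000 - 10;
    const C2: u32 = 0b100_00000000000000000 - 100;
    const C3: u32 = 0b111_00000000000000000 - 1000;
    const C4: u32 = 0b100_00000000000000000 - 10000;
    (((val + C1) & (val + C2)) ^ ((val + C3) & (val + C4))) >> 17
}

fn main() {
    for v in 1..100_000u32 {
        assert_eq!(less_than_5(v), v.ilog10());
    }
}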
core::num::int_log10::panic_for_nonpositive_argument pub(super) const fn panic_for_nonpositive_argument() -> ! {
panic!("argument of integer logarithm must be positive")
}
core::num::int_log10::u128 pub(super) const fn $ty(val: NonZero<$ty>) -> u32 {
let result = $impl_fn(val.get());
// SAFETY: Integer logarithm is monotonic non-decreasing, so the computed `result` cannot
// exceed the value produced for the maximum input.
unsafe { crate::hint::assert_unchecked(result <= const { $impl_fn($ty::MAX) }) };
result
}
core::num::int_log10::u128_impl const fn u128_impl(mut val: u128) -> u32 {
let mut log = 0;
if val >= 100_000_000_000_000_000_000_000_000_000_000 {
val /= 100_000_000_000_000_000_000_000_000_000_000;
log += 32;
return log + u32_impl(val as u32);
}
if val >= 10_000_000_000_000_000 {
val /= 10_000_000_000_000_000;
log += 16;
}
log + u64_impl(val as u64)
}
core::num::int_log10::u16 pub(super) const fn $ty(val: NonZero<$ty>) -> u32 {
let result = $impl_fn(val.get());
// SAFETY: Integer logarithm is monotonic non-decreasing, so the computed `result` cannot
// exceed the value produced for the maximum input.
unsafe { crate::hint::assert_unchecked(result <= const { $impl_fn($ty::MAX) }) };
result
}
core::num::int_log10::u16_impl const fn u16_impl(val: u16) -> u32 {
less_than_5(val as u32)
}
core::num::int_log10::u32 pub(super) const fn $ty(val: NonZero<$ty>) -> u32 {
let result = $impl_fn(val.get());
// SAFETY: Integer logarithm is monotonic non-decreasing, so the computed `result` cannot
// exceed the value produced for the maximum input.
unsafe { crate::hint::assert_unchecked(result <= const { $impl_fn($ty::MAX) }) };
result
}
core::num::int_log10::u32_impl const fn u32_impl(mut val: u32) -> u32 {
let mut log = 0;
if val >= 100_000 {
val /= 100_000;
log += 5;
}
log + less_than_5(val)
}
core::num::int_log10::u64 pub(super) const fn $ty(val: NonZero<$ty>) -> u32 {
let result = $impl_fn(val.get());
// SAFETY: Integer logarithm is monotonic non-decreasing, so the computed `result` cannot
// exceed the value produced for the maximum input.
unsafe { crate::hint::assert_unchecked(result <= const { $impl_fn($ty::MAX) }) };
result
}
core::num::int_log10::u64_impl const fn u64_impl(mut val: u64) -> u32 {
let mut log = 0;
if val >= 10_000_000_000 {
val /= 10_000_000_000;
log += 10;
}
if val >= 100_000 {
val /= 100_000;
log += 5;
}
log + less_than_5(val as u32)
}
core::num::int_log10::u8 pub(super) const fn $ty(val: NonZero<$ty>) -> u32 {
let result = $impl_fn(val.get());
// SAFETY: Integer logarithm is monotonic non-decreasing, so the computed `result` cannot
// exceed the value produced for the maximum input.
unsafe { crate::hint::assert_unchecked(result <= const { $impl_fn($ty::MAX) }) };
result
}
core::num::int_log10::u8_impl const fn u8_impl(val: u8) -> u32 {
let val = val as u32;
// For better performance, avoid branches by assembling the solution
// in the bits above the low 8 bits.
// Adding c1 to val gives 10 in the top bits for val < 10, 11 for val >= 10
const C1: u32 = 0b11_00000000 - 10; // 758
// Adding c2 to val gives 01 in the top bits for val < 100, 10 for val >= 100
const C2: u32 = 0b10_00000000 - 100; // 412
// Value of top bits:
// +c1 +c2 1&2
// 0..=9 10 01 00 = 0
// 10..=99 11 01 01 = 1
// 100..=255 11 10 10 = 2
((val + C1) & (val + C2)) >> 8
}
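Worked boundary checks for the u8 constants above (a standalone sketch of the same expression):

fn main() {
    const C1: u32 = 0b11_00000000 - 10;  // 758
    const C2: u32 = 0b10_00000000 - 100; // 412
    let f = |val: u32| ((val + C1) & (val + C2)) >> 8;
    assert_eq!(f(9), 0);
    assert_eq!(f(10), 1);
    assert_eq!(f(99), 1);
    assert_eq!(f(100), 2);
    assert_eq!(f(255), 2);
}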
core::num::int_log10::usize pub(super) const fn usize(val: NonZero<usize>) -> u32 {
#[cfg(target_pointer_width = "16")]
let impl_fn = u16;
#[cfg(target_pointer_width = "32")]
let impl_fn = u32;
#[cfg(target_pointer_width = "64")]
let impl_fn = u64;
// SAFETY: We have selected the correct `impl_fn`, so converting `val` to its argument
// type is safe.
impl_fn(unsafe { NonZero::new_unchecked(val.get() as _) })
}
core::num::niche_types::Nanoseconds::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::Nanoseconds::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::Nanoseconds::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroCharInner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroCharInner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroCharInner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroI128Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI128Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroI128Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroI16Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI16Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroI16Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroI32Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI32Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroI32Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroI64Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI64Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroI64Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroI8Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI8Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroI8Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroIsizeInner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroIsizeInner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroIsizeInner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroU128Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU128Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroU128Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroU16Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU16Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroU16Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroU32Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU32Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroU32Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroU64Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU64Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroU64Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroU8Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU8Inner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroU8Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::NonZeroUsizeInner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroUsizeInner::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::NonZeroUsizeInner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::niche_types::UsizeNoHighBit::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: pattern types are always legal values of their base type
// (Not using `.0` because that has perf regressions.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::UsizeNoHighBit::new pub const fn new(val: $int) -> Option<Self> {
#[allow(non_contiguous_range_endpoints)]
if let $pat = val {
// SAFETY: just checked that the value matches the pattern
Some(unsafe { $name(crate::mem::transmute(val)) })
} else {
None
}
}
core::num::niche_types::UsizeNoHighBit::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { crate::mem::transmute(val) }
}
core::num::nonzero::<impl core::ops::arith::Div<core::num::nonzero::NonZero<u128>> for u128>::div fn div(self, other: NonZero<$Int>) -> $Int {
// SAFETY: Division by zero is checked because `other` is non-zero,
// and MIN/-1 is checked because `self` is an unsigned int.
unsafe { intrinsics::unchecked_div(self, other.get()) }
}
core::num::nonzero::<impl core::ops::arith::Div<core::num::nonzero::NonZero<u16>> for u16>::div fn div(self, other: NonZero<$Int>) -> $Int {
// SAFETY: Division by zero is checked because `other` is non-zero,
// and MIN/-1 is checked because `self` is an unsigned int.
unsafe { intrinsics::unchecked_div(self, other.get()) }
}
core::num::nonzero::<impl core::ops::arith::Div<core::num::nonzero::NonZero<u32>> for u32>::div fn div(self, other: NonZero<$Int>) -> $Int {
// SAFETY: Division by zero is checked because `other` is non-zero,
// and MIN/-1 is checked because `self` is an unsigned int.
unsafe { intrinsics::unchecked_div(self, other.get()) }
}
core::num::nonzero::<impl core::ops::arith::Div<core::num::nonzero::NonZero<u64>> for u64>::div fn div(self, other: NonZero<$Int>) -> $Int {
// SAFETY: Division by zero is checked because `other` is non-zero,
// and MIN/-1 is checked because `self` is an unsigned int.
unsafe { intrinsics::unchecked_div(self, other.get()) }
}
core::num::nonzero::<impl core::ops::arith::Div<core::num::nonzero::NonZero<u8>> for u8>::div fn div(self, other: NonZero<$Int>) -> $Int {
// SAFETY: Division by zero is checked because `other` is non-zero,
// and MIN/-1 is checked because `self` is an unsigned int.
unsafe { intrinsics::unchecked_div(self, other.get()) }
}
core::num::nonzero::<impl core::ops::arith::Div<core::num::nonzero::NonZero<usize>> for usize>::div fn div(self, other: NonZero<$Int>) -> $Int {
// SAFETY: Division by zero is checked because `other` is non-zero,
// and MIN/-1 is checked because `self` is an unsigned int.
unsafe { intrinsics::unchecked_div(self, other.get()) }
}
core::num::nonzero::NonZero::<T>::get pub const fn get(self) -> T {
// Rustc can set range metadata only if it loads `self` from
// memory somewhere. If the value of `self` came from a by-value argument
// of some non-inlined function, LLVM doesn't have range metadata
// to understand that the value cannot be zero.
//
// Using the transmute `assume`s the range at runtime.
//
// Even once LLVM supports `!range` metadata for function arguments
// (see <https://github.com/llvm/llvm-project/issues/76628>), this can't
// be `.0` because MCP#807 bans field-projecting into `scalar_valid_range`
// types, and it arguably wouldn't want to be anyway because if this is
// MIR-inlined, there's no opportunity to put that argument metadata anywhere.
//
// The good answer here will eventually be pattern types, which will hopefully
// allow it to go back to `.0`, maybe with a cast of some sort.
//
// SAFETY: `ZeroablePrimitive` guarantees that the size and bit validity
// of `.0` is such that this transmute is sound.
unsafe { intrinsics::transmute_unchecked(self) }
}
core::num::nonzero::NonZero::<T>::new pub const fn new(n: T) -> Option<Self> {
// SAFETY: Memory layout optimization guarantees that `Option<NonZero<T>>` has
// the same layout and size as `T`, with `0` representing `None`.
unsafe { intrinsics::transmute_unchecked(n) }
}
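Illustrative use of the public API above (`NonZero::new` and `get` are stable):

use std::num::NonZero;

fn main() {
    assert!(NonZero::<u32>::new(0).is_none()); // 0 maps to None, per the layout note above
    let n = NonZero::new(42_u32).unwrap();
    assert_eq!(n.get(), 42);
    // The zero niche keeps `Option<NonZero<u32>>` the same size as `u32`:
    assert_eq!(std::mem::size_of::<Option<NonZero<u32>>>(), std::mem::size_of::<u32>());
}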
core::num::nonzero::NonZero::<u128>::ilog10 pub const fn ilog10(self) -> u32 {
super::int_log10::$Int(self)
}
core::num::nonzero::NonZero::<u128>::ilog2 pub const fn ilog2(self) -> u32 {
Self::BITS - 1 - self.leading_zeros()
}
core::num::nonzero::NonZero::<u128>::leading_zeros pub const fn leading_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `ctlz_nonzero`.
unsafe {
intrinsics::ctlz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<u128>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `cttz_nonzero`.
unsafe {
intrinsics::cttz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<u16>::ilog10 pub const fn ilog10(self) -> u32 {
super::int_log10::$Int(self)
}
core::num::nonzero::NonZero::<u16>::ilog2 pub const fn ilog2(self) -> u32 {
Self::BITS - 1 - self.leading_zeros()
}
core::num::nonzero::NonZero::<u16>::leading_zeros pub const fn leading_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `ctlz_nonzero`.
unsafe {
intrinsics::ctlz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<u16>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `cttz_nonzero`.
unsafe {
intrinsics::cttz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<u32>::ilog10 pub const fn ilog10(self) -> u32 {
super::int_log10::$Int(self)
}
core::num::nonzero::NonZero::<u32>::ilog2 pub const fn ilog2(self) -> u32 {
Self::BITS - 1 - self.leading_zeros()
}
core::num::nonzero::NonZero::<u32>::leading_zeros pub const fn leading_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `ctlz_nonzero`.
unsafe {
intrinsics::ctlz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<u32>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `cttz_nonzero`.
unsafe {
intrinsics::cttz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<u64>::ilog10 pub const fn ilog10(self) -> u32 {
super::int_log10::$Int(self)
}
core::num::nonzero::NonZero::<u64>::ilog2 pub const fn ilog2(self) -> u32 {
Self::BITS - 1 - self.leading_zeros()
}
core::num::nonzero::NonZero::<u64>::leading_zeros pub const fn leading_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `ctlz_nonzero`.
unsafe {
intrinsics::ctlz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<u64>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `cttz_nonzero`.
unsafe {
intrinsics::cttz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<u8>::ilog10 pub const fn ilog10(self) -> u32 {
super::int_log10::$Int(self)
}
core::num::nonzero::NonZero::<u8>::ilog2 pub const fn ilog2(self) -> u32 {
Self::BITS - 1 - self.leading_zeros()
}
core::num::nonzero::NonZero::<u8>::leading_zeros pub const fn leading_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `ctlz_nonzero`.
unsafe {
intrinsics::ctlz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<u8>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `cttz_nonzero`.
unsafe {
intrinsics::cttz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<usize>::ilog10 pub const fn ilog10(self) -> u32 {
super::int_log10::$Int(self)
}
core::num::nonzero::NonZero::<usize>::ilog2 pub const fn ilog2(self) -> u32 {
Self::BITS - 1 - self.leading_zeros()
}
core::num::nonzero::NonZero::<usize>::leading_zeros pub const fn leading_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `ctlz_nonzero`.
unsafe {
intrinsics::ctlz_nonzero(self.get() as $Uint)
}
}
core::num::nonzero::NonZero::<usize>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
// SAFETY: since `self` cannot be zero, it is safe to call `cttz_nonzero`.
unsafe {
intrinsics::cttz_nonzero(self.get() as $Uint)
}
}
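Quick illustrative checks of the bit-level helpers above:

use std::num::NonZero;

fn main() {
    let n = NonZero::new(1000_u32).unwrap();
    assert_eq!(n.ilog10(), 3);          // 10^3 <= 1000 < 10^4
    assert_eq!(n.ilog2(), 9);           // 2^9 = 512 <= 1000 < 1024
    assert_eq!(n.leading_zeros(), 22);  // 1000 needs 10 of 32 bits
    assert_eq!(n.trailing_zeros(), 3);  // 1000 = 0b1111101000
}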
core::ops::control_flow::ControlFlow::<B, C>::break_ok pub const fn break_ok(self) -> Result<B, C> {
match self {
ControlFlow::Continue(c) => Err(c),
ControlFlow::Break(b) => Ok(b),
}
}
core::ops::control_flow::ControlFlow::<B, C>::break_value pub const fn break_value(self) -> Option<B>
where
Self: [const] Destruct,
{
match self {
ControlFlow::Continue(..) => None,
ControlFlow::Break(x) => Some(x),
}
}
core::ops::control_flow::ControlFlow::<B, C>::continue_ok pub const fn continue_ok(self) -> Result<C, B> {
match self {
ControlFlow::Continue(c) => Ok(c),
ControlFlow::Break(b) => Err(b),
}
}
core::ops::control_flow::ControlFlow::<B, C>::continue_value pub const fn continue_value(self) -> Option<C>
where
Self: [const] Destruct,
{
match self {
ControlFlow::Continue(x) => Some(x),
ControlFlow::Break(..) => None,
}
}
core::ops::control_flow::ControlFlow::<B, C>::is_break pub const fn is_break(&self) -> bool {
matches!(*self, ControlFlow::Break(_))
}
core::ops::control_flow::ControlFlow::<B, C>::is_continue pub const fn is_continue(&self) -> bool {
matches!(*self, ControlFlow::Continue(_))
}
core::ops::control_flow::ControlFlow::<B, C>::map_break pub const fn map_break<T, F>(self, f: F) -> ControlFlow<T, C>
where
F: [const] FnOnce(B) -> T + [const] Destruct,
{
match self {
ControlFlow::Continue(x) => ControlFlow::Continue(x),
ControlFlow::Break(x) => ControlFlow::Break(f(x)),
}
}
core::ops::control_flow::ControlFlow::<B, C>::map_continue pub const fn map_continue<T, F>(self, f: F) -> ControlFlow<B, T>
where
F: [const] FnOnce(C) -> T + [const] Destruct,
{
match self {
ControlFlow::Continue(x) => ControlFlow::Continue(f(x)),
ControlFlow::Break(x) => ControlFlow::Break(x),
}
}
core::ops::control_flow::ControlFlow::<R, <R as core::ops::try_trait::Try>::Output>::from_try pub(crate) fn from_try(r: R) -> Self {
match R::branch(r) {
ControlFlow::Continue(v) => ControlFlow::Continue(v),
ControlFlow::Break(v) => ControlFlow::Break(R::from_residual(v)),
}
}
core::ops::control_flow::ControlFlow::<R, <R as core::ops::try_trait::Try>::Output>::into_try pub(crate) fn into_try(self) -> R {
match self {
ControlFlow::Continue(v) => R::from_output(v),
ControlFlow::Break(v) => v,
}
}
core::ops::control_flow::ControlFlow::<T, T>::into_value pub const fn into_value(self) -> T {
match self {
ControlFlow::Continue(x) | ControlFlow::Break(x) => x,
}
}
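Illustrative use of `ControlFlow` as the residual type of `try_fold`, the same plumbing the `from_try`/`into_try` helpers above serve (stable API only):

use std::ops::ControlFlow;

fn first_negative(xs: &[i32]) -> Option<i32> {
    let flow = xs.iter().try_fold((), |(), &x| {
        if x < 0 { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
    });
    match flow {
        ControlFlow::Break(x) => Some(x),
        ControlFlow::Continue(()) => None,
    }
}

fn main() {
    assert_eq!(first_negative(&[3, 1, -4, 1]), Some(-4));
    assert_eq!(first_negative(&[3, 1]), None);
}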
core::ops::function::impls::<impl core::ops::function::Fn<A> for &F>::call extern "rust-call" fn call(&self, args: A) -> F::Output {
(**self).call(args)
}
core::ops::function::impls::<impl core::ops::function::FnMut<A> for &F>::call_mut extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
(**self).call(args)
}
core::ops::function::impls::<impl core::ops::function::FnMut<A> for &mut F>::call_mut extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
(*self).call_mut(args)
}
core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once extern "rust-call" fn call_once(self, args: A) -> F::Output {
(*self).call(args)
}
core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &mut F>::call_once extern "rust-call" fn call_once(self, args: A) -> F::Output {
(*self).call_mut(args)
}
core::ops::index_range::IndexRange::assume_range const fn assume_range(&self) {
// SAFETY: This is the type invariant
unsafe { crate::hint::assert_unchecked(self.start <= self.end) }
}
core::ops::index_range::IndexRange::end pub(crate) const fn end(&self) -> usize {
self.end
}
core::ops::index_range::IndexRange::len pub(crate) const fn len(&self) -> usize {
// SAFETY: By invariant, this cannot wrap
// Using the intrinsic because a UB check here impedes LLVM optimization. (#131563)
unsafe { crate::intrinsics::unchecked_sub(self.end, self.start) }
}
core::ops::index_range::IndexRange::new_unchecked pub(crate) const unsafe fn new_unchecked(start: usize, end: usize) -> Self {
ub_checks::assert_unsafe_precondition!(
check_library_ub,
"IndexRange::new_unchecked requires `start <= end`",
(start: usize = start, end: usize = end) => start <= end,
);
IndexRange { start, end }
}
core::ops::index_range::IndexRange::next_back_unchecked const unsafe fn next_back_unchecked(&mut self) -> usize {
debug_assert!(self.start < self.end);
// SAFETY: The range isn't empty, so this cannot overflow
let value = unsafe { self.end.unchecked_sub(1) };
self.end = value;
value
}
core::ops::index_range::IndexRange::next_unchecked const unsafe fn next_unchecked(&mut self) -> usize {
debug_assert!(self.start < self.end);
let value = self.start;
// SAFETY: The range isn't empty, so this cannot overflow
self.start = unsafe { value.unchecked_add(1) };
value
}
core::ops::index_range::IndexRange::start pub(crate) const fn start(&self) -> usize {
self.start
}
core::ops::index_range::IndexRange::take_prefix pub(crate) fn take_prefix(&mut self, n: usize) -> Self {
let mid = if n <= self.len() {
// SAFETY: We just checked that this will be between start and end,
// and thus the addition cannot overflow.
// Using the intrinsic avoids a superfluous UB check.
unsafe { crate::intrinsics::unchecked_add(self.start, n) }
} else {
self.end
};
let prefix = Self { start: self.start, end: mid };
self.start = mid;
prefix
}
core::ops::index_range::IndexRange::take_suffix pub(crate) fn take_suffix(&mut self, n: usize) -> Self {
let mid = if n <= self.len() {
// SAFETY: We just checked that this will be between start and end,
// and thus the subtraction cannot overflow.
// Using the intrinsic avoids a superfluous UB check.
unsafe { crate::intrinsics::unchecked_sub(self.end, n) }
} else {
self.start
};
let suffix = Self { start: mid, end: self.end };
self.end = mid;
suffix
}
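A semantics sketch of `take_prefix`/`take_suffix` on a plain `Range<usize>` (`take_prefix` here is a hypothetical mirror; the real `IndexRange` also carries the `start <= end` invariant that makes the unchecked arithmetic sound):

use std::ops::Range;

fn take_prefix(r: &mut Range<usize>, n: usize) -> Range<usize> {
    let mid = if n <= r.len() { r.start + n } else { r.end };
    let prefix = r.start..mid;
    r.start = mid;
    prefix
}

fn main() {
    let mut r = 2..10;
    assert_eq!(take_prefix(&mut r, 3), 2..5);
    assert_eq!(r, 5..10);
    assert_eq!(take_prefix(&mut r, 100), 5..10); // oversized n takes everything left
    assert_eq!(r, 10..10);
}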
core::ops::index_range::IndexRange::zero_to pub(crate) const fn zero_to(end: usize) -> Self {
IndexRange { start: 0, end }
}
core::ops::range::Bound::<&T>::cloned pub const fn cloned(self) -> Bound<T>
where
T: [const] Clone,
{
match self {
Bound::Unbounded => Bound::Unbounded,
Bound::Included(x) => Bound::Included(x.clone()),
Bound::Excluded(x) => Bound::Excluded(x.clone()),
}
}
core::ops::range::Bound::<&T>::copied pub const fn copied(self) -> Bound<T> {
match self {
Bound::Unbounded => Bound::Unbounded,
Bound::Included(x) => Bound::Included(*x),
Bound::Excluded(x) => Bound::Excluded(*x),
}
}
core::ops::range::Bound::<T>::as_mut pub const fn as_mut(&mut self) -> Bound<&mut T> {
match *self {
Included(ref mut x) => Included(x),
Excluded(ref mut x) => Excluded(x),
Unbounded => Unbounded,
}
}
core::ops::range::Bound::<T>::as_ref pub const fn as_ref(&self) -> Bound<&T> {
match *self {
Included(ref x) => Included(x),
Excluded(ref x) => Excluded(x),
Unbounded => Unbounded,
}
}
core::ops::range::Bound::<T>::map pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Bound<U> {
match self {
Unbounded => Unbounded,
Included(x) => Included(f(x)),
Excluded(x) => Excluded(f(x)),
}
}
core::ops::range::Range::<Idx>::contains pub const fn contains<U>(&self, item: &U) -> bool
where
Idx: [const] PartialOrd<U>,
U: ?Sized + [const] PartialOrd<Idx>,
{
<Self as RangeBounds<Idx>>::contains(self, item)
}
core::ops::range::RangeBounds::contains fn contains<U>(&self, item: &U) -> bool
where
T: [const] PartialOrd<U>,
U: ?Sized + [const] PartialOrd<T>,
{
(match self.start_bound() {
Included(start) => start <= item,
Excluded(start) => start < item,
Unbounded => true,
}) && (match self.end_bound() {
Included(end) => item <= end,
Excluded(end) => item < end,
Unbounded => true,
})
}
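Illustrative checks of the bound handling above:

use std::ops::Bound::{self, Excluded, Unbounded};
use std::ops::RangeBounds;

fn main() {
    assert!((3..7).contains(&3));   // Included start
    assert!(!(3..7).contains(&7));  // Excluded end
    assert!((3..=7).contains(&7));  // Included end
    let r: (Bound<i32>, Bound<i32>) = (Excluded(3), Unbounded);
    assert!(!r.contains(&3) && r.contains(&4));
}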
core::ops::range::RangeInclusive::<Idx>::contains pub const fn contains<U>(&self, item: &U) -> bool
where
Idx: [const] PartialOrd<U>,
U: ?Sized + [const] PartialOrd<Idx>,
{
<Self as RangeBounds<Idx>>::contains(self, item)
}
core::ops::range::RangeInclusive::<Idx>::end pub const fn end(&self) -> &Idx {
&self.end
}
core::ops::range::RangeInclusive::<Idx>::into_inner pub const fn into_inner(self) -> (Idx, Idx) {
(self.start, self.end)
}
core::ops::range::RangeInclusive::<Idx>::is_empty pub const fn is_empty(&self) -> bool
where
Idx: [const] PartialOrd,
{
self.exhausted || !(self.start <= self.end)
}
core::ops::range::RangeInclusive::<Idx>::new pub const fn new(start: Idx, end: Idx) -> Self {
Self { start, end, exhausted: false }
}
core::ops::range::RangeInclusive::<Idx>::start pub const fn start(&self) -> &Idx {
&self.start
}
core::ops::range::RangeInclusive::<usize>::into_slice_range pub(crate) const fn into_slice_range(self) -> Range<usize> {
// If we're not exhausted, we want to simply slice `start..end + 1`.
// If we are exhausted, then slicing with `end + 1..end + 1` gives us an
// empty range that is still subject to bounds-checks for that endpoint.
let exclusive_end = self.end + 1;
let start = if self.exhausted { exclusive_end } else { self.start };
start..exclusive_end
}
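The observable effect of `into_slice_range` (hedged: the `exhausted` flag is internal, but inclusive-range indexing shows the `end + 1` translation):

fn main() {
    let v = [10, 20, 30, 40, 50];
    assert_eq!(&v[1..=3], &v[1..4]); // 1..=3 slices as 1..(3 + 1)
}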
core::ops::try_trait::NeverShortCircuit::<T>::wrap_mut_1 pub(crate) const fn wrap_mut_1<A, F>(f: F) -> Wrapped<T, A, F>
where
F: [const] FnMut(A) -> T,
{
Wrapped { f, p: PhantomData }
}
core::ops::try_trait::NeverShortCircuit::<T>::wrap_mut_2 pub(crate) fn wrap_mut_2<A, B>(mut f: impl FnMut(A, B) -> T) -> impl FnMut(A, B) -> Self {
move |a, b| NeverShortCircuit(f(a, b))
}
core::ops::try_trait::residual_into_try_type pub const fn residual_into_try_type<R: [const] Residual<O>, O>(
r: R,
) -> <R as Residual<O>>::TryType {
FromResidual::from_residual(r)
}
core::option::Option::<&T>::cloned pub fn cloned(self) -> Option<T>
where
T: Clone,
{
self.map(T::clone)
}
core::option::Option::<&T>::copied pub const fn copied(self) -> Option<T>
where
T: Copy,
{
// FIXME(const-hack): this implementation, which sidesteps using `Option::map` since it's not const
// ready yet, should be reverted when possible to avoid code repetition
match self {
Some(&v) => Some(v),
None => None,
}
}
core::option::Option::<&mut T>::cloned pub fn cloned(self) -> Option<T>
where
T: Clone,
{
self.as_deref().map(T::clone)
}
core::option::Option::<&mut T>::copied pub const fn copied(self) -> Option<T>
where
T: Copy,
{
match self {
Some(&mut t) => Some(t),
None => None,
}
}
core::option::Option::<(T, U)>::unzip pub fn unzip(self) -> (Option<T>, Option<U>) {
match self {
Some((a, b)) => (Some(a), Some(b)),
None => (None, None),
}
}
core::option::Option::<T>::and pub const fn and<U>(self, optb: Option<U>) -> Option<U>
where
T: [const] Destruct,
U: [const] Destruct,
{
match self {
Some(_) => optb,
None => None,
}
}
core::option::Option::<T>::and_then pub const fn and_then<U, F>(self, f: F) -> Option<U>
where
F: [const] FnOnce(T) -> Option<U> + [const] Destruct,
{
match self {
Some(x) => f(x),
None => None,
}
}
core::option::Option::<T>::as_deref pub const fn as_deref(&self) -> Option<&T::Target>
where
T: [const] Deref,
{
self.as_ref().map(Deref::deref)
}
core::option::Option::<T>::as_deref_mut pub const fn as_deref_mut(&mut self) -> Option<&mut T::Target>
where
T: [const] DerefMut,
{
self.as_mut().map(DerefMut::deref_mut)
}
core::option::Option::<T>::as_mut pub const fn as_mut(&mut self) -> Option<&mut T> {
match *self {
Some(ref mut x) => Some(x),
None => None,
}
}
core::option::Option::<T>::as_ref pub const fn as_ref(&self) -> Option<&T> {
match *self {
Some(ref x) => Some(x),
None => None,
}
}
core::option::Option::<T>::expect pub const fn expect(self, msg: &str) -> T {
match self {
Some(val) => val,
None => expect_failed(msg),
}
}
core::option::Option::<T>::filter pub const fn filter<P>(self, predicate: P) -> Self
where
P: [const] FnOnce(&T) -> bool + [const] Destruct,
T: [const] Destruct,
{
if let Some(x) = self {
if predicate(&x) {
return Some(x);
}
}
None
}
core::option::Option::<T>::insert pub const fn insert(&mut self, value: T) -> &mut T
where
T: [const] Destruct,
{
*self = Some(value);
// SAFETY: the code above just filled the option
unsafe { self.as_mut().unwrap_unchecked() }
}
core::option::Option::<T>::inspect pub const fn inspect<F>(self, f: F) -> Self
where
F: [const] FnOnce(&T) + [const] Destruct,
{
if let Some(ref x) = self {
f(x);
}
self
}
core::option::Option::<T>::is_none pub const fn is_none(&self) -> bool {
!self.is_some()
}
core::option::Option::<T>::is_none_or pub const fn is_none_or(self, f: impl [const] FnOnce(T) -> bool + [const] Destruct) -> bool {
match self {
None => true,
Some(x) => f(x),
}
}
core::option::Option::<T>::is_some pub const fn is_some(&self) -> bool {
matches!(*self, Some(_))
}
core::option::Option::<T>::is_some_and pub const fn is_some_and(self, f: impl [const] FnOnce(T) -> bool + [const] Destruct) -> bool {
match self {
None => false,
Some(x) => f(x),
}
}
core::option::Option::<T>::iter pub fn iter(&self) -> Iter<'_, T> {
Iter { inner: Item { opt: self.as_ref() } }
}
core::option::Option::<T>::iter_mut pub fn iter_mut(&mut self) -> IterMut<'_, T> {
IterMut { inner: Item { opt: self.as_mut() } }
}
core::option::Option::<T>::len const fn len(&self) -> usize {
// Using the intrinsic avoids emitting a branch to get the 0 or 1.
let discriminant: isize = crate::intrinsics::discriminant_value(self);
discriminant as usize
}
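As a quick illustration (not from the source): the 0-or-1 length computed here surfaces through `Option`'s iterators, which implement `ExactSizeIterator`:

let some = Some(5);
let none: Option<i32> = None;
assert_eq!(some.iter().len(), 1); // discriminant of `Some` is 1
assert_eq!(none.iter().len(), 0); // discriminant of `None` is 0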
core::option::Option::<T>::map pub const fn map<U, F>(self, f: F) -> Option<U>
where
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Some(x) => Some(f(x)),
None => None,
}
}
core::option::Option::<T>::map_or pub const fn map_or<U, F>(self, default: U, f: F) -> U
where
F: [const] FnOnce(T) -> U + [const] Destruct,
U: [const] Destruct,
{
match self {
Some(t) => f(t),
None => default,
}
}
core::option::Option::<T>::map_or_default pub const fn map_or_default<U, F>(self, f: F) -> U
where
U: [const] Default,
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Some(t) => f(t),
None => U::default(),
}
}
core::option::Option::<T>::map_or_else pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
where
D: [const] FnOnce() -> U + [const] Destruct,
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Some(t) => f(t),
None => default(),
}
}
core::option::Option::<T>::ok_or pub const fn ok_or<E: [const] Destruct>(self, err: E) -> Result<T, E> {
match self {
Some(v) => Ok(v),
None => Err(err),
}
}
core::option::Option::<T>::ok_or_else pub const fn ok_or_else<E, F>(self, err: F) -> Result<T, E>
where
F: [const] FnOnce() -> E + [const] Destruct,
{
match self {
Some(v) => Ok(v),
None => Err(err()),
}
}
core::option::Option::<T>::or pub const fn or(self, optb: Option<T>) -> Option<T>
where
T: [const] Destruct,
{
match self {
x @ Some(_) => x,
None => optb,
}
}
core::option::Option::<T>::or_else pub const fn or_else<F>(self, f: F) -> Option<T>
where
F: [const] FnOnce() -> Option<T> + [const] Destruct,
// FIXME(const_hack): this `T: [const] Destruct` is unnecessary, but even precise live drops can't tell
// no value of type `T` gets dropped here
T: [const] Destruct,
{
match self {
x @ Some(_) => x,
None => f(),
}
}
core::option::Option::<T>::reduce pub fn reduce<U, R, F>(self, other: Option<U>, f: F) -> Option<R>
where
T: Into<R>,
U: Into<R>,
F: FnOnce(T, U) -> R,
{
match (self, other) {
(Some(a), Some(b)) => Some(f(a, b)),
(Some(a), _) => Some(a.into()),
(_, Some(b)) => Some(b.into()),
_ => None,
}
}
core::option::Option::<T>::replace pub const fn replace(&mut self, value: T) -> Option<T> {
mem::replace(self, Some(value))
}
core::option::Option::<T>::take pub const fn take(&mut self) -> Option<T> {
// FIXME(const-hack) replace `mem::replace` by `mem::take` when the latter is const ready
mem::replace(self, None)
}
core::option::Option::<T>::unwrap pub const fn unwrap(self) -> T {
match self {
Some(val) => val,
None => unwrap_failed(),
}
}
core::option::Option::<T>::unwrap_or pub const fn unwrap_or(self, default: T) -> T
where
T: [const] Destruct,
{
match self {
Some(x) => x,
None => default,
}
}
core::option::Option::<T>::unwrap_or_default pub const fn unwrap_or_default(self) -> T
where
T: [const] Default,
{
match self {
Some(x) => x,
None => T::default(),
}
}
core::option::Option::<T>::unwrap_or_else pub const fn unwrap_or_else<F>(self, f: F) -> T
where
F: [const] FnOnce() -> T + [const] Destruct,
{
match self {
Some(x) => x,
None => f(),
}
}
core::option::Option::<T>::xor pub const fn xor(self, optb: Option<T>) -> Option<T>
where
T: [const] Destruct,
{
match (self, optb) {
(a @ Some(_), None) => a,
(None, b @ Some(_)) => b,
_ => None,
}
}
core::option::Option::<T>::zip pub const fn zip<U>(self, other: Option<U>) -> Option<(T, U)>
where
T: [const] Destruct,
U: [const] Destruct,
{
match (self, other) {
(Some(a), Some(b)) => Some((a, b)),
_ => None,
}
}
core::option::Option::<core::option::Option<T>>::flatten pub const fn flatten(self) -> Option<T> {
// FIXME(const-hack): could be written with `and_then`
match self {
Some(inner) => inner,
None => None,
}
}
core::option::Option::<core::result::Result<T, E>>::transpose pub const fn transpose(self) -> Result<Option<T>, E> {
match self {
Some(Ok(x)) => Ok(Some(x)),
Some(Err(e)) => Err(e),
None => Ok(None),
}
}
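A brief usage sketch (illustrative only): `transpose` swaps the nesting of `Option` and `Result`:

let ok: Option<Result<i32, &str>> = Some(Ok(5));
assert_eq!(ok.transpose(), Ok(Some(5)));
let err: Option<Result<i32, &str>> = Some(Err("boom"));
assert_eq!(err.transpose(), Err("boom"));
assert_eq!(None::<Result<i32, &str>>.transpose(), Ok(None));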
core::option::expect_failed const fn expect_failed(msg: &str) -> ! {
panic_display(&msg)
}
core::option::unwrap_failed const fn unwrap_failed() -> ! {
panic("called `Option::unwrap()` on a `None` value")
}
core::panic::location::Location::<'a>::caller pub const fn caller() -> &'static Location<'static> {
crate::intrinsics::caller_location()
}
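For illustration (not part of the listing; `where_am_i` is a hypothetical helper): `caller_location` is what lets `#[track_caller]` functions report the call site instead of their own location:

#[track_caller]
fn where_am_i() -> &'static core::panic::Location<'static> {
    // Because of #[track_caller], this resolves to the caller's location.
    core::panic::Location::caller()
}

let loc = where_am_i();
// file/line/column describe the line containing the call above.
let _ = (loc.file(), loc.line(), loc.column());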
core::panic::location::Location::<'a>::column pub const fn column(&self) -> u32 {
self.col
}
core::panic::location::Location::<'a>::file pub const fn file(&self) -> &'a str {
// SAFETY: The filename is valid.
unsafe { self.filename.as_ref() }
}
core::panic::location::Location::<'a>::line pub const fn line(&self) -> u32 {
self.line
}
core::panic::panic_info::PanicInfo::<'a>::location pub fn location(&self) -> Option<&Location<'_>> {
// NOTE: If this is changed to sometimes return None,
// deal with that case in std::panicking::default_hook and core::panicking::panic_fmt.
Some(&self.location)
}
core::panic::panic_info::PanicInfo::<'a>::message pub fn message(&self) -> PanicMessage<'_> {
PanicMessage { message: self.message }
}
core::panic::panic_info::PanicInfo::<'a>::new pub(crate) fn new(
message: &'a fmt::Arguments<'a>,
location: &'a Location<'a>,
can_unwind: bool,
force_no_backtrace: bool,
) -> Self {
PanicInfo { location, message, can_unwind, force_no_backtrace }
}
core::panic::panic_info::PanicMessage::<'a>::as_str pub const fn as_str(&self) -> Option<&'static str> {
self.message.as_str()
}
core::panicking::assert_failed pub fn assert_failed<T, U>(
kind: AssertKind,
left: &T,
right: &U,
args: Option<fmt::Arguments<'_>>,
) -> !
where
T: fmt::Debug + ?Sized,
U: fmt::Debug + ?Sized,
{
assert_failed_inner(kind, &left, &right, args)
}
core::panicking::assert_failed_inner fn assert_failed_inner(
kind: AssertKind,
left: &dyn fmt::Debug,
right: &dyn fmt::Debug,
args: Option<fmt::Arguments<'_>>,
) -> ! {
let op = match kind {
AssertKind::Eq => "==",
AssertKind::Ne => "!=",
AssertKind::Match => "matches",
};
match args {
Some(args) => panic!(
r#"assertion `left {op} right` failed: {args}
left: {left:?}
right: {right:?}"#
),
None => panic!(
r#"assertion `left {op} right` failed
left: {left:?}
right: {right:?}"#
),
}
}
core::panicking::assert_matches_failed pub fn assert_matches_failed<T: fmt::Debug + ?Sized>(
left: &T,
right: &str,
args: Option<fmt::Arguments<'_>>,
) -> ! {
// The pattern is a string so it can be displayed directly.
struct Pattern<'a>(&'a str);
impl fmt::Debug for Pattern<'_> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.0)
}
}
assert_failed_inner(AssertKind::Match, &left, &Pattern(right), args);
}
core::panicking::panic pub const fn panic(expr: &'static str) -> ! {
// Use Arguments::from_str instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::from_str may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
// However, this optimization only works for `'static` strings: `from_str` also makes this
// message return `Some` from `Arguments::as_str`, which means it can become part of the panic
// payload without any allocation or copying. Shorter-lived strings would become invalid as
// stack frames get popped during unwinding, and couldn't be directly referenced from the
// payload.
panic_fmt(fmt::Arguments::from_str(expr));
}
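A hedged illustration of the `'static` point above (uses `std`, so it sits outside this `core` listing): a literal panic message travels as a `&'static str` payload without allocation:

let payload = std::panic::catch_unwind(|| panic!("static message")).unwrap_err();
// The payload downcasts directly to the original static string.
assert_eq!(payload.downcast_ref::<&'static str>(), Some(&"static message"));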
core::panicking::panic_display pub const fn panic_display<T: fmt::Display>(x: &T) -> ! {
panic_fmt(format_args!("{}", *x));
}
core::panicking::panic_nounwind_fmt::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::align_offset pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
assume, cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl,
unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
};
/// Calculate multiplicative modular inverse of `x` modulo `m`.
///
/// This implementation is tailored for `align_offset` and has following preconditions:
///
/// * `m` is a power-of-two;
/// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
///
/// Implementation of this function shall not panic. Ever.
#[inline]
const unsafe fn mod_inv(x: usize, m: usize) -> usize {
/// Multiplicative modular inverse table modulo 2⁴ = 16.
///
/// Note that this table does not contain values for which the inverse does not exist (i.e., for
/// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
/// Modulo for which the `INV_TABLE_MOD_16` is intended.
const INV_TABLE_MOD: usize = 16;
// SAFETY: `m` is required to be a power-of-two, hence non-zero.
let m_minus_one = unsafe { unchecked_sub(m, 1) };
let mut inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
let mut mod_gate = INV_TABLE_MOD;
// We iterate "up" using the following formula:
//
// $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
//
// This step needs to be applied repeatedly until `2²ⁿ ≥ m`, at which point we can
// finally reduce the computation to our desired `m` by taking `inverse mod m`.
//
// This computation is `O(log log m)`, which is to say, that on 64-bit machines this loop
// will always finish in at most 4 iterations.
loop {
// y = y * (2 - xy) mod n
//
// Note that we use wrapping operations here intentionally: the original formula
// performs e.g. subtraction `mod n`. It is entirely fine to do them modulo
// `usize::MAX + 1` (i.e., with wrapping arithmetic) instead, because we take
// the result `mod n` at the end anyway.
if mod_gate >= m {
break;
}
inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
let (new_gate, overflow) = mul_with_overflow(mod_gate, mod_gate);
if overflow {
break;
}
mod_gate = new_gate;
}
inverse & m_minus_one
}
let stride = size_of::<T>();
let addr: usize = p.addr();
// SAFETY: `a` is a power-of-two, therefore non-zero.
let a_minus_one = unsafe { unchecked_sub(a, 1) };
if stride == 0 {
// SPECIAL_CASE: handle 0-sized types. No matter how many times we step, the address will
// stay the same, so no offset will be able to align the pointer unless it is already
// aligned. This branch _will_ be optimized out as `stride` is known at compile-time.
let p_mod_a = addr & a_minus_one;
return if p_mod_a == 0 { 0 } else { usize::MAX };
}
// SAFETY: `stride == 0` case has been handled by the special case above.
let a_mod_stride = unsafe { unchecked_rem(a, stride) };
if a_mod_stride == 0 {
// SPECIAL_CASE: In cases where the `a` is divisible by `stride`, byte offset to align a
// pointer can be computed more simply through `-p (mod a)`. In the off-chance the byte
// offset is not a multiple of `stride`, the input pointer was misaligned and no pointer
// offset will be able to produce a `p` aligned to the specified `a`.
//
// The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
// like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
// redistributes operations around the load-bearing, but pessimizing `and` instruction
// sufficiently for LLVM to be able to utilize the various optimizations it knows about.
//
// LLVM handles the branch here particularly nicely. If this branch needs to be evaluated
// at runtime, it will produce a mask `if addr_mod_stride == 0 { 0 } else { usize::MAX }`
// in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
// computation produces.
let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
let byte_offset = wrapping_sub(aligned_address, addr);
// FIXME: Remove the assume after <https://github.com/llvm/llvm-project/issues/62502>
// SAFETY: Masking by `-a` can only affect the low bits, and thus cannot have reduced
// the value by more than `a-1`, so even though the intermediate values might have
// wrapped, the byte_offset is always in `[0, a)`.
unsafe { assume(byte_offset < a) };
// SAFETY: `stride == 0` case has been handled by the special case above.
let addr_mod_stride = unsafe { unchecked_rem(addr, stride) };
return if addr_mod_stride == 0 {
// SAFETY: `stride` is non-zero. This is guaranteed to divide exactly as well, because
// addr has been verified to be aligned to the original type’s alignment requirements.
unsafe { exact_div(byte_offset, stride) }
} else {
usize::MAX
};
}
// GENERAL_CASE: From here on we’re handling the very general case where `addr` may be
// misaligned, there isn’t an obvious relationship between `stride` and `a` that we can take an
// advantage of, etc. This case produces machine code that isn’t particularly high quality,
// compared to the special cases above. The code produced here is still within the realm of
// miracles, given the situations this case has to deal with.
// SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
// FIXME(const-hack) replace with min
let gcdpow = unsafe {
let x = cttz_nonzero(stride);
let y = cttz_nonzero(a);
if x < y { x } else { y }
};
// SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a `usize`.
let gcd = unsafe { unchecked_shl(1usize, gcdpow) };
// SAFETY: gcd is always greater or equal to 1.
if addr & unsafe { unchecked_sub(gcd, 1) } == 0 {
// This branch solves for the following linear congruence equation:
//
// ` p + so = 0 mod a `
//
// `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the
// requested alignment.
//
// With `g = gcd(a, s)`, and the above condition asserting that `p` is also divisible by
// `g`, we can denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to:
//
// ` p' + s'o = 0 mod a' `
// ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
//
// The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the
// second term is "how does incrementing `p` by `s` bytes change the relative alignment of
// `p`" (again divided by `g`). Division by `g` is necessary to make the inverse well
// formed if `a` and `s` are not co-prime.
//
// Furthermore, the result produced by this solution is not "minimal", so it is necessary
// to take the result `o mod lcm(s, a)`. This `lcm(s, a)` is the same as `a'`.
// SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
// `a`.
let a2 = unsafe { unchecked_shr(a, gcdpow) };
// SAFETY: `a2` is non-zero. Shifting `a` by `gcdpow` cannot shift out any of the set bits
// in `a` (of which it has exactly one).
let a2minus1 = unsafe { unchecked_sub(a2, 1) };
// SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
// `a`.
let s2 = unsafe { unchecked_shr(stride & a_minus_one, gcdpow) };
// SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
// `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will
// always be strictly greater than `(p % a) >> gcdpow`.
let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(addr & a_minus_one, gcdpow)) };
// SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2`
// because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`.
return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1;
}
// Cannot be aligned at all.
usize::MAX
}
core::ptr::align_offset::mod_inv const unsafe fn mod_inv(x: usize, m: usize) -> usize {
/// Multiplicative modular inverse table modulo 2⁴ = 16.
///
/// Note that this table does not contain values for which the inverse does not exist (i.e., for
/// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
/// Modulo for which the `INV_TABLE_MOD_16` is intended.
const INV_TABLE_MOD: usize = 16;
// SAFETY: `m` is required to be a power-of-two, hence non-zero.
let m_minus_one = unsafe { unchecked_sub(m, 1) };
let mut inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
let mut mod_gate = INV_TABLE_MOD;
// We iterate "up" using the following formula:
//
// $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
//
// This step needs to be applied repeatedly until `2²ⁿ ≥ m`, at which point we can
// finally reduce the computation to our desired `m` by taking `inverse mod m`.
//
// This computation is `O(log log m)`, which is to say, that on 64-bit machines this loop
// will always finish in at most 4 iterations.
loop {
// y = y * (2 - xy) mod n
//
// Note that we use wrapping operations here intentionally: the original formula
// performs e.g. subtraction `mod n`. It is entirely fine to do them modulo
// `usize::MAX + 1` (i.e., with wrapping arithmetic) instead, because we take
// the result `mod n` at the end anyway.
if mod_gate >= m {
break;
}
inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
let (new_gate, overflow) = mul_with_overflow(mod_gate, mod_gate);
if overflow {
break;
}
mod_gate = new_gate;
}
inverse & m_minus_one
}
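The lifting formula in the loop can be sanity-checked in isolation. A standalone sketch (the `lift` helper is hypothetical, not from the source): an inverse modulo 2ⁿ is upgraded to one modulo 2²ⁿ by one step of `y' = y * (2 - x*y)`:

fn lift(x: u64, y: u64) -> u64 {
    // One Newton step: if x*y ≡ 1 (mod 2^n), then x*lift(x, y) ≡ 1 (mod 2^(2n)).
    y.wrapping_mul(2u64.wrapping_sub(x.wrapping_mul(y)))
}

let x = 7u64;
let y16 = 7u64; // 7 * 7 = 49 ≡ 1 (mod 16), matching INV_TABLE_MOD_16[3]
assert_eq!((x * y16) % 16, 1);
let y256 = lift(x, y16);
assert_eq!(x.wrapping_mul(y256) % 256, 1); // now valid modulo 16² = 256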
core::ptr::alignment::Alignment::as_nonzero pub const fn as_nonzero(self) -> NonZero<usize> {
// This transmutes directly to avoid the UbCheck in `NonZero::new_unchecked`
// since there's no way for the user to trip that check anyway -- the
// validity invariant of the type would have to have been broken earlier --
// and emitting it in an otherwise simple method is bad for compile time.
// SAFETY: All the discriminants are non-zero.
unsafe { mem::transmute::<Alignment, NonZero<usize>>(self) }
}
core::ptr::alignment::Alignment::as_usize pub const fn as_usize(self) -> usize {
// Going through `as_nonzero` helps this be more clearly the inverse of
// `new_unchecked`, letting MIR optimizations fold it away.
self.as_nonzero().get()
}
core::ptr::alignment::Alignment::log2 pub const fn log2(self) -> u32 {
self.as_nonzero().trailing_zeros()
}
core::ptr::alignment::Alignment::new pub const fn new(align: usize) -> Option<Self> {
if align.is_power_of_two() {
// SAFETY: Just checked it only has one bit set
Some(unsafe { Self::new_unchecked(align) })
} else {
None
}
}
core::ptr::alignment::Alignment::new_unchecked pub const unsafe fn new_unchecked(align: usize) -> Self {
assert_unsafe_precondition!(
check_language_ub,
"Alignment::new_unchecked requires a power of two",
(align: usize = align) => align.is_power_of_two()
);
// SAFETY: By precondition, this must be a power of two, and
// our variants encompass all possible powers of two.
unsafe { mem::transmute::<usize, Alignment>(align) }
}
core::ptr::const_ptr::<impl *const T>::add pub const unsafe fn add(self, count: usize) -> Self
where
T: Sized,
{
#[cfg(debug_assertions)]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
#[cfg(debug_assertions)] // Expensive, and doesn't catch much in the wild.
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::add requires that the address calculation does not overflow",
(
this: *const () = self as *const (),
count: usize = count,
size: usize = size_of::<T>(),
) => runtime_add_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::const_ptr::<impl *const T>::add::runtime_add_nowrap const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
core::ptr::const_ptr::<impl *const T>::add::runtime_add_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::addr pub fn addr(self) -> usize {
// A pointer-to-integer transmute currently has exactly the right semantics: it returns the
// address without exposing the provenance. Note that this is *not* a stable guarantee about
// transmute semantics, it relies on sysroot crates having special status.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
unsafe { mem::transmute(self.cast::<()>()) }
}
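A short strict-provenance sketch (illustrative, not from the source): `addr` only extracts the address, so a usable pointer must be rebuilt with `with_addr` (or via the separate `expose_provenance` mechanism shown below):

let x = 42u32;
let p: *const u32 = &x;
let a = p.addr(); // address only; provenance is not exposed
let q = p.with_addr(a); // reattach the original pointer's provenance
assert_eq!(unsafe { *q }, 42);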
core::ptr::const_ptr::<impl *const T>::align_offset pub fn align_offset(self, align: usize) -> usize
where
T: Sized,
{
if !align.is_power_of_two() {
panic!("align_offset: align is not a power-of-two");
}
// SAFETY: `align` has been checked to be a power of 2 above
let ret = unsafe { align_offset(self, align) };
// Inform Miri that we want to consider the resulting pointer to be suitably aligned.
#[cfg(miri)]
if ret != usize::MAX {
intrinsics::miri_promise_symbolic_alignment(self.wrapping_add(ret).cast(), align);
}
ret
}
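An illustrative use of the method (buffer name hypothetical): computing how many elements to advance so a pointer meets a stricter alignment:

let buf = [0u8; 64];
let p = buf.as_ptr();
let off = p.align_offset(8); // elements (here: bytes) until 8-byte alignment
assert!(off < 8);
assert_eq!(p.wrapping_add(off).addr() % 8, 0);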
core::ptr::const_ptr::<impl *const T>::byte_add pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
}
core::ptr::const_ptr::<impl *const T>::cast pub const fn cast<U>(self) -> *const U {
self as _
}
core::ptr::const_ptr::<impl *const T>::cast_array pub const fn cast_array<const N: usize>(self) -> *const [T; N] {
self.cast()
}
core::ptr::const_ptr::<impl *const T>::expose_provenance pub fn expose_provenance(self) -> usize {
self.cast::<()>() as usize
}
core::ptr::const_ptr::<impl *const T>::is_aligned_to pub fn is_aligned_to(self, align: usize) -> bool {
if !align.is_power_of_two() {
panic!("is_aligned_to: align is not a power-of-two");
}
self.addr() & (align - 1) == 0
}
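The mask trick above relies on `align` being a power of two; a small sketch with illustrative values:

// For power-of-two `align`, `addr & (align - 1)` equals `addr % align`.
let addr = 0x1004usize;
assert_eq!(addr & (8 - 1), addr % 8); // 4, so not 8-aligned
assert_eq!(addr & (4 - 1), 0); // 4-aligned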
core::ptr::const_ptr::<impl *const T>::is_null pub const fn is_null(self) -> bool {
// Compare via a cast to a thin pointer, so that for fat pointers only
// the "data" part is considered for null-ness.
let ptr = self as *const u8;
const_eval_select!(
@capture { ptr: *const u8 } -> bool:
// This use of `const_raw_ptr_comparison` has been explicitly blessed by t-lang.
if const #[rustc_allow_const_fn_unstable(const_raw_ptr_comparison)] {
match (ptr).guaranteed_eq(null_mut()) {
Some(res) => res,
// To remain maximally conservative, we stop execution when we don't
// know whether the pointer is null or not.
// We can *not* return `false` here, that would be unsound in `NonNull::new`!
None => panic!("null-ness of this pointer cannot be determined in const context"),
}
} else {
ptr.addr() == 0
}
)
}
core::ptr::const_ptr::<impl *const T>::is_null::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::offset pub const unsafe fn offset(self, count: isize) -> *const T
where
T: Sized,
{
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_offset_nowrap(this: *const (), count: isize, size: usize) -> bool {
// We can use const_eval_select here because this is only for UB checks.
const_eval_select!(
@capture { this: *const (), count: isize, size: usize } -> bool:
if const {
true
} else {
// `size` is the size of a Rust type, so we know that
// `size <= isize::MAX` and thus `as` cast here is not lossy.
let Some(byte_offset) = count.checked_mul(size as isize) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add_signed(byte_offset);
!overflow
}
)
}
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::offset requires the address calculation to not overflow",
(
this: *const () = self as *const (),
count: isize = count,
size: usize = size_of::<T>(),
) => runtime_offset_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::const_ptr::<impl *const T>::offset::runtime_offset_nowrap const fn runtime_offset_nowrap(this: *const (), count: isize, size: usize) -> bool {
// We can use const_eval_select here because this is only for UB checks.
const_eval_select!(
@capture { this: *const (), count: isize, size: usize } -> bool:
if const {
true
} else {
// `size` is the size of a Rust type, so we know that
// `size <= isize::MAX` and thus `as` cast here is not lossy.
let Some(byte_offset) = count.checked_mul(size as isize) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add_signed(byte_offset);
!overflow
}
)
}
core::ptr::const_ptr::<impl *const T>::offset::runtime_offset_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned pub const unsafe fn offset_from_unsigned(self, origin: *const T) -> usize
where
T: Sized,
{
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_ptr_ge(this: *const (), origin: *const ()) -> bool {
const_eval_select!(
@capture { this: *const (), origin: *const () } -> bool:
if const {
true
} else {
this >= origin
}
)
}
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::offset_from_unsigned requires `self >= origin`",
(
this: *const () = self as *const (),
origin: *const () = origin as *const (),
) => runtime_ptr_ge(this, origin)
);
let pointee_size = size_of::<T>();
assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
// SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`.
unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) }
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned::runtime_ptr_ge const fn runtime_ptr_ge(this: *const (), origin: *const ()) -> bool {
const_eval_select!(
@capture { this: *const (), origin: *const () } -> bool:
if const {
true
} else {
this >= origin
}
)
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned::runtime_ptr_ge::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::read pub const unsafe fn read(self) -> T
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `read`.
unsafe { read(self) }
}
core::ptr::const_ptr::<impl *const T>::read_unaligned pub const unsafe fn read_unaligned(self) -> T
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `read_unaligned`.
unsafe { read_unaligned(self) }
}
core::ptr::const_ptr::<impl *const T>::to_raw_parts pub const fn to_raw_parts(self) -> (*const (), <T as super::Pointee>::Metadata) {
(self.cast(), metadata(self))
}
core::ptr::const_ptr::<impl *const T>::try_cast_aligned pub fn try_cast_aligned<U>(self) -> Option<*const U> {
if self.is_aligned_to(align_of::<U>()) { Some(self.cast()) } else { None }
}
core::ptr::const_ptr::<impl *const T>::with_metadata_of pub const fn with_metadata_of<U>(self, meta: *const U) -> *const U
where
U: PointeeSized,
{
from_raw_parts::<U>(self as *const (), metadata(meta))
}
core::ptr::const_ptr::<impl *const T>::wrapping_add pub const fn wrapping_add(self, count: usize) -> Self
where
T: Sized,
{
self.wrapping_offset(count as isize)
}
core::ptr::const_ptr::<impl *const T>::wrapping_offset pub const fn wrapping_offset(self, count: isize) -> *const T
where
T: Sized,
{
// SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
unsafe { intrinsics::arith_offset(self, count) }
}
core::ptr::const_ptr::<impl *const [T]>::as_array pub const fn as_array<const N: usize>(self) -> Option<*const [T; N]> {
if self.len() == N {
let me = self.as_ptr() as *const [T; N];
Some(me)
} else {
None
}
}
core::ptr::const_ptr::<impl *const [T]>::as_ptr pub const fn as_ptr(self) -> *const T {
self as *const T
}
core::ptr::const_ptr::<impl *const [T]>::is_empty pub const fn is_empty(self) -> bool {
self.len() == 0
}
core::ptr::const_ptr::<impl *const [T]>::len pub const fn len(self) -> usize {
metadata(self)
}
core::ptr::const_ptr::<impl core::cmp::Ord for *const T>::cmp fn cmp(&self, other: &*const T) -> Ordering {
if self < other {
Less
} else if self == other {
Equal
} else {
Greater
}
}
core::ptr::const_ptr::<impl core::cmp::PartialEq for *const T>::eq fn eq(&self, other: &*const T) -> bool {
*self == *other
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::ge fn ge(&self, other: &*const T) -> bool {
*self >= *other
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::gt fn gt(&self, other: &*const T) -> bool {
*self > *other
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::le fn le(&self, other: &*const T) -> bool {
*self <= *other
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::lt fn lt(&self, other: &*const T) -> bool {
*self < *other
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::partial_cmp fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
Some(self.cmp(other))
}
core::ptr::copy pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
// SAFETY: the safety contract for `copy` must be upheld by the caller.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::copy requires that both pointer arguments are aligned and non-null",
(
src: *const () = src as *const (),
dst: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
zero_size: bool = T::IS_ZST || count == 0,
) =>
ub_checks::maybe_is_aligned_and_not_null(src, align, zero_size)
&& ub_checks::maybe_is_aligned_and_not_null(dst, align, zero_size)
);
crate::intrinsics::copy(src, dst, count)
}
}
core::ptr::copy_nonoverlapping pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::copy_nonoverlapping requires that both pointer arguments are aligned and non-null \
and the specified memory ranges do not overlap",
(
src: *const () = src as *const (),
dst: *mut () = dst as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
count: usize = count,
) => {
let zero_size = count == 0 || size == 0;
ub_checks::maybe_is_aligned_and_not_null(src, align, zero_size)
&& ub_checks::maybe_is_aligned_and_not_null(dst, align, zero_size)
&& ub_checks::maybe_is_nonoverlapping(src, dst, size, count)
}
);
// SAFETY: the safety contract for `copy_nonoverlapping` must be
// upheld by the caller.
unsafe { crate::intrinsics::copy_nonoverlapping(src, dst, count) }
}
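An illustrative contrast (not from the source): `copy` tolerates overlap like C's `memmove`, while `copy_nonoverlapping` is the `memcpy` analogue. A sketch shifting elements within one buffer:

let mut a = [1, 2, 3, 4, 5];
let p = a.as_mut_ptr();
// Source a[0..4] and destination a[1..5] overlap, so only `copy` is allowed.
unsafe { core::ptr::copy(p, p.add(1), 4) };
assert_eq!(a, [1, 1, 2, 3, 4]);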
core::ptr::from_ref pub const fn from_ref<T: PointeeSized>(r: &T) -> *const T {
r
}
core::ptr::metadata::DynMetadata::<Dyn>::vtable_ptr fn vtable_ptr(self) -> *const VTable {
// SAFETY: this layout assumption is hard-coded into the compiler.
// If it's somehow not a size match, the transmute will error.
unsafe { crate::mem::transmute::<Self, *const VTable>(self) }
}
core::ptr::metadata::from_raw_parts pub const fn from_raw_parts<T: PointeeSized>(
data_pointer: *const impl Thin,
metadata: <T as Pointee>::Metadata,
) -> *const T {
aggregate_raw_ptr(data_pointer, metadata)
}
core::ptr::metadata::from_raw_parts_mut pub const fn from_raw_parts_mut<T: PointeeSized>(
data_pointer: *mut impl Thin,
metadata: <T as Pointee>::Metadata,
) -> *mut T {
aggregate_raw_ptr(data_pointer, metadata)
}
core::ptr::metadata::metadata pub const fn metadata<T: PointeeSized>(ptr: *const T) -> <T as Pointee>::Metadata {
ptr_metadata(ptr)
}
core::ptr::mut_ptr::<impl *mut T>::add pub const unsafe fn add(self, count: usize) -> Self
where
T: Sized,
{
#[cfg(debug_assertions)]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
#[cfg(debug_assertions)] // Expensive, and doesn't catch much in the wild.
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::add requires that the address calculation does not overflow",
(
this: *const () = self as *const (),
count: usize = count,
size: usize = size_of::<T>(),
) => runtime_add_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::mut_ptr::<impl *mut T>::add::runtime_add_nowrap const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
core::ptr::mut_ptr::<impl *mut T>::add::runtime_add_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::mut_ptr::<impl *mut T>::addr pub fn addr(self) -> usize {
// A pointer-to-integer transmute currently has exactly the right semantics: it returns the
// address without exposing the provenance. Note that this is *not* a stable guarantee about
// transmute semantics, it relies on sysroot crates having special status.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
unsafe { mem::transmute(self.cast::<()>()) }
}
core::ptr::mut_ptr::<impl *mut T>::as_mut pub const unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
// SAFETY: the caller must guarantee that `self` is valid for
// a mutable reference if it isn't null.
if self.is_null() { None } else { unsafe { Some(&mut *self) } }
}
core::ptr::mut_ptr::<impl *mut T>::cast pub const fn cast<U>(self) -> *mut U {
self as _
}
core::ptr::mut_ptr::<impl *mut T>::cast_array pub const fn cast_array<const N: usize>(self) -> *mut [T; N] {
self.cast()
}
core::ptr::mut_ptr::<impl *mut T>::cast_const pub const fn cast_const(self) -> *const T {
self as _
}
core::ptr::mut_ptr::<impl *mut T>::drop_in_place pub const unsafe fn drop_in_place(self)
where
T: [const] Destruct,
{
// SAFETY: the caller must uphold the safety contract for `drop_in_place`.
unsafe { drop_in_place(self) }
}
core::ptr::mut_ptr::<impl *mut T>::is_null pub const fn is_null(self) -> bool {
self.cast_const().is_null()
}
core::ptr::mut_ptr::<impl *mut T>::offset pub const unsafe fn offset(self, count: isize) -> *mut T
where
T: Sized,
{
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_offset_nowrap(this: *const (), count: isize, size: usize) -> bool {
// We can use const_eval_select here because this is only for UB checks.
const_eval_select!(
@capture { this: *const (), count: isize, size: usize } -> bool:
if const {
true
} else {
// `size` is the size of a Rust type, so we know that
// `size <= isize::MAX` and thus `as` cast here is not lossy.
let Some(byte_offset) = count.checked_mul(size as isize) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add_signed(byte_offset);
!overflow
}
)
}
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::offset requires the address calculation to not overflow",
(
this: *const () = self as *const (),
count: isize = count,
size: usize = size_of::<T>(),
) => runtime_offset_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
// The obtained pointer is valid for writes since the caller must
// guarantee that it points to the same allocation as `self`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::mut_ptr::<impl *mut T>::offset::runtime_offset_nowrap const fn runtime_offset_nowrap(this: *const (), count: isize, size: usize) -> bool {
// We can use const_eval_select here because this is only for UB checks.
const_eval_select!(
@capture { this: *const (), count: isize, size: usize } -> bool:
if const {
true
} else {
// `size` is the size of a Rust type, so we know that
// `size <= isize::MAX` and thus `as` cast here is not lossy.
let Some(byte_offset) = count.checked_mul(size as isize) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add_signed(byte_offset);
!overflow
}
)
}
core::ptr::mut_ptr::<impl *mut T>::offset::runtime_offset_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::mut_ptr::<impl *mut T>::offset_from_unsigned pub const unsafe fn offset_from_unsigned(self, origin: *const T) -> usize
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `offset_from_unsigned`.
unsafe { (self as *const T).offset_from_unsigned(origin) }
}
core::ptr::mut_ptr::<impl *mut T>::read pub const unsafe fn read(self) -> T
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `read`.
unsafe { read(self) }
}
core::ptr::mut_ptr::<impl *mut T>::replace pub const unsafe fn replace(self, src: T) -> T
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `replace`.
unsafe { replace(self, src) }
}
core::ptr::mut_ptr::<impl *mut T>::sub pub const unsafe fn sub(self, count: usize) -> Self
where
T: Sized,
{
#[cfg(debug_assertions)]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_sub_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
byte_offset <= (isize::MAX as usize) && this.addr() >= byte_offset
}
)
}
#[cfg(debug_assertions)] // Expensive, and doesn't catch much in the wild.
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::sub requires that the address calculation does not overflow",
(
this: *const () = self as *const (),
count: usize = count,
size: usize = size_of::<T>(),
) => runtime_sub_nowrap(this, count, size)
);
if T::IS_ZST {
// Pointer arithmetic does nothing when the pointee is a ZST.
self
} else {
// SAFETY: the caller must uphold the safety contract for `offset`.
// Because the pointee is *not* a ZST, that means that `count` is
// at most `isize::MAX`, and thus the negation cannot overflow.
unsafe { intrinsics::offset(self, intrinsics::unchecked_sub(0, count as isize)) }
}
}
core::ptr::mut_ptr::<impl *mut T>::sub::runtime_sub_nowrap const fn runtime_sub_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
byte_offset <= (isize::MAX as usize) && this.addr() >= byte_offset
}
)
}
core::ptr::mut_ptr::<impl *mut T>::sub::runtime_sub_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::mut_ptr::<impl *mut T>::to_raw_parts pub const fn to_raw_parts(self) -> (*mut (), <T as super::Pointee>::Metadata) {
(self.cast(), super::metadata(self))
}
core::ptr::mut_ptr::<impl *mut T>::wrapping_add pub const fn wrapping_add(self, count: usize) -> Self
where
T: Sized,
{
self.wrapping_offset(count as isize)
}
core::ptr::mut_ptr::<impl *mut T>::wrapping_offset pub const fn wrapping_offset(self, count: isize) -> *mut T
where
T: Sized,
{
// SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
unsafe { intrinsics::arith_offset(self, count) as *mut T }
}
core::ptr::mut_ptr::<impl *mut T>::write pub const unsafe fn write(self, val: T)
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `write`.
unsafe { write(self, val) }
}
core::ptr::mut_ptr::<impl *mut T>::write_bytes pub const unsafe fn write_bytes(self, val: u8, count: usize)
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `write_bytes`.
unsafe { write_bytes(self, val, count) }
}
core::ptr::mut_ptr::<impl *mut [T]>::as_mut_array pub const fn as_mut_array<const N: usize>(self) -> Option<*mut [T; N]> {
if self.len() == N {
let me = self.as_mut_ptr() as *mut [T; N];
Some(me)
} else {
None
}
}
core::ptr::mut_ptr::<impl *mut [T]>::as_mut_ptr pub const fn as_mut_ptr(self) -> *mut T {
self as *mut T
}
core::ptr::mut_ptr::<impl *mut [T]>::get_unchecked_mut pub const unsafe fn get_unchecked_mut<I>(self, index: I) -> *mut I::Output
where
I: [const] SliceIndex<[T]>,
{
// SAFETY: the caller ensures that `self` is dereferenceable and `index` in-bounds.
unsafe { index.get_unchecked_mut(self) }
}
core::ptr::mut_ptr::<impl *mut [T]>::is_empty pub const fn is_empty(self) -> bool {
self.len() == 0
}
core::ptr::mut_ptr::<impl *mut [T]>::len pub const fn len(self) -> usize {
metadata(self)
}
core::ptr::mut_ptr::<impl *mut [T]>::split_at_mut pub unsafe fn split_at_mut(self, mid: usize) -> (*mut [T], *mut [T]) {
assert!(mid <= self.len());
// SAFETY: The assert above is only a safety-net as long as `self.len()` is correct
// The actual safety requirements of this function are the same as for `split_at_mut_unchecked`
unsafe { self.split_at_mut_unchecked(mid) }
}
core::ptr::mut_ptr::<impl *mut [T]>::split_at_mut_unchecked pub unsafe fn split_at_mut_unchecked(self, mid: usize) -> (*mut [T], *mut [T]) {
let len = self.len();
let ptr = self.as_mut_ptr();
// SAFETY: Caller must pass a valid pointer and an index that is in-bounds.
let tail = unsafe { ptr.add(mid) };
(
crate::ptr::slice_from_raw_parts_mut(ptr, mid),
crate::ptr::slice_from_raw_parts_mut(tail, len - mid),
)
}
core::ptr::mut_ptr::<impl core::cmp::PartialEq for *mut T>::eq fn eq(&self, other: &*mut T) -> bool {
*self == *other
}
core::ptr::non_null::NonNull::<T>::add pub const unsafe fn add(self, count: usize) -> Self
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `offset`.
// Additionally, the safety contract of `offset` guarantees that the resulting pointer
// points into an allocation; there can't be an allocation at null, so it's safe to
// construct `NonNull`.
unsafe { transmute(intrinsics::offset(self.as_ptr(), count)) }
}
core::ptr::non_null::NonNull::<T>::as_mut pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a mutable reference.
unsafe { &mut *self.as_ptr() }
}
core::ptr::non_null::NonNull::<T>::as_ptr pub const fn as_ptr(self) -> *mut T {
// This is a transmute for the same reasons as `NonZero::get`.
// SAFETY: `NonNull` is `transparent` over a `*const T`, and `*const T`
// and `*mut T` have the same layout, so transitively we can transmute
// our `NonNull` to a `*mut T` directly.
unsafe { mem::transmute::<Self, *mut T>(self) }
}
core::ptr::non_null::NonNull::<T>::as_ref pub const unsafe fn as_ref<'a>(&self) -> &'a T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a reference.
// `cast_const` avoids a mutable raw pointer deref.
unsafe { &*self.as_ptr().cast_const() }
}
core::ptr::non_null::NonNull::<T>::cast pub const fn cast<U>(self) -> NonNull<U> {
// SAFETY: `self` is a `NonNull` pointer which is necessarily non-null
unsafe { transmute(self.as_ptr() as *mut U) }
}
core::ptr::non_null::NonNull::<T>::cast_array pub const fn cast_array<const N: usize>(self) -> NonNull<[T; N]> {
self.cast()
}
core::ptr::non_null::NonNull::<T>::from_mut pub const fn from_mut(r: &mut T) -> Self {
// SAFETY: A mutable reference cannot be null.
unsafe { transmute(r as *mut T) }
}
core::ptr::non_null::NonNull::<T>::from_ref pub const fn from_ref(r: &T) -> Self {
// SAFETY: A reference cannot be null.
unsafe { transmute(r as *const T) }
}
core::ptr::non_null::NonNull::<T>::new_unchecked pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
// SAFETY: the caller must guarantee that `ptr` is non-null.
unsafe {
assert_unsafe_precondition!(
check_language_ub,
"NonNull::new_unchecked requires that the pointer is non-null",
(ptr: *mut () = ptr as *mut ()) => !ptr.is_null()
);
transmute(ptr)
}
}
core::ptr::non_null::NonNull::<T>::offset pub const unsafe fn offset(self, count: isize) -> Self
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `offset`.
// Additionally, the safety contract of `offset` guarantees that the resulting pointer
// points into an allocation; there can't be an allocation at null, so it's safe to
// construct `NonNull`.
unsafe { transmute(intrinsics::offset(self.as_ptr(), count)) }
}
core::ptr::non_null::NonNull::<T>::offset_from_unsigned pub const unsafe fn offset_from_unsigned(self, subtracted: NonNull<T>) -> usize
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `offset_from_unsigned`.
unsafe { self.as_ptr().offset_from_unsigned(subtracted.as_ptr()) }
}
core::ptr::non_null::NonNull::<T>::read pub const unsafe fn read(self) -> T
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `read`.
unsafe { ptr::read(self.as_ptr()) }
}
core::ptr::non_null::NonNull::<T>::sub pub const unsafe fn sub(self, count: usize) -> Self
where
T: Sized,
{
if T::IS_ZST {
// Pointer arithmetic does nothing when the pointee is a ZST.
self
} else {
// SAFETY: the caller must uphold the safety contract for `offset`.
// Because the pointee is *not* a ZST, that means that `count` is
// at most `isize::MAX`, and thus the negation cannot overflow.
unsafe { self.offset((count as isize).unchecked_neg()) }
}
}
core::ptr::null pub const fn null<T: PointeeSized + Thin>() -> *const T {
from_raw_parts(without_provenance::<()>(0), ())
}
core::ptr::null_mut pub const fn null_mut<T: PointeeSized + Thin>() -> *mut T {
from_raw_parts_mut(without_provenance_mut::<()>(0), ())
}
core::ptr::read pub const unsafe fn read<T>(src: *const T) -> T {
// It would be semantically correct to implement this via `copy_nonoverlapping`
// and `MaybeUninit`, as was done before PR #109035. Calling `assume_init`
// provides enough information to know that this is a typed operation.
// However, as of March 2023 the compiler was not capable of taking advantage
// of that information. Thus, the implementation here switched to an intrinsic,
// which lowers to `_0 = *src` in MIR, to address a few issues:
//
// - Using `MaybeUninit::assume_init` after a `copy_nonoverlapping` was not
// turning the untyped copy into a typed load. As such, the generated
// `load` in LLVM didn't get various metadata, such as `!range` (#73258),
// `!nonnull`, and `!noundef`, resulting in poorer optimization.
// - Going through the extra local resulted in multiple extra copies, even
// in optimized MIR. (Ignoring StorageLive/Dead, the intrinsic is one
// MIR statement, while the previous implementation was eight.) LLVM
// could sometimes optimize them away, but because `read` is at the core
// of so many things, not having them in the first place improves what we
// hand off to the backend. For example, `mem::replace::<Big>` previously
// emitted 4 `alloca` and 6 `memcpy`s, but is now 1 `alloca` and 3 `memcpy`s.
// - In general, this approach keeps us from getting any more bugs (like
// #106369) that boil down to "`read(p)` is worse than `*p`", as this
// makes them look identical to the backend (or other MIR consumers).
//
// Future enhancements to MIR optimizations might well allow this to return
// to the previous implementation, rather than using an intrinsic.
// SAFETY: the caller must guarantee that `src` is valid for reads.
unsafe {
#[cfg(debug_assertions)] // Too expensive to always enable (for now?)
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::read requires that the pointer argument is aligned and non-null",
(
addr: *const () = src as *const (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
crate::intrinsics::read_via_copy(src)
}
}
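A minimal usage sketch (illustrative): `read` performs a typed, bitwise copy out of the pointee and leaves the source untouched:

let x = 12u64;
let y = unsafe { core::ptr::read(&x) }; // typed copy; `x` remains valid
assert_eq!(x, y);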
core::ptr::read_unaligned pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
let mut tmp = MaybeUninit::<T>::uninit();
// SAFETY: the caller must guarantee that `src` is valid for reads.
// `src` cannot overlap `tmp` because `tmp` was just allocated on
// the stack as a separate allocation.
//
// Also, since we just wrote a valid value into `tmp`, it is guaranteed
// to be properly initialized.
unsafe {
copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, size_of::<T>());
tmp.assume_init()
}
}
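An illustrative sketch of the unaligned path (buffer contents arbitrary): pulling a `u32` out of an odd byte offset, which plain `read` would not permit:

let bytes = [0u8, 0x78, 0x56, 0x34, 0x12, 0];
// `bytes.as_ptr().add(1)` is (in general) not 4-aligned.
let v = unsafe { core::ptr::read_unaligned(bytes.as_ptr().add(1) as *const u32) };
assert_eq!(v, u32::from_ne_bytes([0x78, 0x56, 0x34, 0x12]));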
core::ptr::read_volatile pub unsafe fn read_volatile<T>(src: *const T) -> T {
// SAFETY: the caller must uphold the safety contract for `volatile_load`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::read_volatile requires that the pointer argument is aligned",
(
addr: *const () = src as *const (),
align: usize = align_of::<T>(),
) => ub_checks::maybe_is_aligned(addr, align)
);
intrinsics::volatile_load(src)
}
}
core::ptr::slice_from_raw_parts pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
from_raw_parts(data, len)
}
core::ptr::slice_from_raw_parts_mut pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
from_raw_parts_mut(data, len)
}
core::ptr::swap pub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with.
// We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
let mut tmp = MaybeUninit::<T>::uninit();
// Perform the swap
// SAFETY: the caller must guarantee that `x` and `y` are
// valid for writes and properly aligned. `tmp` cannot be
// overlapping either `x` or `y` because `tmp` was just allocated
// on the stack as a separate allocation.
unsafe {
copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
copy(y, x, 1); // `x` and `y` may overlap
copy_nonoverlapping(tmp.as_ptr(), y, 1);
}
}
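A usage sketch (illustrative): the raw form mirrors `mem::swap`, but as the `copy` in the middle acknowledges, it also accepts pointers that may overlap:

let mut a = 1;
let mut b = 2;
unsafe { core::ptr::swap(&mut a, &mut b) };
assert_eq!((a, b), (2, 1));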
core::ptr::swap_chunk fn swap_chunk<const N: usize>(x: &mut MaybeUninit<[u8; N]>, y: &mut MaybeUninit<[u8; N]>) {
let a = *x;
let b = *y;
*x = b;
*y = a;
}
core::ptr::swap_nonoverlapping pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
ub_checks::assert_unsafe_precondition!(
check_library_ub,
"ptr::swap_nonoverlapping requires that both pointer arguments are aligned and non-null \
and the specified memory ranges do not overlap",
(
x: *mut () = x as *mut (),
y: *mut () = y as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
count: usize = count,
) => {
let zero_size = size == 0 || count == 0;
ub_checks::maybe_is_aligned_and_not_null(x, align, zero_size)
&& ub_checks::maybe_is_aligned_and_not_null(y, align, zero_size)
&& ub_checks::maybe_is_nonoverlapping(x, y, size, count)
}
);
const_eval_select!(
@capture[T] { x: *mut T, y: *mut T, count: usize }:
if const {
// At compile-time we don't need all the special code below.
// SAFETY: Same preconditions as this function
unsafe { swap_nonoverlapping_const(x, y, count) }
} else {
// Going through a slice here helps codegen know the size fits in `isize`
let slice = slice_from_raw_parts_mut(x, count);
// SAFETY: This is all readable from the pointer, meaning it's one
// allocation, and thus cannot be more than isize::MAX bytes.
let bytes = unsafe { mem::size_of_val_raw::<[T]>(slice) };
if let Some(bytes) = NonZero::new(bytes) {
// SAFETY: These are the same ranges, just expressed in a different
// type, so they're still non-overlapping.
unsafe { swap_nonoverlapping_bytes(x.cast(), y.cast(), bytes) };
}
}
)
}
core::ptr::swap_nonoverlapping::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::swap_nonoverlapping_bytes unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, bytes: NonZero<usize>) {
// Same as `swap_nonoverlapping::<[u8; N]>`.
unsafe fn swap_nonoverlapping_chunks<const N: usize>(
x: *mut MaybeUninit<[u8; N]>,
y: *mut MaybeUninit<[u8; N]>,
chunks: NonZero<usize>,
) {
let chunks = chunks.get();
for i in 0..chunks {
// SAFETY: i is in [0, chunks) so the adds and dereferences are in-bounds.
unsafe { swap_chunk(&mut *x.add(i), &mut *y.add(i)) };
}
}
// Same as `swap_nonoverlapping_bytes`, but accepts at most 1+2+4=7 bytes
#[inline]
unsafe fn swap_nonoverlapping_short(x: *mut u8, y: *mut u8, bytes: NonZero<usize>) {
// Tail handling for auto-vectorized code sometimes has element-at-a-time behaviour,
// see <https://github.com/rust-lang/rust/issues/134946>.
// By swapping as different sizes, rather than as a loop over bytes,
// we make sure not to end up with, say, seven byte-at-a-time copies.
let bytes = bytes.get();
let mut i = 0;
macro_rules! swap_prefix {
($($n:literal)+) => {$(
if (bytes & $n) != 0 {
// SAFETY: `i` can only have the same bits set as those in bytes,
// so these `add`s are in-bounds of `bytes`. But the bit for
// `$n` hasn't been set yet, so the `$n` bytes that `swap_chunk`
// will read and write are within the usable range.
unsafe { swap_chunk::<$n>(&mut*x.add(i).cast(), &mut*y.add(i).cast()) };
i |= $n;
}
)+};
}
swap_prefix!(4 2 1);
debug_assert_eq!(i, bytes);
}
const CHUNK_SIZE: usize = size_of::<*const ()>();
let bytes = bytes.get();
let chunks = bytes / CHUNK_SIZE;
let tail = bytes % CHUNK_SIZE;
if let Some(chunks) = NonZero::new(chunks) {
// SAFETY: this is bytes/CHUNK_SIZE*CHUNK_SIZE bytes, which is <= bytes,
// so it's within the range of our non-overlapping bytes.
unsafe { swap_nonoverlapping_chunks::<CHUNK_SIZE>(x.cast(), y.cast(), chunks) };
}
if let Some(tail) = NonZero::new(tail) {
const { assert!(CHUNK_SIZE <= 8) };
let delta = chunks * CHUNK_SIZE;
// SAFETY: the tail length is below `CHUNK_SIZE` because of the remainder,
// and `CHUNK_SIZE` is at most 8 by the const assert, so tail <= 7
unsafe { swap_nonoverlapping_short(x.add(delta), y.add(delta), tail) };
}
}
core::ptr::swap_nonoverlapping_bytes::swap_nonoverlapping_chunks unsafe fn swap_nonoverlapping_chunks<const N: usize>(
x: *mut MaybeUninit<[u8; N]>,
y: *mut MaybeUninit<[u8; N]>,
chunks: NonZero<usize>,
) {
let chunks = chunks.get();
for i in 0..chunks {
// SAFETY: i is in [0, chunks) so the adds and dereferences are in-bounds.
unsafe { swap_chunk(&mut *x.add(i), &mut *y.add(i)) };
}
}
core::ptr::swap_nonoverlapping_bytes::swap_nonoverlapping_short unsafe fn swap_nonoverlapping_short(x: *mut u8, y: *mut u8, bytes: NonZero<usize>) {
// Tail handling for auto-vectorized code sometimes has element-at-a-time behaviour,
// see <https://github.com/rust-lang/rust/issues/134946>.
// By swapping as different sizes, rather than as a loop over bytes,
// we make sure not to end up with, say, seven byte-at-a-time copies.
let bytes = bytes.get();
let mut i = 0;
macro_rules! swap_prefix {
($($n:literal)+) => {$(
if (bytes & $n) != 0 {
// SAFETY: `i` can only have the same bits set as those in bytes,
// so these `add`s are in-bounds of `bytes`. But the bit for
// `$n` hasn't been set yet, so the `$n` bytes that `swap_chunk`
// will read and write are within the usable range.
unsafe { swap_chunk::<$n>(&mut*x.add(i).cast(), &mut*y.add(i).cast()) };
i |= $n;
}
)+};
}
swap_prefix!(4 2 1);
debug_assert_eq!(i, bytes);
}
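The 4/2/1 decomposition above can be checked in isolation; a hypothetical standalone sketch (uses `Vec`, so not `core`-only): each set bit of a tail length below 8 yields exactly one chunk, at disjoint offsets:

let bytes = 7usize; // 0b111: one 4-byte, one 2-byte, one 1-byte swap
let mut i = 0;
let mut plan = Vec::new();
for n in [4usize, 2, 1] {
    if bytes & n != 0 {
        plan.push((i, n)); // (offset, chunk size)
        i |= n;
    }
}
assert_eq!(i, bytes); // mirrors the debug_assert_eq! above
assert_eq!(plan, [(0, 4), (4, 2), (6, 1)]);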
core::ptr::without_provenance pub const fn without_provenance<T>(addr: usize) -> *const T {
without_provenance_mut(addr)
}
core::ptr::without_provenance_mut pub const fn without_provenance_mut<T>(addr: usize) -> *mut T {
// An int-to-pointer transmute currently has exactly the intended semantics: it creates a
// pointer without provenance. Note that this is *not* a stable guarantee about transmute
// semantics, it relies on sysroot crates having special status.
// SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that
// pointer).
unsafe { mem::transmute(addr) }
}
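A usage sketch for `without_provenance`: a pointer built this way carries an address but no provenance, so it may be inspected and compared but never dereferenced. The sentinel address `0x1000` is an arbitrary example value.

fn main() {
    // Hypothetical sentinel address; the pointer must never be dereferenced.
    let sentinel: *const u8 = core::ptr::without_provenance(0x1000);
    assert_eq!(sentinel.addr(), 0x1000);
    assert!(!sentinel.is_null());
}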
core::ptr::write pub const unsafe fn write<T>(dst: *mut T, src: T) {
// Semantically, it would be fine for this to be implemented as a
// `copy_nonoverlapping` and appropriate drop suppression of `src`.
// However, implementing via that currently produces more MIR than is ideal.
// Using an intrinsic keeps it down to just the simple `*dst = move src` in
// MIR (11 statements shorter, at the time of writing), and also allows
// `src` to stay an SSA value in codegen_ssa, rather than a memory one.
// SAFETY: the caller must guarantee that `dst` is valid for writes.
// `dst` cannot overlap `src` because the caller has mutable access
// to `dst` while `src` is owned by this function.
unsafe {
#[cfg(debug_assertions)] // Too expensive to always enable (for now?)
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write requires that the pointer argument is aligned and non-null",
(
addr: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
intrinsics::write_via_move(dst, src)
}
}
core::ptr::write_bytes pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
// SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write_bytes requires that the destination pointer is aligned and non-null",
(
addr: *const () = dst as *const (),
align: usize = align_of::<T>(),
zero_size: bool = T::IS_ZST || count == 0,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, zero_size)
);
crate::intrinsics::write_bytes(dst, val, count)
}
}
core::ptr::write_unaligned pub const unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
// SAFETY: the caller must guarantee that `dst` is valid for writes.
// `dst` cannot overlap `src` because the caller has mutable access
// to `dst` while `src` is owned by this function.
unsafe {
copy_nonoverlapping((&raw const src) as *const u8, dst as *mut u8, size_of::<T>());
// We are calling the intrinsic directly to avoid function calls in the generated code.
intrinsics::forget(src);
}
}
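A usage sketch for `write_unaligned`, with a hypothetical packed struct whose `u32` field is misaligned: `&raw mut` takes the field's address without materializing an unaligned reference, and `write_unaligned` then stores the value byte-wise via the `copy_nonoverlapping` above.

#[repr(C, packed)]
struct Packed {
    flag: u8,
    value: u32, // at offset 1, so never 4-byte aligned
}

fn main() {
    let mut p = Packed { flag: 0, value: 0 };
    // `&raw mut` takes the field's address without creating a reference.
    let field = &raw mut p.value;
    // SAFETY: `field` points into the live, writable local `p`.
    unsafe { core::ptr::write_unaligned(field, 0xDEAD_BEEF) };
    assert_eq!({ p.value }, 0xDEAD_BEEF); // braces copy the unaligned field out
}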
core::ptr::write_volatile pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
// SAFETY: the caller must uphold the safety contract for `volatile_store`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write_volatile requires that the pointer argument is aligned",
(
addr: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
) => ub_checks::maybe_is_aligned(addr, align)
);
intrinsics::volatile_store(dst, src);
}
}
core::result::Result::<&T, E>::cloned pub fn cloned(self) -> Result<T, E>
where
T: Clone,
{
self.map(|t| t.clone())
}
core::result::Result::<&T, E>::copied pub const fn copied(self) -> Result<T, E>
where
T: Copy,
{
// FIXME(const-hack): this implementation, which sidesteps using `Result::map` since it's not const
// ready yet, should be reverted when possible to avoid code repetition
match self {
Ok(&v) => Ok(v),
Err(e) => Err(e),
}
}
core::result::Result::<&mut T, E>::cloned pub fn cloned(self) -> Result<T, E>
where
T: Clone,
{
self.map(|t| t.clone())
}
core::result::Result::<&mut T, E>::copied pub const fn copied(self) -> Result<T, E>
where
T: Copy,
{
// FIXME(const-hack): this implementation, which sidesteps using `Result::map` since it's not const
// ready yet, should be reverted when possible to avoid code repetition
match self {
Ok(&mut v) => Ok(v),
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::and pub const fn and<U>(self, res: Result<U, E>) -> Result<U, E>
where
T: [const] Destruct,
E: [const] Destruct,
U: [const] Destruct,
{
match self {
Ok(_) => res,
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::and_then pub const fn and_then<U, F>(self, op: F) -> Result<U, E>
where
F: [const] FnOnce(T) -> Result<U, E> + [const] Destruct,
{
match self {
Ok(t) => op(t),
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::as_deref pub const fn as_deref(&self) -> Result<&T::Target, &E>
where
T: [const] Deref,
{
self.as_ref().map(Deref::deref)
}
core::result::Result::<T, E>::as_deref_mut pub const fn as_deref_mut(&mut self) -> Result<&mut T::Target, &mut E>
where
T: [const] DerefMut,
{
self.as_mut().map(DerefMut::deref_mut)
}
core::result::Result::<T, E>::as_mut pub const fn as_mut(&mut self) -> Result<&mut T, &mut E> {
match *self {
Ok(ref mut x) => Ok(x),
Err(ref mut x) => Err(x),
}
}
core::result::Result::<T, E>::as_ref pub const fn as_ref(&self) -> Result<&T, &E> {
match *self {
Ok(ref x) => Ok(x),
Err(ref x) => Err(x),
}
}
core::result::Result::<T, E>::err pub const fn err(self) -> Option<E>
where
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(_) => None,
Err(x) => Some(x),
}
}
core::result::Result::<T, E>::expect pub fn expect(self, msg: &str) -> T
where
E: fmt::Debug,
{
match self {
Ok(t) => t,
Err(e) => unwrap_failed(msg, &e),
}
}
core::result::Result::<T, E>::inspect pub const fn inspect<F>(self, f: F) -> Self
where
F: [const] FnOnce(&T) + [const] Destruct,
{
if let Ok(ref t) = self {
f(t);
}
self
}
core::result::Result::<T, E>::inspect_err pub const fn inspect_err<F>(self, f: F) -> Self
where
F: [const] FnOnce(&E) + [const] Destruct,
{
if let Err(ref e) = self {
f(e);
}
self
}
core::result::Result::<T, E>::is_err pub const fn is_err(&self) -> bool {
!self.is_ok()
}
core::result::Result::<T, E>::is_err_and pub const fn is_err_and<F>(self, f: F) -> bool
where
F: [const] FnOnce(E) -> bool + [const] Destruct,
E: [const] Destruct,
T: [const] Destruct,
{
match self {
Ok(_) => false,
Err(e) => f(e),
}
}
core::result::Result::<T, E>::is_ok pub const fn is_ok(&self) -> bool {
matches!(*self, Ok(_))
}
core::result::Result::<T, E>::is_ok_and pub const fn is_ok_and<F>(self, f: F) -> bool
where
F: [const] FnOnce(T) -> bool + [const] Destruct,
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Err(_) => false,
Ok(x) => f(x),
}
}
core::result::Result::<T, E>::map pub const fn map<U, F>(self, op: F) -> Result<U, E>
where
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Ok(t) => Ok(op(t)),
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::map_err pub const fn map_err<F, O>(self, op: O) -> Result<T, F>
where
O: [const] FnOnce(E) -> F + [const] Destruct,
{
match self {
Ok(t) => Ok(t),
Err(e) => Err(op(e)),
}
}
core::result::Result::<T, E>::map_or pub const fn map_or<U, F>(self, default: U, f: F) -> U
where
F: [const] FnOnce(T) -> U + [const] Destruct,
T: [const] Destruct,
E: [const] Destruct,
U: [const] Destruct,
{
match self {
Ok(t) => f(t),
Err(_) => default,
}
}
core::result::Result::<T, E>::map_or_default pub const fn map_or_default<U, F>(self, f: F) -> U
where
F: [const] FnOnce(T) -> U + [const] Destruct,
U: [const] Default,
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(t) => f(t),
Err(_) => U::default(),
}
}
core::result::Result::<T, E>::map_or_else pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
where
D: [const] FnOnce(E) -> U + [const] Destruct,
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Ok(t) => f(t),
Err(e) => default(e),
}
}
core::result::Result::<T, E>::ok pub const fn ok(self) -> Option<T>
where
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(x) => Some(x),
Err(_) => None,
}
}
core::result::Result::<T, E>::or pub const fn or<F>(self, res: Result<T, F>) -> Result<T, F>
where
T: [const] Destruct,
E: [const] Destruct,
F: [const] Destruct,
{
match self {
Ok(v) => Ok(v),
Err(_) => res,
}
}
core::result::Result::<T, E>::or_else pub const fn or_else<F, O>(self, op: O) -> Result<T, F>
where
O: [const] FnOnce(E) -> Result<T, F> + [const] Destruct,
{
match self {
Ok(t) => Ok(t),
Err(e) => op(e),
}
}
core::result::Result::<T, E>::unwrap_or pub const fn unwrap_or(self, default: T) -> T
where
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(t) => t,
Err(_) => default,
}
}
core::result::Result::<T, E>::unwrap_or_default pub const fn unwrap_or_default(self) -> T
where
T: [const] Default + [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(x) => x,
Err(_) => Default::default(),
}
}
core::result::Result::<T, E>::unwrap_or_else pub const fn unwrap_or_else<F>(self, op: F) -> T
where
F: [const] FnOnce(E) -> T + [const] Destruct,
{
match self {
Ok(t) => t,
Err(e) => op(e),
}
}
core::result::Result::<core::option::Option<T>, E>::transpose pub const fn transpose(self) -> Option<Result<T, E>> {
match self {
Ok(Some(x)) => Some(Ok(x)),
Ok(None) => None,
Err(e) => Some(Err(e)),
}
}
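A short usage sketch of `transpose`, swapping the nesting of `Result` and `Option` exactly as the match arms above describe:

fn main() {
    let x: Result<Option<i32>, &str> = Ok(Some(5));
    assert_eq!(x.transpose(), Some(Ok(5)));
    let y: Result<Option<i32>, &str> = Ok(None);
    assert_eq!(y.transpose(), None);
}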
core::result::unwrap_failed fn unwrap_failed(msg: &str, error: &dyn fmt::Debug) -> ! {
panic!("{msg}: {error:?}");
}
core::slice::<impl [T]>::align_to pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
// Note that most of this function will be constant-evaluated.
if U::IS_ZST || T::IS_ZST {
// Handle ZSTs specially, which is to say: don't handle them at all.
return (self, &[], &[]);
}
// First, find the point at which to split between the first and second slice.
// This is easy with `ptr.align_offset`.
let ptr = self.as_ptr();
// SAFETY: See the `align_to_mut` method for the detailed safety comment.
let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
if offset > self.len() {
(self, &[], &[])
} else {
let (left, rest) = self.split_at(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
// Inform Miri that we want to consider the "middle" pointer to be suitably aligned.
#[cfg(miri)]
crate::intrinsics::miri_promise_symbolic_alignment(
rest.as_ptr().cast(),
align_of::<U>(),
);
// SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
// since the caller guarantees that we can transmute `T` to `U` safely.
unsafe {
(
left,
from_raw_parts(rest.as_ptr() as *const U, us_len),
from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
)
}
}
}
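A usage sketch for `align_to`: reinterpreting the aligned middle of a byte slice as `u16`s. Only the combined length is stable; the prefix/suffix split depends on the slice's runtime address.

fn main() {
    let bytes = [1u8, 2, 3, 4, 5, 6, 7];
    // SAFETY: every bit pattern is a valid `u16`, so viewing pairs of
    // bytes as `u16` in the middle part is sound.
    let (prefix, middle, suffix) = unsafe { bytes.align_to::<u16>() };
    // Only the total length is guaranteed; the split point depends on
    // the runtime address of `bytes`.
    assert_eq!(prefix.len() + 2 * middle.len() + suffix.len(), bytes.len());
}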
core::slice::<impl [T]>::align_to_mut pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
// Note that most of this function will be constant-evaluated.
if U::IS_ZST || T::IS_ZST {
// Handle ZSTs specially, which is to say: don't handle them at all.
return (self, &mut [], &mut []);
}
// First, find the point at which to split between the first and second slice.
// This is easy with `ptr.align_offset`.
let ptr = self.as_ptr();
// SAFETY: Here we are ensuring we will use aligned pointers for U for the
// rest of the method. This is done by passing a pointer to &[T] with an
// alignment targeted for U.
// `crate::ptr::align_offset` is called with a correctly aligned and
// valid pointer `ptr` (it comes from a reference to `self`) and with
// a size that is a power of two (since it comes from the alignment for U),
// satisfying its safety constraints.
let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
if offset > self.len() {
(self, &mut [], &mut [])
} else {
let (left, rest) = self.split_at_mut(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
let rest_len = rest.len();
let mut_ptr = rest.as_mut_ptr();
// Inform Miri that we want to consider the "middle" pointer to be suitably aligned.
#[cfg(miri)]
crate::intrinsics::miri_promise_symbolic_alignment(
mut_ptr.cast() as *const (),
align_of::<U>(),
);
// We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
// SAFETY: see comments for `align_to`.
unsafe {
(
left,
from_raw_parts_mut(mut_ptr as *mut U, us_len),
from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
)
}
}
}
core::slice::<impl [T]>::as_array pub const fn as_array<const N: usize>(&self) -> Option<&[T; N]> {
if self.len() == N {
let ptr = self.as_ptr().cast_array();
// SAFETY: The underlying array of a slice can be reinterpreted as an actual array `[T; N]` if `N` is not greater than the slice's length.
let me = unsafe { &*ptr };
Some(me)
} else {
None
}
}
core::slice::<impl [T]>::as_chunks pub const fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
assert!(N != 0, "chunk size must be non-zero");
let len_rounded_down = self.len() / N * N;
// SAFETY: The rounded-down value is always the same or smaller than the
// original length, and thus must be in-bounds of the slice.
let (multiple_of_n, remainder) = unsafe { self.split_at_unchecked(len_rounded_down) };
// SAFETY: We already panicked for zero, and ensured by construction
// that the length of the subslice is a multiple of N.
let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
(array_slice, remainder)
}
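A usage sketch for `as_chunks`, splitting a slice into complete `N`-element arrays plus the remainder described above:

fn main() {
    let slice = ['l', 'o', 'r', 'e', 'm'];
    let (chunks, remainder) = slice.as_chunks::<2>();
    assert_eq!(chunks, &[['l', 'o'], ['r', 'e']]);
    assert_eq!(remainder, &['m']);
}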
core::slice::<impl [T]>::as_chunks_unchecked pub const unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
assert_unsafe_precondition!(
check_language_ub,
"slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
(n: usize = N, len: usize = self.len()) => n != 0 && len.is_multiple_of(n),
);
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
let new_len = unsafe { exact_div(self.len(), N) };
// SAFETY: We cast a slice of `new_len * N` elements into
// a slice of `new_len` many `N` elements chunks.
unsafe { from_raw_parts(self.as_ptr().cast(), new_len) }
}
core::slice::<impl [T]>::as_mut_array pub const fn as_mut_array<const N: usize>(&mut self) -> Option<&mut [T; N]> {
if self.len() == N {
let ptr = self.as_mut_ptr().cast_array();
// SAFETY: The underlying array of a slice can be reinterpreted as an actual array `[T; N]` if `N` is not greater than the slice's length.
let me = unsafe { &mut *ptr };
Some(me)
} else {
None
}
}
core::slice::<impl [T]>::as_mut_ptr pub const fn as_mut_ptr(&mut self) -> *mut T {
self as *mut [T] as *mut T
}
core::slice::<impl [T]>::as_ptr pub const fn as_ptr(&self) -> *const T {
self as *const [T] as *const T
}
core::slice::<impl [T]>::binary_search_by pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
where
F: FnMut(&'a T) -> Ordering,
{
let mut size = self.len();
if size == 0 {
return Err(0);
}
let mut base = 0usize;
// This loop intentionally doesn't have an early exit if the comparison
// returns Equal. We want the number of loop iterations to depend *only*
// on the size of the input slice so that the CPU can reliably predict
// the loop count.
while size > 1 {
let half = size / 2;
let mid = base + half;
// SAFETY: the call is made safe by the following invariants:
// - `mid >= 0`: by definition
// - `mid < size`: `mid = size / 2 + size / 4 + size / 8 ...`
let cmp = f(unsafe { self.get_unchecked(mid) });
// Binary search interacts poorly with branch prediction, so force
// the compiler to use conditional moves if supported by the target
// architecture.
base = hint::select_unpredictable(cmp == Greater, base, mid);
// This is imprecise in the case where `size` is odd and the
// comparison returns Greater: the mid element still gets included
// by `size` even though it's known to be larger than the element
// being searched for.
//
// This is fine though: we gain more performance by keeping the
// loop iteration count invariant (and thus predictable) than we
// lose from considering one additional element.
size -= half;
}
// SAFETY: base is always in [0, size) because base <= mid.
let cmp = f(unsafe { self.get_unchecked(base) });
if cmp == Equal {
// SAFETY: same as the `get_unchecked` above.
unsafe { hint::assert_unchecked(base < self.len()) };
Ok(base)
} else {
let result = base + (cmp == Less) as usize;
// SAFETY: same as the `get_unchecked` above.
// Note that this is `<=`, unlike the assume in the `Ok` path.
unsafe { hint::assert_unchecked(result <= self.len()) };
Err(result)
}
}
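A usage sketch for the branchless binary search above: `Ok` carries the index of a matching element, `Err` the insertion point that keeps the slice sorted.

fn main() {
    let s = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55];
    assert_eq!(s.binary_search_by(|probe| probe.cmp(&13)), Ok(7));
    assert_eq!(s.binary_search_by(|probe| probe.cmp(&4)), Err(5));
}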
core::slice::<impl [T]>::binary_search_by_key pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
where
F: FnMut(&'a T) -> B,
B: Ord,
{
self.binary_search_by(|k| f(k).cmp(b))
}
core::slice::<impl [T]>::chunks pub const fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
assert!(chunk_size != 0, "chunk size must be non-zero");
Chunks::new(self, chunk_size)
}
core::slice::<impl [T]>::chunks_exact pub const fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
assert!(chunk_size != 0, "chunk size must be non-zero");
ChunksExact::new(self, chunk_size)
}
core::slice::<impl [T]>::chunks_exact_mut pub const fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
assert!(chunk_size != 0, "chunk size must be non-zero");
ChunksExactMut::new(self, chunk_size)
}
core::slice::<impl [T]>::chunks_mut pub const fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
assert!(chunk_size != 0, "chunk size must be non-zero");
ChunksMut::new(self, chunk_size)
}
core::slice::<impl [T]>::clone_from_slice pub const fn clone_from_slice(&mut self, src: &[T])
where
T: [const] Clone + [const] Destruct,
{
self.spec_clone_from(src);
}
core::slice::<impl [T]>::copy_from_slice pub const fn copy_from_slice(&mut self, src: &[T])
where
T: Copy,
{
// SAFETY: `T` implements `Copy`.
unsafe { copy_from_slice_impl(self, src) }
}
core::slice::<impl [T]>::ends_with pub fn ends_with(&self, needle: &[T]) -> bool
where
T: PartialEq,
{
let (m, n) = (self.len(), needle.len());
m >= n && needle == &self[m - n..]
}
core::slice::<impl [T]>::fill pub fn fill(&mut self, value: T)
where
T: Clone,
{
specialize::SpecFill::spec_fill(self, value);
}
core::slice::<impl [T]>::first pub const fn first(&self) -> Option<&T> {
if let [first, ..] = self { Some(first) } else { None }
}
core::slice::<impl [T]>::first_chunk pub const fn first_chunk<const N: usize>(&self) -> Option<&[T; N]> {
if self.len() < N {
None
} else {
// SAFETY: We explicitly check for the correct number of elements,
// and do not let the reference outlive the slice.
Some(unsafe { &*(self.as_ptr().cast_array()) })
}
}
core::slice::<impl [T]>::first_chunk_mut pub const fn first_chunk_mut<const N: usize>(&mut self) -> Option<&mut [T; N]> {
if self.len() < N {
None
} else {
// SAFETY: We explicitly check for the correct number of elements,
// do not let the reference outlive the slice,
// and require exclusive access to the entire slice to mutate the chunk.
Some(unsafe { &mut *(self.as_mut_ptr().cast_array()) })
}
}
core::slice::<impl [T]>::first_mut pub const fn first_mut(&mut self) -> Option<&mut T> {
if let [first, ..] = self { Some(first) } else { None }
}
core::slice::<impl [T]>::get pub const fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: [const] SliceIndex<Self>,
{
index.get(self)
}
core::slice::<impl [T]>::get_mut pub const fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: [const] SliceIndex<Self>,
{
index.get_mut(self)
}
core::slice::<impl [T]>::get_unchecked pub const unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
I: [const] SliceIndex<Self>,
{
// SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
// the slice is dereferenceable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &*index.get_unchecked(self) }
}
core::slice::<impl [T]>::get_unchecked_mut pub const unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
I: [const] SliceIndex<Self>,
{
// SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
// the slice is dereferenceable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &mut *index.get_unchecked_mut(self) }
}
core::slice::<impl [T]>::is_empty pub const fn is_empty(&self) -> bool {
self.len() == 0
}
core::slice::<impl [T]>::iter pub const fn iter(&self) -> Iter<'_, T> {
Iter::new(self)
}
core::slice::<impl [T]>::iter_mut pub const fn iter_mut(&mut self) -> IterMut<'_, T> {
IterMut::new(self)
}
core::slice::<impl [T]>::last pub const fn last(&self) -> Option<&T> {
if let [.., last] = self { Some(last) } else { None }
}
core::slice::<impl [T]>::last_mut pub const fn last_mut(&mut self) -> Option<&mut T> {
if let [.., last] = self { Some(last) } else { None }
}
core::slice::<impl [T]>::rotate_left pub const fn rotate_left(&mut self, mid: usize) {
assert!(mid <= self.len());
let k = self.len() - mid;
let p = self.as_mut_ptr();
// SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
// valid for reading and writing, as required by `ptr_rotate`.
unsafe {
rotate::ptr_rotate(mid, p.add(mid), k);
}
}
core::slice::<impl [T]>::rotate_right pub const fn rotate_right(&mut self, k: usize) {
assert!(k <= self.len());
let mid = self.len() - k;
let p = self.as_mut_ptr();
// SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
// valid for reading and writing, as required by `ptr_rotate`.
unsafe {
rotate::ptr_rotate(mid, p.add(mid), k);
}
}
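A usage sketch for the two rotations, which both bottom out in `rotate::ptr_rotate`: `rotate_left(mid)` moves `self[..mid]` to the end, and `rotate_right(k)` undoes it.

fn main() {
    let mut a = ['a', 'b', 'c', 'd', 'e', 'f'];
    a.rotate_left(2);
    assert_eq!(a, ['c', 'd', 'e', 'f', 'a', 'b']);
    a.rotate_right(2); // undoes the rotation
    assert_eq!(a, ['a', 'b', 'c', 'd', 'e', 'f']);
}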
core::slice::<impl [T]>::split_at pub const fn split_at(&self, mid: usize) -> (&[T], &[T]) {
match self.split_at_checked(mid) {
Some(pair) => pair,
None => panic!("mid > len"),
}
}
core::slice::<impl [T]>::split_at_checked pub const fn split_at_checked(&self, mid: usize) -> Option<(&[T], &[T])> {
if mid <= self.len() {
// SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
// fulfills the requirements of `split_at_unchecked`.
Some(unsafe { self.split_at_unchecked(mid) })
} else {
None
}
}
core::slice::<impl [T]>::split_at_mut pub const fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
match self.split_at_mut_checked(mid) {
Some(pair) => pair,
None => panic!("mid > len"),
}
}
core::slice::<impl [T]>::split_at_mut_checked pub const fn split_at_mut_checked(&mut self, mid: usize) -> Option<(&mut [T], &mut [T])> {
if mid <= self.len() {
// SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
// fulfills the requirements of `split_at_unchecked`.
Some(unsafe { self.split_at_mut_unchecked(mid) })
} else {
None
}
}
core::slice::<impl [T]>::split_at_mut_unchecked pub const unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
let len = self.len();
let ptr = self.as_mut_ptr();
assert_unsafe_precondition!(
check_library_ub,
"slice::split_at_mut_unchecked requires the index to be within the slice",
(mid: usize = mid, len: usize = len) => mid <= len,
);
// SAFETY: Caller has to check that `0 <= mid <= self.len()`.
//
// `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
// is fine.
unsafe {
(
from_raw_parts_mut(ptr, mid),
from_raw_parts_mut(ptr.add(mid), unchecked_sub(len, mid)),
)
}
}
core::slice::<impl [T]>::split_at_unchecked pub const unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
// FIXME(const-hack): the const function `from_raw_parts` is used to make this
// function const; previously the implementation used
// `(self.get_unchecked(..mid), self.get_unchecked(mid..))`
let len = self.len();
let ptr = self.as_ptr();
assert_unsafe_precondition!(
check_library_ub,
"slice::split_at_unchecked requires the index to be within the slice",
(mid: usize = mid, len: usize = len) => mid <= len,
);
// SAFETY: Caller has to check that `0 <= mid <= self.len()`
unsafe { (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), unchecked_sub(len, mid))) }
}
core::slice::<impl [T]>::split_first pub const fn split_first(&self) -> Option<(&T, &[T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
core::slice::<impl [T]>::split_first_mut pub const fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
core::slice::<impl [T]>::split_last pub const fn split_last(&self) -> Option<(&T, &[T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
core::slice::<impl [T]>::split_last_mut pub const fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
core::slice::<impl [T]>::starts_with pub fn starts_with(&self, needle: &[T]) -> bool
where
T: PartialEq,
{
let n = needle.len();
self.len() >= n && needle == &self[..n]
}
core::slice::<impl [T]>::swap pub const fn swap(&mut self, a: usize, b: usize) {
// FIXME: use swap_unchecked here (https://github.com/rust-lang/rust/pull/88540#issuecomment-944344343)
// Can't take two mutable loans from one vector, so instead use raw pointers.
let pa = &raw mut self[a];
let pb = &raw mut self[b];
// SAFETY: `pa` and `pb` have been created from safe mutable references and refer
// to elements in the slice and therefore are guaranteed to be valid and aligned.
// Note that accessing the elements behind `a` and `b` is checked and will
// panic when out of bounds.
unsafe {
ptr::swap(pa, pb);
}
}
core::slice::<impl [T]>::windows pub const fn windows(&self, size: usize) -> Windows<'_, T> {
let size = NonZero::new(size).expect("window size must be non-zero");
Windows::new(self, size)
}
core::slice::ascii::<impl [u8]>::eq_ignore_ascii_case pub const fn eq_ignore_ascii_case(&self, other: &[u8]) -> bool {
if self.len() != other.len() {
return false;
}
#[cfg(all(target_arch = "x86_64", target_feature = "sse2"))]
{
const CHUNK_SIZE: usize = 16;
// The following function has two invariants:
// 1. The slice lengths must be equal, which we checked above.
// 2. The slice lengths must be greater than or equal to N, which this
// if-statement is checking.
if self.len() >= CHUNK_SIZE {
return self.eq_ignore_ascii_case_chunks::<CHUNK_SIZE>(other);
}
}
self.eq_ignore_ascii_case_simple(other)
}
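A usage sketch for `eq_ignore_ascii_case` on byte slices; on x86_64 with SSE2, inputs of at least 16 bytes are eligible for the chunked path above.

fn main() {
    assert!(b"Ferris".eq_ignore_ascii_case(b"FERRIS"));
    assert!(!b"Ferris".eq_ignore_ascii_case(b"FERRI")); // lengths differ
    // 16 bytes or more: eligible for the SSE2 chunked path on x86_64.
    assert!(b"abcdefghijklmnop".eq_ignore_ascii_case(b"ABCDEFGHIJKLMNOP"));
}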
core::slice::ascii::<impl [u8]>::eq_ignore_ascii_case_simple const fn eq_ignore_ascii_case_simple(&self, other: &[u8]) -> bool {
// FIXME(const-hack): This implementation can be reverted when
// `core::iter::zip` is allowed in const. The original implementation:
// self.len() == other.len() && iter::zip(self, other).all(|(a, b)| a.eq_ignore_ascii_case(b))
let mut a = self;
let mut b = other;
while let ([first_a, rest_a @ ..], [first_b, rest_b @ ..]) = (a, b) {
if first_a.eq_ignore_ascii_case(&first_b) {
a = rest_a;
b = rest_b;
} else {
return false;
}
}
true
}
core::slice::ascii::<impl [u8]>::escape_ascii pub fn escape_ascii(&self) -> EscapeAscii<'_> {
EscapeAscii { inner: self.iter().flat_map(EscapeByte) }
}
core::slice::ascii::<impl [u8]>::is_ascii pub const fn is_ascii(&self) -> bool {
is_ascii(self)
}
core::slice::ascii::is_ascii::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::ascii::is_ascii_simple pub const fn is_ascii_simple(mut bytes: &[u8]) -> bool {
while let [rest @ .., last] = bytes {
if !last.is_ascii() {
break;
}
bytes = rest;
}
bytes.is_empty()
}
core::slice::cmp::<impl core::cmp::PartialEq<[U]> for [T]>::eq fn eq(&self, other: &[U]) -> bool {
let len = self.len();
if len == other.len() {
// SAFETY: Just checked that they're the same length, and the pointers
// come from references-to-slices so they're guaranteed readable.
unsafe { SlicePartialEq::equal_same_length(self.as_ptr(), other.as_ptr(), len) }
} else {
false
}
}
core::slice::copy_from_slice_impl const unsafe fn copy_from_slice_impl<T: Clone>(dest: &mut [T], src: &[T]) {
// The panic code path was put into a cold function to not bloat the
// call site.
#[cfg_attr(not(panic = "immediate-abort"), inline(never), cold)]
#[cfg_attr(panic = "immediate-abort", inline)]
#[track_caller]
const fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
const_panic!(
"copy_from_slice: source slice length does not match destination slice length",
"copy_from_slice: source slice length ({src_len}) does not match destination slice length ({dst_len})",
src_len: usize,
dst_len: usize,
)
}
if dest.len() != src.len() {
len_mismatch_fail(dest.len(), src.len());
}
// SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
// checked to have the same length. The slices cannot overlap because
// mutable references are exclusive.
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), dest.as_mut_ptr(), dest.len());
}
}
core::slice::copy_from_slice_impl::len_mismatch_fail const fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
const_panic!(
"copy_from_slice: source slice length does not match destination slice length",
"copy_from_slice: source slice length ({src_len}) does not match destination slice length ({dst_len})",
src_len: usize,
dst_len: usize,
)
}
core::slice::copy_from_slice_impl::len_mismatch_fail::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::slice::copy_from_slice_impl::len_mismatch_fail::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::index::<impl core::ops::index::Index<I> for [T]>::index fn index(&self, index: I) -> &I::Output {
index.index(self)
}
core::slice::index::<impl core::ops::index::IndexMut<I> for [T]>::index_mut fn index_mut(&mut self, index: I) -> &mut I::Output {
index.index_mut(self)
}
core::slice::index::get_offset_len_mut_noubcheck const unsafe fn get_offset_len_mut_noubcheck<T>(
ptr: *mut [T],
offset: usize,
len: usize,
) -> *mut [T] {
let ptr = ptr as *mut T;
// SAFETY: The caller already checked these preconditions
let ptr = unsafe { crate::intrinsics::offset(ptr, offset) };
crate::intrinsics::aggregate_raw_ptr(ptr, len)
}
core::slice::index::get_offset_len_noubcheck const unsafe fn get_offset_len_noubcheck<T>(
ptr: *const [T],
offset: usize,
len: usize,
) -> *const [T] {
let ptr = ptr as *const T;
// SAFETY: The caller already checked these preconditions
let ptr = unsafe { crate::intrinsics::offset(ptr, offset) };
crate::intrinsics::aggregate_raw_ptr(ptr, len)
}
core::slice::index::into_range_unchecked pub(crate) const fn into_range_unchecked(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
use ops::Bound;
let start = match start {
Bound::Included(i) => i,
Bound::Excluded(i) => i + 1,
Bound::Unbounded => 0,
};
let end = match end {
Bound::Included(i) => i + 1,
Bound::Excluded(i) => i,
Bound::Unbounded => len,
};
start..end
}
core::slice::index::into_slice_range pub(crate) const fn into_slice_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
let end = match end {
ops::Bound::Included(end) if end >= len => slice_index_fail(0, end, len),
// Cannot overflow because `end < len` implies `end < usize::MAX`.
ops::Bound::Included(end) => end + 1,
ops::Bound::Excluded(end) if end > len => slice_index_fail(0, end, len),
ops::Bound::Excluded(end) => end,
ops::Bound::Unbounded => len,
};
let start = match start {
ops::Bound::Excluded(start) if start >= end => slice_index_fail(start, end, len),
// Cannot overflow because `start < end` implies `start < usize::MAX`.
ops::Bound::Excluded(start) => start + 1,
ops::Bound::Included(start) if start > end => slice_index_fail(start, end, len),
ops::Bound::Included(start) => start,
ops::Bound::Unbounded => 0,
};
start..end
}
core::slice::index::slice_index_fail const fn slice_index_fail(start: usize, end: usize, len: usize) -> ! {
if start > len {
const_panic!(
"slice start index is out of range for slice",
"range start index {start} out of range for slice of length {len}",
start: usize,
len: usize,
)
}
if end > len {
const_panic!(
"slice end index is out of range for slice",
"range end index {end} out of range for slice of length {len}",
end: usize,
len: usize,
)
}
if start > end {
const_panic!(
"slice index start is larger than end",
"slice index starts at {start} but ends at {end}",
start: usize,
end: usize,
)
}
// Only reachable if the range was a `RangeInclusive` or a
// `RangeToInclusive`, with `end == len`.
const_panic!(
"slice end index is out of range for slice",
"range end index {end} out of range for slice of length {len}",
end: usize,
len: usize,
)
}
core::slice::index::slice_index_fail::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::slice::index::slice_index_fail::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::slice::index::slice_index_fail::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::slice::index::slice_index_fail::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
if const #[track_caller] {
$crate::panic!($const_msg)
} else #[track_caller] {
$crate::panic!($runtime_msg)
}
)
}
core::slice::index::slice_index_fail::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::index::slice_index_fail::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::index::slice_index_fail::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::index::slice_index_fail::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::index::try_into_slice_range pub(crate) const fn try_into_slice_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> Option<ops::Range<usize>> {
let end = match end {
ops::Bound::Included(end) if end >= len => return None,
// Cannot overflow because `end < len` implies `end < usize::MAX`.
ops::Bound::Included(end) => end + 1,
ops::Bound::Excluded(end) if end > len => return None,
ops::Bound::Excluded(end) => end,
ops::Bound::Unbounded => len,
};
let start = match start {
ops::Bound::Excluded(start) if start >= end => return None,
// Cannot overflow because `start < end` implies `start < usize::MAX`.
ops::Bound::Excluded(start) => start + 1,
ops::Bound::Included(start) if start > end => return None,
ops::Bound::Included(start) => start,
ops::Bound::Unbounded => 0,
};
Some(start..end)
}
core::slice::iter::<impl core::iter::traits::collect::IntoIterator for &'a [T]>::into_iter fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
core::slice::iter::<impl core::iter::traits::collect::IntoIterator for &'a mut [T]>::into_iter fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
core::slice::iter::Chunks::<'a, T>::new pub(super) const fn new(slice: &'a [T], size: usize) -> Self {
Self { v: slice, chunk_size: size }
}
core::slice::iter::ChunksExact::<'a, T>::new pub(super) const fn new(slice: &'a [T], chunk_size: usize) -> Self {
let rem = slice.len() % chunk_size;
let fst_len = slice.len() - rem;
// SAFETY: 0 <= fst_len <= slice.len() by construction above
let (fst, snd) = unsafe { slice.split_at_unchecked(fst_len) };
Self { v: fst, rem: snd, chunk_size }
}
core::slice::iter::ChunksExact::<'a, T>::remainder pub fn remainder(&self) -> &'a [T] {
self.rem
}
core::slice::iter::ChunksExactMut::<'a, T>::into_remainder pub fn into_remainder(self) -> &'a mut [T] {
self.rem
}
core::slice::iter::ChunksExactMut::<'a, T>::new pub(super) const fn new(slice: &'a mut [T], chunk_size: usize) -> Self {
let rem = slice.len() % chunk_size;
let fst_len = slice.len() - rem;
// SAFETY: 0 <= fst_len <= slice.len() by construction above
let (fst, snd) = unsafe { slice.split_at_mut_unchecked(fst_len) };
Self { v: fst, rem: snd, chunk_size, _marker: PhantomData }
}
core::slice::iter::ChunksMut::<'a, T>::new pub(super) const fn new(slice: &'a mut [T], size: usize) -> Self {
Self { v: slice, chunk_size: size, _marker: PhantomData }
}
core::slice::iter::Iter::<'a, T>::as_slice pub fn as_slice(&self) -> &'a [T] {
self.make_slice()
}
core::slice::iter::Iter::<'a, T>::make_slice fn make_slice(&self) -> &'a [T] {
// SAFETY: the iterator was created from a slice with pointer
// `self.ptr` and length `len!(self)`. This guarantees that all
// the prerequisites for `from_raw_parts` are fulfilled.
unsafe { from_raw_parts(self.ptr.as_ptr(), len!(self)) }
}
core::slice::iter::Iter::<'a, T>::new pub(super) const fn new(slice: &'a [T]) -> Self {
let len = slice.len();
let ptr: NonNull<T> = NonNull::from_ref(slice).cast();
// SAFETY: Similar to `IterMut::new`.
unsafe {
let end_or_len =
if T::IS_ZST { without_provenance(len) } else { ptr.as_ptr().add(len) };
Self { ptr, end_or_len, _marker: PhantomData }
}
}
core::slice::iter::Iter::<'a, T>::next_back_unchecked unsafe fn next_back_unchecked(&mut self) -> $elem {
// SAFETY: the caller promised it's not empty, so
// the offsetting is in-bounds and there's an element to return.
unsafe { self.pre_dec_end(1).$into_ref() }
}
core::slice::iter::Iter::<'a, T>::post_inc_start unsafe fn post_inc_start(&mut self, offset: usize) -> NonNull<T> {
let old = self.ptr;
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// so this new pointer is inside `self` and thus guaranteed to be non-null.
unsafe {
if_zst!(mut self,
// Using the intrinsic directly avoids emitting a UbCheck
len => *len = crate::intrinsics::unchecked_sub(*len, offset),
_end => self.ptr = self.ptr.add(offset),
);
}
old
}
core::slice::iter::Iter::<'a, T>::pre_dec_end unsafe fn pre_dec_end(&mut self, offset: usize) -> NonNull<T> {
if_zst!(mut self,
// SAFETY: By our precondition, `offset` can be at most the
// current length, so the subtraction can never overflow.
len => unsafe {
// Using the intrinsic directly avoids emitting a UbCheck
*len = crate::intrinsics::unchecked_sub(*len, offset);
self.ptr
},
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// which is guaranteed to not overflow an `isize`. Also, the resulting pointer
// is in bounds of `slice`, which fulfills the other requirements for `offset`.
end => unsafe {
*end = end.sub(offset);
*end
},
)
}
core::slice::iter::IterMut::<'a, T>::as_mut_slice pub fn as_mut_slice(&mut self) -> &mut [T] {
// SAFETY: the iterator was created from a mutable slice with pointer
// `self.ptr` and length `len!(self)`. This guarantees that all the prerequisites
// for `from_raw_parts_mut` are fulfilled.
unsafe { from_raw_parts_mut(self.ptr.as_ptr(), len!(self)) }
}
core::slice::iter::IterMut::<'a, T>::make_slice fn make_slice(&self) -> &'a [T] {
// SAFETY: the iterator was created from a slice with pointer
// `self.ptr` and length `len!(self)`. This guarantees that all
// the prerequisites for `from_raw_parts` are fulfilled.
unsafe { from_raw_parts(self.ptr.as_ptr(), len!(self)) }
}
core::slice::iter::IterMut::<'a, T>::new pub(super) const fn new(slice: &'a mut [T]) -> Self {
let len = slice.len();
let ptr: NonNull<T> = NonNull::from_mut(slice).cast();
// SAFETY: There are several things here:
//
// `ptr` has been obtained via `NonNull::from_mut(slice)` where `slice` is
// a valid reference, thus it is non-null and safe to use.
//
// Adding `slice.len()` to the starting pointer gives a pointer
// at the end of `slice`. `end` will never be dereferenced, only checked
// for direct pointer equality with `ptr` to check if the iterator is
// done.
//
// In the case of a ZST, the end pointer is just the length. It's never
// used as a pointer at all, and thus it's fine to have no provenance.
//
// See the `next_unchecked!` and `is_empty!` macros as well as the
// `post_inc_start` method for more information.
unsafe {
let end_or_len =
if T::IS_ZST { without_provenance_mut(len) } else { ptr.as_ptr().add(len) };
Self { ptr, end_or_len, _marker: PhantomData }
}
}
core::slice::iter::IterMut::<'a, T>::next_back_unchecked unsafe fn next_back_unchecked(&mut self) -> $elem {
// SAFETY: the caller promised it's not empty, so
// the offsetting is in-bounds and there's an element to return.
unsafe { self.pre_dec_end(1).$into_ref() }
}
core::slice::iter::IterMut::<'a, T>::post_inc_start unsafe fn post_inc_start(&mut self, offset: usize) -> NonNull<T> {
let old = self.ptr;
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// so this new pointer is inside `self` and thus guaranteed to be non-null.
unsafe {
if_zst!(mut self,
// Using the intrinsic directly avoids emitting a UbCheck
len => *len = crate::intrinsics::unchecked_sub(*len, offset),
_end => self.ptr = self.ptr.add(offset),
);
}
old
}
core::slice::iter::IterMut::<'a, T>::pre_dec_end unsafe fn pre_dec_end(&mut self, offset: usize) -> NonNull<T> {
if_zst!(mut self,
// SAFETY: By our precondition, `offset` can be at most the
// current length, so the subtraction can never overflow.
len => unsafe {
// Using the intrinsic directly avoids emitting a UbCheck
*len = crate::intrinsics::unchecked_sub(*len, offset);
self.ptr
},
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// which is guaranteed to not overflow an `isize`. Also, the resulting pointer
// is in bounds of `slice`, which fulfills the other requirements for `offset`.
end => unsafe {
*end = end.sub(offset);
*end
},
)
}
core::slice::iter::Windows::<'a, T>::new pub(super) const fn new(slice: &'a [T], size: NonZero<usize>) -> Self {
Self { v: slice, size }
}
core::slice::memchr::contains_zero_byte const fn contains_zero_byte(x: usize) -> bool {
x.wrapping_sub(LO_USIZE) & !x & HI_USIZE != 0
}
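A demonstration, with hypothetical `u64` constants mirroring `LO_USIZE` (0x01 in every byte) and `HI_USIZE` (0x80 in every byte), of the classic SWAR zero-byte test used above: subtracting 0x01 borrows into a byte's high bit only when that byte was small enough to underflow, and `& !x` masks out bytes whose own high bit was already set, leaving a set high bit exactly for the bytes of `x` that are zero.

const LO: u64 = 0x0101_0101_0101_0101; // one 0x01 per byte, like LO_USIZE
const HI: u64 = 0x8080_8080_8080_8080; // one 0x80 per byte, like HI_USIZE

fn has_zero_byte(x: u64) -> bool {
    x.wrapping_sub(LO) & !x & HI != 0
}

fn main() {
    assert!(has_zero_byte(0x1122_0044_5566_7788)); // third byte is 0x00
    assert!(!has_zero_byte(0x1122_3344_5566_7788)); // no zero byte
    // memchr applies this to `word ^ repeated_x`: a zero byte there is a match.
    assert!(has_zero_byte(0x4142_4342_4142_4142 ^ 0x4343_4343_4343_4343));
}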
core::slice::memchr::memchr pub const fn memchr(x: u8, text: &[u8]) -> Option<usize> {
// Fast path for small slices.
if text.len() < 2 * USIZE_BYTES {
return memchr_naive(x, text);
}
memchr_aligned(x, text)
}
core::slice::memchr::memchr_aligned const fn memchr_aligned(x: u8, text: &[u8]) -> Option<usize> {
// The runtime version behaves the same as the compile-time version; it's
// just more optimized.
const_eval_select!(
@capture { x: u8, text: &[u8] } -> Option<usize>:
if const {
memchr_naive(x, text)
} else {
// Scan for a single byte value by reading two `usize` words at a time.
//
// Split `text` into three parts:
// - the unaligned initial part, before the first word-aligned address in `text`
// - the body, scanned two words at a time
// - the last remaining part, shorter than two words
// First, search up to an aligned boundary.
let len = text.len();
let ptr = text.as_ptr();
let mut offset = ptr.align_offset(USIZE_BYTES);
if offset > 0 {
offset = offset.min(len);
let slice = &text[..offset];
if let Some(index) = memchr_naive(x, slice) {
return Some(index);
}
}
// search the body of the text
let repeated_x = usize::repeat_u8(x);
while offset <= len - 2 * USIZE_BYTES {
// SAFETY: the while's predicate guarantees a distance of at least 2 * usize_bytes
// between the offset and the end of the slice.
unsafe {
let u = *(ptr.add(offset) as *const usize);
let v = *(ptr.add(offset + USIZE_BYTES) as *const usize);
// break if there is a matching byte
let zu = contains_zero_byte(u ^ repeated_x);
let zv = contains_zero_byte(v ^ repeated_x);
if zu || zv {
break;
}
}
offset += USIZE_BYTES * 2;
}
// Find the byte after the point the body loop stopped.
// FIXME(const-hack): Use `?` instead.
// FIXME(const-hack, fee1-dead): use range slicing
let slice =
// SAFETY: offset is within bounds
unsafe { super::from_raw_parts(text.as_ptr().add(offset), text.len() - offset) };
if let Some(i) = memchr_naive(x, slice) { Some(offset + i) } else { None }
}
)
}
core::slice::memchr::memchr_aligned::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::memchr::memchr_naive const fn memchr_naive(x: u8, text: &[u8]) -> Option<usize> {
let mut i = 0;
// FIXME(const-hack): Replace with `text.iter().pos(|c| *c == x)`.
while i < text.len() {
if text[i] == x {
return Some(i);
}
i += 1;
}
None
}
core::slice::raw::from_mut pub const fn from_mut<T>(s: &mut T) -> &mut [T] {
array::from_mut(s)
}
core::slice::raw::from_raw_parts pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"slice::from_raw_parts requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`",
(
data: *mut () = data as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
len: usize = len,
) =>
ub_checks::maybe_is_aligned_and_not_null(data, align, false)
&& ub_checks::is_valid_allocation_size(size, len)
);
&*ptr::slice_from_raw_parts(data, len)
}
}
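A usage sketch for `slice::from_raw_parts`, rebuilding a slice from a pointer/length pair that is known to satisfy the documented contract:

fn main() {
    let v = [10u32, 20, 30];
    let (ptr, len) = (v.as_ptr(), v.len());
    // SAFETY: `ptr` and `len` come from a live slice, so alignment,
    // validity, and the `isize::MAX` size bound all hold.
    let rebuilt: &[u32] = unsafe { core::slice::from_raw_parts(ptr, len) };
    assert_eq!(rebuilt, &v);
}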
core::slice::raw::from_raw_parts_mut pub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"slice::from_raw_parts_mut requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`",
(
data: *mut () = data as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
len: usize = len,
) =>
ub_checks::maybe_is_aligned_and_not_null(data, align, false)
&& ub_checks::is_valid_allocation_size(size, len)
);
&mut *ptr::slice_from_raw_parts_mut(data, len)
}
}
core::slice::raw::from_ref pub const fn from_ref<T>(s: &T) -> &[T] {
array::from_ref(s)
}
core::slice::rotate::const_min const fn const_min(left: usize, right: usize) -> usize {
if right < left { right } else { left }
}
core::slice::rotate::ptr_rotate pub(super) const unsafe fn ptr_rotate<T>(left: usize, mid: *mut T, right: usize) {
if T::IS_ZST {
return;
}
// abort early if the rotate is a no-op
if (left == 0) || (right == 0) {
return;
}
// `T` is not a zero-sized type, so it's okay to divide by its size.
if !cfg!(feature = "optimize_for_size")
// FIXME(const-hack): Use cmp::min when available in const
&& const_min(left, right) <= size_of::<BufType>() / size_of::<T>()
{
// SAFETY: guaranteed by the caller
unsafe { ptr_rotate_memmove(left, mid, right) };
} else if !cfg!(feature = "optimize_for_size")
&& ((left + right < 24) || (size_of::<T>() > size_of::<[usize; 4]>()))
{
// SAFETY: guaranteed by the caller
unsafe { ptr_rotate_gcd(left, mid, right) }
} else {
// SAFETY: guaranteed by the caller
unsafe { ptr_rotate_swap(left, mid, right) }
}
}
core::slice::rotate::ptr_rotate_gcd const unsafe fn ptr_rotate_gcd<T>(left: usize, mid: *mut T, right: usize) {
// Algorithm 2
// Microbenchmarks indicate that the average performance for random shifts is better all
// the way until about `left + right == 32`, but the worst case performance breaks even
// around 16. 24 was chosen as middle ground. If the size of `T` is larger than 4
// `usize`s, this algorithm also outperforms other algorithms.
// SAFETY: callers must ensure `mid - left` is valid for reading and writing.
let x = unsafe { mid.sub(left) };
// beginning of first round
// SAFETY: see previous comment.
let mut tmp: T = unsafe { x.read() };
let mut i = right;
// `gcd` could be found beforehand by calculating `gcd(left + right, right)`,
// but it is faster to do one loop which calculates the gcd as a side effect,
// and then do the rest of the chunk.
let mut gcd = right;
// Benchmarks reveal that it is faster to swap temporaries all the way through instead
// of reading one temporary once, copying backwards, and then writing that temporary at
// the very end. This is possibly due to the fact that swapping or replacing temporaries
// uses only one memory address in the loop instead of needing to manage two.
loop {
// [long-safety-expl]
// SAFETY: callers must ensure `[left, left+mid+right)` are all valid for reading and
// writing.
//
// - `i` starts at `right`, so `mid-left <= x+i = x+right = mid-left+right < mid+right`
// - `i <= left+right-1` is always true
// - if `i < left`, `right` is added so `i < left+right` and on the next
// iteration `left` is removed from `i` so it doesn't go further
// - if `i >= left`, `left` is removed immediately and so it doesn't go further.
// - overflows cannot happen for `i` since the function's safety contract asks for
// `mid+right-1 = x+left+right` to be valid for writing
// - underflows cannot happen because `i` must be greater than or equal to
// `left` for a subtraction of `left` to happen.
//
// So `x+i` is valid for reading and writing if the caller respected the contract
tmp = unsafe { x.add(i).replace(tmp) };
// instead of incrementing `i` and then checking if it is outside the bounds, we
// check if `i` will go outside the bounds on the next increment. This prevents
// any wrapping of pointers or `usize`.
if i >= left {
i -= left;
if i == 0 {
// end of first round
// SAFETY: tmp has been read from a valid source and x is valid for writing
// according to the caller.
unsafe { x.write(tmp) };
break;
}
// this conditional must be here if `left + right >= 15`
if i < gcd {
gcd = i;
}
} else {
i += right;
}
}
// finish the chunk with more rounds
// FIXME(const-hack): Use `for start in 1..gcd` when available in const
let mut start = 1;
while start < gcd {
// SAFETY: `gcd` is at most equal to `right` so all values in `1..gcd` are valid for
// reading and writing as per the function's safety contract, see [long-safety-expl]
// above
tmp = unsafe { x.add(start).read() };
// [safety-expl-addition]
//
// Here `start < gcd` so `start < right` so `i < right+right`: `right` being the
// greatest common divisor of `(left+right, right)` means that `left = right` so
// `i < left+right` so `x+i = mid-left+i` is always valid for reading and writing
// according to the function's safety contract.
i = start + right;
loop {
// SAFETY: see [long-safety-expl] and [safety-expl-addition]
tmp = unsafe { x.add(i).replace(tmp) };
if i >= left {
i -= left;
if i == start {
// SAFETY: see [long-safety-expl] and [safety-expl-addition]
unsafe { x.add(start).write(tmp) };
break;
}
} else {
i += right;
}
}
start += 1;
}
}
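A sketch (not library code) of the cycle structure the gcd algorithm walks: stepping by `right` modulo `left + right` from each start index in `0..gcd` visits every slot exactly once, which is why the code above runs one long first round and then `gcd - 1` shorter ones. The `cycle` helper is hypothetical.

fn cycle(start: usize, left: usize, right: usize) -> Vec<usize> {
    let n = left + right;
    let mut out = vec![start];
    let mut i = (start + right) % n;
    while i != start {
        out.push(i);
        i = (i + right) % n;
    }
    out
}

fn main() {
    // For left = 2, right = 4: gcd(6, 4) == 2, so two cycles of
    // length 3 together touch all six slots exactly once.
    assert_eq!(cycle(0, 2, 4), [0, 4, 2]);
    assert_eq!(cycle(1, 2, 4), [1, 5, 3]);
}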
core::slice::rotate::ptr_rotate_memmove const unsafe fn ptr_rotate_memmove<T>(left: usize, mid: *mut T, right: usize) {
// The `[T; 0]` here is to ensure this is appropriately aligned for T
let mut rawarray = MaybeUninit::<(BufType, [T; 0])>::uninit();
let buf = rawarray.as_mut_ptr() as *mut T;
// SAFETY: `mid-left <= mid-left+right < mid+right`
let dim = unsafe { mid.sub(left).add(right) };
if left <= right {
// SAFETY:
//
// 1) The `if` condition about the sizes ensures `[mid-left; left]` will fit in
// `buf` without overflow and `buf` was created just above and so cannot be
// overlapped with any value of `[mid-left; left]`
// 2) [mid-left, mid+right) are all valid for reading and writing and we don't care
// about overlaps here.
// 3) The `if` condition about `left <= right` ensures writing `left` elements to
// `dim = mid-left+right` is valid because:
// - `buf` is valid and `left` elements were written in it in 1)
// - `dim+left = mid-left+right+left = mid+right` and we write `[dim, dim+left)`
unsafe {
// 1)
ptr::copy_nonoverlapping(mid.sub(left), buf, left);
// 2)
ptr::copy(mid, mid.sub(left), right);
// 3)
ptr::copy_nonoverlapping(buf, dim, left);
}
} else {
// SAFETY: same reasoning as above but with `left` and `right` reversed
unsafe {
ptr::copy_nonoverlapping(mid, buf, right);
ptr::copy(mid.sub(left), dim, left);
ptr::copy_nonoverlapping(buf, mid.sub(left), right);
}
}
}
core::slice::rotate::ptr_rotate_swap const unsafe fn ptr_rotate_swap<T>(mut left: usize, mut mid: *mut T, mut right: usize) {
loop {
if left >= right {
// Algorithm 3
// There is an alternate way of swapping that involves finding where the last swap
// of this algorithm would be, and swapping using that last chunk instead of swapping
// adjacent chunks like this algorithm is doing, but this way is still faster.
loop {
// SAFETY:
// `left >= right` so `[mid-right, mid+right)` is valid for reading and writing
// Subtracting `right` from `mid` each turn is counterbalanced by the addition and
// check after it.
unsafe {
ptr::swap_nonoverlapping(mid.sub(right), mid, right);
mid = mid.sub(right);
}
left -= right;
if left < right {
break;
}
}
} else {
// Algorithm 3, `left < right`
loop {
// SAFETY: `[mid-left, mid+left)` is valid for reading and writing because
// `left < right` so `mid+left < mid+right`.
// Adding `left` to `mid` each turn is counterbalanced by the subtraction and check
// after it.
unsafe {
ptr::swap_nonoverlapping(mid.sub(left), mid, left);
mid = mid.add(left);
}
right -= left;
if right < left {
break;
}
}
}
if (right == 0) || (left == 0) {
return;
}
}
}
core::str::<impl core::convert::AsRef<[u8]> for str>::as_ref fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
core::str::<impl core::default::Default for &str>::default fn default() -> Self {
""
}
core::str::<impl str>::as_bytes pub const fn as_bytes(&self) -> &[u8] {
// SAFETY: const sound because we transmute two types with the same layout
unsafe { mem::transmute(self) }
}
core::str::<impl str>::as_bytes_mut pub const unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
// SAFETY: the cast from `&str` to `&[u8]` is safe since `str`
// has the same layout as `&[u8]` (only std can make this guarantee).
// The pointer dereference is safe since it comes from a mutable reference which
// is guaranteed to be valid for writes.
unsafe { &mut *(self as *mut str as *mut [u8]) }
}
core::str::<impl str>::as_mut_ptr pub const fn as_mut_ptr(&mut self) -> *mut u8 {
self as *mut str as *mut u8
}
core::str::<impl str>::as_ptr pub const fn as_ptr(&self) -> *const u8 {
self as *const str as *const u8
}
core::str::<impl str>::as_str pub const fn as_str(&self) -> &str {
self
}
core::str::<impl str>::bytes pub fn bytes(&self) -> Bytes<'_> {
Bytes(self.as_bytes().iter().copied())
}
core::str::<impl str>::ceil_char_boundary pub const fn ceil_char_boundary(&self, index: usize) -> usize {
if index >= self.len() {
self.len()
} else {
let mut i = index;
while i < self.len() {
if self.as_bytes()[i].is_utf8_char_boundary() {
break;
}
i += 1;
}
// The character boundary will be within four bytes of the index
debug_assert!(i <= index + 3);
i
}
}
core::str::<impl str>::char_indices pub fn char_indices(&self) -> CharIndices<'_> {
CharIndices { front_offset: 0, iter: self.chars() }
}
core::str::<impl str>::chars pub fn chars(&self) -> Chars<'_> {
Chars { iter: self.as_bytes().iter() }
}
core::str::<impl str>::ends_with pub fn ends_with<P: Pattern>(&self, pat: P) -> bool
where
for<'a> P::Searcher<'a>: ReverseSearcher<'a>,
{
pat.is_suffix_of(self)
}
core::str::<impl str>::eq_ignore_ascii_case pub const fn eq_ignore_ascii_case(&self, other: &str) -> bool {
self.as_bytes().eq_ignore_ascii_case(other.as_bytes())
}
core::str::<impl str>::find pub fn find<P: Pattern>(&self, pat: P) -> Option<usize> {
pat.into_searcher(self).next_match().map(|(i, _)| i)
}
core::str::<impl str>::floor_char_boundary pub const fn floor_char_boundary(&self, index: usize) -> usize {
if index >= self.len() {
self.len()
} else {
let mut i = index;
while i > 0 {
if self.as_bytes()[i].is_utf8_char_boundary() {
break;
}
i -= 1;
}
// The character boundary will be within four bytes of the index
debug_assert!(i >= index.saturating_sub(3));
i
}
}
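A usage sketch for the char-boundary helpers, assuming the `floor_char_boundary`/`ceil_char_boundary` methods shown above are available in this toolchain: indices inside a multi-byte character round down or up to the nearest boundary.

fn main() {
    let s = "héllo"; // 'é' occupies bytes 1..3
    assert!(s.is_char_boundary(1));
    assert!(!s.is_char_boundary(2)); // inside the two-byte 'é'
    assert_eq!(s.floor_char_boundary(2), 1);
    assert_eq!(s.ceil_char_boundary(2), 3);
}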
core::str::<impl str>::from_utf8 pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
converts::from_utf8(v)
}
core::str::<impl str>::from_utf8_mut pub const fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
converts::from_utf8_mut(v)
}
core::str::<impl str>::from_utf8_unchecked pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
// SAFETY: converts::from_utf8_unchecked has the same safety requirements as this function.
unsafe { converts::from_utf8_unchecked(v) }
}
core::str::<impl str>::from_utf8_unchecked_mut pub const unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
// SAFETY: converts::from_utf8_unchecked_mut has the same safety requirements as this function.
unsafe { converts::from_utf8_unchecked_mut(v) }
}
core::str::<impl str>::get_unchecked pub unsafe fn get_unchecked<I: SliceIndex<str>>(&self, i: I) -> &I::Output {
// SAFETY: the caller must uphold the safety contract for `get_unchecked`;
// the slice is dereferenceable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &*i.get_unchecked(self) }
}
core::str::<impl str>::is_char_boundary pub const fn is_char_boundary(&self, index: usize) -> bool {
// 0 is always ok.
// Test for 0 explicitly so that it can optimize out the check
// easily and skip reading string data for that case.
// Note that optimizing `self.get(..index)` relies on this.
if index == 0 {
return true;
}
if index >= self.len() {
// For `true` we have two options:
//
// - index == self.len()
// Empty strings are valid, so return true
// - index > self.len()
// In this case return false
//
// The check is placed exactly here, because it improves generated
// code on higher opt-levels. See PR #84751 for more details.
index == self.len()
} else {
self.as_bytes()[index].is_utf8_char_boundary()
}
}
core::str::<impl str>::is_empty pub const fn is_empty(&self) -> bool {
self.len() == 0
}
core::str::<impl str>::len pub const fn len(&self) -> usize {
self.as_bytes().len()
}
core::str::<impl str>::parse pub fn parse<F: FromStr>(&self) -> Result<F, F::Err> {
FromStr::from_str(self)
}
core::str::<impl str>::split_inclusive pub fn split_inclusive<P: Pattern>(&self, pat: P) -> SplitInclusive<'_, P> {
SplitInclusive(SplitInternal {
start: 0,
end: self.len(),
matcher: pat.into_searcher(self),
allow_trailing_empty: false,
finished: false,
})
}
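A short sketch of the observable behavior: because `allow_trailing_empty` is `false`, a trailing separator does not produce a final empty item, and each piece keeps its terminator.

fn main() {
    let data = "a\nb\nc\n";
    let lines: Vec<&str> = data.split_inclusive('\n').collect();
    assert_eq!(lines, ["a\n", "b\n", "c\n"]); // no trailing ""
    // Contrast with `split`, which yields a trailing empty string here.
    assert_eq!(data.split('\n').count(), 4);
}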
core::str::<impl str>::starts_with pub fn starts_with<P: Pattern>(&self, pat: P) -> bool {
pat.is_prefix_of(self)
}
core::str::converts::from_raw_parts pub const unsafe fn from_raw_parts<'a>(ptr: *const u8, len: usize) -> &'a str {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe { &*ptr::from_raw_parts(ptr, len) }
}
core::str::converts::from_raw_parts_mut pub const unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut str {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe { &mut *ptr::from_raw_parts_mut(ptr, len) }
}
core::str::converts::from_utf8 pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
// FIXME(const-hack): This should use `?` again, once it's `const`
match run_utf8_validation(v) {
Ok(_) => {
// SAFETY: validation succeeded.
Ok(unsafe { from_utf8_unchecked(v) })
}
Err(err) => Err(err),
}
}
core::str::converts::from_utf8_mut pub const fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
// FIXME(const-hack): This should use `?` again, once it's `const`
match run_utf8_validation(v) {
Ok(_) => {
// SAFETY: validation succeeded.
Ok(unsafe { from_utf8_unchecked_mut(v) })
}
Err(err) => Err(err),
}
}
core::str::converts::from_utf8_unchecked pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
// SAFETY: the caller must guarantee that the bytes `v` are valid UTF-8.
// Also relies on `&str` and `&[u8]` having the same layout.
unsafe { mem::transmute(v) }
}
core::str::converts::from_utf8_unchecked_mut pub const unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
// SAFETY: the caller must guarantee that the bytes `v`
// are valid UTF-8, thus the cast to `*mut str` is safe.
// Also, the pointer dereference is safe because that pointer
// comes from a reference which is guaranteed to be valid for writes.
unsafe { &mut *(v as *mut [u8] as *mut str) }
}
core::str::count::char_count_general_case fn char_count_general_case(s: &[u8]) -> usize {
s.iter().filter(|&&byte| !super::validations::utf8_is_cont_byte(byte)).count()
}
core::str::count::contains_non_continuation_byte fn contains_non_continuation_byte(w: usize) -> usize {
const LSB: usize = usize::repeat_u8(0x01);
((!w >> 7) | (w >> 6)) & LSB
}
core::str::count::count_chars pub(super) fn count_chars(s: &str) -> usize {
if cfg!(feature = "optimize_for_size") || s.len() < USIZE_SIZE * UNROLL_INNER {
// Avoid entering the optimized implementation for strings where the
// difference is not likely to matter, or where it might even be slower.
// That said, not much thought was spent on the particular threshold
// here, beyond "this value seems to make sense".
char_count_general_case(s.as_bytes())
} else {
do_count_chars(s)
}
}
core::str::count::sum_bytes_in_usize fn sum_bytes_in_usize(values: usize) -> usize {
const LSB_SHORTS: usize = usize::repeat_u16(0x0001);
const SKIP_BYTES: usize = usize::repeat_u16(0x00ff);
let pair_sum: usize = (values & SKIP_BYTES) + ((values >> 8) & SKIP_BYTES);
pair_sum.wrapping_mul(LSB_SHORTS) >> ((USIZE_SIZE - 2) * 8)
}
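The counting code above relies on every scalar value contributing exactly one non-continuation byte, and `contains_non_continuation_byte` evaluates that predicate for all bytes of a word at once. A hedged sketch of the same predicate on `u64` (the helper name `non_continuation_lanes` is illustrative):

// Each output byte is 1 if the corresponding input byte is NOT a UTF-8
// continuation byte (0b10xx_xxxx), else 0 -- the predicate from above.
fn non_continuation_lanes(w: u64) -> u64 {
    const LSB: u64 = 0x0101_0101_0101_0101;
    ((!w >> 7) | (w >> 6)) & LSB
}

fn main() {
    // Counting non-continuation bytes equals counting chars.
    let s = "héé"; // bytes: 68, C3 A9, C3 A9
    let by_bytes = s.bytes().filter(|&b| (b as i8) >= -64).count();
    assert_eq!(by_bytes, s.chars().count()); // both 3
    // Word-wise: pack eight bytes and sum the 0/1 lanes.
    let w = u64::from_le_bytes([0x68, 0xC3, 0xA9, 0xC3, 0xA9, 0x61, 0x62, 0x63]);
    let lanes = non_continuation_lanes(w);
    assert_eq!(lanes.to_le_bytes().iter().sum::<u8>(), 6); // 2 of 8 are continuations
}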
core::str::error::Utf8Error::error_len pub const fn error_len(&self) -> Option<usize> {
// FIXME(const-hack): This should become `map` again, once it's `const`
match self.error_len {
Some(len) => Some(len as usize),
None => None,
}
}
core::str::error::Utf8Error::valid_up_to pub const fn valid_up_to(&self) -> usize {
self.valid_up_to
}
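`valid_up_to` makes partial recovery cheap: the prefix up to that index is guaranteed valid, so it can be re-sliced without re-validation. A minimal sketch:

use std::str;

fn main() {
    let bytes = [b'h', b'i', 0xFF, b'!'];
    let err = str::from_utf8(&bytes).unwrap_err();
    assert_eq!(err.valid_up_to(), 2);
    let prefix = str::from_utf8(&bytes[..err.valid_up_to()]).unwrap();
    assert_eq!(prefix, "hi");
}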
core::str::iter::CharIndices::<'a>::as_str pub fn as_str(&self) -> &'a str {
self.iter.as_str()
}
core::str::iter::CharIndices::<'a>::offset pub fn offset(&self) -> usize {
self.front_offset
}
core::str::iter::Chars::<'a>::as_str pub fn as_str(&self) -> &'a str {
// SAFETY: `Chars` is only made from a str, which guarantees the iter is valid UTF-8.
unsafe { from_utf8_unchecked(self.iter.as_slice()) }
}
core::str::iter::SplitInternal::<'a, P>::get_end fn get_end(&mut self) -> Option<&'a str> {
if !self.finished {
self.finished = true;
if self.allow_trailing_empty || self.end - self.start > 0 {
// SAFETY: `self.start` and `self.end` always lie on unicode boundaries.
let string = unsafe { self.matcher.haystack().get_unchecked(self.start..self.end) };
return Some(string);
}
}
None
}
core::str::iter::SplitInternal::<'a, P>::next_inclusive fn next_inclusive(&mut self) -> Option<&'a str> {
if self.finished {
return None;
}
let haystack = self.matcher.haystack();
match self.matcher.next_match() {
// SAFETY: `Searcher` guarantees that `b` lies on a unicode boundary,
// and self.start is either the start of the original string,
// or `b` was assigned to it, so it also lies on a unicode boundary.
Some((_, b)) => unsafe {
let elt = haystack.get_unchecked(self.start..b);
self.start = b;
Some(elt)
},
None => self.get_end(),
}
}
core::str::lossy::<impl [u8]>::utf8_chunks pub fn utf8_chunks(&self) -> Utf8Chunks<'_> {
Utf8Chunks { source: self }
}
core::str::lossy::Utf8Chunk::<'a>::invalid pub fn invalid(&self) -> &'a [u8] {
self.invalid
}
core::str::lossy::Utf8Chunk::<'a>::valid pub fn valid(&self) -> &'a str {
self.valid
}
core::str::lossy::Utf8Chunks::<'a>::debug pub fn debug(&self) -> Debug<'_> {
Debug(self.source)
}
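`utf8_chunks` (stable on recent Rust) alternates between valid and invalid pieces and is the primitive that lossy decoding builds on. A sketch of hand-rolled lossy decoding:

fn main() {
    let bytes = b"ok\xFF\xFEend";
    let mut out = String::new();
    for chunk in bytes.utf8_chunks() {
        out.push_str(chunk.valid());
        if !chunk.invalid().is_empty() {
            out.push('\u{FFFD}'); // one replacement char per invalid chunk
        }
    }
    assert_eq!(out, "ok\u{FFFD}\u{FFFD}end");
}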
core::str::pattern::CharSearcher::<'_>::utf8_size fn utf8_size(&self) -> usize {
self.utf8_size.into()
}
core::str::pattern::Pattern::is_prefix_of fn is_prefix_of(self, haystack: &str) -> bool {
matches!(self.into_searcher(haystack).next(), SearchStep::Match(0, _))
}
core::str::pattern::Pattern::is_suffix_of fn is_suffix_of<'a>(self, haystack: &'a str) -> bool
where
Self::Searcher<'a>: ReverseSearcher<'a>,
{
matches!(self.into_searcher(haystack).next_back(), SearchStep::Match(_, j) if haystack.len() == j)
}
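`Pattern` is implemented for `char`, `&str`, and `char` predicates (among others), so the `starts_with`/`ends_with`/`find` entries above serve all of them through these same two hooks. Illustrative values:

fn main() {
    let s = "version-1.2";
    assert!(s.starts_with("version"));
    assert!(s.ends_with(|c: char| c.is_ascii_digit()));
    assert_eq!(s.find('-'), Some(7));
}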
core::str::pattern::Searcher::next_match fn next_match(&mut self) -> Option<(usize, usize)> {
loop {
match self.next() {
SearchStep::Match(a, b) => return Some((a, b)),
SearchStep::Done => return None,
_ => continue,
}
}
}
core::str::pattern::StrSearcher::<'a, 'b>::new fn new(haystack: &'a str, needle: &'b str) -> StrSearcher<'a, 'b> {
if needle.is_empty() {
StrSearcher {
haystack,
needle,
searcher: StrSearcherImpl::Empty(EmptyNeedle {
position: 0,
end: haystack.len(),
is_match_fw: true,
is_match_bw: true,
is_finished: false,
}),
}
} else {
StrSearcher {
haystack,
needle,
searcher: StrSearcherImpl::TwoWay(TwoWaySearcher::new(
needle.as_bytes(),
haystack.len(),
)),
}
}
}
core::str::pattern::TwoWaySearcher::byteset_contains fn byteset_contains(&self, byte: u8) -> bool {
(self.byteset >> ((byte & 0x3f) as usize)) & 1 != 0
}
core::str::pattern::TwoWaySearcher::byteset_create fn byteset_create(bytes: &[u8]) -> u64 {
bytes.iter().fold(0, |a, &b| (1 << (b & 0x3f)) | a)
}
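The byteset is a 64-bit approximate membership set keyed on the low six bits of each byte: false positives are possible (bytes that agree modulo 64 collide), false negatives are not, which is exactly what a skip filter needs. An illustrative free-function model of the two methods above:

fn byteset_create(bytes: &[u8]) -> u64 {
    bytes.iter().fold(0, |a, &b| (1 << (b & 0x3f)) | a)
}
fn byteset_contains(set: u64, byte: u8) -> bool {
    (set >> ((byte & 0x3f) as usize)) & 1 != 0
}

fn main() {
    let set = byteset_create(b"ab");
    assert!(byteset_contains(set, b'a'));
    assert!(!byteset_contains(set, b'c'));
    // False positive: 'a' (0x61) and '!' (0x21) share their low six bits.
    assert!(byteset_contains(set, b'!'));
}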
core::str::pattern::TwoWaySearcher::maximal_suffix fn maximal_suffix(arr: &[u8], order_greater: bool) -> (usize, usize) {
let mut left = 0; // Corresponds to i in the paper
let mut right = 1; // Corresponds to j in the paper
let mut offset = 0; // Corresponds to k in the paper, but starting at 0
// to match 0-based indexing.
let mut period = 1; // Corresponds to p in the paper
while let Some(&a) = arr.get(right + offset) {
// `left` will be in bounds when `right` is.
let b = arr[left + offset];
if (a < b && !order_greater) || (a > b && order_greater) {
// Suffix is smaller, period is entire prefix so far.
right += offset + 1;
offset = 0;
period = right - left;
} else if a == b {
// Advance through repetition of the current period.
if offset + 1 == period {
right += offset + 1;
offset = 0;
} else {
offset += 1;
}
} else {
// Suffix is larger, start over from current location.
left = right;
right += 1;
offset = 0;
period = 1;
}
}
(left, period)
}
core::str::pattern::TwoWaySearcher::new fn new(needle: &[u8], end: usize) -> TwoWaySearcher {
let (crit_pos_false, period_false) = TwoWaySearcher::maximal_suffix(needle, false);
let (crit_pos_true, period_true) = TwoWaySearcher::maximal_suffix(needle, true);
let (crit_pos, period) = if crit_pos_false > crit_pos_true {
(crit_pos_false, period_false)
} else {
(crit_pos_true, period_true)
};
// A particularly readable explanation of what's going on here can be found
// in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
// see the code for "Algorithm CP" on p. 323.
//
// What's going on is we have some critical factorization (u, v) of the
// needle, and we want to determine whether u is a suffix of
// &v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
// "Algorithm CP2", which is optimized for when the period of the needle
// is large.
if needle[..crit_pos] == needle[period..period + crit_pos] {
// short period case -- the period is exact
// compute a separate critical factorization for the reversed needle
// x = u' v' where |v'| < period(x).
//
// This is sped up by the period being known already.
// Note that a case like x = "acba" may be factored exactly forwards
// (crit_pos = 1, period = 3) while being factored with approximate
// period in reverse (crit_pos = 2, period = 2). We use the given
// reverse factorization but keep the exact period.
let crit_pos_back = needle.len()
- cmp::max(
TwoWaySearcher::reverse_maximal_suffix(needle, period, false),
TwoWaySearcher::reverse_maximal_suffix(needle, period, true),
);
TwoWaySearcher {
crit_pos,
crit_pos_back,
period,
byteset: Self::byteset_create(&needle[..period]),
position: 0,
end,
memory: 0,
memory_back: needle.len(),
}
} else {
// long period case -- we have an approximation to the actual period,
// and don't use memorization.
//
// Approximate the period by lower bound max(|u|, |v|) + 1.
// The critical factorization is efficient to use for both forward and
// reverse search.
TwoWaySearcher {
crit_pos,
crit_pos_back: crit_pos,
period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
byteset: Self::byteset_create(needle),
position: 0,
end,
memory: usize::MAX, // Dummy value to signify that the period is long
memory_back: usize::MAX,
}
}
}
core::str::pattern::TwoWaySearcher::next fn next<S>(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> S::Output
where
S: TwoWayStrategy,
{
// `next()` uses `self.position` as its cursor
let old_pos = self.position;
let needle_last = needle.len() - 1;
'search: loop {
// Check that we have room to search in
// position + needle_last cannot overflow if we assume slices
// are bounded by isize's range.
let tail_byte = match haystack.get(self.position + needle_last) {
Some(&b) => b,
None => {
self.position = haystack.len();
return S::rejecting(old_pos, self.position);
}
};
if S::use_early_reject() && old_pos != self.position {
return S::rejecting(old_pos, self.position);
}
// Quickly skip by large portions unrelated to our substring
if !self.byteset_contains(tail_byte) {
self.position += needle.len();
if !long_period {
self.memory = 0;
}
continue 'search;
}
// See if the right part of the needle matches
let start =
if long_period { self.crit_pos } else { cmp::max(self.crit_pos, self.memory) };
for i in start..needle.len() {
if needle[i] != haystack[self.position + i] {
self.position += i - self.crit_pos + 1;
if !long_period {
self.memory = 0;
}
continue 'search;
}
}
// See if the left part of the needle matches
let start = if long_period { 0 } else { self.memory };
for i in (start..self.crit_pos).rev() {
if needle[i] != haystack[self.position + i] {
self.position += self.period;
if !long_period {
self.memory = needle.len() - self.period;
}
continue 'search;
}
}
// We have found a match!
let match_pos = self.position;
// Note: add self.period instead of needle.len() to have overlapping matches
self.position += needle.len();
if !long_period {
self.memory = 0; // set to needle.len() - self.period for overlapping matches
}
return S::matching(match_pos, match_pos + needle.len());
}
}
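As the note above says, advancing by `needle.len()` (rather than `self.period`) after a match makes iteration yield non-overlapping matches. The observable effect:

fn main() {
    let starts: Vec<usize> = "aaaa".match_indices("aa").map(|(i, _)| i).collect();
    assert_eq!(starts, [0, 2]); // the overlapping candidate at index 1 is skipped
}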
core::str::pattern::TwoWaySearcher::next_back fn next_back<S>(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> S::Output
where
S: TwoWayStrategy,
{
// `next_back()` uses `self.end` as its cursor -- so that `next()` and `next_back()`
// are independent.
let old_end = self.end;
'search: loop {
// Check that we have room to search in
// end - needle.len() will wrap around when there is no more room,
// but due to slice length limits it can never wrap all the way back
// into the length of haystack.
let front_byte = match haystack.get(self.end.wrapping_sub(needle.len())) {
Some(&b) => b,
None => {
self.end = 0;
return S::rejecting(0, old_end);
}
};
if S::use_early_reject() && old_end != self.end {
return S::rejecting(self.end, old_end);
}
// Quickly skip by large portions unrelated to our substring
if !self.byteset_contains(front_byte) {
self.end -= needle.len();
if !long_period {
self.memory_back = needle.len();
}
continue 'search;
}
// See if the left part of the needle matches
let crit = if long_period {
self.crit_pos_back
} else {
cmp::min(self.crit_pos_back, self.memory_back)
};
for i in (0..crit).rev() {
if needle[i] != haystack[self.end - needle.len() + i] {
self.end -= self.crit_pos_back - i;
if !long_period {
self.memory_back = needle.len();
}
continue 'search;
}
}
// See if the right part of the needle matches
let needle_end = if long_period { needle.len() } else { self.memory_back };
for i in self.crit_pos_back..needle_end {
if needle[i] != haystack[self.end - needle.len() + i] {
self.end -= self.period;
if !long_period {
self.memory_back = self.period;
}
continue 'search;
}
}
// We have found a match!
let match_pos = self.end - needle.len();
// Note: sub self.period instead of needle.len() to have overlapping matches
self.end -= needle.len();
if !long_period {
self.memory_back = needle.len();
}
return S::matching(match_pos, match_pos + needle.len());
}
}
core::str::pattern::TwoWaySearcher::reverse_maximal_suffix fn reverse_maximal_suffix(arr: &[u8], known_period: usize, order_greater: bool) -> usize {
let mut left = 0; // Corresponds to i in the paper
let mut right = 1; // Corresponds to j in the paper
let mut offset = 0; // Corresponds to k in the paper, but starting at 0
// to match 0-based indexing.
let mut period = 1; // Corresponds to p in the paper
let n = arr.len();
while right + offset < n {
let a = arr[n - (1 + right + offset)];
let b = arr[n - (1 + left + offset)];
if (a < b && !order_greater) || (a > b && order_greater) {
// Suffix is smaller, period is entire prefix so far.
right += offset + 1;
offset = 0;
period = right - left;
} else if a == b {
// Advance through repetition of the current period.
if offset + 1 == period {
right += offset + 1;
offset = 0;
} else {
offset += 1;
}
} else {
// Suffix is larger, start over from current location.
left = right;
right += 1;
offset = 0;
period = 1;
}
if period == known_period {
break;
}
}
debug_assert!(period <= known_period);
left
}
core::str::slice_error_fail const fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! {
crate::intrinsics::const_eval_select((s, begin, end), slice_error_fail_ct, slice_error_fail_rt)
}
core::str::slice_error_fail_rt fn slice_error_fail_rt(s: &str, begin: usize, end: usize) -> ! {
const MAX_DISPLAY_LENGTH: usize = 256;
let trunc_len = s.floor_char_boundary(MAX_DISPLAY_LENGTH);
let s_trunc = &s[..trunc_len];
let ellipsis = if trunc_len < s.len() { "[...]" } else { "" };
let len = s.len();
// 1. begin is OOB.
if begin > len {
panic!("start byte index {begin} is out of bounds of `{s_trunc}`{ellipsis}");
}
// 2. end is OOB.
if end > len {
panic!("end byte index {end} is out of bounds of `{s_trunc}`{ellipsis}");
}
// 3. range is backwards.
if begin > end {
panic!("begin > end ({begin} > {end}) when slicing `{s_trunc}`{ellipsis}")
}
// 4. begin is inside a character.
if !s.is_char_boundary(begin) {
let floor = s.floor_char_boundary(begin);
let ceil = s.ceil_char_boundary(begin);
let range = floor..ceil;
let ch = s[floor..ceil].chars().next().unwrap();
panic!(
"start byte index {begin} is not a char boundary; it is inside {ch:?} (bytes {range:?}) of `{s_trunc}`{ellipsis}"
)
}
// 5. end is inside a character.
if !s.is_char_boundary(end) {
let floor = s.floor_char_boundary(end);
let ceil = s.ceil_char_boundary(end);
let range = floor..ceil;
let ch = s[floor..ceil].chars().next().unwrap();
panic!(
"end byte index {end} is not a char boundary; it is inside {ch:?} (bytes {range:?}) of `{s_trunc}`{ellipsis}"
)
}
// 6. end is OOB and range is inclusive (end == len).
// This test cannot be combined with 2. above because for cases like
// `"abcαβγ"[4..9]` the error is that 4 is inside 'α', not that 9 is OOB.
debug_assert_eq!(end, len);
panic!("end byte index {end} is out of bounds of `{s_trunc}`{ellipsis}");
}
core::str::traits::<impl core::cmp::PartialEq for str>::eq fn eq(&self, other: &str) -> bool {
self.as_bytes() == other.as_bytes()
}
core::str::traits::<impl core::ops::index::Index<I> for str>::index fn index(&self, index: I) -> &I::Output {
index.index(self)
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get fn get(self, slice: &str) -> Option<&Self::Output> {
if self.start <= self.end
&& slice.is_char_boundary(self.start)
&& slice.is_char_boundary(self.end)
{
// SAFETY: just checked that `start` and `end` are on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
// We also checked char boundaries, so this is valid UTF-8.
Some(unsafe { &*self.get_unchecked(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_mut fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
if self.start <= self.end
&& slice.is_char_boundary(self.start)
&& slice.is_char_boundary(self.end)
{
// SAFETY: just checked that `start` and `end` are on a char boundary.
// We know the pointer is unique because we got it from `slice`.
Some(unsafe { &mut *self.get_unchecked_mut(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_unchecked unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
let slice = slice as *const [u8];
assert_unsafe_precondition!(
// We'd like to check that the bounds are on char boundaries,
// but there's not really a way to do so without reading
// behind the pointer, which has aliasing implications.
// It's also not possible to move this check up to
// `str::get_unchecked` without adding a special function
// to `SliceIndex` just for this.
check_library_ub,
"str::get_unchecked requires that the range is within the string slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len,
);
// SAFETY: the caller guarantees that `self` is in bounds of `slice`
// which satisfies all the conditions for `add`.
unsafe {
let new_len = unchecked_sub(self.end, self.start);
ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), new_len) as *const str
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
let slice = slice as *mut [u8];
assert_unsafe_precondition!(
check_library_ub,
"str::get_unchecked_mut requires that the range is within the string slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len,
);
// SAFETY: see comments for `get_unchecked`.
unsafe {
let new_len = unchecked_sub(self.end, self.start);
ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), new_len) as *mut str
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::index fn index(self, slice: &str) -> &Self::Output {
let (start, end) = (self.start, self.end);
match self.get(slice) {
Some(s) => s,
None => super::slice_error_fail(slice, start, end),
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::index_mut fn index_mut(self, slice: &mut str) -> &mut Self::Output {
// is_char_boundary checks that the index is in [0, .len()]
// cannot reuse `get` as above, because of NLL trouble
if self.start <= self.end
&& slice.is_char_boundary(self.start)
&& slice.is_char_boundary(self.end)
{
// SAFETY: just checked that `start` and `end` are on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
unsafe { &mut *self.get_unchecked_mut(slice) }
} else {
super::slice_error_fail(slice, self.start, self.end)
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::get fn get(self, slice: &str) -> Option<&Self::Output> {
if slice.is_char_boundary(self.start) {
// SAFETY: just checked that `start` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
Some(unsafe { &*self.get_unchecked(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::get_mut fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
if slice.is_char_boundary(self.start) {
// SAFETY: just checked that `start` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
Some(unsafe { &mut *self.get_unchecked_mut(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::get_unchecked unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
let len = (slice as *const [u8]).len();
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (self.start..len).get_unchecked(slice) }
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
let len = (slice as *mut [u8]).len();
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (self.start..len).get_unchecked_mut(slice) }
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::index fn index(self, slice: &str) -> &Self::Output {
let (start, end) = (self.start, slice.len());
match self.get(slice) {
Some(s) => s,
None => super::slice_error_fail(slice, start, end),
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::index_mut fn index_mut(self, slice: &mut str) -> &mut Self::Output {
if slice.is_char_boundary(self.start) {
// SAFETY: just checked that `start` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
unsafe { &mut *self.get_unchecked_mut(slice) }
} else {
super::slice_error_fail(slice, self.start, slice.len())
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::get fn get(self, slice: &str) -> Option<&Self::Output> {
if slice.is_char_boundary(self.end) {
// SAFETY: just checked that `end` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
Some(unsafe { &*self.get_unchecked(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::get_mut fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
if slice.is_char_boundary(self.end) {
// SAFETY: just checked that `end` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
Some(unsafe { &mut *self.get_unchecked_mut(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::get_unchecked unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..self.end).get_unchecked(slice) }
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..self.end).get_unchecked_mut(slice) }
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::index fn index(self, slice: &str) -> &Self::Output {
let end = self.end;
match self.get(slice) {
Some(s) => s,
None => super::slice_error_fail(slice, 0, end),
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::index_mut fn index_mut(self, slice: &mut str) -> &mut Self::Output {
if slice.is_char_boundary(self.end) {
// SAFETY: just checked that `end` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
unsafe { &mut *self.get_unchecked_mut(slice) }
} else {
super::slice_error_fail(slice, 0, self.end)
}
}
core::str::validations::contains_nonascii const fn contains_nonascii(x: usize) -> bool {
(x & NONASCII_MASK) != 0
}
core::str::validations::next_code_point pub unsafe fn next_code_point<'a, I: Iterator<Item = &'a u8>>(bytes: &mut I) -> Option<u32> {
// Decode UTF-8
let x = *bytes.next()?;
if x < 128 {
return Some(x as u32);
}
// Multibyte case follows
// Decode from a byte combination out of: [[[x y] z] w]
// NOTE: Performance is sensitive to the exact formulation here
let init = utf8_first_byte(x, 2);
// SAFETY: `bytes` produces a UTF-8-like string,
// so the iterator must produce a value here.
let y = unsafe { *bytes.next().unwrap_unchecked() };
let mut ch = utf8_acc_cont_byte(init, y);
if x >= 0xE0 {
// [[x y z] w] case
// 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid
// SAFETY: `bytes` produces a UTF-8-like string,
// so the iterator must produce a value here.
let z = unsafe { *bytes.next().unwrap_unchecked() };
let y_z = utf8_acc_cont_byte((y & CONT_MASK) as u32, z);
ch = init << 12 | y_z;
if x >= 0xF0 {
// [x y z w] case
// use only the lower 3 bits of `init`
// SAFETY: `bytes` produces a UTF-8-like string,
// so the iterator must produce a value here.
let w = unsafe { *bytes.next().unwrap_unchecked() };
ch = (init & 7) << 18 | utf8_acc_cont_byte(y_z, w);
}
}
Some(ch)
}
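A worked instance of the accumulation above for the three-byte sequence E2 82 AC ('€', U+20AC), with `CONT_MASK` written out as 0x3F:

fn main() {
    let bytes = [0xE2u8, 0x82, 0xAC]; // '€' in UTF-8
    let init = (bytes[0] & 0x1F) as u32; // utf8_first_byte(x, 2)
    let y_z = (((bytes[1] & 0x3F) as u32) << 6) | (bytes[2] & 0x3F) as u32;
    let ch = init << 12 | y_z; // the 3-byte arm above
    assert_eq!(ch, 0x20AC);
    assert_eq!(char::from_u32(ch), Some('€'));
}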
core::str::validations::next_code_point_reverse pub(super) unsafe fn next_code_point_reverse<'a, I>(bytes: &mut I) -> Option<u32>
where
I: DoubleEndedIterator<Item = &'a u8>,
{
// Decode UTF-8
let w = match *bytes.next_back()? {
next_byte if next_byte < 128 => return Some(next_byte as u32),
back_byte => back_byte,
};
// Multibyte case follows
// Decode from a byte combination out of: [x [y [z w]]]
let mut ch;
// SAFETY: `bytes` produces a UTF-8-like string,
// so the iterator must produce a value here.
let z = unsafe { *bytes.next_back().unwrap_unchecked() };
ch = utf8_first_byte(z, 2);
if utf8_is_cont_byte(z) {
// SAFETY: `bytes` produces a UTF-8-like string,
// so the iterator must produce a value here.
let y = unsafe { *bytes.next_back().unwrap_unchecked() };
ch = utf8_first_byte(y, 3);
if utf8_is_cont_byte(y) {
// SAFETY: `bytes` produces a UTF-8-like string,
// so the iterator must produce a value here.
let x = unsafe { *bytes.next_back().unwrap_unchecked() };
ch = utf8_first_byte(x, 4);
ch = utf8_acc_cont_byte(ch, y);
}
ch = utf8_acc_cont_byte(ch, z);
}
ch = utf8_acc_cont_byte(ch, w);
Some(ch)
}
core::str::validations::run_utf8_validation pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
let mut index = 0;
let len = v.len();
const USIZE_BYTES: usize = size_of::<usize>();
let ascii_block_size = 2 * USIZE_BYTES;
let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 };
// Below, we safely fall back to a slower codepath if the offset is `usize::MAX`,
// so the end-to-end behavior is the same at compile time and runtime.
let align = const_eval_select!(
@capture { v: &[u8] } -> usize:
if const {
usize::MAX
} else {
v.as_ptr().align_offset(USIZE_BYTES)
}
);
while index < len {
let old_offset = index;
macro_rules! err {
($error_len: expr) => {
return Err(Utf8Error { valid_up_to: old_offset, error_len: $error_len })
};
}
macro_rules! next {
() => {{
index += 1;
// we needed data, but there was none: error!
if index >= len {
err!(None)
}
v[index]
}};
}
let first = v[index];
if first >= 128 {
let w = utf8_char_width(first);
// 2-byte encoding is for codepoints \u{0080} to \u{07ff}
// first C2 80 last DF BF
// 3-byte encoding is for codepoints \u{0800} to \u{ffff}
// first E0 A0 80 last EF BF BF
// excluding surrogate codepoints \u{d800} to \u{dfff}
// ED A0 80 to ED BF BF
// 4-byte encoding is for codepoints \u{10000} to \u{10ffff}
// first F0 90 80 80 last F4 8F BF BF
//
// Use the UTF-8 syntax from the RFC
//
// https://tools.ietf.org/html/rfc3629
// UTF8-1 = %x00-7F
// UTF8-2 = %xC2-DF UTF8-tail
// UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
// %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
// UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
// %xF4 %x80-8F 2( UTF8-tail )
match w {
2 => {
if next!() as i8 >= -64 {
err!(Some(1))
}
}
3 => {
match (first, next!()) {
(0xE0, 0xA0..=0xBF)
| (0xE1..=0xEC, 0x80..=0xBF)
| (0xED, 0x80..=0x9F)
| (0xEE..=0xEF, 0x80..=0xBF) => {}
_ => err!(Some(1)),
}
if next!() as i8 >= -64 {
err!(Some(2))
}
}
4 => {
match (first, next!()) {
(0xF0, 0x90..=0xBF) | (0xF1..=0xF3, 0x80..=0xBF) | (0xF4, 0x80..=0x8F) => {}
_ => err!(Some(1)),
}
if next!() as i8 >= -64 {
err!(Some(2))
}
if next!() as i8 >= -64 {
err!(Some(3))
}
}
_ => err!(Some(1)),
}
index += 1;
} else {
// ASCII case, try to skip forward quickly.
// When the pointer is aligned, read 2 words of data per iteration
// until we find a word containing a non-ascii byte.
if align != usize::MAX && align.wrapping_sub(index).is_multiple_of(USIZE_BYTES) {
let ptr = v.as_ptr();
while index < blocks_end {
// SAFETY: since `align - index` and `ascii_block_size` are
// multiples of `USIZE_BYTES`, `block = ptr.add(index)` is
// always aligned with a `usize` so it's safe to dereference
// both `block` and `block.add(1)`.
unsafe {
let block = ptr.add(index) as *const usize;
// break if there is a nonascii byte
let zu = contains_nonascii(*block);
let zv = contains_nonascii(*block.add(1));
if zu || zv {
break;
}
}
index += ascii_block_size;
}
// step from the point where the wordwise loop stopped
while index < len && v[index] < 128 {
index += 1;
}
} else {
index += 1;
}
}
}
Ok(())
}
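The two error arms are observable through `Utf8Error::error_len`: `err!(Some(n))` marks an unrecoverable sequence (skip `n` bytes and resume), while `err!(None)` marks input that merely ended too early. A sketch:

use std::str;

fn main() {
    // 0xE2 opens a 3-byte sequence; 0x82 is a valid continuation.
    let truncated = str::from_utf8(&[0xE2, 0x82]).unwrap_err();
    assert_eq!(truncated.error_len(), None); // more input might complete it
    // 0x41 ('A') can never continue the sequence.
    let invalid = str::from_utf8(&[0xE2, 0x41]).unwrap_err();
    assert_eq!(invalid.error_len(), Some(1)); // skip one byte and resume
}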
core::str::validations::run_utf8_validation::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::str::validations::utf8_acc_cont_byte const fn utf8_acc_cont_byte(ch: u32, byte: u8) -> u32 {
(ch << 6) | (byte & CONT_MASK) as u32
}
core::str::validations::utf8_char_width pub const fn utf8_char_width(b: u8) -> usize {
UTF8_CHAR_WIDTH[b as usize] as usize
}
core::str::validations::utf8_first_byte const fn utf8_first_byte(byte: u8, width: u32) -> u32 {
(byte & (0x7F >> width)) as u32
}
core::str::validations::utf8_is_cont_byte pub(super) const fn utf8_is_cont_byte(byte: u8) -> bool {
(byte as i8) < -64
}
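The signed comparison in `utf8_is_cont_byte` is a compact range check: read as `i8`, the bytes below -64 are exactly 0x80..=0xBF. A quick exhaustive check:

fn main() {
    for b in 0u8..=255 {
        assert_eq!((b as i8) < -64, (0x80..=0xBF).contains(&b));
    }
}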
core::sync::atomic::Atomic::<bool>::fetch_and pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get().cast::<u8>(), val as u8, order) != 0 }
}
core::sync::atomic::Atomic::<bool>::fetch_or pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get().cast::<u8>(), val as u8, order) != 0 }
}
core::sync::atomic::Atomic::<bool>::load pub fn load(&self, order: Ordering) -> bool {
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_load(self.v.get().cast::<u8>(), order) != 0 }
}
core::sync::atomic::Atomic::<bool>::new pub const fn new(v: bool) -> AtomicBool {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(v) }
}
core::sync::atomic::Atomic::<bool>::store pub fn store(&self, val: bool, order: Ordering) {
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
atomic_store(self.v.get().cast::<u8>(), val as u8, order);
}
}
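Typical use of the `bool` entries above is a shared one-shot flag; `Relaxed` suffices here only because no other memory is synchronized through the flag (an assumption of this sketch):

use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

static STOP: AtomicBool = AtomicBool::new(false);

fn main() {
    let worker = thread::spawn(|| {
        while !STOP.load(Ordering::Relaxed) {
            std::hint::spin_loop();
        }
    });
    STOP.store(true, Ordering::Relaxed);
    worker.join().unwrap();
}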
core::sync::atomic::Atomic::<u16>::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get().cast()
}
core::sync::atomic::Atomic::<u16>::compare_and_swap pub fn compare_and_swap(&self,
current: $int_type,
new: $int_type,
order: Ordering) -> $int_type {
match self.compare_exchange(current,
new,
order,
strongest_failure_ordering(order)) {
Ok(x) => x,
Err(x) => x,
}
}
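`compare_and_swap` has been deprecated since Rust 1.50 in favor of `compare_exchange`, which takes an explicit failure ordering and reports success through `Result`. The equivalent call, sketched on `AtomicU16`:

use std::sync::atomic::{AtomicU16, Ordering};

fn main() {
    let a = AtomicU16::new(5);
    // Old: a.compare_and_swap(5, 10, Ordering::SeqCst)
    let prev = match a.compare_exchange(5, 10, Ordering::SeqCst, Ordering::SeqCst) {
        Ok(x) | Err(x) => x,
    };
    assert_eq!(prev, 5);
    assert_eq!(a.load(Ordering::SeqCst), 10);
}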
core::sync::atomic::Atomic::<u16>::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.as_ptr(), current, new, success, failure) }
}
core::sync::atomic::Atomic::<u16>::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.as_ptr(), current, new, success, failure)
}
}
core::sync::atomic::Atomic::<u16>::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u16>::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u16>::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u16>::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u16>::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u16>::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u16>::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u16>::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
self.try_update(set_order, fetch_order, f)
}
core::sync::atomic::Atomic::<u16>::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u16>::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::Atomic::<u16>::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::Atomic::<u16>::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::Atomic::<u16>::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { &mut *self.as_ptr() }
}
core::sync::atomic::Atomic::<u16>::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::Atomic::<u16>::into_inner pub const fn into_inner(self) -> $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(self) }
}
core::sync::atomic::Atomic::<u16>::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.as_ptr(), order) }
}
core::sync::atomic::Atomic::<u16>::new pub const fn new(v: $int_type) -> Self {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(v) }
}
core::sync::atomic::Atomic::<u16>::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.as_ptr(), val, order); }
}
core::sync::atomic::Atomic::<u16>::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u16>::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
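`try_update` is, at the time of writing, the unstable successor of the stable `fetch_update`; both run the weak compare-exchange loop above, retrying while the closure returns `Some`. A sketch of a saturating increment via `fetch_update`:

use std::sync::atomic::{AtomicU16, Ordering};

fn main() {
    let counter = AtomicU16::new(u16::MAX - 1);
    let bump = |x: u16| x.checked_add(1); // None once saturated
    assert_eq!(
        counter.fetch_update(Ordering::SeqCst, Ordering::SeqCst, bump),
        Ok(u16::MAX - 1)
    );
    assert_eq!(
        counter.fetch_update(Ordering::SeqCst, Ordering::SeqCst, bump),
        Err(u16::MAX)
    );
    assert_eq!(counter.load(Ordering::SeqCst), u16::MAX);
}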
core::sync::atomic::Atomic::<u16>::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
core::sync::atomic::Atomic::<u32>::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get().cast()
}
core::sync::atomic::Atomic::<u32>::compare_and_swap pub fn compare_and_swap(&self,
current: $int_type,
new: $int_type,
order: Ordering) -> $int_type {
match self.compare_exchange(current,
new,
order,
strongest_failure_ordering(order)) {
Ok(x) => x,
Err(x) => x,
}
}
core::sync::atomic::Atomic::<u32>::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.as_ptr(), current, new, success, failure) }
}
core::sync::atomic::Atomic::<u32>::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.as_ptr(), current, new, success, failure)
}
}
core::sync::atomic::Atomic::<u32>::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u32>::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u32>::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u32>::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u32>::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u32>::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u32>::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u32>::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
self.try_update(set_order, fetch_order, f)
}
core::sync::atomic::Atomic::<u32>::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u32>::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::Atomic::<u32>::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::Atomic::<u32>::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::Atomic::<u32>::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { &mut *self.as_ptr() }
}
core::sync::atomic::Atomic::<u32>::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::Atomic::<u32>::into_inner pub const fn into_inner(self) -> $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(self) }
}
core::sync::atomic::Atomic::<u32>::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.as_ptr(), order) }
}
core::sync::atomic::Atomic::<u32>::new pub const fn new(v: $int_type) -> Self {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(v) }
}
core::sync::atomic::Atomic::<u32>::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.as_ptr(), val, order); }
}
core::sync::atomic::Atomic::<u32>::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u32>::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
core::sync::atomic::Atomic::<u32>::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
core::sync::atomic::Atomic::<u64>::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get().cast()
}
core::sync::atomic::Atomic::<u64>::compare_and_swap pub fn compare_and_swap(&self,
current: $int_type,
new: $int_type,
order: Ordering) -> $int_type {
match self.compare_exchange(current,
new,
order,
strongest_failure_ordering(order)) {
Ok(x) => x,
Err(x) => x,
}
}
core::sync::atomic::Atomic::<u64>::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.as_ptr(), current, new, success, failure) }
}
core::sync::atomic::Atomic::<u64>::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.as_ptr(), current, new, success, failure)
}
}
core::sync::atomic::Atomic::<u64>::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u64>::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u64>::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u64>::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u64>::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u64>::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u64>::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u64>::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
self.try_update(set_order, fetch_order, f)
}
core::sync::atomic::Atomic::<u64>::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u64>::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::Atomic::<u64>::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::Atomic::<u64>::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::Atomic::<u64>::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { &mut *self.as_ptr() }
}
core::sync::atomic::Atomic::<u64>::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::Atomic::<u64>::into_inner pub const fn into_inner(self) -> $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(self) }
}
core::sync::atomic::Atomic::<u64>::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.as_ptr(), order) }
}
core::sync::atomic::Atomic::<u64>::new pub const fn new(v: $int_type) -> Self {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(v) }
}
core::sync::atomic::Atomic::<u64>::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.as_ptr(), val, order); }
}
core::sync::atomic::Atomic::<u64>::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u64>::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
core::sync::atomic::Atomic::<u64>::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
core::sync::atomic::Atomic::<u8>::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get().cast()
}
core::sync::atomic::Atomic::<u8>::compare_and_swap pub fn compare_and_swap(&self,
current: $int_type,
new: $int_type,
order: Ordering) -> $int_type {
match self.compare_exchange(current,
new,
order,
strongest_failure_ordering(order)) {
Ok(x) => x,
Err(x) => x,
}
}
core::sync::atomic::Atomic::<u8>::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.as_ptr(), current, new, success, failure) }
}
core::sync::atomic::Atomic::<u8>::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.as_ptr(), current, new, success, failure)
}
}
core::sync::atomic::Atomic::<u8>::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u8>::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u8>::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u8>::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u8>::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u8>::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u8>::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u8>::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
self.try_update(set_order, fetch_order, f)
}
core::sync::atomic::Atomic::<u8>::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u8>::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::Atomic::<u8>::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::Atomic::<u8>::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::Atomic::<u8>::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { &mut *self.as_ptr() }
}
core::sync::atomic::Atomic::<u8>::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::Atomic::<u8>::into_inner pub const fn into_inner(self) -> $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(self) }
}
core::sync::atomic::Atomic::<u8>::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.as_ptr(), order) }
}
core::sync::atomic::Atomic::<u8>::new pub const fn new(v: $int_type) -> Self {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(v) }
}
core::sync::atomic::Atomic::<u8>::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.as_ptr(), val, order); }
}
core::sync::atomic::Atomic::<u8>::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<u8>::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
core::sync::atomic::Atomic::<u8>::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
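The update, try_update, and fetch_update methods above are all the same compare_exchange_weak retry loop: load, apply f, CAS, and on contention feed the freshly observed value back into f. A minimal sketch of that loop as driven through the public std::sync::atomic API (the saturating-increment closure is purely illustrative):

use std::sync::atomic::{AtomicU8, Ordering};

fn main() {
    let counter = AtomicU8::new(254);

    // fetch_update retries `f` until a compare_exchange_weak succeeds,
    // or returns Err(current) as soon as `f` returns None.
    let saturating_inc = |v: u8| v.checked_add(1); // None on overflow

    assert_eq!(
        counter.fetch_update(Ordering::AcqRel, Ordering::Acquire, saturating_inc),
        Ok(254) // the *previous* value is returned on success
    );
    assert_eq!(counter.load(Ordering::Acquire), 255);

    // At 255 the closure returns None, so the update stops with the
    // previously observed value, mirroring try_update's Err(prev) path.
    assert_eq!(
        counter.fetch_update(Ordering::AcqRel, Ordering::Acquire, saturating_inc),
        Err(255)
    );
}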
core::sync::atomic::Atomic::<usize>::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get().cast()
}
core::sync::atomic::Atomic::<usize>::compare_and_swap pub fn compare_and_swap(&self,
current: $int_type,
new: $int_type,
order: Ordering) -> $int_type {
match self.compare_exchange(current,
new,
order,
strongest_failure_ordering(order)) {
Ok(x) => x,
Err(x) => x,
}
}
core::sync::atomic::Atomic::<usize>::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.as_ptr(), current, new, success, failure) }
}
core::sync::atomic::Atomic::<usize>::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.as_ptr(), current, new, success, failure)
}
}
core::sync::atomic::Atomic::<usize>::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<usize>::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<usize>::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<usize>::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<usize>::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<usize>::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<usize>::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<usize>::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
self.try_update(set_order, fetch_order, f)
}
core::sync::atomic::Atomic::<usize>::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<usize>::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::Atomic::<usize>::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::Atomic::<usize>::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::Atomic::<usize>::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { &mut *self.as_ptr() }
}
core::sync::atomic::Atomic::<usize>::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::Atomic::<usize>::into_inner pub const fn into_inner(self) -> $int_type {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(self) }
}
core::sync::atomic::Atomic::<usize>::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.as_ptr(), order) }
}
core::sync::atomic::Atomic::<usize>::new pub const fn new(v: $int_type) -> Self {
// SAFETY:
// `Atomic<T>` is essentially a transparent wrapper around `T`.
unsafe { transmute(v) }
}
core::sync::atomic::Atomic::<usize>::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.as_ptr(), val, order); }
}
core::sync::atomic::Atomic::<usize>::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.as_ptr(), val, order) }
}
core::sync::atomic::Atomic::<usize>::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
core::sync::atomic::Atomic::<usize>::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
core::sync::atomic::atomic_add unsafe fn atomic_add<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_add`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xadd::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xadd::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xadd::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xadd::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xadd::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_and unsafe fn atomic_and<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_and`
unsafe {
match order {
Relaxed => intrinsics::atomic_and::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_and::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_and::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_and::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_and::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_compare_exchange pub unsafe fn atomic_compare_exchange<T: Copy>(
dst: *mut T,
old: T,
new: T,
success: Ordering,
failure: Ordering,
) -> Result<T, T> {
// SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
}
(Relaxed, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
}
(Relaxed, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
}
(Acquire, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
}
(Acquire, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
}
(Acquire, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
}
(Release, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
}
(Release, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
}
(Release, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
}
(AcqRel, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
}
(AcqRel, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
}
(AcqRel, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
}
(SeqCst, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
}
(SeqCst, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
}
(SeqCst, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
}
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
}
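The match above enumerates every valid (success, failure) pairing: the success ordering may be any of the five, while the failure ordering, which applies to the load performed when the comparison fails, can only be Relaxed, Acquire, or SeqCst. A small sketch against the public AtomicUsize wrapper (the values are arbitrary):

use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let x = AtomicUsize::new(5);

    // (success, failure) = (AcqRel, Acquire) is one of the accepted pairs.
    assert_eq!(x.compare_exchange(5, 10, Ordering::AcqRel, Ordering::Acquire), Ok(5));

    // A failed exchange reports the value that was actually observed.
    assert_eq!(x.compare_exchange(5, 99, Ordering::Relaxed, Ordering::Relaxed), Err(10));

    // Release or AcqRel as the *failure* ordering hits the panic arms above:
    // x.compare_exchange(10, 0, Ordering::AcqRel, Ordering::Release); // would panic
}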
core::sync::atomic::atomic_compare_exchange_weak unsafe fn atomic_compare_exchange_weak<T: Copy>(
dst: *mut T,
old: T,
new: T,
success: Ordering,
failure: Ordering,
) -> Result<T, T> {
// SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
}
(Relaxed, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
}
(Relaxed, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
}
(Acquire, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
}
(Acquire, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
}
(Acquire, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
}
(Release, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
}
(Release, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
}
(Release, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
}
(AcqRel, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
}
(AcqRel, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
}
(AcqRel, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
}
(SeqCst, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
}
(SeqCst, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
}
(SeqCst, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
}
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
}
core::sync::atomic::atomic_load unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_load`.
unsafe {
match order {
Relaxed => intrinsics::atomic_load::<T, { AO::Relaxed }>(dst),
Acquire => intrinsics::atomic_load::<T, { AO::Acquire }>(dst),
SeqCst => intrinsics::atomic_load::<T, { AO::SeqCst }>(dst),
Release => panic!("there is no such thing as a release load"),
AcqRel => panic!("there is no such thing as an acquire-release load"),
}
}
}
core::sync::atomic::atomic_nand unsafe fn atomic_nand<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_nand`
unsafe {
match order {
Relaxed => intrinsics::atomic_nand::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_nand::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_nand::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_nand::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_nand::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_or unsafe fn atomic_or<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_or`
unsafe {
match order {
SeqCst => intrinsics::atomic_or::<T, U, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_or::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_or::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_or::<T, U, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_or::<T, U, { AO::Relaxed }>(dst, val),
}
}
}
core::sync::atomic::atomic_store unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
// SAFETY: the caller must uphold the safety contract for `atomic_store`.
unsafe {
match order {
Relaxed => intrinsics::atomic_store::<T, { AO::Relaxed }>(dst, val),
Release => intrinsics::atomic_store::<T, { AO::Release }>(dst, val),
SeqCst => intrinsics::atomic_store::<T, { AO::SeqCst }>(dst, val),
Acquire => panic!("there is no such thing as an acquire store"),
AcqRel => panic!("there is no such thing as an acquire-release store"),
}
}
}
core::sync::atomic::atomic_sub unsafe fn atomic_sub<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_sub`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xsub::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xsub::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xsub::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xsub::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xsub::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_swap unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_swap`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xchg::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xchg::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xchg::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xchg::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xchg::<T, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_umax unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_umax`
unsafe {
match order {
Relaxed => intrinsics::atomic_umax::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_umax::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_umax::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_umax::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_umax::<T, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_umin unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_umin`
unsafe {
match order {
Relaxed => intrinsics::atomic_umin::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_umin::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_umin::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_umin::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_umin::<T, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_xor unsafe fn atomic_xor<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_xor`
unsafe {
match order {
SeqCst => intrinsics::atomic_xor::<T, U, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_xor::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xor::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xor::<T, U, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_xor::<T, U, { AO::Relaxed }>(dst, val),
}
}
}
core::sync::atomic::compiler_fence pub fn compiler_fence(order: Ordering) {
// SAFETY: using an atomic fence is safe.
unsafe {
match order {
Acquire => intrinsics::atomic_singlethreadfence::<{ AO::Acquire }>(),
Release => intrinsics::atomic_singlethreadfence::<{ AO::Release }>(),
AcqRel => intrinsics::atomic_singlethreadfence::<{ AO::AcqRel }>(),
SeqCst => intrinsics::atomic_singlethreadfence::<{ AO::SeqCst }>(),
Relaxed => panic!("there is no such thing as a relaxed fence"),
}
}
}
core::sync::atomic::fence pub fn fence(order: Ordering) {
// SAFETY: using an atomic fence is safe.
unsafe {
match order {
Acquire => intrinsics::atomic_fence::<{ AO::Acquire }>(),
Release => intrinsics::atomic_fence::<{ AO::Release }>(),
AcqRel => intrinsics::atomic_fence::<{ AO::AcqRel }>(),
SeqCst => intrinsics::atomic_fence::<{ AO::SeqCst }>(),
Relaxed => panic!("there is no such thing as a relaxed fence"),
}
}
}
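fence pairs a Release fence in one thread with an Acquire fence in another: if the relaxed store sequenced after the release fence is observed by the relaxed load sequenced before the acquire fence, everything before the release fence becomes visible after the acquire fence. A minimal sketch of that pairing, using the public std wrappers and an illustrative two-thread shape:

use std::sync::atomic::{fence, AtomicBool, AtomicU32, Ordering};
use std::thread;

static DATA: AtomicU32 = AtomicU32::new(0);
static READY: AtomicBool = AtomicBool::new(false);

fn main() {
    let producer = thread::spawn(|| {
        DATA.store(42, Ordering::Relaxed);
        fence(Ordering::Release); // everything above becomes visible...
        READY.store(true, Ordering::Relaxed);
    });
    let consumer = thread::spawn(|| {
        while !READY.load(Ordering::Relaxed) {} // spin until the flag flips
        fence(Ordering::Acquire); // ...to everything after this fence
        assert_eq!(DATA.load(Ordering::Relaxed), 42);
    });
    producer.join().unwrap();
    consumer.join().unwrap();
}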
core::sync::atomic::strongest_failure_ordering fn strongest_failure_ordering(order: Ordering) -> Ordering {
match order {
Release => Relaxed,
Relaxed => Relaxed,
SeqCst => SeqCst,
Acquire => Acquire,
AcqRel => Acquire,
}
}
core::time::Duration::as_micros pub const fn as_micros(&self) -> u128 {
self.secs as u128 * MICROS_PER_SEC as u128
+ (self.nanos.as_inner() / NANOS_PER_MICRO) as u128
}
core::time::Duration::as_millis pub const fn as_millis(&self) -> u128 {
self.secs as u128 * MILLIS_PER_SEC as u128
+ (self.nanos.as_inner() / NANOS_PER_MILLI) as u128
}
core::time::Duration::as_millis_f32 pub const fn as_millis_f32(&self) -> f32 {
(self.secs as f32) * (MILLIS_PER_SEC as f32)
+ (self.nanos.as_inner() as f32) / (NANOS_PER_MILLI as f32)
}
core::time::Duration::as_millis_f64 pub const fn as_millis_f64(&self) -> f64 {
(self.secs as f64) * (MILLIS_PER_SEC as f64)
+ (self.nanos.as_inner() as f64) / (NANOS_PER_MILLI as f64)
}
core::time::Duration::as_nanos pub const fn as_nanos(&self) -> u128 {
self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos.as_inner() as u128
}
core::time::Duration::as_secs pub const fn as_secs(&self) -> u64 {
self.secs
}
core::time::Duration::as_secs_f32 pub const fn as_secs_f32(&self) -> f32 {
(self.secs as f32) + (self.nanos.as_inner() as f32) / (NANOS_PER_SEC as f32)
}
core::time::Duration::as_secs_f64 pub const fn as_secs_f64(&self) -> f64 {
(self.secs as f64) + (self.nanos.as_inner() as f64) / (NANOS_PER_SEC as f64)
}
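These accessors combine the whole-second count with the sub-second count; the integer ones truncate rather than round, while the _f32/_f64 ones add the sub-second fraction. A few worked values through the public Duration API:

use std::time::Duration;

fn main() {
    let d = Duration::new(2, 567_000_000); // 2 s + 567 ms

    assert_eq!(d.as_secs(), 2);             // whole seconds only, truncating
    assert_eq!(d.as_millis(), 2_567);       // secs * 1_000 + nanos / 1_000_000
    assert_eq!(d.as_micros(), 2_567_000);   // secs * 1_000_000 + nanos / 1_000
    assert_eq!(d.as_nanos(), 2_567_000_000);
    assert!((d.as_secs_f64() - 2.567).abs() < 1e-9);
}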
core::time::Duration::checked_add pub const fn checked_add(self, rhs: Duration) -> Option<Duration> {
if let Some(mut secs) = self.secs.checked_add(rhs.secs) {
let mut nanos = self.nanos.as_inner() + rhs.nanos.as_inner();
if nanos >= NANOS_PER_SEC {
nanos -= NANOS_PER_SEC;
let Some(new_secs) = secs.checked_add(1) else {
return None;
};
secs = new_secs;
}
debug_assert!(nanos < NANOS_PER_SEC);
Some(Duration::new(secs, nanos))
} else {
None
}
}
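checked_add's interesting case is the nanosecond carry: when the summed nanos reach NANOS_PER_SEC, one second is carried into secs, and that carry itself can overflow. Two worked cases:

use std::time::Duration;

fn main() {
    // Carry: 0.9 s + 0.2 s rolls 1_100_000_000 ns over into 1 s + 100 ms.
    let a = Duration::new(0, 900_000_000);
    let b = Duration::new(0, 200_000_000);
    assert_eq!(a.checked_add(b), Some(Duration::new(1, 100_000_000)));

    // If the carried second overflows u64, the whole addition is None.
    assert_eq!(Duration::new(u64::MAX, 999_999_999).checked_add(Duration::new(0, 1)), None);
}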
core::time::Duration::checked_div pub const fn checked_div(self, rhs: u32) -> Option<Duration> {
if rhs != 0 {
let (secs, extra_secs) = (self.secs / (rhs as u64), self.secs % (rhs as u64));
let (mut nanos, extra_nanos) =
(self.nanos.as_inner() / rhs, self.nanos.as_inner() % rhs);
nanos +=
((extra_secs * (NANOS_PER_SEC as u64) + extra_nanos as u64) / (rhs as u64)) as u32;
debug_assert!(nanos < NANOS_PER_SEC);
Some(Duration::new(secs, nanos))
} else {
None
}
}
core::time::Duration::checked_sub pub const fn checked_sub(self, rhs: Duration) -> Option<Duration> {
if let Some(mut secs) = self.secs.checked_sub(rhs.secs) {
let nanos = if self.nanos.as_inner() >= rhs.nanos.as_inner() {
self.nanos.as_inner() - rhs.nanos.as_inner()
} else if let Some(sub_secs) = secs.checked_sub(1) {
secs = sub_secs;
self.nanos.as_inner() + NANOS_PER_SEC - rhs.nanos.as_inner()
} else {
return None;
};
debug_assert!(nanos < NANOS_PER_SEC);
Some(Duration::new(secs, nanos))
} else {
None
}
}
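checked_sub mirrors this with a borrow: when rhs has more sub-second nanos than self, one second is borrowed from secs, and a failed borrow means the difference would be negative. For example:

use std::time::Duration;

fn main() {
    // Borrow: 1.1 s - 0.2 s needs nanos + NANOS_PER_SEC - rhs.nanos.
    let a = Duration::new(1, 100_000_000);
    let b = Duration::new(0, 200_000_000);
    assert_eq!(a.checked_sub(b), Some(Duration::new(0, 900_000_000)));

    // rhs larger than self: the borrow fails and the result is None.
    assert_eq!(b.checked_sub(a), None);
}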
core::time::Duration::div_duration_f32 pub const fn div_duration_f32(self, rhs: Duration) -> f32 {
let self_nanos =
(self.secs as f32) * (NANOS_PER_SEC as f32) + (self.nanos.as_inner() as f32);
let rhs_nanos = (rhs.secs as f32) * (NANOS_PER_SEC as f32) + (rhs.nanos.as_inner() as f32);
self_nanos / rhs_nanos
}
core::time::Duration::div_duration_f64 pub const fn div_duration_f64(self, rhs: Duration) -> f64 {
let self_nanos =
(self.secs as f64) * (NANOS_PER_SEC as f64) + (self.nanos.as_inner() as f64);
let rhs_nanos = (rhs.secs as f64) * (NANOS_PER_SEC as f64) + (rhs.nanos.as_inner() as f64);
self_nanos / rhs_nanos
}
core::time::Duration::from_days pub const fn from_days(days: u64) -> Duration {
if days > u64::MAX / (SECS_PER_MINUTE * MINS_PER_HOUR * HOURS_PER_DAY) {
panic!("overflow in Duration::from_days");
}
Duration::from_secs(days * MINS_PER_HOUR * SECS_PER_MINUTE * HOURS_PER_DAY)
}
core::time::Duration::from_hours pub const fn from_hours(hours: u64) -> Duration {
if hours > u64::MAX / (SECS_PER_MINUTE * MINS_PER_HOUR) {
panic!("overflow in Duration::from_hours");
}
Duration::from_secs(hours * MINS_PER_HOUR * SECS_PER_MINUTE)
}
core::time::Duration::from_micros pub const fn from_micros(micros: u64) -> Duration {
let secs = micros / MICROS_PER_SEC;
let subsec_micros = (micros % MICROS_PER_SEC) as u32;
// SAFETY: x % 1_000_000 < 1_000_000, hence
// (x % 1_000_000) * 1_000 < 1_000_000_000
let subsec_nanos = unsafe { Nanoseconds::new_unchecked(subsec_micros * NANOS_PER_MICRO) };
Duration { secs, nanos: subsec_nanos }
}
core::time::Duration::from_millis pub const fn from_millis(millis: u64) -> Duration {
let secs = millis / MILLIS_PER_SEC;
let subsec_millis = (millis % MILLIS_PER_SEC) as u32;
// SAFETY: x % 1_000 < 1_000, hence
// (x % 1_000) * 1_000_000 < 1_000_000_000
let subsec_nanos = unsafe { Nanoseconds::new_unchecked(subsec_millis * NANOS_PER_MILLI) };
Duration { secs, nanos: subsec_nanos }
}
core::time::Duration::from_mins pub const fn from_mins(mins: u64) -> Duration {
if mins > u64::MAX / SECS_PER_MINUTE {
panic!("overflow in Duration::from_mins");
}
Duration::from_secs(mins * SECS_PER_MINUTE)
}
core::time::Duration::from_nanos pub const fn from_nanos(nanos: u64) -> Duration {
const NANOS_PER_SEC: u64 = self::NANOS_PER_SEC as u64;
let secs = nanos / NANOS_PER_SEC;
let subsec_nanos = (nanos % NANOS_PER_SEC) as u32;
// SAFETY: x % 1_000_000_000 < 1_000_000_000
let subsec_nanos = unsafe { Nanoseconds::new_unchecked(subsec_nanos) };
Duration { secs, nanos: subsec_nanos }
}
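Each of these constructors splits its input into whole seconds and a sub-second remainder, then scales the remainder up to nanoseconds; the SAFETY comments hold because a remainder modulo the per-second unit count always scales to less than NANOS_PER_SEC. A few worked splits:

use std::time::Duration;

fn main() {
    assert_eq!(Duration::from_millis(2_567), Duration::new(2, 567_000_000));
    assert_eq!(Duration::from_micros(1_000_001), Duration::new(1, 1_000));
    assert_eq!(Duration::from_nanos(1_000_000_001), Duration::new(1, 1));
}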
core::time::Duration::from_secs pub const fn from_secs(secs: u64) -> Duration {
Duration { secs, nanos: Nanoseconds::ZERO }
}
core::time::Duration::from_secs_f32 pub fn from_secs_f32(secs: f32) -> Duration {
match Duration::try_from_secs_f32(secs) {
Ok(v) => v,
Err(e) => panic!("{e}"),
}
}
core::time::Duration::from_weeks pub const fn from_weeks(weeks: u64) -> Duration {
if weeks > u64::MAX / (SECS_PER_MINUTE * MINS_PER_HOUR * HOURS_PER_DAY * DAYS_PER_WEEK) {
panic!("overflow in Duration::from_weeks");
}
Duration::from_secs(weeks * MINS_PER_HOUR * SECS_PER_MINUTE * HOURS_PER_DAY * DAYS_PER_WEEK)
}
core::time::Duration::is_zero pub const fn is_zero(&self) -> bool {
self.secs == 0 && self.nanos.as_inner() == 0
}
core::time::Duration::new pub const fn new(secs: u64, nanos: u32) -> Duration {
if nanos < NANOS_PER_SEC {
// SAFETY: nanos < NANOS_PER_SEC, therefore nanos is within the valid range
Duration { secs, nanos: unsafe { Nanoseconds::new_unchecked(nanos) } }
} else {
let secs = secs
.checked_add((nanos / NANOS_PER_SEC) as u64)
.expect("overflow in Duration::new");
let nanos = nanos % NANOS_PER_SEC;
// SAFETY: nanos % NANOS_PER_SEC < NANOS_PER_SEC, therefore nanos is within the valid range
Duration { secs, nanos: unsafe { Nanoseconds::new_unchecked(nanos) } }
}
}
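Duration::new is the one constructor that normalizes an out-of-range nanos argument instead of rejecting it, folding whole seconds out of nanos and panicking only if that fold overflows secs:

use std::time::Duration;

fn main() {
    // nanos >= NANOS_PER_SEC is folded into extra whole seconds.
    assert_eq!(Duration::new(1, 1_500_000_000), Duration::new(2, 500_000_000));

    // Only the seconds adjustment can fail; this would panic with
    // "overflow in Duration::new":
    // let _ = Duration::new(u64::MAX, 1_000_000_000);
}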
core::time::Duration::subsec_micros pub const fn subsec_micros(&self) -> u32 {
self.nanos.as_inner() / NANOS_PER_MICRO
}
core::time::Duration::subsec_millis pub const fn subsec_millis(&self) -> u32 {
self.nanos.as_inner() / NANOS_PER_MILLI
}
core::time::Duration::subsec_nanos pub const fn subsec_nanos(&self) -> u32 {
self.nanos.as_inner()
}
core::tuple::<impl core::cmp::PartialEq for (A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (B, A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (B, A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (C, B, A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (C, B, A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (D, C, B, A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (D, C, B, A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (E, D, C, B, A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (E, D, C, B, A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (T,)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (T,)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::ub_checks::check_language_ub pub(crate) const fn check_language_ub() -> bool {
// Only used for UB checks so we may const_eval_select.
const_eval_select!(
@capture { } -> bool:
if const {
// Always disable UB checks.
false
} else {
// Disable UB checks in Miri.
!cfg!(miri)
}
) && intrinsics::ub_checks()
}
core::ub_checks::check_language_ub::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ub_checks::is_valid_allocation_size pub(crate) const fn is_valid_allocation_size(size: usize, len: usize) -> bool {
let max_len = if size == 0 { usize::MAX } else { isize::MAX as usize / size };
len <= max_len
}
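The predicate caps len so that size * len never exceeds isize::MAX bytes (the limit Rust places on a single allocation), with zero-sized elements allowed at any length. A standalone copy of the predicate, kept here only for illustration, with worked values for a 64-bit target:

// Illustrative standalone copy of the predicate above.
const fn is_valid_allocation_size(size: usize, len: usize) -> bool {
    let max_len = if size == 0 { usize::MAX } else { isize::MAX as usize / size };
    len <= max_len
}

fn main() {
    // Zero-sized elements: any length is fine, the allocation stays at 0 bytes.
    assert!(is_valid_allocation_size(0, usize::MAX));

    // 4-byte elements: len * 4 must stay within isize::MAX.
    assert!(is_valid_allocation_size(4, (isize::MAX as usize) / 4));
    assert!(!is_valid_allocation_size(4, (isize::MAX as usize) / 4 + 1));
}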
core::ub_checks::maybe_is_aligned pub(crate) const fn maybe_is_aligned(ptr: *const (), align: usize) -> bool {
// This is just for safety checks so we can const_eval_select.
const_eval_select!(
@capture { ptr: *const (), align: usize } -> bool:
if const {
true
} else {
ptr.is_aligned_to(align)
}
)
}
core::ub_checks::maybe_is_aligned::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ub_checks::maybe_is_aligned_and_not_null pub(crate) const fn maybe_is_aligned_and_not_null(
ptr: *const (),
align: usize,
is_zst: bool,
) -> bool {
// This is just for safety checks so we can const_eval_select.
maybe_is_aligned(ptr, align) && (is_zst || !ptr.is_null())
}
core::ub_checks::maybe_is_nonoverlapping pub(crate) const fn maybe_is_nonoverlapping(
src: *const (),
dst: *const (),
size: usize,
count: usize,
) -> bool {
// This is just for safety checks so we can const_eval_select.
const_eval_select!(
@capture { src: *const (), dst: *const (), size: usize, count: usize } -> bool:
if const {
true
} else {
let src_usize = src.addr();
let dst_usize = dst.addr();
let Some(size) = size.checked_mul(count) else {
crate::panicking::panic_nounwind(
"is_nonoverlapping: `size_of::<T>() * count` overflows a usize",
)
};
let diff = src_usize.abs_diff(dst_usize);
// If the absolute distance between the ptrs is at least as big as the size of the buffer,
// they do not overlap.
diff >= size
}
)
}
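The runtime branch reduces the overlap question to address arithmetic: two size-byte regions are disjoint exactly when the distance between their start addresses is at least size. A small sketch of that check using the stable pointer addr API (the buffer layout is illustrative):

fn main() {
    let buf = [0u8; 16];
    let src = buf.as_ptr().addr();
    let dst = buf[8..].as_ptr().addr();

    // [0..8) and [8..16): distance 8 >= size 8, so the regions are disjoint.
    assert!(src.abs_diff(dst) >= 8);

    // [0..9) and [8..17): distance 8 < size 9, so the regions overlap.
    assert!(src.abs_diff(dst) < 9);
}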
core::ub_checks::maybe_is_nonoverlapping::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::unicode::printable::check fn check(x: u16, singletonuppers: &[(u8, u8)], singletonlowers: &[u8], normal: &[u8]) -> bool {
let xupper = (x >> 8) as u8;
let mut lowerstart = 0;
for &(upper, lowercount) in singletonuppers {
let lowerend = lowerstart + lowercount as usize;
if xupper == upper {
for &lower in &singletonlowers[lowerstart..lowerend] {
if lower == x as u8 {
return false;
}
}
} else if xupper < upper {
break;
}
lowerstart = lowerend;
}
let mut x = x as i32;
let mut normal = normal.iter().cloned();
let mut current = true;
while let Some(v) = normal.next() {
let len = if v & 0x80 != 0 {
((v & 0x7f) as i32) << 8 | normal.next().unwrap() as i32
} else {
v as i32
};
x -= len;
if x < 0 {
break;
}
current = !current;
}
current
}
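The normal table walked above is a run-length encoding: alternating runs of in-the-set and not-in-the-set codepoints, starting in the set, with run lengths of 0x80 or more spread over two bytes. A toy re-implementation of the same walk (the three-run table is made up for illustration):

// Toy copy of the `normal` walk: runs of alternating classes, starting
// in the first class; a set high bit selects a two-byte run length.
fn in_first_class(mut x: i32, normal: &[u8]) -> bool {
    let mut iter = normal.iter().copied();
    let mut current = true;
    while let Some(v) = iter.next() {
        let len = if v & 0x80 != 0 {
            ((v & 0x7f) as i32) << 8 | iter.next().unwrap() as i32
        } else {
            v as i32
        };
        x -= len;
        if x < 0 {
            break; // x falls inside this run
        }
        current = !current; // the next run belongs to the other class
    }
    current
}

fn main() {
    // Runs: 3 in, 2 out, 4 in => classes over offsets 0..9.
    let normal = [3u8, 2, 4];
    assert!(in_first_class(0, &normal));  // first run
    assert!(!in_first_class(3, &normal)); // second run
    assert!(in_first_class(5, &normal));  // third run
}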
core::unicode::printable::is_printable pub(crate) fn is_printable(x: char) -> bool {
let x = x as u32;
let lower = x as u16;
if x < 32 {
// ASCII fast path
false
} else if x < 127 {
// ASCII fast path
true
} else if x < 0x10000 {
check(lower, SINGLETONS0U, SINGLETONS0L, NORMAL0)
} else if x < 0x20000 {
check(lower, SINGLETONS1U, SINGLETONS1L, NORMAL1)
} else {
if 0x2a6e0 <= x && x < 0x2a700 {
return false;
}
if 0x2b81e <= x && x < 0x2b820 {
return false;
}
if 0x2ceae <= x && x < 0x2ceb0 {
return false;
}
if 0x2ebe1 <= x && x < 0x2ebf0 {
return false;
}
if 0x2ee5e <= x && x < 0x2f800 {
return false;
}
if 0x2fa1e <= x && x < 0x30000 {
return false;
}
if 0x3134b <= x && x < 0x31350 {
return false;
}
if 0x3347a <= x && x < 0xe0100 {
return false;
}
if 0xe01f0 <= x && x < 0x110000 {
return false;
}
true
}
}
core::unicode::unicode_data::ShortOffsetRunHeader::prefix_sum const fn prefix_sum(&self) -> u32 {
self.0 & ((1 << 21) - 1)
}
core::unicode::unicode_data::ShortOffsetRunHeader::start_index const fn start_index(&self) -> usize {
(self.0 >> 21) as usize
}
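ShortOffsetRunHeader packs two fields into one u32: the low 21 bits hold a prefix sum over codepoints (21 bits suffice, since char::MAX is 0x10FFFF < 2^21) and the high 11 bits index into the OFFSETS table. An illustrative re-implementation of the packing; the constructor is invented here purely to show the layout:

// Illustrative 11/21-bit packing; `new` is hypothetical.
struct Header(u32);

impl Header {
    const fn new(start_index: u32, prefix_sum: u32) -> Self {
        Header((start_index << 21) | prefix_sum)
    }
    const fn prefix_sum(&self) -> u32 {
        self.0 & ((1 << 21) - 1)
    }
    const fn start_index(&self) -> usize {
        (self.0 >> 21) as usize
    }
}

fn main() {
    let h = Header::new(3, 0x10FFFF); // the largest possible prefix sum still fits
    assert_eq!(h.start_index(), 3);
    assert_eq!(h.prefix_sum(), 0x10FFFF);
}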
core::unicode::unicode_data::grapheme_extend::lookup pub fn lookup(c: char) -> bool {
debug_assert!(!c.is_ascii());
(c as u32) >= 0x300 && lookup_slow(c)
}
core::unicode::unicode_data::grapheme_extend::lookup_slow fn lookup_slow(c: char) -> bool {
const {
assert!(SHORT_OFFSET_RUNS.last().unwrap().0 > char::MAX as u32);
let mut i = 0;
while i < SHORT_OFFSET_RUNS.len() {
assert!(SHORT_OFFSET_RUNS[i].start_index() < OFFSETS.len());
i += 1;
}
}
// SAFETY: We just ensured the last element of `SHORT_OFFSET_RUNS` is greater than `std::char::MAX`
// and the start indices of all elements in `SHORT_OFFSET_RUNS` are smaller than `OFFSETS.len()`.
unsafe { super::skip_search(c, &SHORT_OFFSET_RUNS, &OFFSETS) }
}
core::unicode::unicode_data::skip_search unsafe fn skip_search<const SOR: usize, const OFFSETS: usize>(
needle: char,
short_offset_runs: &[ShortOffsetRunHeader; SOR],
offsets: &[u8; OFFSETS],
) -> bool {
let needle = needle as u32;
let last_idx =
match short_offset_runs.binary_search_by_key(&(needle << 11), |header| header.0 << 11) {
Ok(idx) => idx + 1,
Err(idx) => idx,
};
// SAFETY: `last_idx` *cannot* be past the end of the array, as the last
// element is greater than `std::char::MAX` (the largest possible needle)
// as guaranteed by the caller.
//
// So, we cannot have found it (i.e. `Ok(idx) => idx + 1 != length`) and the
// correct location cannot be past it, so `Err(idx) => idx != length` either.
//
// This means that we can avoid bounds checking for the accesses below, too.
//
// We need to use `intrinsics::assume` since the `panic_nounwind` contained
// in `hint::assert_unchecked` may not be optimized out.
unsafe { crate::intrinsics::assume(last_idx < SOR) };
let mut offset_idx = short_offset_runs[last_idx].start_index();
let length = if let Some(next) = short_offset_runs.get(last_idx + 1) {
(*next).start_index() - offset_idx
} else {
offsets.len() - offset_idx
};
let prev =
last_idx.checked_sub(1).map(|prev| short_offset_runs[prev].prefix_sum()).unwrap_or(0);
let total = needle - prev;
let mut prefix_sum = 0;
for _ in 0..(length - 1) {
// SAFETY: It is guaranteed that `length <= OFFSETS - offset_idx`,
// so it follows that `length - 1 + offset_idx < OFFSETS`, therefore
// `offset_idx < OFFSETS` is always true in this loop.
//
// We need to use `intrinsics::assume` since the `panic_nounwind` contained
// in `hint::assert_unchecked` may not be optimized out.
unsafe { crate::intrinsics::assume(offset_idx < OFFSETS) };
let offset = offsets[offset_idx];
prefix_sum += offset as u32;
if prefix_sum > total {
break;
}
offset_idx += 1;
}
offset_idx % 2 == 1
}
core::unit::<impl core::iter::traits::collect::FromIterator<()> for ()>::from_iter fn from_iter<I: IntoIterator<Item = ()>>(iter: I) -> Self {
iter.into_iter().for_each(|()| {})
}
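Collecting into () simply drives the iterator for its side effects; this impl is also what lets unit items participate in wrappers like Result<(), E> collection. A one-line illustration:

fn main() {
    // Each item is (), so the collect target can be () itself.
    let () = (0..3).map(|i| println!("line {i}")).collect();
}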
<! as core::fmt::Debug>::fmt fn fmt(&self, _: &mut Formatter<'_>) -> Result {
*self
}
<! as core::fmt::Display>::fmt fn fmt(&self, _: &mut Formatter<'_>) -> Result {
*self
}
<core::alloc::layout::Layout as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
<core::alloc::layout::Layout as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
<core::alloc::layout::Layout as core::cmp::PartialEq>::eq#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
<core::alloc::layout::Layout as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
<core::alloc::layout::Layout as core::hash::Hash>::hash#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
<core::any::TypeId as core::clone::Clone>::clone#[cfg_attr(feature = "ferrocene_subset", derive_const(Clone))]
<core::array::TryFromSliceError as core::clone::Clone>::clone#[derive(Debug, Copy, Clone)]
<core::array::TryFromSliceError as core::fmt::Debug>::fmt#[derive(Debug, Copy, Clone)]
<core::array::iter::IntoIter<T, N> as core::clone::Clone>::clone#[derive(Clone)]
<core::ascii::EscapeDefault as core::clone::Clone>::clone#[derive(Clone)]
<core::ascii::ascii_char::AsciiChar as core::clone::Clone>::clone#[derive_const(Clone, Eq, PartialEq, Ord, PartialOrd)]
<core::ascii::ascii_char::AsciiChar as core::cmp::Eq>::assert_fields_are_eq#[derive_const(Clone, Eq, PartialEq, Ord, PartialOrd)]
<core::ascii::ascii_char::AsciiChar as core::cmp::Ord>::cmp#[derive_const(Clone, Eq, PartialEq, Ord, PartialOrd)]
<core::ascii::ascii_char::AsciiChar as core::cmp::PartialEq>::eq#[derive_const(Clone, Eq, PartialEq, Ord, PartialOrd)]
<core::ascii::ascii_char::AsciiChar as core::cmp::PartialOrd>::partial_cmp#[derive_const(Clone, Eq, PartialEq, Ord, PartialOrd)]
<core::ascii::ascii_char::AsciiChar as core::hash::Hash>::hash#[derive(Copy, Hash)]
<core::cell::BorrowError as core::fmt::Debug>::fmt#[derive(Debug)]
<core::cell::BorrowMutError as core::fmt::Debug>::fmt#[derive(Debug)]
<core::char::EscapeDebug as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::char::EscapeDebug as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::char::convert::CharTryFromError as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::char::convert::CharTryFromError as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::char::convert::CharTryFromError as core::cmp::PartialEq>::eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::char::convert::CharTryFromError as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::char::decode::DecodeUtf16<I> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::char::decode::DecodeUtf16<I> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::char::decode::DecodeUtf16Error as core::clone::Clone>::clone#[derive(Debug, Clone, Eq, PartialEq)]
<core::char::decode::DecodeUtf16Error as core::cmp::Eq>::assert_fields_are_eq#[derive(Debug, Clone, Eq, PartialEq)]
<core::char::decode::DecodeUtf16Error as core::cmp::PartialEq>::eq#[derive(Debug, Clone, Eq, PartialEq)]
<core::char::decode::DecodeUtf16Error as core::fmt::Debug>::fmt#[derive(Debug, Clone, Eq, PartialEq)]
<core::cmp::Ordering as core::clone::Clone>::clone#[derive_const(Clone, Eq, PartialOrd, Ord, PartialEq)]
<core::cmp::Ordering as core::cmp::Eq>::assert_fields_are_eq#[derive_const(Clone, Eq, PartialOrd, Ord, PartialEq)]
<core::cmp::Ordering as core::cmp::Ord>::cmp#[derive_const(Clone, Eq, PartialOrd, Ord, PartialEq)]
<core::cmp::Ordering as core::cmp::PartialEq>::eq#[derive_const(Clone, Eq, PartialOrd, Ord, PartialEq)]
<core::cmp::Ordering as core::cmp::PartialOrd>::partial_cmp#[derive_const(Clone, Eq, PartialOrd, Ord, PartialEq)]
<core::cmp::Ordering as core::fmt::Debug>::fmt#[derive(Copy, Debug, Hash)]
<core::cmp::Ordering as core::hash::Hash>::hash#[derive(Copy, Debug, Hash)]
<core::convert::Infallible as core::clone::Clone>::clone fn clone(&self) -> Infallible {
match *self {}
}
<core::convert::Infallible as core::cmp::PartialEq>::eq fn eq(&self, _: &Infallible) -> bool {
match *self {}
}
<core::convert::Infallible as core::fmt::Debug>::fmt fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {}
}
<core::convert::Infallible as core::fmt::Display>::fmt fn fmt(&self, _: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {}
}
<core::escape::AlwaysEscaped as core::clone::Clone>::clone#[derive(Clone, Copy)]
<core::escape::EscapeIterInner<N, ESCAPING> as core::clone::Clone>::clone#[derive(Clone)]
<core::escape::MaybeEscaped as core::clone::Clone>::clone#[derive(Clone, Copy)]
<core::escape::MaybeEscapedCharacter<N> as core::clone::Clone>::clone#[derive(Clone, Copy)]
<core::ffi::c_str::CStr as core::cmp::Eq>::assert_fields_are_eq#[derive(PartialEq, Eq, Hash)]
<core::ffi::c_str::CStr as core::cmp::PartialEq>::eq#[derive(PartialEq, Eq, Hash)]
<core::ffi::c_str::CStr as core::hash::Hash>::hash#[derive(PartialEq, Eq, Hash)]
<core::ffi::c_str::FromBytesWithNulError as core::clone::Clone>::clone#[derive(Clone, Copy, PartialEq, Eq, Debug)]
<core::ffi::c_str::FromBytesWithNulError as core::cmp::Eq>::assert_fields_are_eq#[derive(Clone, Copy, PartialEq, Eq, Debug)]
<core::ffi::c_str::FromBytesWithNulError as core::cmp::PartialEq>::eq#[derive(Clone, Copy, PartialEq, Eq, Debug)]
<core::ffi::c_str::FromBytesWithNulError as core::fmt::Debug>::fmt#[derive(Clone, Copy, PartialEq, Eq, Debug)]
<core::fmt::Alignment as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::Alignment as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::Alignment as core::cmp::PartialEq>::eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::Alignment as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::Arguments<'a> as core::clone::Clone>::clone#[derive(Copy, Clone)]
<core::fmt::DebugAsHex as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::DebugAsHex as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::DebugAsHex as core::cmp::PartialEq>::eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::DebugAsHex as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::Error as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
<core::fmt::Error as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
<core::fmt::Error as core::cmp::Ord>::cmp#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
<core::fmt::Error as core::cmp::PartialEq>::eq#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
<core::fmt::Error as core::cmp::PartialOrd>::partial_cmp#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
<core::fmt::Error as core::default::Default>::default#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
<core::fmt::Error as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
<core::fmt::Error as core::hash::Hash>::hash#[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
<core::fmt::FormattingOptions as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::FormattingOptions as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::FormattingOptions as core::cmp::PartialEq>::eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::FormattingOptions as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::Sign as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::Sign as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::Sign as core::cmp::PartialEq>::eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::Sign as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::fmt::num_buffer::NumBuffer<T> as core::fmt::Debug>::fmt#[derive(Debug)]
<core::fmt::rt::Argument<'a> as core::clone::Clone>::clone#[derive(Copy, Clone)]
<core::fmt::rt::ArgumentType<'a> as core::clone::Clone>::clone#[derive(Copy, Clone)]
<core::intrinsics::AtomicOrdering as core::cmp::Eq>::assert_fields_are_eq#[derive(Debug, ConstParamTy, PartialEq, Eq)]
<core::intrinsics::AtomicOrdering as core::cmp::PartialEq>::eq#[derive(Debug, ConstParamTy, PartialEq, Eq)]
<core::intrinsics::AtomicOrdering as core::fmt::Debug>::fmt#[derive(Debug, ConstParamTy, PartialEq, Eq)]
<core::iter::adapters::chain::Chain<A, B> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::chain::Chain<A, B> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::cloned::Cloned<I> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::cloned::Cloned<I> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::copied::Copied<I> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::copied::Copied<I> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::enumerate::Enumerate<I> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::enumerate::Enumerate<I> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::filter::Filter<I, P> as core::clone::Clone>::clone#[derive(Clone)]
<core::iter::adapters::flatten::FlattenCompat<I, U> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::flatten::FlattenCompat<I, U> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::fuse::Fuse<I> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::fuse::Fuse<I> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::map::Map<I, F> as core::clone::Clone>::clone#[derive(Clone)]
<core::iter::adapters::rev::Rev<T> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::rev::Rev<T> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::skip::Skip<I> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::skip::Skip<I> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::step_by::StepBy<I> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::step_by::StepBy<I> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::take::Take<I> as core::clone::Clone>::clone#[derive(Clone, Debug)]
<core::iter::adapters::take::Take<I> as core::fmt::Debug>::fmt#[derive(Clone, Debug)]
<core::iter::adapters::take_while::TakeWhile<I, P> as core::clone::Clone>::clone#[derive(Clone)]
<core::iter::adapters::zip::Zip<A, B> as core::clone::Clone>::clone#[derive(Clone)]
<core::iter::sources::from_fn::FromFn<F> as core::clone::Clone>::clone#[derive(Clone)]
<core::mem::manually_drop::ManuallyDrop<T> as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, Default)]
<core::mem::manually_drop::ManuallyDrop<T> as core::default::Default>::default#[derive(Copy, Clone, Debug, Default)]
<core::mem::manually_drop::ManuallyDrop<T> as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, Default)]
<core::mem::maybe_dangling::MaybeDangling<P> as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, Default)]
<core::mem::maybe_dangling::MaybeDangling<P> as core::default::Default>::default#[derive(Copy, Clone, Debug, Default)]
<core::mem::maybe_dangling::MaybeDangling<P> as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, Default)]
<core::num::FpCategory as core::clone::Clone>::clone#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::FpCategory as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::FpCategory as core::cmp::PartialEq>::eq#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::FpCategory as core::fmt::Debug>::fmt#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::diy_float::Fp as core::clone::Clone>::clone#[derive(Copy, Clone, Debug)]
<core::num::diy_float::Fp as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug)]
<core::num::error::IntErrorKind as core::clone::Clone>::clone#[derive(Debug, Clone, PartialEq, Eq, Copy, Hash)]
<core::num::error::IntErrorKind as core::cmp::Eq>::assert_fields_are_eq#[derive(Debug, Clone, PartialEq, Eq, Copy, Hash)]
<core::num::error::IntErrorKind as core::cmp::PartialEq>::eq#[derive(Debug, Clone, PartialEq, Eq, Copy, Hash)]
<core::num::error::IntErrorKind as core::fmt::Debug>::fmt#[derive(Debug, Clone, PartialEq, Eq, Copy, Hash)]
<core::num::error::IntErrorKind as core::hash::Hash>::hash#[derive(Debug, Clone, PartialEq, Eq, Copy, Hash)]
<core::num::error::ParseIntError as core::clone::Clone>::clone#[derive(Debug, Clone, PartialEq, Eq)]
<core::num::error::ParseIntError as core::cmp::Eq>::assert_fields_are_eq#[derive(Debug, Clone, PartialEq, Eq)]
<core::num::error::ParseIntError as core::cmp::PartialEq>::eq#[derive(Debug, Clone, PartialEq, Eq)]
<core::num::error::ParseIntError as core::fmt::Debug>::fmt#[derive(Debug, Clone, PartialEq, Eq)]
<core::num::error::TryFromIntError as core::clone::Clone>::clone#[derive(Debug, Copy, Clone, PartialEq, Eq)]
<core::num::error::TryFromIntError as core::cmp::Eq>::assert_fields_are_eq#[derive(Debug, Copy, Clone, PartialEq, Eq)]
<core::num::error::TryFromIntError as core::cmp::PartialEq>::eq#[derive(Debug, Copy, Clone, PartialEq, Eq)]
<core::num::error::TryFromIntError as core::convert::From<!>>::from fn from(never: !) -> TryFromIntError {
// Match rather than coerce to make sure that code like
// `From<Infallible> for TryFromIntError` above will keep working
// when `Infallible` becomes an alias to `!`.
match never {}
}
<core::num::error::TryFromIntError as core::convert::From<core::convert::Infallible>>::from fn from(x: Infallible) -> TryFromIntError {
match x {}
}
<core::num::error::TryFromIntError as core::fmt::Debug>::fmt#[derive(Debug, Copy, Clone, PartialEq, Eq)]
<core::num::flt2dec::Sign as core::clone::Clone>::clone#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::flt2dec::Sign as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::flt2dec::Sign as core::cmp::PartialEq>::eq#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::flt2dec::Sign as core::fmt::Debug>::fmt#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::flt2dec::decoder::Decoded as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::num::flt2dec::decoder::Decoded as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::num::flt2dec::decoder::Decoded as core::cmp::PartialEq>::eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::num::flt2dec::decoder::Decoded as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::num::flt2dec::decoder::FullDecoded as core::clone::Clone>::clone#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::num::flt2dec::decoder::FullDecoded as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::num::flt2dec::decoder::FullDecoded as core::cmp::PartialEq>::eq#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::num::flt2dec::decoder::FullDecoded as core::fmt::Debug>::fmt#[derive(Copy, Clone, Debug, PartialEq, Eq)]
<core::num::fmt::Formatted<'a> as core::clone::Clone>::clone#[derive(Clone)]
<core::num::fmt::Part<'a> as core::clone::Clone>::clone#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::fmt::Part<'a> as core::cmp::Eq>::assert_fields_are_eq#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::fmt::Part<'a> as core::cmp::PartialEq>::eq#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::fmt::Part<'a> as core::fmt::Debug>::fmt#[derive(Copy, Clone, PartialEq, Eq, Debug)]
<core::num::niche_types::Nanoseconds as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroCharInner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroI128Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroI16Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroI32Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroI64Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroI8Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroIsizeInner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroU128Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroU16Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroU32Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroU64Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroU8Inner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::NonZeroUsizeInner as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::num::niche_types::UsizeNoHighBit as core::clone::Clone>::clone #[derive(Clone, Copy)]
<core::ops::control_flow::ControlFlow<B, C> as core::clone::Clone>::clone #[derive_const(Clone, PartialEq, Eq)]
<core::ops::control_flow::ControlFlow<B, C> as core::cmp::Eq>::assert_fields_are_eq #[derive_const(Clone, PartialEq, Eq)]
<core::ops::control_flow::ControlFlow<B, C> as core::cmp::PartialEq>::eq #[derive_const(Clone, PartialEq, Eq)]
<core::ops::control_flow::ControlFlow<B, C> as core::fmt::Debug>::fmt #[derive(Debug, Copy, Hash)]
<core::ops::control_flow::ControlFlow<B, C> as core::hash::Hash>::hash #[derive(Debug, Copy, Hash)]
<core::ops::index_range::IndexRange as core::clone::Clone>::clone #[derive_const(Clone, Eq, PartialEq)]
<core::ops::index_range::IndexRange as core::cmp::Eq>::assert_fields_are_eq #[derive_const(Clone, Eq, PartialEq)]
<core::ops::index_range::IndexRange as core::cmp::PartialEq>::eq #[derive_const(Clone, Eq, PartialEq)]
<core::ops::index_range::IndexRange as core::fmt::Debug>::fmt #[derive(Debug)]
<core::ops::range::Bound<T> as core::clone::Clone>::clone #[derive_const(Clone, Eq, PartialEq)]
<core::ops::range::Bound<T> as core::cmp::Eq>::assert_fields_are_eq #[derive_const(Clone, Eq, PartialEq)]
<core::ops::range::Bound<T> as core::cmp::PartialEq>::eq #[derive_const(Clone, Eq, PartialEq)]
<core::ops::range::Bound<T> as core::fmt::Debug>::fmt #[derive(Copy, Debug, Hash)]
<core::ops::range::Bound<T> as core::hash::Hash>::hash #[derive(Copy, Debug, Hash)]
<core::ops::range::Range<Idx> as core::clone::Clone>::clone #[derive_const(Clone, Default, PartialEq)] // not Copy -- see #27186
<core::ops::range::Range<Idx> as core::cmp::Eq>::assert_fields_are_eq #[derive(Eq, Hash)]
<core::ops::range::Range<Idx> as core::cmp::PartialEq>::eq #[derive_const(Clone, Default, PartialEq)] // not Copy -- see #27186
<core::ops::range::Range<Idx> as core::default::Default>::default #[derive_const(Clone, Default, PartialEq)] // not Copy -- see #27186
<core::ops::range::Range<Idx> as core::hash::Hash>::hash #[derive(Eq, Hash)]
<core::ops::range::RangeFrom<Idx> as core::clone::Clone>::clone #[derive_const(Clone, PartialEq)] // not Copy -- see #27186
<core::ops::range::RangeFrom<Idx> as core::cmp::Eq>::assert_fields_are_eq #[derive(Eq, Hash)]
<core::ops::range::RangeFrom<Idx> as core::cmp::PartialEq>::eq #[derive_const(Clone, PartialEq)] // not Copy -- see #27186
<core::ops::range::RangeFrom<Idx> as core::hash::Hash>::hash #[derive(Eq, Hash)]
<core::ops::range::RangeFull as core::clone::Clone>::clone #[derive_const(Clone, Default, PartialEq, Eq)]
<core::ops::range::RangeFull as core::cmp::Eq>::assert_fields_are_eq #[derive_const(Clone, Default, PartialEq, Eq)]
<core::ops::range::RangeFull as core::cmp::PartialEq>::eq #[derive_const(Clone, Default, PartialEq, Eq)]
<core::ops::range::RangeFull as core::default::Default>::default #[derive_const(Clone, Default, PartialEq, Eq)]
<core::ops::range::RangeFull as core::hash::Hash>::hash #[derive(Copy, Hash)]
<core::ops::range::RangeInclusive<Idx> as core::clone::Clone>::clone #[derive(Clone, Hash)]
<core::ops::range::RangeInclusive<Idx> as core::cmp::Eq>::assert_fields_are_eq #[derive_const(Eq, PartialEq)] // not Copy -- see #27186
<core::ops::range::RangeInclusive<Idx> as core::cmp::PartialEq>::eq #[derive_const(Eq, PartialEq)] // not Copy -- see #27186
<core::ops::range::RangeInclusive<Idx> as core::hash::Hash>::hash #[derive(Clone, Hash)]
<core::ops::range::RangeTo<Idx> as core::clone::Clone>::clone #[derive_const(Clone, PartialEq)]
<core::ops::range::RangeTo<Idx> as core::cmp::Eq>::assert_fields_are_eq #[derive(Copy, Eq, Hash)]
<core::ops::range::RangeTo<Idx> as core::cmp::PartialEq>::eq #[derive_const(Clone, PartialEq)]
<core::ops::range::RangeTo<Idx> as core::hash::Hash>::hash #[derive(Copy, Eq, Hash)]
<core::ops::range::RangeToInclusive<Idx> as core::clone::Clone>::clone #[derive(Clone, PartialEq, Eq)]
<core::ops::range::RangeToInclusive<Idx> as core::cmp::Eq>::assert_fields_are_eq #[derive(Clone, PartialEq, Eq)]
<core::ops::range::RangeToInclusive<Idx> as core::cmp::PartialEq>::eq #[derive(Clone, PartialEq, Eq)]
<core::ops::range::RangeToInclusive<Idx> as core::hash::Hash>::hash #[derive(Copy, Hash)]
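The repeated `// not Copy -- see #27186` notes above record a deliberate design choice: ranges double as iterators, and a `Copy` iterator is too easy to advance on an implicit copy while the original silently stays put. A minimal sketch of the behavior this protects (illustrative code, not part of the listing):

fn main() {
    let mut r = 0..3;
    // Because `Range` is not `Copy`, handing it to another consumer
    // requires `by_ref` (or `&mut`), so the progress is shared.
    let first: Vec<u32> = r.by_ref().take(2).collect();
    assert_eq!(first, vec![0, 1]);
    // `r` itself advanced; a silently copied range would still yield 0 here.
    assert_eq!(r.next(), Some(2));
}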
<core::ops::try_trait::NeverShortCircuit<T> as core::ops::try_trait::FromResidual>::from_residual fn from_residual(never: NeverShortCircuitResidual) -> Self {
match never {}
}
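The `match never {}` body above is the standard way out of an uninhabited type: an empty match is exhaustive when the scrutinee has no values, and the whole expression can assume any result type. The same idiom works on stable Rust with `core::convert::Infallible` in place of the internal residual type (the function name here is illustrative):

use core::convert::Infallible;

// No arm is needed: `Infallible` has no values, so this body is
// statically unreachable and may claim to return anything.
fn from_never(never: Infallible) -> u32 {
    match never {}
}

fn main() {
    let _ = from_never; // never callable: no `Infallible` value exists
}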
<core::option::IntoIter<A> as core::clone::Clone>::clone #[derive(Clone, Debug)]
<core::option::IntoIter<A> as core::fmt::Debug>::fmt #[derive(Clone, Debug)]
<core::option::Item<A> as core::clone::Clone>::clone #[derive(Clone, Debug)]
<core::option::Item<A> as core::fmt::Debug>::fmt #[derive(Clone, Debug)]
<core::option::Iter<'a, A> as core::fmt::Debug>::fmt #[derive(Debug)]
<core::option::IterMut<'a, A> as core::fmt::Debug>::fmt #[derive(Debug)]
<core::option::Option<T> as core::cmp::Eq>::assert_fields_are_eq #[derive_const(Eq)]
<core::option::Option<T> as core::fmt::Debug>::fmt #[derive(Copy, Debug, Hash)]
<core::option::Option<T> as core::hash::Hash>::hash #[derive(Copy, Debug, Hash)]
<core::panic::location::Location<'a> as core::clone::Clone>::clone #[derive(Copy, Clone)]
<core::panic::panic_info::PanicInfo<'a> as core::fmt::Debug>::fmt #[derive(Debug)]
<core::panicking::AssertKind as core::fmt::Debug>::fmt #[derive(Debug)]
<core::ptr::alignment::Alignment as core::clone::Clone>::clone #[derive(Copy, Clone, PartialEq, Eq)]
<core::ptr::alignment::Alignment as core::cmp::Eq>::assert_fields_are_eq #[derive(Copy, Clone, PartialEq, Eq)]
<core::ptr::alignment::Alignment as core::cmp::PartialEq>::eq #[derive(Copy, Clone, PartialEq, Eq)]
<core::ptr::alignment::AlignmentEnum as core::clone::Clone>::clone #[derive(Copy, Clone, PartialEq, Eq)]
<core::ptr::alignment::AlignmentEnum as core::cmp::Eq>::assert_fields_are_eq #[derive(Copy, Clone, PartialEq, Eq)]
<core::ptr::alignment::AlignmentEnum as core::cmp::PartialEq>::eq #[derive(Copy, Clone, PartialEq, Eq)]
<core::result::Result<T, E> as core::cmp::Eq>::assert_fields_are_eq #[derive_const(PartialEq, PartialOrd, Eq, Ord)]
<core::result::Result<T, E> as core::cmp::Ord>::cmp #[derive_const(PartialEq, PartialOrd, Eq, Ord)]
<core::result::Result<T, E> as core::cmp::PartialEq>::eq #[derive_const(PartialEq, PartialOrd, Eq, Ord)]
<core::result::Result<T, E> as core::cmp::PartialOrd>::partial_cmp #[derive_const(PartialEq, PartialOrd, Eq, Ord)]
<core::result::Result<T, E> as core::fmt::Debug>::fmt #[derive(Copy, Debug, Hash)]
<core::result::Result<T, E> as core::hash::Hash>::hash #[derive(Copy, Debug, Hash)]
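A note on the derived `Ord`/`PartialOrd` above: derived comparisons on enums order by variant declaration order before looking at contents, so every `Ok` sorts before every `Err`. A quick illustration:

fn main() {
    let ok: Result<u32, u32> = Ok(u32::MAX);
    let err: Result<u32, u32> = Err(0);
    // Variant order dominates: Ok(_) < Err(_) regardless of the payloads.
    assert!(ok < err);
}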
<core::slice::ascii::EscapeAscii<'a> as core::clone::Clone>::clone #[derive(Clone)]
<core::slice::ascii::EscapeByte as core::clone::Clone>::clone #[derive(Clone)]
<core::slice::iter::Chunks<'a, T> as core::fmt::Debug>::fmt #[derive(Debug)]
<core::slice::iter::ChunksExact<'a, T> as core::fmt::Debug>::fmt #[derive(Debug)]
<core::slice::iter::ChunksExactMut<'a, T> as core::fmt::Debug>::fmt #[derive(Debug)]
<core::slice::iter::ChunksMut<'a, T> as core::fmt::Debug>::fmt #[derive(Debug)]
<core::slice::iter::Windows<'a, T> as core::fmt::Debug>::fmt #[derive(Debug)]
<core::str::error::Utf8Error as core::clone::Clone>::clone #[derive(Copy, Eq, PartialEq, Clone, Debug)]
<core::str::error::Utf8Error as core::cmp::Eq>::assert_fields_are_eq #[derive(Copy, Eq, PartialEq, Clone, Debug)]
<core::str::error::Utf8Error as core::cmp::PartialEq>::eq #[derive(Copy, Eq, PartialEq, Clone, Debug)]
<core::str::error::Utf8Error as core::fmt::Debug>::fmt #[derive(Copy, Eq, PartialEq, Clone, Debug)]
<core::str::iter::Bytes<'a> as core::clone::Clone>::clone #[derive(Clone, Debug)]
<core::str::iter::Bytes<'a> as core::fmt::Debug>::fmt #[derive(Clone, Debug)]
<core::str::iter::CharIndices<'a> as core::clone::Clone>::clone #[derive(Clone, Debug)]
<core::str::iter::CharIndices<'a> as core::fmt::Debug>::fmt #[derive(Clone, Debug)]
<core::str::iter::Chars<'a> as core::clone::Clone>::clone #[derive(Clone)]
<core::str::lossy::Utf8Chunk<'a> as core::clone::Clone>::clone #[derive(Clone, Debug, PartialEq, Eq)]
<core::str::lossy::Utf8Chunk<'a> as core::cmp::Eq>::assert_fields_are_eq #[derive(Clone, Debug, PartialEq, Eq)]
<core::str::lossy::Utf8Chunk<'a> as core::cmp::PartialEq>::eq #[derive(Clone, Debug, PartialEq, Eq)]
<core::str::lossy::Utf8Chunk<'a> as core::fmt::Debug>::fmt #[derive(Clone, Debug, PartialEq, Eq)]
<core::str::lossy::Utf8Chunks<'a> as core::clone::Clone>::clone #[derive(Clone)]
<core::str::pattern::CharSearcher<'a> as core::clone::Clone>::clone #[derive(Clone, Debug)]
<core::str::pattern::CharSearcher<'a> as core::fmt::Debug>::fmt #[derive(Clone, Debug)]
<core::str::pattern::EmptyNeedle as core::clone::Clone>::clone #[derive(Clone, Debug)]
<core::str::pattern::EmptyNeedle as core::fmt::Debug>::fmt #[derive(Clone, Debug)]
<core::str::pattern::SearchStep as core::clone::Clone>::clone #[derive(Copy, Clone, Eq, PartialEq, Debug)]
<core::str::pattern::SearchStep as core::cmp::Eq>::assert_fields_are_eq #[derive(Copy, Clone, Eq, PartialEq, Debug)]
<core::str::pattern::SearchStep as core::cmp::PartialEq>::eq #[derive(Copy, Clone, Eq, PartialEq, Debug)]
<core::str::pattern::SearchStep as core::fmt::Debug>::fmt #[derive(Copy, Clone, Eq, PartialEq, Debug)]
<core::str::pattern::StrSearcher<'a, 'b> as core::clone::Clone>::clone #[derive(Clone, Debug)]
<core::str::pattern::StrSearcher<'a, 'b> as core::fmt::Debug>::fmt #[derive(Clone, Debug)]
<core::str::pattern::StrSearcherImpl as core::clone::Clone>::clone #[derive(Clone, Debug)]
<core::str::pattern::StrSearcherImpl as core::fmt::Debug>::fmt #[derive(Clone, Debug)]
<core::str::pattern::TwoWaySearcher as core::clone::Clone>::clone #[derive(Clone, Debug)]
<core::str::pattern::TwoWaySearcher as core::fmt::Debug>::fmt #[derive(Clone, Debug)]
<core::sync::atomic::Ordering as core::clone::Clone>::clone #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
<core::sync::atomic::Ordering as core::cmp::Eq>::assert_fields_are_eq #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
<core::sync::atomic::Ordering as core::cmp::PartialEq>::eq #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
<core::sync::atomic::Ordering as core::fmt::Debug>::fmt #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
<core::sync::atomic::Ordering as core::hash::Hash>::hash #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
<core::time::Duration as core::clone::Clone>::clone #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
<core::time::Duration as core::cmp::Eq>::assert_fields_are_eq #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
<core::time::Duration as core::cmp::Ord>::cmp #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
<core::time::Duration as core::cmp::PartialEq>::eq #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
<core::time::Duration as core::cmp::PartialOrd>::partial_cmp #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
<core::time::Duration as core::default::Default>::default #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
<core::time::Duration as core::hash::Hash>::hash #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
<core::time::TryFromFloatSecsError as core::clone::Clone>::clone #[derive(Debug, Clone, PartialEq, Eq)]
<core::time::TryFromFloatSecsError as core::cmp::Eq>::assert_fields_are_eq #[derive(Debug, Clone, PartialEq, Eq)]
<core::time::TryFromFloatSecsError as core::cmp::PartialEq>::eq #[derive(Debug, Clone, PartialEq, Eq)]
<core::time::TryFromFloatSecsError as core::fmt::Debug>::fmt #[derive(Debug, Clone, PartialEq, Eq)]
<core::time::TryFromFloatSecsErrorKind as core::clone::Clone>::clone #[derive(Debug, Clone, PartialEq, Eq)]
<core::time::TryFromFloatSecsErrorKind as core::cmp::Eq>::assert_fields_are_eq #[derive(Debug, Clone, PartialEq, Eq)]
<core::time::TryFromFloatSecsErrorKind as core::cmp::PartialEq>::eq #[derive(Debug, Clone, PartialEq, Eq)]
<core::time::TryFromFloatSecsErrorKind as core::fmt::Debug>::fmt #[derive(Debug, Clone, PartialEq, Eq)]
core::cmp::Eq::assert_fields_are_eq fn assert_fields_are_eq(&self) {}
core::cmp::Eq::assert_receiver_is_total_eq fn assert_receiver_is_total_eq(&self) {}
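The two empty assertion methods above exist so that code generated by `derive(Eq)` can call them on the receiver and on every field; a field whose type is not `Eq` then fails to type-check. A sketch of the effect (the struct is illustrative):

#[derive(PartialEq, Eq)]
struct Checked {
    n: u32,
    // x: f64, // uncommenting this is a compile error: `f64` is not `Eq`,
    //         // so the derive-generated assertion call fails to resolve
}

fn main() {
    assert!(Checked { n: 1 } == Checked { n: 1 });
}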
core::cmp::impls::<impl core::cmp::Ord for !>::cmp fn cmp(&self, _: &!) -> Ordering {
*self
}
core::cmp::impls::<impl core::cmp::PartialEq for !>::eq fn eq(&self, _: &!) -> bool {
*self
}
core::cmp::impls::<impl core::cmp::PartialOrd for !>::partial_cmp fn partial_cmp(&self, _: &!) -> Option<Ordering> {
*self
}
core::ffi::c_str::strlen::runtime::strlen fn strlen(s: *const c_char) -> usize;
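The `strlen` declaration above binds the C routine that counts bytes up to, but not including, the NUL terminator. A hedged sketch of the same contract expressed through `CStr` rather than the private binding (the wrapper name is illustrative):

use std::ffi::CStr;
use std::os::raw::c_char;

/// # Safety
/// `s` must be non-null and point to a NUL-terminated string that is
/// valid for reads up to and including the terminator.
unsafe fn c_str_len(s: *const c_char) -> usize {
    // SAFETY: forwarded from this function's contract above.
    unsafe { CStr::from_ptr(s) }.to_bytes().len()
}

fn main() {
    let s = b"hello\0";
    // SAFETY: `s` is NUL-terminated and outlives the call.
    assert_eq!(unsafe { c_str_len(s.as_ptr().cast()) }, 5);
}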
core::hash::impls::<impl core::hash::Hash for !>::hash fn hash<H: Hasher>(&self, _: &mut H) {
*self
}
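All of the `!` impls above (Ord, PartialEq, PartialOrd, Hash) lean on the same fact as `from_residual` earlier: a `&!` can never exist at runtime, so dereferencing or matching on it type-checks as any return type. On stable Rust the uninhabited `Infallible` shows the same shape (the function name is illustrative):

use core::convert::Infallible;

// Mirrors `<! as PartialEq>::eq` above: the empty match proves the body
// can never run, so `bool` (or any other type) is a valid return type.
fn eq_uninhabited(a: &Infallible, _b: &Infallible) -> bool {
    match *a {}
}

fn main() {
    let _ = eq_uninhabited; // never callable at runtime
}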
core::hint::unreachable_unchecked pub const unsafe fn unreachable_unchecked() -> ! {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"hint::unreachable_unchecked must never be reached",
() => false
);
// SAFETY: the safety contract for `intrinsics::unreachable` must
// be upheld by the caller.
unsafe { intrinsics::unreachable() }
}
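`unreachable_unchecked` turns a supposedly dead branch into an optimizer hint; actually reaching it is immediate undefined behavior, which is exactly what the debug-only precondition check above reports. A minimal sketch of intended use, assuming the caller upholds the documented invariant:

use core::hint::unreachable_unchecked;

fn digit_value(c: u8) -> u32 {
    match c {
        b'0'..=b'9' => (c - b'0') as u32,
        // SAFETY (assumed for this sketch): callers only pass ASCII
        // digits, so this arm is dead. If the invariant is ever broken,
        // the behavior is undefined.
        _ => unsafe { unreachable_unchecked() },
    }
}

fn main() {
    assert_eq!(digit_value(b'7'), 7);
}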
core::panicking::panic_fmt::panic_impl fn panic_impl(pi: &PanicInfo<'_>) -> !;
core::panicking::panic_nounwind_fmt::runtime::panic_impl fn panic_impl(pi: &PanicInfo<'_>) -> !;
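Both `panic_impl` declarations above are extern hooks resolved at link time by whichever item carries the `#[panic_handler]` attribute; `std` supplies one, and a `#![no_std]` binary must provide its own. The canonical shape (a sketch; a real target also needs an entry point and linker setup):

#![no_std]
#![no_main]

use core::panic::PanicInfo;

// This item becomes the `panic_impl` lang item that
// `core::panicking::panic_fmt` calls into; it must diverge.
#[panic_handler]
fn panic(_info: &PanicInfo<'_>) -> ! {
    loop {}
}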
core::ptr::drop_in_place pub const unsafe fn drop_in_place<T: PointeeSized>(to_drop: *mut T)
where
T: [const] Destruct,
{
// Code here does not matter - this is replaced by the
// real drop glue by the compiler.
// SAFETY: see comment above
unsafe { drop_in_place(to_drop) }
}
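`drop_in_place` is the explicit form of the compiler's drop glue: it runs a value's destructor through a raw pointer without moving the value. A small sketch pairing it with `ManuallyDrop`, which suppresses the automatic destructor so the manual call is the only one:

use core::mem::ManuallyDrop;
use core::ptr;

fn main() {
    let mut slot = ManuallyDrop::new(String::from("dropped by hand"));
    // SAFETY: `slot` holds a valid, initialized String, and nothing reads
    // or drops it again after this call (`ManuallyDrop` suppresses the
    // automatic destructor).
    unsafe { ptr::drop_in_place::<String>(&mut *slot) };
}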
core::slice::ascii::is_ascii const fn is_ascii(s: &[u8]) -> bool {
// The runtime version behaves the same as the compile-time version; it's
// just more optimized.
const_eval_select!(
@capture { s: &[u8] } -> bool:
if const {
is_ascii_simple(s)
} else {
/// Returns `true` if any byte in the word `v` is nonascii (>= 128). Snarfed
/// from `../str/mod.rs`, which does something similar for utf8 validation.
const fn contains_nonascii(v: usize) -> bool {
const NONASCII_MASK: usize = usize::repeat_u8(0x80);
(NONASCII_MASK & v) != 0
}
const USIZE_SIZE: usize = size_of::<usize>();
let len = s.len();
let align_offset = s.as_ptr().align_offset(USIZE_SIZE);
// If we wouldn't gain anything from the word-at-a-time implementation, fall
// back to a scalar loop.
//
// We also do this for architectures where `size_of::<usize>()` isn't
// sufficient alignment for `usize`, because it's a weird edge case.
if len < USIZE_SIZE || len < align_offset || USIZE_SIZE < align_of::<usize>() {
return is_ascii_simple(s);
}
// We always read the first word unaligned, so if `align_offset` is 0 we
// skip a full word ahead rather than read the same bytes again aligned.
let offset_to_aligned = if align_offset == 0 { USIZE_SIZE } else { align_offset };
let start = s.as_ptr();
// SAFETY: We ruled out `len < USIZE_SIZE` above, so reading one `usize` from the start is in bounds.
let first_word = unsafe { (start as *const usize).read_unaligned() };
if contains_nonascii(first_word) {
return false;
}
// We checked this above, somewhat implicitly. Note that `offset_to_aligned`
// is either `align_offset` or `USIZE_SIZE`, both of which are explicitly
// checked above.
debug_assert!(offset_to_aligned <= len);
// SAFETY: word_ptr is the (properly aligned) usize ptr we use to read the
// middle chunk of the slice.
let mut word_ptr = unsafe { start.add(offset_to_aligned) as *const usize };
// `byte_pos` is the byte index of `word_ptr`, used for loop end checks.
let mut byte_pos = offset_to_aligned;
// Paranoia check about alignment, since we're about to do a bunch of
// aligned loads. In practice this should be impossible barring a bug in
// `align_offset` though.
// While `align_offset` is allowed to spuriously fail in CTFE, if it had
// no alignment information it should have returned `usize::MAX` earlier,
// sending things through the scalar path instead of this one, so this
// check should pass if it's reachable.
debug_assert!(word_ptr.is_aligned_to(align_of::<usize>()));
// Read whole words up to, but not including, the last aligned word; that
// word is left for the tail check below, which keeps the tail to at most
// one `usize` and avoids an extra `byte_pos == len` branch.
while byte_pos < len - USIZE_SIZE {
// Sanity check that the read is in bounds
debug_assert!(byte_pos + USIZE_SIZE <= len);
// And that our assumptions about `byte_pos` hold.
debug_assert!(word_ptr.cast::<u8>() == start.wrapping_add(byte_pos));
// SAFETY: We know `word_ptr` is properly aligned (because of
// `align_offset`), and we know that we have enough bytes between `word_ptr` and the end
let word = unsafe { word_ptr.read() };
if contains_nonascii(word) {
return false;
}
byte_pos += USIZE_SIZE;
// SAFETY: before the increment, `byte_pos < len - USIZE_SIZE` held, so
// after this `add`, `word_ptr` is at most one-past-the-end of the buffer.
word_ptr = unsafe { word_ptr.add(1) };
}
// Sanity check to ensure there really is only one `usize` left. This should
// be guaranteed by our loop condition.
debug_assert!(byte_pos <= len && len - byte_pos <= USIZE_SIZE);
// SAFETY: This relies on `len >= USIZE_SIZE`, which we check at the start.
let last_word = unsafe { (start.add(len - USIZE_SIZE) as *const usize).read_unaligned() };
!contains_nonascii(last_word)
}
)
}
core::slice::ascii::is_ascii::runtime::contains_nonascii const fn contains_nonascii(v: usize) -> bool {
const NONASCII_MASK: usize = usize::repeat_u8(0x80);
(NONASCII_MASK & v) != 0
}
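The `contains_nonascii` helper above is the heart of the word-at-a-time strategy: ASCII bytes have their top bit clear, so AND-ing a whole word against `0x80` repeated in every byte tests `size_of::<usize>()` bytes at once. A standalone sketch of the same mask trick, using `from_ne_bytes` in place of the private `usize::repeat_u8` helper:

use core::mem::size_of;

// Any set bit under the mask means the word holds at least one byte
// with its high bit set, i.e. at least one non-ASCII byte.
fn contains_nonascii(v: usize) -> bool {
    let nonascii_mask = usize::from_ne_bytes([0x80u8; size_of::<usize>()]);
    (nonascii_mask & v) != 0
}

fn main() {
    let ascii = usize::from_ne_bytes([b'a'; size_of::<usize>()]);
    assert!(!contains_nonascii(ascii));
    assert!(contains_nonascii(ascii | 0x80)); // high bit set in the low byte
}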