<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
if self.n > 0 {
// nth(k) consumes k + 1 elements, so nth(self.n - 1) skips exactly self.n
if self.iter.nth(self.n - 1).is_none() {
return init;
}
}
self.iter.fold(init, fold)
}
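As a quick check of the fast path above (a behavioral sketch, not the libcore internals): `skip(n)` discards its first n elements with a single `nth` call and folds the rest, and skipping past the end returns the initial accumulator untouched.

fn main() {
    // skip(3) discards 1, 2, 3 before the fold sees anything.
    let sum = (1..=6).skip(3).fold(0, |acc, x| acc + x);
    assert_eq!(sum, 4 + 5 + 6);
    // Skipping past the end: the inner nth() returns None and `init` comes back unchanged.
    let sum = (1..=2).skip(10).fold(0, |acc, x| acc + x);
    assert_eq!(sum, 0);
}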
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
let n = self.n;
self.n = 0;
if n > 0 {
// nth(k) consumes k + 1 elements, so nth(n - 1) skips exactly n
if self.iter.nth(n - 1).is_none() {
return try { init };
}
}
self.iter.try_fold(init, fold)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_fold default fn spec_fold<Acc, F>(mut self, mut acc: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
#[inline]
fn nth<I: Iterator>(
iter: &mut I,
step_minus_one: usize,
) -> impl FnMut() -> Option<I::Item> + '_ {
move || iter.nth(step_minus_one)
}
if self.first_take {
self.first_take = false;
match self.iter.next() {
None => return acc,
Some(x) => acc = f(acc, x),
}
}
from_fn(nth(&mut self.iter, self.step_minus_one)).fold(acc, f)
}
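A small demonstration of the shape this specialization implements: `first_take` hands back the very first element, after which each step is one `nth(step - 1)` call.

fn main() {
    // First element, then every third one after it.
    let v: Vec<_> = (0..10).step_by(3).collect();
    assert_eq!(v, [0, 3, 6, 9]);
    assert_eq!((0..10).step_by(3).fold(0, |acc, x| acc + x), 0 + 3 + 6 + 9);
}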
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_nth default fn spec_nth(&mut self, mut n: usize) -> Option<I::Item> {
if self.first_take {
self.first_take = false;
let first = self.iter.next();
if n == 0 {
return first;
}
n -= 1;
}
// n and self.step_minus_one are indices; we need to add 1 to get the number of elements.
// When calling `.nth`, we need to subtract 1 again to convert back to an index.
let mut step = self.original_step().get();
// n + 1 could overflow; thus, if n is usize::MAX, instead of adding one
// we advance by `step` elements directly (i.e. call `.nth(step - 1)`):
if n == usize::MAX {
self.iter.nth(step - 1);
} else {
n += 1;
}
// overflow handling
loop {
let mul = n.checked_mul(step);
{
if intrinsics::likely(mul.is_some()) {
return self.iter.nth(mul.unwrap() - 1);
}
}
let div_n = usize::MAX / n;
let div_step = usize::MAX / step;
let nth_n = div_n * n;
let nth_step = div_step * step;
let nth = if nth_n > nth_step {
step -= div_n;
nth_n
} else {
n -= div_step;
nth_step
};
self.iter.nth(nth - 1);
}
}
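The observable contract of the arithmetic above: `.step_by(s).nth(n)` yields the element at index n * s of the underlying iterator, and the loop only exists so the computation survives n * s overflowing usize, by advancing in the largest exact multiples that still fit.

fn main() {
    assert_eq!((0..100).step_by(7).nth(4), Some(28)); // element at index 4 * 7
    assert_eq!((0..10).step_by(7).nth(4), None);      // past the end
}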
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<I::Item> {
if self.n > n {
self.n -= n + 1;
self.iter.nth(n)
} else {
if self.n > 0 {
self.iter.nth(self.n - 1);
self.n = 0;
}
None
}
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::size_hint default fn size_hint(&self) -> (usize, Option<usize>) {
let (a_lower, a_upper) = self.a.size_hint();
let (b_lower, b_upper) = self.b.size_hint();
let lower = cmp::min(a_lower, b_lower);
let upper = match (a_upper, b_upper) {
(Some(x), Some(y)) => Some(cmp::min(x, y)),
(Some(x), None) => Some(x),
(None, Some(y)) => Some(y),
(None, None) => None,
};
(lower, upper)
}
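The hint of a zip is the pointwise minimum of its inputs' hints, with a missing upper bound (None) absorbed by any finite one:

fn main() {
    let a = [1, 2, 3].iter();     // size_hint (3, Some(3))
    let b = std::iter::repeat(0); // size_hint (usize::MAX, None)
    assert_eq!(a.zip(b).size_hint(), (3, Some(3)));
}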
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
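The body above is what the `assert_unsafe_precondition!` machinery expands to at each unsafe entry point. A simplified standalone analogue of the pattern (hypothetical names, with an ordinary `panic!` standing in for the internal non-unwinding panic entry points):

// Evaluate the precondition eagerly; on failure, report a program bug.
const fn precondition_check(index: usize, len: usize) {
    if !(index < len) {
        panic!("unsafe precondition(s) violated: index must be in bounds");
    }
}

fn main() {
    precondition_check(2, 4); // in bounds: no panic
}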
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
<core::slice::iter::Chunks<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
let (start, overflow) = n.overflowing_mul(self.chunk_size);
// min(len) makes a wrong start harmless, but enables optimizing this to branchless code
let chunk_start = &self.v[start.min(self.v.len())..];
let (nth, remainder) = chunk_start.split_at(self.chunk_size.min(chunk_start.len()));
if !overflow && start < self.v.len() {
self.v = remainder;
Some(nth)
} else {
self.v = &self.v[..0]; // cheaper than &[]
None
}
}
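Behaviorally, `nth(n)` jumps straight to element offset n * chunk_size, and the trailing chunk may be short:

fn main() {
    let data = [1, 2, 3, 4, 5, 6, 7];
    let mut chunks = data.chunks(3);
    assert_eq!(chunks.nth(1), Some(&data[3..6])); // skips the chunk [1, 2, 3]
    assert_eq!(chunks.next(), Some(&data[6..7])); // trailing short chunk
}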
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &self.v[..0]; // cheaper than &[]
None
} else {
let (_, snd) = self.v.split_at(start);
self.v = snd;
self.next()
}
}
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
// SAFETY: The self.v contract ensures that any split_at_mut is valid.
let (_, snd) = unsafe { self.v.split_at_mut(start) };
self.v = snd;
self.next()
}
}
<core::slice::iter::ChunksMut<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<&'a mut [T]> {
let (start, overflow) = n.overflowing_mul(self.chunk_size);
if start >= self.v.len() || overflow {
self.v = &mut [];
None
} else {
let end = match start.checked_add(self.chunk_size) {
Some(sum) => cmp::min(self.v.len(), sum),
None => self.v.len(),
};
// SAFETY: The self.v contract ensures that any split_at_mut is valid.
let (head, tail) = unsafe { self.v.split_at_mut(end) };
// SAFETY: The self.v contract ensures that any split_at_mut is valid.
let (_, nth) = unsafe { head.split_at_mut(start) };
self.v = tail;
// SAFETY: Nothing else points to or will point to the contents of this slice.
Some(unsafe { &mut *nth })
}
}
<core::slice::iter::Windows<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
let size = self.size.get();
if let Some(rest) = self.v.get(n..)
&& let Some(nth) = rest.get(..size)
{
self.v = &rest[1..];
Some(nth)
} else {
// setting length to 0 is cheaper than overwriting the pointer when assigning &[]
self.v = &self.v[..0]; // cheaper than &[]
None
}
}
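The `&rest[1..]` update leaves the iterator one element past the start of the returned window, so iteration resumes with the next overlapping window:

fn main() {
    let data = [1, 2, 3, 4, 5];
    let mut w = data.windows(2);
    assert_eq!(w.nth(2), Some(&data[2..4])); // the window starting at index 2
    assert_eq!(w.next(), Some(&data[3..5])); // the next overlapping window
}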
<core::str::iter::Chars<'a> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, mut remainder: usize) -> Result<(), NonZero<usize>> {
const CHUNK_SIZE: usize = 32;
if remainder >= CHUNK_SIZE {
let mut chunks = self.iter.as_slice().as_chunks::<CHUNK_SIZE>().0.iter();
let mut bytes_skipped: usize = 0;
while remainder > CHUNK_SIZE
&& let Some(chunk) = chunks.next()
{
bytes_skipped += CHUNK_SIZE;
let mut start_bytes = [false; CHUNK_SIZE];
for i in 0..CHUNK_SIZE {
start_bytes[i] = !super::validations::utf8_is_cont_byte(chunk[i]);
}
remainder -= start_bytes.into_iter().map(|i| i as u8).sum::<u8>() as usize;
}
// SAFETY: These bytes exist, since we just iterated over them,
// so advance_by will succeed.
unsafe { self.iter.advance_by(bytes_skipped).unwrap_unchecked() };
// skip trailing continuation bytes
while self.iter.len() > 0 {
let b = self.iter.as_slice()[0];
if !super::validations::utf8_is_cont_byte(b) {
break;
}
// SAFETY: We just peeked at the byte, therefore it exists
unsafe { self.iter.advance_by(1).unwrap_unchecked() };
}
}
while (remainder > 0) && (self.iter.len() > 0) {
remainder -= 1;
let b = self.iter.as_slice()[0];
let slurp = super::validations::utf8_char_width(b);
// SAFETY: utf8 validity requires that the string must contain
// the continuation bytes (if any)
unsafe { self.iter.advance_by(slurp).unwrap_unchecked() };
}
NonZero::new(remainder).map_or(Ok(()), Err)
}
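`advance_by` here counts characters, not bytes; the chunked loop above just counts UTF-8 start bytes in bulk. A nightly-only check (`Iterator::advance_by` is unstable at the time of writing):

#![feature(iter_advance_by)]
use std::num::NonZero;

fn main() {
    let mut chars = "héllo".chars(); // 'é' is two bytes but one char
    assert!(chars.advance_by(2).is_ok());
    assert_eq!(chars.next(), Some('l'));
    // Past the end, the error reports how many steps were left over.
    assert_eq!("ab".chars().advance_by(5), Err(NonZero::new(3).unwrap()));
}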
<i128 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
match end.checked_sub(*start) {
Some(result) => {
if let Ok(steps) = usize::try_from(result) {
(steps, Some(steps))
} else {
(usize::MAX, None)
}
}
// If the difference is too big for e.g. i128,
// it will also be too big for usize, which has fewer bits.
None => (usize::MAX, None),
}
} else {
(0, None)
}
}
<i16 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
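A worked instance of the cast sequence used above (the same macro body serves the i8 through isize impls that follow):

fn main() {
    let (start, end) = (-3i16, 4i16);
    // Widen sign-preservingly to isize, subtract with wrapping_sub,
    // reinterpret the (always in-range) difference as usize.
    let steps = (end as isize).wrapping_sub(start as isize) as usize;
    assert_eq!(steps, 7);
}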
<i32 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<i64 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<i8 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<isize as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $i_narrower <= usize
//
// Casting to isize extends the width but preserves the sign.
// Use wrapping_sub in isize space and cast to usize to compute
// the difference that might not fit inside the range of isize.
let steps = (*end as isize).wrapping_sub(*start as isize) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<u128 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
if let Ok(steps) = usize::try_from(*end - *start) {
(steps, Some(steps))
} else {
(usize::MAX, None)
}
} else {
(0, None)
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut T> {
if self < slice.len() {
// SAFETY: `self` is checked to be in bounds.
unsafe { Some(slice_get_unchecked(slice, self)) }
} else {
None
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::alloc::layout::Layout::from_size_align_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::cell::RefCell::<T>::borrow pub const fn borrow(&self) -> Ref<'_, T> {
match self.try_borrow() {
Ok(b) => b,
Err(err) => panic_already_mutably_borrowed(err),
}
}
core::char::convert::from_u32_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::cmp::impls::<impl core::cmp::Ord for bool>::cmp fn cmp(&self, other: &bool) -> Ordering {
// Casting to i8 and converting the difference to an Ordering generates
// better assembly.
// See <https://github.com/rust-lang/rust/issues/66780> for more info.
match (*self as i8) - (*other as i8) {
-1 => Less,
0 => Equal,
1 => Greater,
#[ferrocene::annotation(
"This match arm cannot be covered because it is unreachable. See the safety comment below."
)]
// SAFETY: bool as i8 returns 0 or 1, so the difference can't be anything else
_ => unsafe { unreachable_unchecked() },
}
}
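Since `bool as i8` is 0 or 1, the difference is always -1, 0 or 1, mapping directly onto the three Ordering variants:

use std::cmp::Ordering;

fn main() {
    assert_eq!((false as i8) - (true as i8), -1);
    assert_eq!(false.cmp(&true), Ordering::Less);
    assert_eq!(true.cmp(&true), Ordering::Equal);
    assert_eq!(true.cmp(&false), Ordering::Greater);
}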
core::hint::assert_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::hint::unreachable_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::iter::adapters::zip::Zip::<A, B>::super_nth fn super_nth(&mut self, mut n: usize) -> Option<(A::Item, B::Item)> {
while let Some(x) = Iterator::next(self) {
if n == 0 {
return Some(x);
}
n -= 1;
}
None
}
core::iter::traits::double_ended::DoubleEndedIterator::nth_back fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
if self.advance_back_by(n).is_err() {
return None;
}
self.next_back()
}
core::iter::traits::iterator::Iterator::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
/// Helper trait to specialize `advance_by` via `try_fold` for `Sized` iterators.
trait SpecAdvanceBy {
fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>>;
}
impl<I: Iterator + ?Sized> SpecAdvanceBy for I {
default fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
for i in 0..n {
if self.next().is_none() {
// SAFETY: `i` is always less than `n`.
return Err(unsafe { NonZero::new_unchecked(n - i) });
}
}
Ok(())
}
}
impl<I: Iterator> SpecAdvanceBy for I {
fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let Some(n) = NonZero::new(n) else {
return Ok(());
};
let res = self.try_fold(n, |n, _| NonZero::new(n.get() - 1));
match res {
None => Ok(()),
Some(n) => Err(n),
}
}
}
self.spec_advance_by(n)
}
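The specialized path threads the remaining count through `try_fold`: each item decrements it, and hitting zero short-circuits the fold. The same trick, written out in user code:

use std::num::NonZero;

fn main() {
    let mut it = 0..10;
    // Decrement per item; None (count exhausted) stops the fold early.
    let res = it.try_fold(NonZero::new(3usize).unwrap(), |n, _| NonZero::new(n.get() - 1));
    assert_eq!(res, None);          // all 3 steps were taken
    assert_eq!(it.next(), Some(3)); // items 0, 1 and 2 were consumed
}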
core::iter::traits::iterator::Iterator::collect fn collect<B: FromIterator<Self::Item>>(self) -> B
where
Self: Sized,
{
// This is too aggressive to turn on for everything all the time, but PR#137908
// accidentally noticed that some rustc iterators had malformed `size_hint`s,
// so this will help catch such things in debug-assertions-std runners,
// even if users won't actually ever see it.
if cfg!(debug_assertions) {
let hint = self.size_hint();
assert!(hint.1.is_none_or(|high| high >= hint.0), "Malformed size_hint {hint:?}");
}
FromIterator::from_iter(self)
}
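A well-formed size_hint never reports a finite upper bound below its lower bound, which is exactly what the debug assertion checks:

fn main() {
    let it = (0..5).filter(|x| x % 2 == 0);
    let (lo, hi) = it.size_hint(); // (0, Some(5)): filter may drop anything
    assert!(hi.is_none_or(|high| high >= lo));
    assert_eq!(it.collect::<Vec<_>>(), [0, 2, 4]);
}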
core::num::<impl u128>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u128>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u16>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u16>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u32>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u32>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u64>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u64>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u8>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u8>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl usize>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl usize>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::niche_types::Nanoseconds::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroCharInner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroI128Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroI16Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroI32Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroI64Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroI8Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroIsizeInner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroU128Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroU16Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroU32Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroU64Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroU8Inner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::NonZeroUsizeInner::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::niche_types::UsizeNoHighBit::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
core::num::nonzero::NonZero::<T>::new_unchecked pub const unsafe fn new_unchecked(n: T) -> Self {
match Self::new(n) {
Some(n) => n,
None => {
// SAFETY: The caller guarantees that `n` is non-zero, so this is unreachable.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"NonZero::new_unchecked requires the argument to be non-zero",
() => false,
);
intrinsics::unreachable()
}
}
}
}
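Usage contrast: the checked constructor rejects zero, while the unchecked one is undefined behavior on zero, and as the body above shows, that violation is only optionally diagnosed.

use std::num::NonZero;

fn main() {
    assert_eq!(NonZero::new(0u32), None); // checked: zero is rejected
    // SAFETY: 5 is non-zero.
    let n = unsafe { NonZero::new_unchecked(5u32) };
    assert_eq!(n.get(), 5);
}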
core::num::nonzero::NonZero::<T>::new_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ops::index_range::IndexRange::new_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::option::Option::<T>::unwrap_unchecked pub const unsafe fn unwrap_unchecked(self) -> T {
match self {
Some(val) => val,
#[ferrocene::annotation(
"This line cannot be covered as reaching `unreachable_unchecked` is undefined behavior."
)]
// SAFETY: the safety contract must be upheld by the caller.
None => unsafe { hint::unreachable_unchecked() },
}
}
core::panicking::panic_bounds_check fn panic_bounds_check(index: usize, len: usize) -> ! {
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
}
panic!("index out of bounds: the len is {len} but the index is {index}")
}
core::panicking::panic_fmt pub const fn panic_fmt(fmt: PanicFmt<'_>) -> ! {
#[ferrocene::annotation(
"The `immediate-abort` behavior is not certified, we only support `abort`."
)]
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
};
// NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
// that gets resolved to the `#[panic_handler]` function.
unsafe extern "Rust" {
#[lang = "panic_impl"]
fn panic_impl(pi: &PanicInfo<'_>) -> !;
}
#[cfg(not(feature = "ferrocene_certified"))]
let pi = PanicInfo::new(
&fmt,
Location::caller(),
/* can_unwind */ true,
/* force_no_backtrace */ false,
);
#[cfg(feature = "ferrocene_certified")]
let pi = PanicInfo::new(&fmt, Location::caller());
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
unsafe { panic_impl(&pi) }
}
core::ptr::alignment::Alignment::new_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::const_ptr::<impl *const T>::add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::const_ptr::<impl *const T>::align_offset pub fn align_offset(self, align: usize) -> usize
where
T: Sized,
{
if !align.is_power_of_two() {
panic!("align_offset: align is not a power-of-two");
}
// SAFETY: `align` has been checked to be a power of 2 above
let ret = unsafe { align_offset(self, align) };
// Inform Miri that we want to consider the resulting pointer to be suitably aligned.
#[cfg(miri)]
if ret != usize::MAX {
intrinsics::miri_promise_symbolic_alignment(self.wrapping_add(ret).cast(), align);
}
ret
}
core::ptr::const_ptr::<impl *const T>::guaranteed_eq pub const fn guaranteed_eq(self, other: *const T) -> Option<bool>
where
T: Sized,
{
match intrinsics::ptr_guaranteed_cmp(self, other) {
#[ferrocene::annotation(
"This cannot be reached in runtime code so it cannot be covered."
)]
2 => None,
other => Some(other == 1),
}
}
core::ptr::const_ptr::<impl *const T>::offset::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::copy::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::copy_nonoverlapping::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::mut_ptr::<impl *mut T>::add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::mut_ptr::<impl *mut T>::offset::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::mut_ptr::<impl *mut T>::sub pub const unsafe fn sub(self, count: usize) -> Self
where
T: Sized,
{
#[cfg(debug_assertions)]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_sub_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
byte_offset <= (isize::MAX as usize) && this.addr() >= byte_offset
}
)
}
#[cfg(debug_assertions)] // Expensive, and doesn't catch much in the wild.
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::sub requires that the address calculation does not overflow",
(
this: *const () = self as *const (),
count: usize = count,
size: usize = size_of::<T>(),
) => runtime_sub_nowrap(this, count, size)
);
if T::IS_ZST {
// Pointer arithmetic does nothing when the pointee is a ZST.
self
} else {
// SAFETY: the caller must uphold the safety contract for `offset`.
// Because the pointee is *not* a ZST, that means that `count` is
// at most `isize::MAX`, and thus the negation cannot overflow.
unsafe { intrinsics::offset(self, intrinsics::unchecked_sub(0, count as isize)) }
}
}
core::ptr::mut_ptr::<impl *mut T>::sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::non_null::NonNull::<T>::new_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::read::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::read_volatile::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::replace::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::swap_nonoverlapping::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::write::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::write_bytes::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::write_volatile::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::result::Result::<T, E>::unwrap_err_unchecked pub unsafe fn unwrap_err_unchecked(self) -> E {
match self {
#[ferrocene::annotation(
"This line cannot be covered as reaching `unreachable_unchecked` is undefined behavior"
)]
// SAFETY: the safety contract must be upheld by the caller.
Ok(_) => unsafe { hint::unreachable_unchecked() },
Err(e) => e,
}
}
core::result::Result::<T, E>::unwrap_unchecked pub const unsafe fn unwrap_unchecked(self) -> T {
match self {
Ok(t) => t,
#[ferrocene::annotation(
"This line cannot be covered as reaching `unreachable_unchecked` is undefined behavior"
)]
Err(e) => {
// FIXME(const-hack): to avoid E: const Destruct bound
super::mem::forget(e);
// SAFETY: the safety contract must be upheld by the caller.
unsafe { hint::unreachable_unchecked() }
}
}
}
core::slice::<impl [T]>::align_to_mut pub unsafe fn align_to_mut<U>(&mut self) -> (&mut [T], &mut [U], &mut [T]) {
// Note that most of this function will be constant-evaluated.
if U::IS_ZST || T::IS_ZST {
// handle ZSTs specially, which is to say: don't handle them at all.
return (self, &mut [], &mut []);
}
// First, find at what point do we split between the first and 2nd slice. Easy with
// ptr.align_offset.
let ptr = self.as_ptr();
// SAFETY: Here we are ensuring we will use aligned pointers for U for the
// rest of the method. This is done by passing a pointer to &[T] with an
// alignment targeted for U.
// `crate::ptr::align_offset` is called with a correctly aligned and
// valid pointer `ptr` (it comes from a reference to `self`) and with
// a size that is a power of two (since it comes from the alignment for U),
// satisfying its safety constraints.
let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
if offset > self.len() {
(self, &mut [], &mut [])
} else {
let (left, rest) = self.split_at_mut(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
let rest_len = rest.len();
let mut_ptr = rest.as_mut_ptr();
// Inform Miri that we want to consider the "middle" pointer to be suitably aligned.
#[cfg(miri)]
crate::intrinsics::miri_promise_symbolic_alignment(
mut_ptr.cast() as *const (),
align_of::<U>(),
);
// We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
// SAFETY: see comments for `align_to`.
unsafe {
(
left,
from_raw_parts_mut(mut_ptr as *mut U, us_len),
from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
)
}
}
}
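A safe-side check of the invariants this method upholds: no element is lost across the three slices, and the middle slice is aligned for U.

fn main() {
    let mut bytes = [0u8; 13];
    // SAFETY: reinterpreting initialized bytes as u16 is valid here.
    let (head, body, tail) = unsafe { bytes.align_to_mut::<u16>() };
    assert_eq!(head.len() + 2 * body.len() + tail.len(), 13);
    assert_eq!(body.as_ptr() as usize % std::mem::align_of::<u16>(), 0);
}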
core::slice::<impl [T]>::align_to_offsets fn align_to_offsets<U>(&self) -> (usize, usize) {
// What we're going to do with `rest` is figure out what multiple of `U`s we can fit
// into the lowest number of `T`s, and how many `T`s we need for each such "multiple".
//
// Consider for example T=u8 U=u16. Then we can put 1 U in 2 Ts. Simple. Now, consider
// for example a case where size_of::<T> = 16, size_of::<U> = 24. We can put 2 Us in
// place of every 3 Ts in the `rest` slice. A bit more complicated.
//
// Formula to calculate this is:
//
// Us = lcm(size_of::<T>, size_of::<U>) / size_of::<U>
// Ts = lcm(size_of::<T>, size_of::<U>) / size_of::<T>
//
// Expanded and simplified:
//
// Us = size_of::<T> / gcd(size_of::<T>, size_of::<U>)
// Ts = size_of::<U> / gcd(size_of::<T>, size_of::<U>)
//
// Luckily since all this is constant-evaluated... performance here matters not!
const fn gcd(a: usize, b: usize) -> usize {
if b == 0 { a } else { gcd(b, a % b) }
}
// Explicitly wrap the function call in a const block so it gets
// constant-evaluated even in debug mode.
let gcd: usize = const { gcd(size_of::<T>(), size_of::<U>()) };
let ts: usize = size_of::<U>() / gcd;
let us: usize = size_of::<T>() / gcd;
// Armed with this knowledge, we can find how many `U`s we can fit!
let us_len = self.len() / ts * us;
// And how many `T`s will be in the trailing slice!
let ts_len = self.len() % ts;
(us_len, ts_len)
}
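Plugging the comment's second example into the simplified formulas: with size_of::<T>() = 16 and size_of::<U>() = 24 the gcd is 8, so two U's replace every three T's.

fn main() {
    const fn gcd(a: usize, b: usize) -> usize {
        if b == 0 { a } else { gcd(b, a % b) }
    }
    let (t, u) = (16usize, 24usize);
    let g = gcd(t, u); // 8
    assert_eq!(u / g, 3); // Ts consumed per group
    assert_eq!(t / g, 2); // Us produced per group
}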
core::slice::<impl [T]>::as_chunks_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::slice::<impl [T]>::split_at pub const fn split_at(&self, mid: usize) -> (&[T], &[T]) {
match self.split_at_checked(mid) {
Some(pair) => pair,
None => panic!("mid > len"),
}
}
core::slice::<impl [T]>::split_at_mut pub const fn split_at_mut(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
match self.split_at_mut_checked(mid) {
Some(pair) => pair,
None => panic!("mid > len"),
}
}
core::slice::<impl [T]>::split_at_mut_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::slice::<impl [T]>::split_at_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::slice::index::into_slice_range pub(crate) fn into_slice_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
let end = match end {
ops::Bound::Included(end) if end >= len => slice_index_fail(0, end, len),
// Cannot overflow because `end < len` implies `end < usize::MAX`.
ops::Bound::Included(end) => end + 1,
ops::Bound::Excluded(end) if end > len => slice_index_fail(0, end, len),
ops::Bound::Excluded(end) => end,
ops::Bound::Unbounded => len,
};
let start = match start {
ops::Bound::Excluded(start) if start >= end => slice_index_fail(start, end, len),
// Cannot overflow because `start < end` implies `start < usize::MAX`.
ops::Bound::Excluded(start) => start + 1,
ops::Bound::Included(start) if start > end => slice_index_fail(start, end, len),
ops::Bound::Included(start) => start,
ops::Bound::Unbounded => 0,
};
start..end
}
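The stable, user-visible face of this conversion is indexing a slice with a (Bound, Bound) pair, which performs the same inclusive/exclusive adjustments:

use std::ops::Bound;

fn main() {
    let v = [10, 20, 30, 40];
    // Included(2) becomes the exclusive end 3; Excluded(0) becomes the start 1.
    assert_eq!(v[(Bound::Included(1), Bound::Included(2))], [20, 30]);
    assert_eq!(v[(Bound::Excluded(0), Bound::Unbounded)], [20, 30, 40]);
}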
core::slice::raw::from_raw_parts::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::slice::raw::from_raw_parts_mut::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::slice::rotate::ptr_rotate pub(super) const unsafe fn ptr_rotate<T>(left: usize, mid: *mut T, right: usize) {
if T::IS_ZST {
return;
}
// abort early if the rotate is a no-op
if (left == 0) || (right == 0) {
return;
}
// `T` is not a zero-sized type, so it's okay to divide by its size.
if !cfg!(feature = "optimize_for_size")
// FIXME(const-hack): Use cmp::min when available in const
&& const_min(left, right) <= size_of::<BufType>() / size_of::<T>()
{
// SAFETY: guaranteed by the caller
unsafe { ptr_rotate_memmove(left, mid, right) };
} else if !cfg!(feature = "optimize_for_size")
&& ((left + right < 24) || (size_of::<T>() > size_of::<[usize; 4]>()))
{
// SAFETY: guaranteed by the caller
unsafe { ptr_rotate_gcd(left, mid, right) }
} else {
// SAFETY: guaranteed by the caller
unsafe { ptr_rotate_swap(left, mid, right) }
}
}
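The safe entry points for this routine are <[T]>::rotate_left and rotate_right; rotate_left(k) corresponds to left = k, right = len - k, with mid pointing at element k.

fn main() {
    let mut v = [1, 2, 3, 4, 5];
    v.rotate_left(2); // left = 2, right = 3
    assert_eq!(v, [3, 4, 5, 1, 2]);
}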
core::str::pattern::StrSearcher::<'a, 'b>::new fn new(haystack: &'a str, needle: &'b str) -> StrSearcher<'a, 'b> {
if needle.is_empty() {
StrSearcher {
haystack,
needle,
searcher: StrSearcherImpl::Empty(EmptyNeedle {
position: 0,
end: haystack.len(),
is_match_fw: true,
is_match_bw: true,
is_finished: false,
}),
}
} else {
StrSearcher {
haystack,
needle,
searcher: StrSearcherImpl::TwoWay(TwoWaySearcher::new(
needle.as_bytes(),
haystack.len(),
)),
}
}
}
core::str::pattern::TwoWaySearcher::next fn next<S>(&mut self, haystack: &[u8], needle: &[u8], long_period: bool) -> S::Output
where
S: TwoWayStrategy,
{
// `next()` uses `self.position` as its cursor
let old_pos = self.position;
let needle_last = needle.len() - 1;
'search: loop {
// Check that we have room to search in
// position + needle_last cannot overflow if we assume slices
// are bounded by isize's range.
let tail_byte = match haystack.get(self.position + needle_last) {
Some(&b) => b,
None => {
self.position = haystack.len();
return S::rejecting(old_pos, self.position);
}
};
if S::use_early_reject() && old_pos != self.position {
return S::rejecting(old_pos, self.position);
}
// Quickly skip by large portions unrelated to our substring
if !self.byteset_contains(tail_byte) {
self.position += needle.len();
if !long_period {
self.memory = 0;
}
continue 'search;
}
// See if the right part of the needle matches
let start =
if long_period { self.crit_pos } else { cmp::max(self.crit_pos, self.memory) };
for i in start..needle.len() {
if needle[i] != haystack[self.position + i] {
self.position += i - self.crit_pos + 1;
if !long_period {
self.memory = 0;
}
continue 'search;
}
}
// See if the left part of the needle matches
let start = if long_period { 0 } else { self.memory };
for i in (start..self.crit_pos).rev() {
if needle[i] != haystack[self.position + i] {
self.position += self.period;
if !long_period {
self.memory = needle.len() - self.period;
}
continue 'search;
}
}
// We have found a match!
let match_pos = self.position;
// Note: add self.period instead of needle.len() to have overlapping matches
self.position += needle.len();
if !long_period {
self.memory = 0; // set to needle.len() - self.period for overlapping matches
}
return S::matching(match_pos, match_pos + needle.len());
}
}
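This searcher is what str::find uses for substring patterns; the byteset test above is the fast reject that skips a whole needle length whenever the byte under the end of the window cannot occur in the needle.

fn main() {
    let haystack = "abcabcabd";
    assert_eq!(haystack.find("abd"), Some(6));
    assert_eq!(haystack.find("xyz"), None);
}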
core::str::pattern::TwoWaySearcher::reverse_maximal_suffix fn reverse_maximal_suffix(arr: &[u8], known_period: usize, order_greater: bool) -> usize {
let mut left = 0; // Corresponds to i in the paper
let mut right = 1; // Corresponds to j in the paper
let mut offset = 0; // Corresponds to k in the paper, but starting at 0
// to match 0-based indexing.
let mut period = 1; // Corresponds to p in the paper
let n = arr.len();
while right + offset < n {
let a = arr[n - (1 + right + offset)];
let b = arr[n - (1 + left + offset)];
if (a < b && !order_greater) || (a > b && order_greater) {
// Suffix is smaller, period is entire prefix so far.
right += offset + 1;
offset = 0;
period = right - left;
} else if a == b {
// Advance through repetition of the current period.
if offset + 1 == period {
right += offset + 1;
offset = 0;
} else {
offset += 1;
}
} else {
// Suffix is larger, start over from current location.
left = right;
right += 1;
offset = 0;
period = 1;
}
if period == known_period {
break;
}
}
debug_assert!(period <= known_period);
left
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get fn get(self, slice: &str) -> Option<&Self::Output> {
if self.start <= self.end
&& slice.is_char_boundary(self.start)
&& slice.is_char_boundary(self.end)
{
// SAFETY: just checked that `start` and `end` are on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
// We also checked char boundaries, so this is valid UTF-8.
Some(unsafe { &*self.get_unchecked(slice) })
} else {
None
}
}
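The boundary checks make get return None, rather than panic, when a range endpoint lands inside a multi-byte character:

fn main() {
    let s = "héllo"; // 'é' occupies bytes 1 and 2
    assert_eq!(s.get(0..1), Some("h"));
    assert_eq!(s.get(0..2), None); // byte 2 splits 'é'
    assert_eq!(s.get(0..3), Some("hé"));
}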
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_unchecked_mut::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::from_str(msg), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::index fn index(self, slice: &str) -> &Self::Output {
let (start, end) = (self.start, self.end);
match self.get(slice) {
Some(s) => s,
None => super::slice_error_fail(slice, start, end),
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::get fn get(self, slice: &str) -> Option<&Self::Output> {
if slice.is_char_boundary(self.start) {
// SAFETY: just checked that `start` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
Some(unsafe { &*self.get_unchecked(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::index fn index(self, slice: &str) -> &Self::Output {
let (start, end) = (self.start, slice.len());
match self.get(slice) {
Some(s) => s,
None => super::slice_error_fail(slice, start, end),
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::get fn get(self, slice: &str) -> Option<&Self::Output> {
if slice.is_char_boundary(self.end) {
// SAFETY: just checked that `end` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
Some(unsafe { &*self.get_unchecked(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::index fn index(self, slice: &str) -> &Self::Output {
let end = self.end;
match self.get(slice) {
Some(s) => s,
None => super::slice_error_fail(slice, 0, end),
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::index_mut fn index_mut(self, slice: &mut str) -> &mut Self::Output {
if slice.is_char_boundary(self.end) {
// SAFETY: just checked that `end` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
unsafe { &mut *self.get_unchecked_mut(slice) }
} else {
super::slice_error_fail(slice, 0, self.end)
}
}
core::str::validations::run_utf8_validation pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
let mut index = 0;
let len = v.len();
const USIZE_BYTES: usize = size_of::<usize>();
let ascii_block_size = 2 * USIZE_BYTES;
let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 };
// Below, we safely fall back to a slower codepath if the offset is `usize::MAX`,
// so the end-to-end behavior is the same at compiletime and runtime.
let align = const_eval_select!(
@capture { v: &[u8] } -> usize:
if const {
usize::MAX
} else {
v.as_ptr().align_offset(USIZE_BYTES)
}
);
while index < len {
let old_offset = index;
macro_rules! err {
($error_len: expr) => {
return Err(Utf8Error { valid_up_to: old_offset, error_len: $error_len })
};
}
macro_rules! next {
() => {{
index += 1;
// we needed data, but there was none: error!
if index >= len {
err!(None)
}
v[index]
}};
}
let first = v[index];
if first >= 128 {
let w = utf8_char_width(first);
// 2-byte encoding is for codepoints \u{0080} to \u{07ff}
// first C2 80 last DF BF
// 3-byte encoding is for codepoints \u{0800} to \u{ffff}
// first E0 A0 80 last EF BF BF
// excluding surrogate codepoints \u{d800} to \u{dfff}
// ED A0 80 to ED BF BF
// 4-byte encoding is for codepoints \u{10000} to \u{10ffff}
// first F0 90 80 80 last F4 8F BF BF
//
// Use the UTF-8 syntax from the RFC
//
// https://tools.ietf.org/html/rfc3629
// UTF8-1 = %x00-7F
// UTF8-2 = %xC2-DF UTF8-tail
// UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
// %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
// UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
// %xF4 %x80-8F 2( UTF8-tail )
match w {
2 => {
if next!() as i8 >= -64 {
err!(Some(1))
}
}
3 => {
match (first, next!()) {
(0xE0, 0xA0..=0xBF)
| (0xE1..=0xEC, 0x80..=0xBF)
| (0xED, 0x80..=0x9F)
| (0xEE..=0xEF, 0x80..=0xBF) => {}
_ => err!(Some(1)),
}
if next!() as i8 >= -64 {
err!(Some(2))
}
}
4 => {
match (first, next!()) {
(0xF0, 0x90..=0xBF) | (0xF1..=0xF3, 0x80..=0xBF) | (0xF4, 0x80..=0x8F) => {}
_ => err!(Some(1)),
}
if next!() as i8 >= -64 {
err!(Some(2))
}
if next!() as i8 >= -64 {
err!(Some(3))
}
}
_ => err!(Some(1)),
}
index += 1;
} else {
// Ascii case, try to skip forward quickly.
// When the pointer is aligned, read 2 words of data per iteration
// until we find a word containing a non-ascii byte.
if align != usize::MAX && align.wrapping_sub(index).is_multiple_of(USIZE_BYTES) {
let ptr = v.as_ptr();
while index < blocks_end {
// SAFETY: since `align - index` and `ascii_block_size` are
// multiples of `USIZE_BYTES`, `block = ptr.add(index)` is
// always aligned with a `usize` so it's safe to dereference
// both `block` and `block.add(1)`.
unsafe {
let block = ptr.add(index) as *const usize;
// break if there is a nonascii byte
let zu = contains_nonascii(*block);
let zv = contains_nonascii(*block.add(1));
if zu || zv {
break;
}
}
index += ascii_block_size;
}
// step from the point where the wordwise loop stopped
while index < len && v[index] < 128 {
index += 1;
}
} else {
index += 1;
}
}
}
Ok(())
}
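An illustrative exercise of the validator above through the stable `str::from_utf8` front end; the byte values are chosen to hit both error shapes:
fn main() {
    // ASCII fast path and a multi-byte sequence both validate.
    assert!(std::str::from_utf8(b"hello").is_ok());
    assert!(std::str::from_utf8("héllo".as_bytes()).is_ok());
    // 0x80 is a bare continuation byte: a one-byte error after one valid byte.
    let err = std::str::from_utf8(&[0x61, 0x80, 0x62]).unwrap_err();
    assert_eq!(err.valid_up_to(), 1);
    assert_eq!(err.error_len(), Some(1));
    // A truncated two-byte sequence reports `error_len() == None` (the `err!(None)` path).
    let err = std::str::from_utf8(&[0x61, 0xC3]).unwrap_err();
    assert_eq!(err.valid_up_to(), 1);
    assert_eq!(err.error_len(), None);
}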
core::sync::atomic::AtomicBool::compare_exchange pub fn compare_exchange(
&self,
current: bool,
new: bool,
success: Ordering,
failure: Ordering,
) -> Result<bool, bool> {
if EMULATE_ATOMIC_BOOL {
// Pick the strongest ordering from success and failure.
let order = match (success, failure) {
(SeqCst, _) => SeqCst,
(_, SeqCst) => SeqCst,
(AcqRel, _) => AcqRel,
(_, AcqRel) => {
panic!("there is no such thing as an acquire-release failure ordering")
}
(Release, Acquire) => AcqRel,
(Acquire, _) => Acquire,
(_, Acquire) => Acquire,
(Release, Relaxed) => Release,
(_, Release) => panic!("there is no such thing as a release failure ordering"),
(Relaxed, Relaxed) => Relaxed,
};
let old = if current == new {
// This is a no-op, but we still need to perform the operation
// for memory ordering reasons.
self.fetch_or(false, order)
} else {
// This sets the value to the new one and returns the old one.
self.swap(new, order)
};
if old == current { Ok(old) } else { Err(old) }
} else {
// SAFETY: data races are prevented by atomic intrinsics.
match unsafe {
atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
} {
Ok(x) => Ok(x != 0),
Err(x) => Err(x != 0),
}
}
}
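A minimal usage sketch of the success/failure contract above; the returned value is always the previously stored one:
use std::sync::atomic::{AtomicBool, Ordering};
fn main() {
    let flag = AtomicBool::new(false);
    // Succeeds: the stored value equals `current`, so `new` is written.
    assert_eq!(flag.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed), Ok(false));
    // Fails: the value is now `true`; the observed value comes back in `Err`.
    assert_eq!(flag.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed), Err(true));
}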
core::sync::atomic::AtomicBool::swap pub fn swap(&self, val: bool, order: Ordering) -> bool {
if EMULATE_ATOMIC_BOOL {
if val { self.fetch_or(true, order) } else { self.fetch_and(false, order) }
} else {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
}
}
core::sync::atomic::fence pub fn fence(order: Ordering) {
// SAFETY: using an atomic fence is safe.
unsafe {
match order {
Acquire => intrinsics::atomic_fence::<{ AO::Acquire }>(),
Release => intrinsics::atomic_fence::<{ AO::Release }>(),
AcqRel => intrinsics::atomic_fence::<{ AO::AcqRel }>(),
SeqCst => intrinsics::atomic_fence::<{ AO::SeqCst }>(),
Relaxed => panic!("there is no such thing as a relaxed fence"),
}
}
}
core::time::Duration::checked_add pub const fn checked_add(self, rhs: Duration) -> Option<Duration> {
if let Some(mut secs) = self.secs.checked_add(rhs.secs) {
let mut nanos = self.nanos.as_inner() + rhs.nanos.as_inner();
if nanos >= NANOS_PER_SEC {
nanos -= NANOS_PER_SEC;
if let Some(new_secs) = secs.checked_add(1) {
secs = new_secs;
} else {
return None;
}
}
debug_assert!(nanos < NANOS_PER_SEC);
Some(Duration::new(secs, nanos))
} else {
None
}
}
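A short check of the carry and overflow paths above:
use std::time::Duration;
fn main() {
    // Nanosecond overflow carries one second into the `secs` field.
    let a = Duration::new(1, 999_999_999);
    assert_eq!(a.checked_add(Duration::new(0, 2)), Some(Duration::new(2, 1)));
    // Overflow of the seconds field yields `None`.
    assert_eq!(Duration::MAX.checked_add(Duration::new(1, 0)), None);
}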
core::time::Duration::from_secs_f32 pub fn from_secs_f32(secs: f32) -> Duration {
match Duration::try_from_secs_f32(secs) {
Ok(v) => v,
Err(e) => panic!("{e}"),
}
}
core::time::Duration::try_from_secs_f32 pub fn try_from_secs_f32(secs: f32) -> Result<Duration, TryFromFloatSecsError> {
try_from_secs!(
secs = secs,
mantissa_bits = 23,
exponent_bits = 8,
offset = 41,
bits_ty = u32,
double_ty = u64,
)
}
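An illustrative sketch of the fallible conversion above; `from_secs_f32` panics on exactly the inputs `try_from_secs_f32` rejects:
use std::time::Duration;
fn main() {
    assert_eq!(Duration::from_secs_f32(2.5), Duration::new(2, 500_000_000));
    // Negative and non-finite inputs are rejected instead of panicking.
    assert!(Duration::try_from_secs_f32(-1.0).is_err());
    assert!(Duration::try_from_secs_f32(f32::NAN).is_err());
}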
<&mut I as core::iter::traits::exact_size::ExactSizeIterator>::is_empty fn is_empty(&self) -> bool {
(**self).is_empty()
}
<&mut I as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
(**self).advance_by(n)
}
<&mut I as core::iter::traits::iterator::IteratorRefSpec>::spec_try_fold default fn spec_try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
<I as core::iter::traits::iterator::Iterator::advance_by::SpecAdvanceBy>::spec_advance_by default fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
for i in 0..n {
if self.next().is_none() {
// SAFETY: `i` is always less than `n`.
return Err(unsafe { NonZero::new_unchecked(n - i) });
}
}
Ok(())
}
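A sketch of the `advance_by` contract implemented above: the `Err` payload reports how many of the requested steps could not be taken. `Iterator::advance_by` is unstable (`iter_advance_by`) at the time of writing, so this needs nightly:
#![feature(iter_advance_by)] // nightly-only
fn main() {
    let mut it = [1, 2, 3].into_iter();
    assert!(it.advance_by(2).is_ok());
    assert_eq!(it.next(), Some(3));
    // Only 3 of the requested 5 steps were possible; 2 are reported back.
    let mut it = [1, 2, 3].into_iter();
    assert_eq!(it.advance_by(5).map_err(|n| n.get()), Err(2));
}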
<[T] as core::slice::CloneFromSpec<T>>::spec_clone_from default fn spec_clone_from(&mut self, src: &[T]) {
assert!(self.len() == src.len(), "destination and source slices have different lengths");
// NOTE: We need to explicitly slice them to the same length
// to make it easier for the optimizer to elide bounds checking.
// But since that can't be relied on, we also have an explicit specialization for T: Copy.
let len = self.len();
let src = &src[..len];
for i in 0..len {
self[i].clone_from(&src[i]);
}
}
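A brief usage sketch of the clone-based specialization above:
fn main() {
    let src = [String::from("a"), String::from("b")];
    let mut dst = [String::new(), String::new()];
    // Clones element by element; panics if the lengths differ, per the assert above.
    dst.clone_from_slice(&src);
    assert_eq!(dst, src);
}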
<[T] as core::slice::specialize::SpecFill<T>>::spec_fill default fn spec_fill(&mut self, value: T) {
if let Some((last, elems)) = self.split_last_mut() {
for el in elems {
el.clone_from(&value);
}
*last = value
}
}
<core::any::TypeId as core::cmp::PartialEq>::eq::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
<core::array::drain::Drain<'_, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<T> {
let p: *const T = self.0.next()?;
// SAFETY: The iterator was already advanced, so we won't drop this later.
Some(unsafe { p.read() })
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::rfind fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
where
P: FnMut(&Self::Item) -> bool,
{
self.iter.find(predicate)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::rfold fn rfold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.iter.fold(init, f)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::find fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
where
P: FnMut(&Self::Item) -> bool,
{
self.iter.rfind(predicate)
}
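The two delegations above make `rev().find(..)` and `rfind(..)` scan in the same direction; a small check:
fn main() {
    let v = [1, 2, 3, 4, 5];
    // `Rev::find` forwards to the inner iterator's `rfind`, scanning from the back.
    assert_eq!(v.iter().rev().find(|&&x| x % 2 == 0), Some(&4));
    assert_eq!(v.iter().rfind(|&&x| x % 2 == 0), Some(&4));
}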
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_nth fn spec_nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_try_fold fn spec_try_fold<Acc, F, R>(&mut self, init: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
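A minimal check of the range specializations above:
fn main() {
    // step_by(3) over 0..10 yields 0, 3, 6, 9; `spec_nth` advances via `advance_by`.
    let mut it = (0u32..10).step_by(3);
    assert_eq!(it.nth(2), Some(6));
    assert_eq!(it.next(), Some(9));
    assert_eq!(it.next(), None);
}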
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::SpecFold>::spec_fold default fn spec_fold<Acc, F>(mut self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
let mut accum = init;
while let Some(x) = ZipImpl::next(&mut self) {
accum = f(accum, x);
}
accum
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::fold default fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
SpecFold::spec_fold(self, init, f)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::nth default fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.super_nth(n)
}
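A short sketch of `Zip` behavior through the `ZipImpl` paths above; iteration stops at the shorter side:
fn main() {
    let sum: i32 = [1, 2, 3].iter().zip([10, 20].iter()).map(|(a, b)| a + b).sum();
    assert_eq!(sum, 33); // (1+10) + (2+20); the trailing 3 is never paired
    assert_eq!([1, 2, 3].iter().zip([10, 20, 30].iter()).nth(1), Some((&2, &20)));
}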
<core::num::nonzero::NonZero<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
*self
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let taken = self.take_prefix(n);
NonZero::new(n - taken.len()).map_or(Ok(()), Err)
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::fold fn fold<B, F: FnMut(B, usize) -> B>(mut self, init: B, f: F) -> B {
self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&*get_offset_len_noubcheck(slice, self.start(), self.len())) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&mut *get_offset_len_mut_noubcheck(slice, self.start(), self.len())) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*get_offset_len_noubcheck(slice, self.start(), self.len()) }
} else {
slice_index_fail(self.start(), self.end(), slice.len())
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start(), self.len()) }
} else {
slice_index_fail(self.start(), self.end(), slice.len())
}
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_advance_back_by default fn spec_advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let steps = Step::steps_between(&self.start, &self.end);
let available = steps.1.unwrap_or(steps.0);
let taken = available.min(n);
self.end =
Step::backward_checked(self.end.clone(), taken).expect("`Step` invariants not upheld");
NonZero::new(n - taken).map_or(Ok(()), Err)
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_advance_by default fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let steps = Step::steps_between(&self.start, &self.end);
let available = steps.1.unwrap_or(steps.0);
let taken = available.min(n);
self.start =
Step::forward_checked(self.start.clone(), taken).expect("`Step` invariants not upheld");
NonZero::new(n - taken).map_or(Ok(()), Err)
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_next default fn spec_next(&mut self) -> Option<A> {
if self.start < self.end {
let n =
Step::forward_checked(self.start.clone(), 1).expect("`Step` invariants not upheld");
Some(mem::replace(&mut self.start, n))
} else {
None
}
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_next_back default fn spec_next_back(&mut self) -> Option<A> {
if self.start < self.end {
self.end =
Step::backward_checked(self.end.clone(), 1).expect("`Step` invariants not upheld");
Some(self.end.clone())
} else {
None
}
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_nth default fn spec_nth(&mut self, n: usize) -> Option<A> {
if let Some(plus_n) = Step::forward_checked(self.start.clone(), n) {
if plus_n < self.end {
self.start =
Step::forward_checked(plus_n.clone(), 1).expect("`Step` invariants not upheld");
return Some(plus_n);
}
}
self.start = self.end.clone();
None
}
<core::ops::range::Range<A> as core::iter::range::RangeIteratorImpl>::spec_nth_back default fn spec_nth_back(&mut self, n: usize) -> Option<A> {
if let Some(minus_n) = Step::backward_checked(self.end.clone(), n) {
if minus_n > self.start {
self.end =
Step::backward_checked(minus_n, 1).expect("`Step` invariants not upheld");
return Some(self.end.clone());
}
}
self.end = self.start.clone();
None
}
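A small check of the `spec_nth`/`spec_nth_back` bookkeeping above; both leave the range positioned past the returned element:
fn main() {
    let mut r = 0..10;
    assert_eq!(r.nth(3), Some(3));
    assert_eq!(r.start, 4); // start advanced past the returned element
    let mut r = 0..10;
    assert_eq!(r.nth_back(1), Some(8));
    assert_eq!(r.end, 8); // end moved down to the returned element
}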
<core::str::pattern::RejectAndMatch as core::str::pattern::TwoWayStrategy>::matching fn matching(a: usize, b: usize) -> Self::Output {
SearchStep::Match(a, b)
}
<core::str::pattern::RejectAndMatch as core::str::pattern::TwoWayStrategy>::rejecting fn rejecting(a: usize, b: usize) -> Self::Output {
SearchStep::Reject(a, b)
}
<core::str::pattern::RejectAndMatch as core::str::pattern::TwoWayStrategy>::use_early_reject fn use_early_reject() -> bool {
true
}
<core::str::pattern::StrSearcher<'a, 'b> as core::str::pattern::Searcher<'a>>::next fn next(&mut self) -> SearchStep {
match self.searcher {
StrSearcherImpl::Empty(ref mut searcher) => {
if searcher.is_finished {
return SearchStep::Done;
}
// empty needle rejects every char and matches every empty string between them
let is_match = searcher.is_match_fw;
searcher.is_match_fw = !searcher.is_match_fw;
let pos = searcher.position;
match self.haystack[pos..].chars().next() {
_ if is_match => SearchStep::Match(pos, pos),
None => {
searcher.is_finished = true;
SearchStep::Done
}
Some(ch) => {
searcher.position += ch.len_utf8();
SearchStep::Reject(pos, searcher.position)
}
}
}
StrSearcherImpl::TwoWay(ref mut searcher) => {
// TwoWaySearcher produces valid *Match* indices that split at char boundaries,
// as long as it matches correctly and both the haystack and the needle are
// valid UTF-8.
// *Rejects* from the algorithm can fall on any indices, but we walk them
// forward to the next character boundary manually, so that they are UTF-8 safe.
if searcher.position == self.haystack.len() {
return SearchStep::Done;
}
let is_long = searcher.memory == usize::MAX;
match searcher.next::<RejectAndMatch>(
self.haystack.as_bytes(),
self.needle.as_bytes(),
is_long,
) {
SearchStep::Reject(a, mut b) => {
// skip to next char boundary
while !self.haystack.is_char_boundary(b) {
b += 1;
}
searcher.position = cmp::max(b, searcher.position);
SearchStep::Reject(a, b)
}
otherwise => otherwise,
}
}
}
}
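The `Empty` arm above alternates match and reject steps, which is why an empty needle matches before, between, and after every char; a quick check via the stable API:
fn main() {
    assert_eq!("abc".matches("").count(), 4);
    assert_eq!("abc".find(""), Some(0));
}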
<u128 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<u128 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<u16 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<u16 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<u32 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<u32 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<u64 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<u64 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<u8 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<u8 as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
<usize as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shl unsafe fn unchecked_funnel_shl(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
self
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shl`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shr`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, shift),
super::unchecked_shr(rhs, $type::BITS - shift),
)
}
}
}
<usize as core::intrinsics::fallback::FunnelShift>::unchecked_funnel_shr unsafe fn unchecked_funnel_shr(self, rhs: Self, shift: u32) -> Self {
// This implementation is also used by Miri so we have to check the precondition.
// SAFETY: this is guaranteed by the caller
unsafe { super::assume(shift < $type::BITS) };
if shift == 0 {
rhs
} else {
// SAFETY:
// - `shift < T::BITS`, which satisfies `unchecked_shr`
// - this also ensures that `T::BITS - shift < T::BITS` (shift = 0 is checked
// above), which satisfies `unchecked_shl`
// - because the types are unsigned, the two shifted values occupy disjoint bits
//   (this is not true if they're signed, since SHR will fill in the empty space
//   with a sign bit, not zero)
unsafe {
super::disjoint_bitor(
super::unchecked_shl(self, $type::BITS - shift),
super::unchecked_shr(rhs, shift),
)
}
}
}
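A minimal sketch of the funnel-shift semantics implemented above, restricted to u8 and written with only stable operations; `funnel_shl` here is an illustrative stand-in for the unstable intrinsic, not its real signature:
fn funnel_shl(a: u8, b: u8, shift: u32) -> u8 {
    assert!(shift < u8::BITS); // the intrinsic's precondition, checked here
    if shift == 0 { a } else { (a << shift) | (b >> (u8::BITS - shift)) }
}
fn main() {
    // High bits come from `a`'s low bits; the vacated low bits are filled
    // from `b`'s high bits, and the two halves are disjoint.
    assert_eq!(funnel_shl(0b1100_0011, 0b1010_0000, 4), 0b0011_1010);
}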
core::cell::panic_already_borrowed::do_panic::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::cell::panic_already_mutably_borrowed const fn panic_already_mutably_borrowed(err: BorrowError) -> ! {
const_panic!(
"RefCell already mutably borrowed",
"{err}",
err: BorrowError = err,
)
}
core::cell::panic_already_mutably_borrowed::do_panic::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::clone::impls::<impl core::clone::Clone for !>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for *const T>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for *mut T>::clone fn clone(&self) -> Self {
*self
}
core::intrinsics::assume pub const unsafe fn assume(b: bool) {
if !b {
// SAFETY: the caller must guarantee the argument is never `false`
unsafe { unreachable() }
}
}
core::intrinsics::cold_path pub const fn cold_path() {}
core::intrinsics::const_make_global pub const unsafe fn const_make_global(ptr: *mut u8) -> *const u8 {
// const eval overrides this function; at runtime, it is a NOP.
ptr
}
core::intrinsics::disjoint_bitor pub const unsafe fn disjoint_bitor<T: [const] fallback::DisjointBitOr>(a: T, b: T) -> T {
// SAFETY: same preconditions as this function.
unsafe { fallback::DisjointBitOr::disjoint_bitor(a, b) }
}
core::intrinsics::overflow_checks pub const fn overflow_checks() -> bool {
cfg!(debug_assertions)
}
core::intrinsics::type_id_eq pub const fn type_id_eq(a: crate::any::TypeId, b: crate::any::TypeId) -> bool {
a.data == b.data
}
core::intrinsics::ub_checks pub const fn ub_checks() -> bool {
cfg!(ub_checks)
}
core::intrinsics::unchecked_funnel_shl pub const unsafe fn unchecked_funnel_shl<T: [const] fallback::FunnelShift>(
a: T,
b: T,
shift: u32,
) -> T {
// SAFETY: caller ensures that `shift` is in-range
unsafe { a.unchecked_funnel_shl(b, shift) }
}
core::intrinsics::unchecked_funnel_shr pub const unsafe fn unchecked_funnel_shr<T: [const] fallback::FunnelShift>(
a: T,
b: T,
shift: u32,
) -> T {
// SAFETY: caller ensures that `shift` is in-range
unsafe { a.unchecked_funnel_shr(b, shift) }
}
core::mem::discriminant pub const fn discriminant<T>(v: &T) -> Discriminant<T> {
Discriminant(intrinsics::discriminant_value(v))
}
core::num::<impl u128>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl u16>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl u32>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl u64>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl u8>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::<impl usize>::wrapping_add_signed pub const fn wrapping_add_signed(self, rhs: $SignedT) -> Self {
self.wrapping_add(rhs as Self)
}
core::num::from_ascii_radix_panic::do_panic::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::panicking::panic_cannot_unwind fn panic_cannot_unwind() -> ! {
// Keep the text in sync with `UnwindTerminateReason::as_str` in `rustc_middle`.
panic_nounwind("panic in a function that cannot unwind")
}
core::panicking::panic_const::panic_const_add_overflow pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_fn_resumed pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_fn_resumed_drop pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_fn_resumed_panic pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_gen_fn_resumed pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_gen_fn_resumed_drop pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_gen_fn_resumed_panic pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_coroutine_resumed pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_coroutine_resumed_drop pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_coroutine_resumed_panic pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_div_by_zero pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_div_overflow pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_gen_fn_none pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_gen_fn_none_drop pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_gen_fn_none_panic pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_mul_overflow pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_neg_overflow pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_rem_by_zero pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_rem_overflow pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_shl_overflow pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_shr_overflow pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_sub_overflow pub const fn $lang() -> ! {
// See the comment in `panic(&'static str)` for why we use `Arguments::from_str` here.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str($message));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_nounwind pub const fn panic_nounwind(expr: &'static str) -> ! {
#[cfg(not(feature = "ferrocene_certified"))]
panic_nounwind_fmt(fmt::Arguments::from_str(expr), /* force_no_backtrace */ false);
#[cfg(feature = "ferrocene_certified")]
panic_nounwind_fmt(&expr, /* force_no_backtrace */ false);
}
core::panicking::panic_nounwind_fmt pub const fn panic_nounwind_fmt(fmt: PanicFmt<'_>, _force_no_backtrace: bool) -> ! {
const_eval_select!(
@capture { fmt: PanicFmt<'_>, _force_no_backtrace: bool } -> !:
if const #[track_caller] {
// We don't unwind anyway at compile-time so we can call the regular `panic_fmt`.
panic_fmt(fmt)
} else #[track_caller] {
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
}
// NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
// that gets resolved to the `#[panic_handler]` function.
unsafe extern "Rust" {
#[lang = "panic_impl"]
fn panic_impl(pi: &PanicInfo<'_>) -> !;
}
// PanicInfo with the `can_unwind` flag set to false forces an abort.
#[cfg(not(feature = "ferrocene_certified"))]
let pi = PanicInfo::new(
&fmt,
Location::caller(),
/* can_unwind */ false,
_force_no_backtrace,
);
#[cfg(feature = "ferrocene_certified")]
let pi = PanicInfo::new(&fmt, Location::caller());
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
unsafe { panic_impl(&pi) }
}
)
}
core::panicking::panic_nounwind_fmt::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::profiling::compiler_copy pub fn compiler_copy<T, const SIZE: usize>(_src: *const T, _dst: *mut T) {
unreachable!(
"compiler_copy marks where the compiler generated a memcpy for copies. It is never actually called."
)
}
core::profiling::compiler_move pub fn compiler_move<T, const SIZE: usize>(_src: *const T, _dst: *mut T) {
unreachable!(
"compiler_move marks where the compiler generated a memcpy for moves. It is never actually called."
)
}
core::ptr::const_ptr::<impl *const T>::add::runtime_add_nowrap::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::const_ptr::<impl *const T>::is_null::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::const_ptr::<impl *const T>::offset::runtime_offset_nowrap::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned::runtime_ptr_ge::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::const_ptr::<impl core::cmp::Ord for *const T>::cmp fn cmp(&self, other: &*const T) -> Ordering {
if self < other {
Less
} else if self == other {
Equal
} else {
Greater
}
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::ge fn ge(&self, other: &*const T) -> bool {
*self >= *other
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::gt fn gt(&self, other: &*const T) -> bool {
*self > *other
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::le fn le(&self, other: &*const T) -> bool {
*self <= *other
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::lt fn lt(&self, other: &*const T) -> bool {
*self < *other
}
core::ptr::const_ptr::<impl core::cmp::PartialOrd for *const T>::partial_cmp fn partial_cmp(&self, other: &*const T) -> Option<Ordering> {
Some(self.cmp(other))
}
core::ptr::mut_ptr::<impl *mut T>::add::runtime_add_nowrap::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::mut_ptr::<impl *mut T>::offset::runtime_offset_nowrap::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::mut_ptr::<impl *mut T>::sub::runtime_sub_nowrap::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::swap_nonoverlapping::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::swap_nonoverlapping_const const unsafe fn swap_nonoverlapping_const<T>(x: *mut T, y: *mut T, count: usize) {
let mut i = 0;
while i < count {
// SAFETY: By precondition, `i` is in-bounds because it's below `n`
let x = unsafe { x.add(i) };
// SAFETY: By precondition, `i` is in-bounds because it's below `n`
// and it's distinct from `x` since the ranges are non-overlapping
let y = unsafe { y.add(i) };
// SAFETY: we're only ever given pointers that are valid to read/write,
// including being aligned, and nothing here panics so it's drop-safe.
unsafe {
// Note that it's critical that these use `copy_nonoverlapping`,
// rather than `read`/`write`, to avoid #134713 if T has padding.
let mut temp = MaybeUninit::<T>::uninit();
copy_nonoverlapping(x, temp.as_mut_ptr(), 1);
copy_nonoverlapping(y, x, 1);
copy_nonoverlapping(temp.as_ptr(), y, 1);
}
i += 1;
}
}
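A usage sketch through the stable `ptr::swap_nonoverlapping` front end:
fn main() {
    let mut x = [1, 2, 3];
    let mut y = [4, 5, 6];
    // SAFETY: distinct locals, so both regions are valid, aligned, and non-overlapping.
    unsafe { std::ptr::swap_nonoverlapping(x.as_mut_ptr(), y.as_mut_ptr(), 3) };
    assert_eq!(x, [4, 5, 6]);
    assert_eq!(y, [1, 2, 3]);
}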
core::slice::<impl [T]>::align_to_offsets::gcd const fn gcd(a: usize, b: usize) -> usize {
if b == 0 { a } else { gcd(b, a % b) }
}
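A worked trace of the Euclidean recursion above: gcd(12, 18) -> gcd(18, 12) -> gcd(12, 6) -> gcd(6, 0) -> 6.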
core::slice::<impl [T]>::clone_from_slice pub fn clone_from_slice(&mut self, src: &[T])
where
T: Clone,
{
self.spec_clone_from(src);
}
core::slice::<impl [T]>::len pub const fn len(&self) -> usize {
ptr::metadata(self)
}
core::slice::copy_from_slice_impl::len_mismatch_fail::do_panic::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::slice::index::slice_index_fail::do_panic::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::str::<impl str>::floor_char_boundary pub const fn floor_char_boundary(&self, index: usize) -> usize {
if index >= self.len() {
self.len()
} else {
let mut i = index;
while i > 0 {
if self.as_bytes()[i].is_utf8_char_boundary() {
break;
}
i -= 1;
}
// The character boundary will be within four bytes of the index
debug_assert!(i >= index.saturating_sub(3));
i
}
}
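A sketch of the clamping behavior above; `floor_char_boundary` is unstable (`round_char_boundary`) at the time of writing, so this needs nightly:
#![feature(round_char_boundary)] // nightly-only
fn main() {
    let s = "héllo"; // 'é' occupies bytes 1..3
    assert_eq!(s.floor_char_boundary(2), 1); // index 2 is inside 'é'
    assert_eq!(s.floor_char_boundary(3), 3); // already a boundary
    assert_eq!(s.floor_char_boundary(100), s.len()); // clamped to the length
}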
core::str::<impl str>::from_utf8 pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
converts::from_utf8(v)
}
core::str::<impl str>::from_utf8_mut pub const fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
converts::from_utf8_mut(v)
}
core::str::<impl str>::from_utf8_unchecked pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
// SAFETY: converts::from_utf8_unchecked has the same safety requirements as this function.
unsafe { converts::from_utf8_unchecked(v) }
}
core::str::<impl str>::from_utf8_unchecked_mut pub const unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
// SAFETY: converts::from_utf8_unchecked_mut has the same safety requirements as this function.
unsafe { converts::from_utf8_unchecked_mut(v) }
}
core::str::converts::from_utf8_mut pub const fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
// FIXME(const-hack): This should use `?` again, once it's `const`
match run_utf8_validation(v) {
Ok(_) => {
// SAFETY: validation succeeded.
Ok(unsafe { from_utf8_unchecked_mut(v) })
}
Err(err) => Err(err),
}
}
core::str::error::Utf8Error::error_len pub const fn error_len(&self) -> Option<usize> {
// FIXME(const-hack): This should become `map` again, once it's `const`
match self.error_len {
Some(len) => Some(len as usize),
None => None,
}
}
core::str::pattern::Pattern::is_prefix_of fn is_prefix_of(self, haystack: &str) -> bool {
matches!(self.into_searcher(haystack).next(), SearchStep::Match(0, _))
}
core::str::slice_error_fail const fn slice_error_fail(s: &str, begin: usize, end: usize) -> ! {
crate::intrinsics::const_eval_select((s, begin, end), slice_error_fail_ct, slice_error_fail_rt)
}
core::str::slice_error_fail_ct const fn slice_error_fail_ct(_: &str, _: usize, _: usize) -> ! {
panic!("failed to slice string");
}
core::str::slice_error_fail_rt fn slice_error_fail_rt(s: &str, begin: usize, end: usize) -> ! {
const MAX_DISPLAY_LENGTH: usize = 256;
let trunc_len = s.floor_char_boundary(MAX_DISPLAY_LENGTH);
let s_trunc = &s[..trunc_len];
let ellipsis = if trunc_len < s.len() { "[...]" } else { "" };
// 1. out of bounds
if begin > s.len() || end > s.len() {
let oob_index = if begin > s.len() { begin } else { end };
panic!("byte index {oob_index} is out of bounds of `{s_trunc}`{ellipsis}");
}
// 2. begin <= end
assert!(
begin <= end,
"begin <= end ({} <= {}) when slicing `{}`{}",
begin,
end,
s_trunc,
ellipsis
);
// 3. character boundary
let index = if !s.is_char_boundary(begin) { begin } else { end };
// find the character
let char_start = s.floor_char_boundary(index);
// `char_start` must be less than len and a char boundary
let ch = s[char_start..].chars().next().unwrap();
let char_range = char_start..char_start + ch.len_utf8();
panic!(
"byte index {} is not a char boundary; it is inside {:?} (bytes {:?}) of `{}`{}",
index, ch, char_range, s_trunc, ellipsis
);
}
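The panic message assembled above can be observed with a deliberately bad slice; this sketch assumes the default panic hook:
fn main() {
    let s = "héllo";
    // Panics via the char-boundary branch above with a message like:
    // byte index 2 is not a char boundary; it is inside 'é' (bytes 1..3) of `héllo`
    let result = std::panic::catch_unwind(|| &s[0..2]);
    assert!(result.is_err());
}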
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_mut fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
if self.start <= self.end
&& slice.is_char_boundary(self.start)
&& slice.is_char_boundary(self.end)
{
// SAFETY: just checked that `start` and `end` are on a char boundary.
// We know the pointer is unique because we got it from `slice`.
Some(unsafe { &mut *self.get_unchecked_mut(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::index_mut fn index_mut(self, slice: &mut str) -> &mut Self::Output {
// is_char_boundary checks that the index is in [0, .len()]
// cannot reuse `get` as above, because of NLL trouble
if self.start <= self.end
&& slice.is_char_boundary(self.start)
&& slice.is_char_boundary(self.end)
{
// SAFETY: just checked that `start` and `end` are on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
unsafe { &mut *self.get_unchecked_mut(slice) }
} else {
super::slice_error_fail(slice, self.start, self.end)
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::get_mut fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
if slice.is_char_boundary(self.start) {
// SAFETY: just checked that `start` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
Some(unsafe { &mut *self.get_unchecked_mut(slice) })
} else {
None
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
let len = (slice as *mut [u8]).len();
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (self.start..len).get_unchecked_mut(slice) }
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::index_mut fn index_mut(self, slice: &mut str) -> &mut Self::Output {
if slice.is_char_boundary(self.start) {
// SAFETY: just checked that `start` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
unsafe { &mut *self.get_unchecked_mut(slice) }
} else {
super::slice_error_fail(slice, self.start, slice.len())
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::get_mut fn get_mut(self, slice: &mut str) -> Option<&mut Self::Output> {
if slice.is_char_boundary(self.end) {
// SAFETY: just checked that `end` is on a char boundary,
// and we are passing in a safe reference, so the return value will also be one.
Some(unsafe { &mut *self.get_unchecked_mut(slice) })
} else {
None
}
}
core::str::validations::run_utf8_validation::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::sync::atomic::compiler_fence pub fn compiler_fence(order: Ordering) {
// SAFETY: using an atomic fence is safe.
unsafe {
match order {
Acquire => intrinsics::atomic_singlethreadfence::<{ AO::Acquire }>(),
Release => intrinsics::atomic_singlethreadfence::<{ AO::Release }>(),
AcqRel => intrinsics::atomic_singlethreadfence::<{ AO::AcqRel }>(),
SeqCst => intrinsics::atomic_singlethreadfence::<{ AO::SeqCst }>(),
Relaxed => panic!("there is no such thing as a relaxed fence"),
}
}
}
core::ub_checks::check_language_ub::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ub_checks::maybe_is_aligned::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ub_checks::maybe_is_nonoverlapping::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
<&'b str as core::str::pattern::Pattern>::into_searcher fn into_searcher(self, haystack: &str) -> StrSearcher<'_, 'b> {
StrSearcher::new(haystack, self)
}
<&'b str as core::str::pattern::Pattern>::is_prefix_of fn is_prefix_of(self, haystack: &str) -> bool {
haystack.as_bytes().starts_with(self.as_bytes())
}
<&T as core::convert::AsRef<U>>::as_ref fn as_ref(&self) -> &U {
<T as AsRef<U>>::as_ref(*self)
}
<&T as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
self
}
<&bool as core::ops::bit::BitAnd<&bool>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&bool as core::ops::bit::BitAnd<bool>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&bool as core::ops::bit::BitOr<&bool>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&bool as core::ops::bit::BitOr<bool>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&bool as core::ops::bit::BitXor<&bool>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&bool as core::ops::bit::BitXor<bool>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&bool as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f128 as core::ops::arith::Add<&f128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Add<f128>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Div<&f128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Div<f128>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Mul<&f128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Mul<f128>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f128 as core::ops::arith::Rem<&f128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Rem<f128>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Sub<&f128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Sub<f128>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Add<&f16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Add<f16>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Div<&f16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Div<f16>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Mul<&f16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Mul<f16>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f16 as core::ops::arith::Rem<&f16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Rem<f16>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Sub<&f16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Sub<f16>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Add<&f32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Add<f32>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Div<&f32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Div<f32>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Mul<&f32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Mul<f32>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f32 as core::ops::arith::Rem<&f32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Rem<f32>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Sub<&f32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Sub<f32>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Add<&f64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Add<f64>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Div<&f64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Div<f64>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Mul<&f64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Mul<f64>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f64 as core::ops::arith::Rem<&f64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Rem<f64>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Sub<&f64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Sub<f64>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Add<&i128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Add<i128>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Div<&i128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Div<i128>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Mul<&i128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Mul<i128>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i128 as core::ops::arith::Rem<&i128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Rem<i128>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Sub<&i128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Sub<i128>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::BitAnd<&i128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::BitAnd<i128>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::BitOr<&i128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::BitOr<i128>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::BitXor<&i128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::BitXor<i128>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
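Note that the shift impls, unlike the other binary operators, accept any integer type on the right-hand side, by value or by reference. A brief sketch (illustrative only):
fn demo_shifts() {
    let x: i128 = 1;
    assert_eq!(&x << 3u8, 8);   // <&i128 as Shl<u8>>::shl
    assert_eq!(&x >> &1i64, 0); // <&i128 as Shr<&i64>>::shr
}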
<&i16 as core::ops::arith::Add<&i16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Add<i16>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Div<&i16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Div<i16>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Mul<&i16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Mul<i16>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i16 as core::ops::arith::Rem<&i16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Rem<i16>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Sub<&i16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Sub<i16>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::BitAnd<&i16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::BitAnd<i16>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::BitOr<&i16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::BitOr<i16>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::BitXor<&i16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::BitXor<i16>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Add<&i32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Add<i32>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Div<&i32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Div<i32>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Mul<&i32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Mul<i32>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i32 as core::ops::arith::Rem<&i32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Rem<i32>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Sub<&i32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Sub<i32>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::BitAnd<&i32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::BitAnd<i32>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::BitOr<&i32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::BitOr<i32>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::BitXor<&i32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::BitXor<i32>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Add<&i64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Add<i64>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Div<&i64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Div<i64>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Mul<&i64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Mul<i64>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i64 as core::ops::arith::Rem<&i64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Rem<i64>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Sub<&i64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Sub<i64>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::BitAnd<&i64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::BitAnd<i64>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::BitOr<&i64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::BitOr<i64>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::BitXor<&i64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::BitXor<i64>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Add<&i8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Add<i8>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Div<&i8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Div<i8>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Mul<&i8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Mul<i8>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i8 as core::ops::arith::Rem<&i8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Rem<i8>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Sub<&i8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Sub<i8>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::BitAnd<&i8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::BitAnd<i8>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::BitOr<&i8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::BitOr<i8>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::BitXor<&i8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::BitXor<i8>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Add<&isize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Add<isize>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Div<&isize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Div<isize>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Mul<&isize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Mul<isize>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&isize as core::ops::arith::Rem<&isize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Rem<isize>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Sub<&isize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Sub<isize>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::BitAnd<&isize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::BitAnd<isize>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::BitOr<&isize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::BitOr<isize>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::BitXor<&isize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::BitXor<isize>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&isize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&mut I as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
(**self).len()
}
<&mut I as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<I::Item> {
(**self).next()
}
<&mut I as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
(**self).nth(n)
}
<&mut I as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
(**self).size_hint()
}
<&mut I as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.spec_try_fold(init, f)
}
<&mut T as core::convert::AsMut<U>>::as_mut fn as_mut(&mut self) -> &mut U {
(*self).as_mut()
}
<&mut T as core::convert::AsRef<U>>::as_ref fn as_ref(&self) -> &U {
<T as AsRef<U>>::as_ref(*self)
}
<&mut T as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
self
}
<&mut T as core::ops::deref::DerefMut>::deref_mut fn deref_mut(&mut self) -> &mut T {
self
}
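The `&mut I` impls above forward every Iterator method through `(**self)`; this blanket impl is what lets an iterator be advanced through a mutable borrow without being consumed. A small sketch (names illustrative):
fn main() {
    let mut it = [1, 2, 3, 4].into_iter();
    // `&mut it` is itself an Iterator via the forwarding impl, so the
    // adapter consumes the borrow rather than the iterator.
    let first_two: Vec<i32> = (&mut it).take(2).collect();
    assert_eq!(first_two, [1, 2]);
    assert_eq!(it.next(), Some(3));
}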
<&u128 as core::ops::arith::Add<&u128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Add<u128>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Div<&u128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Div<u128>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Mul<&u128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Mul<u128>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Rem<&u128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Rem<u128>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Sub<&u128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Sub<u128>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::BitAnd<&u128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::BitAnd<u128>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::BitOr<&u128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::BitOr<u128>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::BitXor<&u128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::BitXor<u128>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
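Unary `Not` forwards the same way: `!&x` dereferences and applies the by-value `!`. For example (illustrative):
fn main() {
    let x: u128 = 0;
    assert_eq!(!&x, u128::MAX);
}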
<&u128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Add<&u16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Add<u16>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Div<&u16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Div<u16>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Mul<&u16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Mul<u16>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Rem<&u16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Rem<u16>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Sub<&u16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Sub<u16>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::BitAnd<&u16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::BitAnd<u16>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::BitOr<&u16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::BitOr<u16>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::BitXor<&u16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::BitXor<u16>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Add<&u32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Add<u32>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Div<&u32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Div<u32>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Mul<&u32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Mul<u32>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Rem<&u32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Rem<u32>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Sub<&u32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Sub<u32>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::BitAnd<&u32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::BitAnd<u32>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::BitOr<&u32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::BitOr<u32>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::BitXor<&u32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::BitXor<u32>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Add<&u64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Add<u64>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Div<&u64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Div<u64>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Mul<&u64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Mul<u64>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Rem<&u64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Rem<u64>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Sub<&u64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Sub<u64>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::BitAnd<&u64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::BitAnd<u64>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::BitOr<&u64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::BitOr<u64>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::BitXor<&u64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::BitXor<u64>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Add<&u8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Add<u8>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Div<&u8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Div<u8>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Mul<&u8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Mul<u8>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Rem<&u8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Rem<u8>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Sub<&u8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Sub<u8>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::BitAnd<&u8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::BitAnd<u8>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::BitOr<&u8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::BitOr<u8>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::BitXor<&u8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::BitXor<u8>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Add<&usize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Add<usize>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Div<&usize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Div<usize>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Mul<&usize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Mul<usize>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Rem<&usize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Rem<usize>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Sub<&usize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Sub<usize>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::BitAnd<&usize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::BitAnd<usize>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::BitOr<&usize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::BitOr<usize>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::BitXor<&usize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::BitXor<usize>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&usize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<() as core::default::Default>::default fn default() -> $t {
$v
}
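`$v` here is the per-type default value substituted by core's default_impl!-style macro; for `()` it is `()` itself, and the same template later in this listing yields `false` for bool and `'\x00'` for char. Illustratively:
fn main() {
    assert_eq!(<() as Default>::default(), ());
    assert_eq!(bool::default(), false);
    assert_eq!(char::default(), '\0');
}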
<(core::ops::range::Bound<T>, core::ops::range::Bound<T>) as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
self
}
<(core::ops::range::Bound<T>, core::ops::range::Bound<T>) as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
match *self {
(_, Included(ref end)) => Included(end),
(_, Excluded(ref end)) => Excluded(end),
(_, Unbounded) => Unbounded,
}
}
<(core::ops::range::Bound<T>, core::ops::range::Bound<T>) as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
match *self {
(Included(ref start), _) => Included(start),
(Excluded(ref start), _) => Excluded(start),
(Unbounded, _) => Unbounded,
}
}
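A pair of Bounds is itself a RangeBounds, so it can stand in anywhere a generic range is expected, including the provided `contains` method. A minimal sketch:
use core::ops::Bound::{Excluded, Included};
use core::ops::RangeBounds;

fn main() {
    let bounds = (Included(2), Excluded(5));
    assert_eq!(bounds.start_bound(), Included(&2));
    assert!(bounds.contains(&4));
    assert!(!bounds.contains(&5));
}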
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&Self::Output> {
into_range(slice.len(), self)?.get(slice)
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut Self::Output> {
into_range(slice.len(), self)?.get_mut(slice)
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { into_range_unchecked(slice.len(), self).get_unchecked(slice) }
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { into_range_unchecked(slice.len(), self).get_unchecked_mut(slice) }
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &Self::Output {
into_slice_range(slice.len(), self).index(slice)
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut Self::Output {
into_slice_range(slice.len(), self).index_mut(slice)
}
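These SliceIndex impls convert the bound pair into a concrete range before delegating, so indexing a slice with (Bound<usize>, Bound<usize>) behaves like indexing with the corresponding a..b. A sketch of the checked path:
use core::ops::Bound::{Excluded, Included, Unbounded};

fn main() {
    let v = [10, 20, 30, 40];
    assert_eq!(v.get((Included(1), Excluded(3))), Some(&v[1..3]));
    // Out-of-range bounds make `get` return None rather than panic.
    assert_eq!(v.get((Included(5), Unbounded)), None);
}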
<I as core::iter::traits::collect::IntoIterator>::into_iter fn into_iter(self) -> I {
self
}
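This identity impl is why `for` loops accept iterators directly: IntoIterator for any I: Iterator simply returns self. For instance:
fn main() {
    let mut sum = 0;
    // `filter(..)` returns an Iterator, which is IntoIterator via the
    // identity impl above, so the `for` loop takes it as-is.
    for x in (1..=4).filter(|x| x % 2 == 0) {
        sum += x;
    }
    assert_eq!(sum, 6);
}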
<I as core::iter::traits::iterator::Iterator::advance_by::SpecAdvanceBy>::spec_advance_by fn spec_advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let Some(n) = NonZero::new(n) else {
return Ok(());
};
let res = self.try_fold(n, |n, _| NonZero::new(n.get() - 1));
match res {
None => Ok(()),
Some(n) => Err(n),
}
}
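The counting trick above: n is wrapped in a NonZero, every element decrements it inside try_fold, and the fold short-circuits with None the moment the counter reaches zero; a leftover Some(k) therefore means the iterator ran dry k steps early. Since `advance_by` itself is unstable at the time of writing, here is the same trick spelled out with stable `try_fold` (illustrative):
use core::num::NonZero;

fn main() {
    let mut it = [1, 2, 3].into_iter();
    let n = NonZero::new(2usize).unwrap();
    // Each element maps the counter k to NonZero::new(k - 1); reaching
    // zero yields None, which stops the fold after exactly n elements.
    let leftover = it.try_fold(n, |k, _| NonZero::new(k.get() - 1));
    assert_eq!(leftover, None); // advanced by 2
    assert_eq!(it.next(), Some(3)); // the rest is untouched
}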
<T as core::array::SpecArrayClone>::clone default fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
from_trusted_iterator(array.iter().cloned())
}
<T as core::array::SpecArrayClone>::clone fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
// SAFETY: `TrivialClone` implies that this is equivalent to calling
// `Clone` on every element.
unsafe { ptr::read(array) }
}
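Two specializations of array Clone: the default goes element by element through a trusted iterator, while types marked TrivialClone (an internal marker meaning Clone is equivalent to a bit-copy) take the `ptr::read` shortcut. Which path runs is an internal detail; the observable behavior is plain `Clone`:
fn main() {
    let a: [String; 2] = [String::from("a"), String::from("b")];
    let b = a.clone(); // element-by-element path: String is not trivially cloneable
    assert_eq!(a, b);

    let xs = [1u8, 2, 3];
    let ys = xs.clone(); // eligible for the bit-copy shortcut
    assert_eq!(xs, ys);
}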
<T as core::array::equality::SpecArrayEq<Other, N>>::spec_eq default fn spec_eq(a: &[Self; N], b: &[Other; N]) -> bool {
a[..] == b[..]
}
<T as core::array::equality::SpecArrayEq<Other, N>>::spec_ne default fn spec_ne(a: &[Self; N], b: &[Other; N]) -> bool {
a[..] != b[..]
}
<T as core::convert::From<T>>::from fn from(t: T) -> T {
t
}
<T as core::convert::Into<U>>::into fn into(self) -> U {
U::from(self)
}
<T as core::convert::TryFrom<U>>::try_from fn try_from(value: U) -> Result<Self, Self::Error> {
Ok(U::into(value))
}
<T as core::convert::TryInto<U>>::try_into fn try_into(self) -> Result<U, U::Error> {
U::try_from(self)
}
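These four blanket impls chain together: Into is defined in terms of From, and the reflexive TryFrom wraps the infallible conversion in Ok (its Error is Infallible). For instance:
fn main() {
    let n: i64 = 42i32.into(); // dispatches to <i64 as From<i32>>::from
    assert_eq!(n, 42);
    // The blanket TryFrom simply returns Ok(value.into()).
    assert_eq!(i64::try_from(42i32), Ok(42));
}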
<T as core::iter::adapters::step_by::SpecRangeSetup<T>>::setup default fn setup(inner: T, _step: usize) -> T {
inner
}
<[A] as core::slice::cmp::SlicePartialEq<B>>::equal default fn equal(&self, other: &[B]) -> bool {
if self.len() != other.len() {
return false;
}
// Implemented as explicit indexing rather
// than zipped iterators for performance reasons.
// See PR https://github.com/rust-lang/rust/pull/116846
// FIXME(const_hack): make this a `for idx in 0..self.len()` loop.
let mut idx = 0;
while idx < self.len() {
// bounds checks are optimized away
if self[idx] != other[idx] {
return false;
}
idx += 1;
}
true
}
<[T] as core::convert::AsMut<[T]>>::as_mut fn as_mut(&mut self) -> &mut [T] {
self
}
<[T] as core::convert::AsRef<[T]>>::as_ref fn as_ref(&self) -> &[T] {
self
}
<[T] as core::slice::specialize::SpecFill<T>>::spec_fill default fn spec_fill(&mut self, value: T) {
for item in self.iter_mut() {
// SAFETY: `TrivialClone` indicates that this is equivalent to
// calling `Clone::clone`
*item = unsafe { ptr::read(&value) };
}
}
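The `spec_fill` variant shown relies on the TrivialClone marker, bit-copying `value` into each slot instead of calling Clone per element; either way the observable API is `slice::fill`:
fn main() {
    let mut buf = [0u8; 4];
    buf.fill(0xFF); // clones `value` into every element
    assert_eq!(buf, [0xFF; 4]);
}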
<[core::mem::maybe_uninit::MaybeUninit<T>; N] as core::array::iter::iter_inner::PartialDrop>::partial_drop unsafe fn partial_drop(&mut self, alive: IndexRange) {
let slice: &mut [MaybeUninit<T>] = self;
// SAFETY: Initialized elements in the array are also initialized in the slice.
unsafe { slice.partial_drop(alive) }
}
<[core::mem::maybe_uninit::MaybeUninit<T>; N]>::transpose pub const fn transpose(self) -> MaybeUninit<[T; N]> {
// SAFETY: T and MaybeUninit<T> have the same layout
unsafe { intrinsics::transmute_unchecked(self) }
}
<[core::mem::maybe_uninit::MaybeUninit<T>] as core::array::iter::iter_inner::PartialDrop>::partial_drop unsafe fn partial_drop(&mut self, alive: IndexRange) {
// SAFETY: We know that all elements within `alive` are properly initialized.
unsafe { self.get_unchecked_mut(alive).assume_init_drop() }
}
<[core::mem::maybe_uninit::MaybeUninit<T>]>::assume_init_drop pub const unsafe fn assume_init_drop(&mut self)
where
T: [const] Destruct,
{
if !self.is_empty() {
// SAFETY: the caller must guarantee that every element of `self`
// is initialized and satisfies all invariants of `T`.
// Dropping the value in place is safe if that is the case.
unsafe { ptr::drop_in_place(self as *mut [MaybeUninit<T>] as *mut [T]) }
}
}
<[core::mem::maybe_uninit::MaybeUninit<T>]>::assume_init_ref pub const unsafe fn assume_init_ref(&self) -> &[T] {
// SAFETY: casting `slice` to a `*const [T]` is safe since the caller guarantees that
// `slice` is initialized, and `MaybeUninit` is guaranteed to have the same layout as `T`.
// The pointer obtained is valid since it refers to memory owned by `slice` which is a
// reference and thus guaranteed to be valid for reads.
unsafe { &*(self as *const Self as *const [T]) }
}
<bool as core::default::Default>::default fn default() -> $t {
$v
}
<bool as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<bool as core::ops::bit::BitAnd<&bool>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<bool as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<bool as core::ops::bit::BitAndAssign<&bool>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<bool as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<bool as core::ops::bit::BitOr<&bool>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<bool as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<bool as core::ops::bit::BitOrAssign<&bool>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<bool as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<bool as core::ops::bit::BitXor<&bool>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<bool as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<bool as core::ops::bit::BitXorAssign<&bool>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<bool as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<bool as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<char as core::default::Default>::default fn default() -> $t {
$v
}
<core::any::TypeId as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
#[cfg(miri)]
return crate::intrinsics::type_id_eq(*self, *other);
#[cfg(not(miri))]
{
let this = self;
crate::intrinsics::const_eval_select!(
@capture { this: &TypeId, other: &TypeId } -> bool:
if const {
crate::intrinsics::type_id_eq(*this, *other)
} else {
// Ideally we would just invoke `type_id_eq` unconditionally here, but
// intrinsics are not MIR-inlined (backends may want to override them,
// and Miri does!), so MIR opts cannot clean up this call well enough
// for LLVM to turn repeated comparisons against one specific `TypeId`
// into a lookup table.
// SAFETY: We know that at runtime none of the bits have provenance and all bits
// are initialized. So we can just convert the whole thing to a `u128` and compare that.
unsafe {
crate::mem::transmute::<_, u128>(*this) == crate::mem::transmute::<_, u128>(*other)
}
}
)
}
}
<core::any::TypeId as core::cmp::PartialEq>::eq::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
<core::array::Guard<'_, T> as core::ops::drop::Drop>::drop fn drop(&mut self) {
debug_assert!(self.initialized <= self.array_mut.len());
// SAFETY: this slice will contain only initialized objects.
unsafe {
self.array_mut.get_unchecked_mut(..self.initialized).assume_init_drop();
}
}
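// A hedged sketch of the drop-guard pattern behind `Guard::drop` above;
// `PrefixGuard` is an illustrative type, not the core one. If initialization
// panics midway, the guard's `Drop` impl frees exactly the written prefix.
use std::mem::MaybeUninit;

struct PrefixGuard<'a, T> {
    slice: &'a mut [MaybeUninit<T>],
    initialized: usize,
}

impl<T> Drop for PrefixGuard<'_, T> {
    fn drop(&mut self) {
        debug_assert!(self.initialized <= self.slice.len());
        for item in &mut self.slice[..self.initialized] {
            // SAFETY: only the first `initialized` elements have been written.
            unsafe { item.assume_init_drop() };
        }
    }
}

fn main() {
    let mut storage: Vec<MaybeUninit<String>> = (0..4).map(|_| MaybeUninit::uninit()).collect();
    let mut guard = PrefixGuard { slice: &mut storage, initialized: 0 };
    for i in 0..2 {
        guard.slice[i].write(format!("item {i}"));
        guard.initialized += 1;
    }
    // Dropping the guard (normally, or during unwinding) drops only the two
    // initialized strings; the untouched slots are left alone.
    drop(guard);
}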
<core::array::drain::Drain<'_, T> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
self.0.len()
}
<core::array::drain::Drain<'_, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.len();
(n, Some(n))
}
<core::array::drain::Drain<'_, T> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> T {
// SAFETY: `Drain` is 1:1 with the inner iterator, so if the caller promised
// that there's an element left, the inner iterator has one too.
let p: *const T = unsafe { self.0.next_unchecked() };
// SAFETY: The iterator was already advanced, so we won't drop this later.
unsafe { p.read() }
}
<core::array::drain::Drain<'_, T> as core::ops::drop::Drop>::drop fn drop(&mut self) {
// SAFETY: By the type invariant, we're allowed to drop all these.
unsafe { drop_in_place(self.0.as_mut_slice()) }
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.unsize_mut().advance_by(n)
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, Fold>(mut self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
self.unsize_mut().fold(init, fold)
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
self.unsize_mut().next()
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.unsize().size_hint()
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.unsize_mut().try_fold(init, f)
}
<core::array::iter::IntoIter<T, N> as core::ops::drop::Drop>::drop fn drop(&mut self) {
if crate::mem::needs_drop::<T>() {
// SAFETY: This is the only place where we drop this field.
unsafe { ManuallyDrop::drop(&mut self.inner) }
}
}
<core::array::iter::iter_inner::PolymorphicIter<DATA> as core::ops::drop::Drop>::drop fn drop(&mut self) {
// SAFETY: by our type invariant `self.alive` is exactly the initialized
// items, and this is drop so nothing can use the items afterwards.
unsafe { self.data.partial_drop(self.alive.clone()) }
}
<core::array::iter::iter_inner::PolymorphicIter<[core::mem::maybe_uninit::MaybeUninit<T>; N]> as core::clone::Clone>::clone fn clone(&self) -> Self {
// Note: we don't really need to match the exact same alive range, so
// we can just clone into offset 0 regardless of where `self` is.
let mut new = Self::empty();
fn clone_into_new<U: Clone>(
source: &PolymorphicIter<[MaybeUninit<U>]>,
target: &mut PolymorphicIter<[MaybeUninit<U>]>,
) {
// Clone all alive elements.
for (src, dst) in iter::zip(source.as_slice(), &mut target.data) {
// Write a clone into the new array, then update its alive range.
// If cloning panics, we'll correctly drop the previous items.
dst.write(src.clone());
// This addition cannot overflow as we're iterating a slice,
// the length of which always fits in usize.
target.alive = IndexRange::zero_to(target.alive.end() + 1);
}
}
clone_into_new(self, &mut new);
new
}
<core::array::iter::iter_inner::PolymorphicIter<[core::mem::maybe_uninit::MaybeUninit<T>; N]> as core::clone::Clone>::clone::clone_into_new fn clone_into_new<U: Clone>(
source: &PolymorphicIter<[MaybeUninit<U>]>,
target: &mut PolymorphicIter<[MaybeUninit<U>]>,
) {
// Clone all alive elements.
for (src, dst) in iter::zip(source.as_slice(), &mut target.data) {
// Write a clone into the new array, then update its alive range.
// If cloning panics, we'll correctly drop the previous items.
dst.write(src.clone());
// This addition cannot overflow as we're iterating a slice,
// the length of which always fits in usize.
target.alive = IndexRange::zero_to(target.alive.end() + 1);
}
}
<core::cell::BorrowRef<'_> as core::ops::drop::Drop>::drop fn drop(&mut self) {
let borrow = self.borrow.get();
debug_assert!(is_reading(borrow));
self.borrow.replace(borrow - 1);
}
<core::cell::BorrowRefMut<'_> as core::ops::drop::Drop>::drop fn drop(&mut self) {
let borrow = self.borrow.get();
debug_assert!(is_writing(borrow));
self.borrow.replace(borrow + 1);
}
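// The observable effect of the borrow-count bookkeeping in the two `drop`s
// above, via the public `RefCell` API: positive counts are readers, a writer
// flips the flag the other way, and each drop restores it.
use std::cell::RefCell;

fn main() {
    let cell = RefCell::new(5);
    let r = cell.borrow(); // reader count incremented
    assert!(cell.try_borrow_mut().is_err()); // writer refused while reading
    drop(r); // BorrowRef::drop: count back down
    *cell.borrow_mut() += 1; // writer flag set, then cleared on drop
    assert_eq!(*cell.borrow(), 6);
}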
<core::cell::RefMut<'_, T> as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
// SAFETY: the value is accessible as long as we hold our borrow.
unsafe { self.value.as_ref() }
}
<core::cell::RefMut<'_, T> as core::ops::deref::DerefMut>::deref_mut fn deref_mut(&mut self) -> &mut T {
// SAFETY: the value is accessible as long as we hold our borrow.
unsafe { self.value.as_mut() }
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.it.map(T::clone).fold(init, f)
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<T> {
self.it.next().cloned()
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.it.try_fold(init, clone_try_fold(f))
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> T {
// SAFETY: `Cloned` is 1:1 with the inner iterator, so if the caller promised
// that there's an element left, the inner iterator has one too.
let item = unsafe { self.it.next_unchecked() };
item.clone()
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let remaining = self.iter.advance_by(n);
let advanced = match remaining {
Ok(()) => n,
Err(rem) => n - rem.get(),
};
self.count += advanced;
remaining
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
#[inline]
fn enumerate<T, Acc>(
mut count: usize,
mut fold: impl FnMut(Acc, (usize, T)) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
#[rustc_inherit_overflow_checks]
move |acc, item| {
let acc = fold(acc, (count, item));
count += 1;
acc
}
}
self.iter.fold(init, enumerate(self.count, fold))
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::fold::enumerate fn enumerate<T, Acc>(
mut count: usize,
mut fold: impl FnMut(Acc, (usize, T)) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
#[rustc_inherit_overflow_checks]
move |acc, item| {
let acc = fold(acc, (count, item));
count += 1;
acc
}
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<(usize, <I as Iterator>::Item)> {
let a = self.iter.next()?;
let i = self.count;
self.count += 1;
Some((i, a))
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<(usize, I::Item)> {
let a = self.iter.nth(n)?;
let i = self.count + n;
self.count = i + 1;
Some((i, a))
}
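// A quick check of the index arithmetic in `Enumerate::nth` above: skipping
// `n` items yields index `count + n` and leaves `count` at `count + n + 1`.
fn main() {
    let mut it = "abcd".chars().enumerate();
    assert_eq!(it.nth(2), Some((2, 'c'))); // skipped 'a' and 'b'
    assert_eq!(it.next(), Some((3, 'd'))); // count resumed at 3
}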
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
#[inline]
fn enumerate<'a, T, Acc, R>(
count: &'a mut usize,
mut fold: impl FnMut(Acc, (usize, T)) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
#[rustc_inherit_overflow_checks]
move |acc, item| {
let acc = fold(acc, (*count, item));
*count += 1;
acc
}
}
self.iter.try_fold(init, enumerate(&mut self.count, fold))
}
<core::iter::adapters::enumerate::Enumerate<I> as core::iter::traits::iterator::Iterator>::try_fold::enumerate fn enumerate<'a, T, Acc, R>(
count: &'a mut usize,
mut fold: impl FnMut(Acc, (usize, T)) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
#[rustc_inherit_overflow_checks]
move |acc, item| {
let acc = fold(acc, (*count, item));
*count += 1;
acc
}
}
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, Fold>(self, init: Acc, fold: Fold) -> Acc
where
Fold: FnMut(Acc, Self::Item) -> Acc,
{
self.iter.fold(init, filter_fold(self.predicate, fold))
}
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<I::Item> {
self.iter.find(&mut self.predicate)
}
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let (_, upper) = self.iter.size_hint();
(0, upper) // can't know a lower bound, due to the predicate
}
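// The pessimistic lower bound above, observed: even a predicate that accepts
// everything reports a lower bound of 0, since `size_hint` cannot run it.
fn main() {
    let it = (0..10).filter(|_| true);
    assert_eq!(it.size_hint(), (0, Some(10)));
}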
<core::iter::adapters::filter::Filter<I, P> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Self: Sized,
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.iter.try_fold(init, filter_try_fold(&mut self.predicate, fold))
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, G>(self, init: Acc, g: G) -> Acc
where
G: FnMut(Acc, Self::Item) -> Acc,
{
self.iter.fold(init, map_fold(self.f, g))
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<B> {
self.iter.next().map(&mut self.f)
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, G, R>(&mut self, init: Acc, g: G) -> R
where
Self: Sized,
G: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.iter.try_fold(init, map_try_fold(&mut self.f, g))
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> B {
// SAFETY: `Map` is 1:1 with the inner iterator, so if the caller promised
// that there's an element left, the inner iterator has one too.
let item = unsafe { self.iter.next_unchecked() };
(self.f)(item)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.iter.advance_by(n)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::next_back fn next_back(&mut self) -> Option<<I as Iterator>::Item> {
self.iter.next()
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::nth_back fn nth_back(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
self.iter.nth(n)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::double_ended::DoubleEndedIterator>::try_rfold fn try_rfold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.iter.try_fold(init, f)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.iter.advance_back_by(n)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.iter.rfold(init, f)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<<I as Iterator>::Item> {
self.iter.next_back()
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<<I as Iterator>::Item> {
self.iter.nth_back(n)
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
<core::iter::adapters::rev::Rev<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, init: B, f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
self.iter.try_rfold(init, f)
}
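// `Rev` above simply swaps the two ends: every forward call is forwarded to
// the corresponding `_back` call on the inner iterator, and vice versa.
fn main() {
    let mut it = (1..=5).rev();
    assert_eq!(it.next(), Some(5)); // inner next_back()
    assert_eq!(it.nth(1), Some(3)); // inner nth_back(1), skipping 4
    assert_eq!(it.next_back(), Some(1)); // inner next()
}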
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, mut n: usize) -> Result<(), NonZero<usize>> {
let skip_inner = self.n;
let skip_and_advance = skip_inner.saturating_add(n);
let remainder = match self.iter.advance_by(skip_and_advance) {
Ok(()) => 0,
Err(n) => n.get(),
};
let advanced_inner = skip_and_advance - remainder;
n -= advanced_inner.saturating_sub(skip_inner);
self.n = self.n.saturating_sub(advanced_inner);
// skip_and_advance may have saturated
if unlikely(remainder == 0 && n > 0) {
n = match self.iter.advance_by(n) {
Ok(()) => 0,
Err(n) => n.get(),
}
}
NonZero::new(n).map_or(Ok(()), Err)
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<I::Item> {
if unlikely(self.n > 0) {
self.iter.nth(crate::mem::take(&mut self.n))
} else {
self.iter.next()
}
}
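// `mem::take(&mut self.n)` above reads the pending skip count and zeroes it
// in one step, so every later call takes the plain `next()` fast path.
fn main() {
    let mut it = (0..6).skip(3);
    assert_eq!(it.next(), Some(3)); // first call: nth(3) on the inner iterator
    assert_eq!(it.next(), Some(4)); // n is now 0: plain next()
}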
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<I::Item> {
if self.n > 0 {
let skip: usize = crate::mem::take(&mut self.n);
// Checked add to handle overflow case.
let n = match skip.checked_add(n) {
Some(nth) => nth,
None => {
// In case of overflow, consume the `skip` elements first, then load the
// `n`-th element. Because the number of elements to advance exceeds
// `usize::MAX`, the work is split into two `nth` calls, and the result of
// the `skip` call is discarded.
self.iter.nth(skip - 1)?;
n
}
};
// Load nth element including skip.
self.iter.nth(n)
} else {
self.iter.nth(n)
}
}
<core::iter::adapters::skip::Skip<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let (lower, upper) = self.iter.size_hint();
let lower = lower.saturating_sub(self.n);
let upper = match upper {
Some(x) => Some(x.saturating_sub(self.n)),
None => None,
};
(lower, upper)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_fold::nth fn nth<I: Iterator>(
iter: &mut I,
step_minus_one: usize,
) -> impl FnMut() -> Option<I::Item> + '_ {
move || iter.nth(step_minus_one)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_next default fn spec_next(&mut self) -> Option<I::Item> {
let step_size = if self.first_take { 0 } else { self.step_minus_one };
self.first_take = false;
self.iter.nth(step_size)
}
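// The `nth(step - 1)` trick above, spelled out: the first call takes one item,
// and each later call discards `step - 1` items before yielding.
fn main() {
    let mut it = (0..10).step_by(4);
    assert_eq!(it.next(), Some(0)); // first_take: nth(0) == next()
    assert_eq!(it.next(), Some(4)); // nth(3) drops 1, 2, 3
    assert_eq!(it.next(), Some(8));
    assert_eq!(it.next(), None);
}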
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_size_hint default fn spec_size_hint(&self) -> (usize, Option<usize>) {
#[inline]
fn first_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
move |n| if n == 0 { 0 } else { 1 + (n - 1) / step }
}
#[inline]
fn other_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
move |n| n / step
}
let (low, high) = self.iter.size_hint();
if self.first_take {
let f = first_size(self.original_step());
(f(low), high.map(f))
} else {
let f = other_size(self.original_step());
(f(low), high.map(f))
}
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_size_hint::first_size fn first_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
move |n| if n == 0 { 0 } else { 1 + (n - 1) / step }
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_size_hint::other_size fn other_size(step: NonZero<usize>) -> impl Fn(usize) -> usize {
move |n| n / step
}
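// A worked instance of `first_size` above: with `n` items left and step `s`,
// an un-started StepBy yields the first item plus one more per further `s`
// items, i.e. `1 + (n - 1) / s`.
fn main() {
    let it = (0..10).step_by(3); // yields 0, 3, 6, 9
    assert_eq!(it.size_hint(), (4, Some(4))); // 1 + (10 - 1) / 3 == 4
    assert_eq!(it.count(), 4);
}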
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_try_fold default fn spec_try_fold<Acc, F, R>(&mut self, mut acc: Acc, mut f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
#[inline]
fn nth<I: Iterator>(
iter: &mut I,
step_minus_one: usize,
) -> impl FnMut() -> Option<I::Item> + '_ {
move || iter.nth(step_minus_one)
}
if self.first_take {
self.first_take = false;
match self.iter.next() {
None => return try { acc },
Some(x) => acc = f(acc, x)?,
}
}
from_fn(nth(&mut self.iter, self.step_minus_one)).try_fold(acc, f)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::adapters::step_by::StepByImpl<I>>::spec_try_fold::nth fn nth<I: Iterator>(
iter: &mut I,
step_minus_one: usize,
) -> impl FnMut() -> Option<I::Item> + '_ {
move || iter.nth(step_minus_one)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, acc: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
self.spec_fold(acc, f)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
self.spec_next()
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.spec_nth(n)
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.spec_size_hint()
}
<core::iter::adapters::step_by::StepBy<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, F, R>(&mut self, acc: Acc, f: F) -> R
where
F: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
self.spec_try_fold(acc, f)
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u16>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u16>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u32>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u32>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u64>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u64>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<u8>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<u8>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_fold fn spec_fold<Acc, F>(self, init: Acc, mut f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc
{
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
let mut acc = init;
let mut val = self.iter.start;
for _ in 0..remaining {
acc = f(acc, val);
// this can only overflow during the last step, after which the value
// will no longer be used
val = val.wrapping_add(step);
}
acc
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_next fn spec_next(&mut self) -> Option<$t> {
// if a step size larger than the type has been specified, fall back to
// $t::MAX, in which case `remaining` will be at most 1.
let step = <$t>::try_from(self.original_step().get()).unwrap_or(<$t>::MAX);
let remaining = self.iter.end;
if remaining > 0 {
let val = self.iter.start;
// this can only overflow during the last step, after which the value
// will not be used
self.iter.start = val.wrapping_add(step);
self.iter.end = remaining - 1;
Some(val)
} else {
None
}
}
<core::iter::adapters::step_by::StepBy<core::ops::range::Range<usize>> as core::iter::adapters::step_by::StepByImpl<core::ops::range::Range<usize>>>::spec_size_hint fn spec_size_hint(&self) -> (usize, Option<usize>) {
let remaining = self.iter.end as usize;
(remaining, Some(remaining))
}
<core::iter::adapters::take::Take<I> as core::iter::adapters::take::SpecTake>::spec_fold default fn spec_fold<B, F>(mut self, init: B, f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
use crate::ops::NeverShortCircuit;
self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0
}
<core::iter::adapters::take::Take<I> as core::iter::adapters::take::SpecTake>::spec_for_each default fn spec_for_each<F: FnMut(Self::Item)>(mut self, f: F) {
// The default implementation would use a unit accumulator, so we can
// avoid a stateful closure by folding over the remaining number
// of items we wish to return instead.
fn check<'a, Item>(
mut action: impl FnMut(Item) + 'a,
) -> impl FnMut(usize, Item) -> Option<usize> + 'a {
move |more, x| {
action(x);
more.checked_sub(1)
}
}
let remaining = self.n;
if remaining > 0 {
self.iter.try_fold(remaining - 1, check(f));
}
}
<core::iter::adapters::take::Take<I> as core::iter::adapters::take::SpecTake>::spec_for_each::check fn check<'a, Item>(
mut action: impl FnMut(Item) + 'a,
) -> impl FnMut(usize, Item) -> Option<usize> + 'a {
move |more, x| {
action(x);
more.checked_sub(1)
}
}
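// A standalone version of the counter-fold trick above, as a hedged sketch
// (`for_each_at_most` is an illustrative name): instead of a stateful
// closure, the remaining count rides through `try_fold`, and `checked_sub`
// stops the fold when it hits `None`.
fn for_each_at_most<I: Iterator>(mut iter: I, n: usize, mut action: impl FnMut(I::Item)) {
    if n > 0 {
        let _ = iter.try_fold(n - 1, |more, x| {
            action(x);
            more.checked_sub(1)
        });
    }
}

fn main() {
    let mut out = Vec::new();
    for_each_at_most(0..100, 3, |x| out.push(x));
    assert_eq!(out, [0, 1, 2]);
}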
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let min = self.n.min(n);
let rem = match self.iter.advance_by(min) {
Ok(()) => 0,
Err(rem) => rem.get(),
};
let advanced = min - rem;
self.n -= advanced;
NonZero::new(n - advanced).map_or(Ok(()), Err)
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::fold fn fold<B, F>(self, init: B, f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
Self::spec_fold(self, init, f)
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::for_each fn for_each<F: FnMut(Self::Item)>(self, f: F) {
Self::spec_for_each(self, f)
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<<I as Iterator>::Item> {
if self.n != 0 {
self.n -= 1;
self.iter.next()
} else {
None
}
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.n == 0 {
return (0, Some(0));
}
let (lower, upper) = self.iter.size_hint();
let lower = cmp::min(lower, self.n);
let upper = match upper {
Some(x) if x < self.n => Some(x),
_ => Some(self.n),
};
(lower, upper)
}
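// The clamping above, observed: `take` caps both bounds at `n`, even for an
// endless iterator, and keeps a smaller inner bound when there is one.
fn main() {
    assert_eq!((0..10).take(3).size_hint(), (3, Some(3)));
    assert_eq!((0..).take(3).size_hint(), (3, Some(3))); // inner is unbounded
    assert_eq!((0..2).take(5).size_hint(), (2, Some(2))); // inner is smaller
}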
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<Acc, Fold, R>(&mut self, init: Acc, fold: Fold) -> R
where
Fold: FnMut(Acc, Self::Item) -> R,
R: Try<Output = Acc>,
{
fn check<'a, T, Acc, R: Try<Output = Acc>>(
n: &'a mut usize,
mut fold: impl FnMut(Acc, T) -> R + 'a,
) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
move |acc, x| {
*n -= 1;
let r = fold(acc, x);
if *n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
}
}
if self.n == 0 {
try { init }
} else {
let n = &mut self.n;
self.iter.try_fold(init, check(n, fold)).into_try()
}
}
<core::iter::adapters::take::Take<I> as core::iter::traits::iterator::Iterator>::try_fold::check fn check<'a, T, Acc, R: Try<Output = Acc>>(
n: &'a mut usize,
mut fold: impl FnMut(Acc, T) -> R + 'a,
) -> impl FnMut(Acc, T) -> ControlFlow<R, Acc> + 'a {
move |acc, x| {
*n -= 1;
let r = fold(acc, x);
if *n == 0 { ControlFlow::Break(r) } else { ControlFlow::from_try(r) }
}
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::new default fn new(a: A, b: B) -> Self {
Zip {
a,
b,
index: 0, // unused
len: 0, // unused
}
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::next default fn next(&mut self) -> Option<(A::Item, B::Item)> {
let x = self.a.next()?;
let y = self.b.next()?;
Some((x, y))
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::fold fn fold<Acc, F>(self, init: Acc, f: F) -> Acc
where
F: FnMut(Acc, Self::Item) -> Acc,
{
ZipImpl::fold(self, init, f)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
ZipImpl::next(self)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
ZipImpl::nth(self, n)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
ZipImpl::size_hint(self)
}
<core::iter::sources::from_fn::FromFn<F> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
(self.0)()
}
<core::mem::manually_drop::ManuallyDrop<T> as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
&self.value
}
<core::mem::manually_drop::ManuallyDrop<T> as core::ops::deref::DerefMut>::deref_mut fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
<core::mem::maybe_uninit::MaybeUninit<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
// Not calling `T::clone()`, we cannot know if we are initialized enough for that.
*self
}
<core::num::niche_types::Nanoseconds as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::Nanoseconds as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::Nanoseconds as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::Nanoseconds as core::default::Default>::default fn default() -> Self {
Self::ZERO
}
<core::num::niche_types::NonZeroCharInner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroCharInner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroCharInner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI128Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI128Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI128Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI16Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI16Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI16Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI32Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI32Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI32Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI64Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI64Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI64Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroI8Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroI8Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroI8Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroIsizeInner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroIsizeInner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroIsizeInner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU128Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU128Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU128Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU16Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU16Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU16Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU32Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU32Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU32Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU64Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU64Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU64Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroU8Inner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroU8Inner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroU8Inner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::NonZeroUsizeInner as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::NonZeroUsizeInner as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::NonZeroUsizeInner as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::num::niche_types::UsizeNoHighBit as core::cmp::Ord>::cmp fn cmp(&self, other: &Self) -> Ordering {
Ord::cmp(&self.as_inner(), &other.as_inner())
}
<core::num::niche_types::UsizeNoHighBit as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::UsizeNoHighBit as core::cmp::PartialOrd>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(Ord::cmp(self, other))
}
<core::ops::control_flow::ControlFlow<B, C> as core::ops::try_trait::FromResidual<core::ops::control_flow::ControlFlow<B, core::convert::Infallible>>>::from_residual fn from_residual(residual: ControlFlow<B, convert::Infallible>) -> Self {
match residual {
ControlFlow::Break(b) => ControlFlow::Break(b),
}
}
<core::ops::control_flow::ControlFlow<B, C> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
match self {
ControlFlow::Continue(c) => ControlFlow::Continue(c),
ControlFlow::Break(b) => ControlFlow::Break(ControlFlow::Break(b)),
}
}
<core::ops::control_flow::ControlFlow<B, C> as core::ops::try_trait::Try>::from_output fn from_output(output: Self::Output) -> Self {
ControlFlow::Continue(output)
}
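// A small demonstration of the `Try` plumbing above, assuming `?` on
// `ControlFlow` works on stable (the trait itself is unstable, the operator
// is not): `Break` routes out through `from_residual`, `Continue` unwraps
// to the output. `find_big` and `scan` are illustrative names.
use std::ops::ControlFlow;

fn find_big(items: &[i32]) -> ControlFlow<i32, usize> {
    let mut seen = 0;
    for &x in items {
        if x > 100 {
            return ControlFlow::Break(x);
        }
        seen += 1;
    }
    ControlFlow::Continue(seen)
}

fn scan(items: &[i32]) -> ControlFlow<i32, String> {
    let seen = find_big(items)?; // Break(b) propagates via `from_residual`
    ControlFlow::Continue(format!("saw {seen} small items"))
}

fn main() {
    assert_eq!(scan(&[1, 2, 3]), ControlFlow::Continue(String::from("saw 3 small items")));
    assert_eq!(scan(&[1, 999, 3]), ControlFlow::Break(999));
}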
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<usize> {
if self.len() > 0 {
// SAFETY: We just checked that the range is non-empty
unsafe { Some(self.next_unchecked()) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::try_fold fn try_fold<B, F, R>(&mut self, mut accum: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
// `Range` needs to check `start < end`, but thanks to our type invariant
// we can loop on the stricter `start != end`.
self.assume_range();
while self.start != self.end {
// SAFETY: We just checked that the range is non-empty
let i = unsafe { self.next_unchecked() };
accum = f(accum, i)?;
}
try { accum }
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked requires that the index is within the slice",
(end: usize = self.end(), len: usize = slice.len()) => end <= len
);
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe { get_offset_len_noubcheck(slice, self.start(), self.len()) }
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked_mut requires that the index is within the slice",
(end: usize = self.end(), len: usize = slice.len()) => end <= len
);
// SAFETY: see comments for `get_unchecked` above.
unsafe { get_offset_len_mut_noubcheck(slice, self.start(), self.len()) }
}
<core::ops::range::Range<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(self.end)
}
<core::ops::range::Range<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(self.start)
}
<core::ops::range::Range<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Included(self.start), Excluded(self.end))
}
<core::ops::range::Range<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(&self.end)
}
<core::ops::range::Range<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(&self.start)
}
<core::ops::range::Range<u16> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<u16>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
<core::ops::range::Range<u32> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<u32>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
<core::ops::range::Range<u64> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<u64>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
<core::ops::range::Range<u8> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<u8>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
<core::ops::range::Range<usize> as core::iter::adapters::step_by::SpecRangeSetup<core::ops::range::Range<usize>>>::setup fn setup(mut r: Range<$t>, step: usize) -> Range<$t> {
let inner_len = r.size_hint().0;
// If step exceeds $t::MAX, then the count will be at most 1 and
// thus always fit into $t.
let yield_count = inner_len.div_ceil(step);
// Turn the range end into an iteration counter
r.end = yield_count as $t;
r
}
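// A worked instance of the counter trick above, hedged to one concrete case:
// for `0u8..250` with step 100, `inner_len == 250` and the repurposed `end`
// becomes `div_ceil(250, 100) == 3`, the number of items actually yielded.
fn main() {
    let inner_len: usize = 250;
    let step: usize = 100;
    assert_eq!(inner_len.div_ceil(step), 3);
    let v: Vec<u8> = (0u8..250).step_by(100).collect();
    assert_eq!(v, [0, 100, 200]);
}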
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&*get_offset_len_noubcheck(slice, self.start, new_len)) }
} else {
None
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&mut *get_offset_len_mut_noubcheck(slice, self.start, new_len)) }
} else {
None
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked requires that the range is within the slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len
);
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe and the length calculation cannot overflow.
unsafe {
// Using the intrinsic avoids a superfluous UB check,
// since the one on this method already checked `end >= start`.
let new_len = crate::intrinsics::unchecked_sub(self.end, self.start);
get_offset_len_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked_mut requires that the range is within the slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len
);
// SAFETY: see comments for `get_unchecked` above.
unsafe {
let new_len = crate::intrinsics::unchecked_sub(self.end, self.start);
get_offset_len_mut_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*get_offset_len_noubcheck(slice, self.start, new_len) }
} else {
slice_index_fail(self.start, self.end, slice.len())
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start, new_len) }
} else {
slice_index_fail(self.start, self.end, slice.len())
}
}
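// The two paths above, observed: `get` returns `None` exactly where `index`
// would divert to `slice_index_fail` and panic.
fn main() {
    let v = [10, 20, 30, 40];
    assert_eq!(v.get(1..3), Some(&[20, 30][..]));
    assert_eq!(v.get(3..1), None); // start > end: `checked_sub` fails
    assert_eq!(v.get(2..9), None); // end > len
    assert_eq!(&v[1..3], &[20, 30]);
}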
<core::ops::range::RangeFrom<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFrom<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(self.start)
}
<core::ops::range::RangeFrom<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Included(self.start), Unbounded)
}
<core::ops::range::RangeFrom<T> as core::ops::range::OneSidedRange<T>>::bound fn bound(self) -> (OneSidedRangeBound, T) {
(OneSidedRangeBound::StartInclusive, self.start)
}
<core::ops::range::RangeFrom<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFrom<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(&self.start)
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
(self.start..slice.len()).get(slice)
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(self.start..slice.len()).get_mut(slice)
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (self.start..slice.len()).get_unchecked(slice) }
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
if self.start > slice.len() {
slice_index_fail(self.start, slice.len(), slice.len())
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe {
let new_len = crate::intrinsics::unchecked_sub(slice.len(), self.start);
&*get_offset_len_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.start > slice.len() {
slice_index_fail(self.start, slice.len(), slice.len())
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe {
let new_len = crate::intrinsics::unchecked_sub(slice.len(), self.start);
&mut *get_offset_len_mut_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::RangeFull as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Unbounded, Unbounded)
}
<core::ops::range::RangeFull as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFull as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
Some(slice)
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
Some(slice)
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
slice
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
slice
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
slice
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
slice
}
<core::ops::range::RangeInclusive<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Included(self.end)
}
<core::ops::range::RangeInclusive<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(self.start)
}
<core::ops::range::RangeInclusive<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(
Included(self.start),
if self.exhausted {
// When the iterator is exhausted, we usually have start == end,
// but we want the range to appear empty, containing nothing.
Excluded(self.end)
} else {
Included(self.end)
},
)
}
<core::ops::range::RangeInclusive<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
if self.exhausted {
// When the iterator is exhausted, we usually have start == end,
// but we want the range to appear empty, containing nothing.
Excluded(&self.end)
} else {
Included(&self.end)
}
}
<core::ops::range::RangeInclusive<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(&self.start)
}
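The `exhausted` flag consulted above is what keeps a fully iterated `0..=0` from claiming it still contains `0`. A small sketch of the observable effect:

use std::ops::{Bound, RangeBounds};

fn main() {
    let mut r = 0..=0;
    assert_eq!(r.end_bound(), Bound::Included(&0));
    assert!(r.contains(&0));
    r.next(); // consumes the only element, setting `exhausted`
    assert_eq!(r.end_bound(), Bound::Excluded(&0));
    assert!(!r.contains(&0));
}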
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
if *self.end() == usize::MAX { None } else { self.into_slice_range().get(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if *self.end() == usize::MAX { None } else { self.into_slice_range().get_mut(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { self.into_slice_range().get_unchecked(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { self.into_slice_range().get_unchecked_mut(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
let Self { mut start, mut end, exhausted } = self;
let len = slice.len();
if end < len {
end = end + 1;
start = if exhausted { end } else { start };
if let Some(new_len) = usize::checked_sub(end, start) {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { return &*get_offset_len_noubcheck(slice, start, new_len) }
}
}
slice_index_fail(start, end, slice.len())
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
let Self { mut start, mut end, exhausted } = self;
let len = slice.len();
if end < len {
end = end + 1;
start = if exhausted { end } else { start };
if let Some(new_len) = usize::checked_sub(end, start) {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { return &mut *get_offset_len_mut_noubcheck(slice, start, new_len) }
}
}
slice_index_fail(start, end, slice.len())
}
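`get` and `get_mut` above reject `..=usize::MAX` up front because converting to a half-open range needs `end + 1`, which would overflow. For example:

fn main() {
    let v = [1, 2, 3, 4];
    assert_eq!(&v[1..=2], &[2, 3]);
    assert_eq!(v.get(0..=3), Some(&v[..]));
    assert_eq!(v.get(0..=usize::MAX), None); // end + 1 would overflow
}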
<core::ops::range::RangeTo<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(self.end)
}
<core::ops::range::RangeTo<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeTo<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Unbounded, Excluded(self.end))
}
<core::ops::range::RangeTo<T> as core::ops::range::OneSidedRange<T>>::bound fn bound(self) -> (OneSidedRangeBound, T) {
(OneSidedRangeBound::End, self.end)
}
<core::ops::range::RangeTo<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(&self.end)
}
<core::ops::range::RangeTo<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
(0..self.end).get(slice)
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..self.end).get_mut(slice)
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..self.end).get_unchecked(slice) }
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..self.end).get_unchecked_mut(slice) }
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
(0..self.end).index(slice)
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..self.end).index_mut(slice)
}
<core::ops::range::RangeToInclusive<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Included(self.end)
}
<core::ops::range::RangeToInclusive<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Unbounded, Included(self.end))
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::OneSidedRange<T>>::bound fn bound(self) -> (OneSidedRangeBound, T) {
(OneSidedRangeBound::EndInclusive, self.end)
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Included(&self.end)
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
(0..=self.end).get(slice)
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..=self.end).get_mut(slice)
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..=self.end).get_unchecked(slice) }
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..=self.end).get_unchecked_mut(slice) }
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
(0..=self.end).index(slice)
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..=self.end).index_mut(slice)
}
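All of the `RangeTo` and `RangeToInclusive` slice-index impls above delegate to ranges anchored at 0, so `&v[..n]` is exactly `&v[0..n]`:

fn main() {
    let v = [5, 6, 7];
    assert_eq!(&v[..2], &v[0..2]);
    assert_eq!(&v[..=1], &v[0..=1]);
    assert_eq!(v.get(..9), None); // same bounds check as (0..9).get(...)
}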
<core::ops::try_trait::NeverShortCircuit<T> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<NeverShortCircuitResidual, T> {
ControlFlow::Continue(self.0)
}
<core::ops::try_trait::NeverShortCircuit<T> as core::ops::try_trait::Try>::from_output fn from_output(x: T) -> Self {
NeverShortCircuit(x)
}
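`NeverShortCircuit` is an internal adapter: it wraps a plain value so infallible closures can be driven through `Try`-based plumbing, and its `branch` always continues. A rough external model of the idea (the `drive` helper is hypothetical, not core's internals):

use std::ops::ControlFlow;

// Drives a step function written against ControlFlow, the way core's
// try_fold machinery does.
fn drive<T>(step: impl Fn(T) -> ControlFlow<T, T>, mut acc: T, times: u32) -> T {
    for _ in 0..times {
        match step(acc) {
            ControlFlow::Continue(v) => acc = v,
            ControlFlow::Break(v) => return v,
        }
    }
    acc
}

fn main() {
    // An infallible step "never short-circuits": it always continues.
    assert_eq!(drive(|x: i32| ControlFlow::Continue(x + 1), 0, 5), 5);
}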
<core::option::IntoIter<A> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<A> {
self.inner.next()
}
<core::option::IntoIter<A> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
<core::option::Item<A> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
self.opt.len()
}
<core::option::Item<A> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<A> {
self.opt.take()
}
<core::option::Item<A> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
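`Item` is the one-shot iterator backing `Option`'s iterators; the `take` above is why it yields at most once:

fn main() {
    let mut it = Some(7).into_iter();
    assert_eq!(it.len(), 1);
    assert_eq!(it.next(), Some(7));
    assert_eq!(it.next(), None); // the inner Option was take()n
    assert_eq!(None::<i32>.into_iter().count(), 0);
}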
<core::option::Option<&'a T> as core::convert::From<&'a core::option::Option<T>>>::from fn from(o: &'a Option<T>) -> Option<&'a T> {
o.as_ref()
}
<core::option::Option<&'a mut T> as core::convert::From<&'a mut core::option::Option<T>>>::from fn from(o: &'a mut Option<T>) -> Option<&'a mut T> {
o.as_mut()
}
<core::option::Option<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
match self {
Some(x) => Some(x.clone()),
None => None,
}
}
<core::option::Option<T> as core::clone::Clone>::clone_from fn clone_from(&mut self, source: &Self) {
match (self, source) {
(Some(to), Some(from)) => to.clone_from(from),
(to, from) => *to = from.clone(),
}
}
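The `(Some(to), Some(from))` arm above clones into the existing value in place, which pays off when `T::clone_from` reuses allocations (as `String`'s does):

fn main() {
    let src = Some(String::from("abc"));
    let mut dst = Some(String::from("xyzzy"));
    dst.clone_from(&src); // can reuse dst's String buffer instead of reallocating
    assert_eq!(dst.as_deref(), Some("abc"));
}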
<core::option::Option<T> as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
// Spelling out the cases explicitly optimizes better than
// `_ => false`
match (self, other) {
(Some(l), Some(r)) => *l == *r,
(Some(_), None) => false,
(None, Some(_)) => false,
(None, None) => true,
}
}
<core::option::Option<T> as core::convert::From<T>>::from fn from(val: T) -> Option<T> {
Some(val)
}
<core::option::Option<T> as core::default::Default>::default fn default() -> Option<T> {
None
}
<core::option::Option<T> as core::iter::traits::collect::IntoIterator>::into_iter fn into_iter(self) -> IntoIter<T> {
IntoIter { inner: Item { opt: self } }
}
<core::option::Option<T> as core::ops::try_trait::FromResidual<core::option::Option<core::convert::Infallible>>>::from_residual fn from_residual(residual: Option<convert::Infallible>) -> Self {
match residual {
None => None,
}
}
<core::option::Option<T> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
match self {
Some(v) => ControlFlow::Continue(v),
None => ControlFlow::Break(None),
}
}
<core::option::Option<T> as core::ops::try_trait::Try>::from_output fn from_output(output: Self::Output) -> Self {
Some(output)
}
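`branch` and `from_residual` above are the two halves of the `?` operator on `Option`: `branch` decides, and `from_residual` rebuilds `None` in the caller's return type. Roughly:

fn first_byte(s: &str) -> Option<u8> {
    // `?` desugars (roughly) to: match Try::branch(...) {
    //     Continue(v) => v,
    //     Break(r) => return FromResidual::from_residual(r),
    // }
    let b = s.as_bytes().first()?;
    Some(*b)
}

fn main() {
    assert_eq!(first_byte("abc"), Some(b'a'));
    assert_eq!(first_byte(""), None);
}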
<core::ptr::non_null::NonNull<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
*self
}
<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_ptr() == other.as_ptr()
}
<core::result::Result<T, E> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
match self {
Ok(v) => ControlFlow::Continue(v),
Err(e) => ControlFlow::Break(Err(e)),
}
}
<core::result::Result<T, E> as core::ops::try_trait::Try>::from_output fn from_output(output: Self::Output) -> Self {
Ok(output)
}
<core::result::Result<T, F> as core::ops::try_trait::FromResidual<core::result::Result<core::convert::Infallible, E>>>::from_residual fn from_residual(residual: Result<convert::Infallible, E>) -> Self {
match residual {
Err(e) => Err(From::from(e)),
}
}
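Note the `From::from(e)` in the arm above: that single call is what lets `?` convert error types across a function boundary. For instance:

#[derive(Debug)]
struct AppError(String);

impl From<std::num::ParseIntError> for AppError {
    fn from(e: std::num::ParseIntError) -> Self {
        AppError(e.to_string())
    }
}

fn parse(s: &str) -> Result<i32, AppError> {
    // The ParseIntError residual is routed through from_residual,
    // which calls From::from to build the AppError.
    Ok(s.parse::<i32>()?)
}

fn main() {
    assert!(parse("42").is_ok());
    assert!(parse("nope").is_err());
}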
<core::slice::iter::Chunks<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a [T]> {
if self.v.is_empty() {
None
} else {
let chunksz = cmp::min(self.v.len(), self.chunk_size);
let (fst, snd) = self.v.split_at(chunksz);
self.v = snd;
Some(fst)
}
}
<core::slice::iter::Chunks<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
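`Chunks` counts the trailing, possibly short chunk, hence the `rem > 0` bump in `size_hint`:

fn main() {
    let v = [1, 2, 3, 4, 5];
    let mut it = v.chunks(2);
    assert_eq!(it.size_hint(), (3, Some(3))); // 5 / 2, rounded up
    assert_eq!(it.next(), Some(&[1, 2][..]));
    assert_eq!(it.next(), Some(&[3, 4][..]));
    assert_eq!(it.next(), Some(&[5][..])); // the short tail is yielded
    assert_eq!(it.next(), None);
}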
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a [T]> {
if self.v.len() < self.chunk_size {
None
} else {
let (fst, snd) = self.v.split_at(self.chunk_size);
self.v = snd;
Some(fst)
}
}
<core::slice::iter::ChunksExact<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
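`ChunksExact`, by contrast, stops as soon as fewer than `chunk_size` elements remain; the leftovers are only reachable through `remainder()`:

fn main() {
    let v = [1, 2, 3, 4, 5];
    let mut it = v.chunks_exact(2);
    assert_eq!(it.size_hint(), (2, Some(2))); // 5 / 2, truncated
    assert_eq!(it.next(), Some(&[1, 2][..]));
    assert_eq!(it.next(), Some(&[3, 4][..]));
    assert_eq!(it.next(), None); // [5] is never yielded...
    assert_eq!(it.remainder(), &[5]); // ...but is available here
}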
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.len() < self.chunk_size {
None
} else {
// SAFETY: self.chunk_size is in bounds because we compared above against self.v.len()
let (head, tail) = unsafe { self.v.split_at_mut(self.chunk_size) };
self.v = tail;
// SAFETY: Nothing else points to or will point to the contents of this slice.
Some(unsafe { &mut *head })
}
}
<core::slice::iter::ChunksExactMut<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let n = self.v.len() / self.chunk_size;
(n, Some(n))
}
<core::slice::iter::ChunksMut<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a mut [T]> {
if self.v.is_empty() {
None
} else {
let sz = cmp::min(self.v.len(), self.chunk_size);
// SAFETY: The self.v contract ensures that any split_at_mut is valid.
let (head, tail) = unsafe { self.v.split_at_mut(sz) };
self.v = tail;
// SAFETY: Nothing else points to or will point to the contents of this slice.
Some(unsafe { &mut *head })
}
}
<core::slice::iter::ChunksMut<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.v.is_empty() {
(0, Some(0))
} else {
let n = self.v.len() / self.chunk_size;
let rem = self.v.len() % self.chunk_size;
let n = if rem > 0 { n + 1 } else { n };
(n, Some(n))
}
}
<core::slice::iter::Iter<'_, T> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
len!(self)
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
unsafe { self.post_inc_start(advance) };
NonZero::new(n - advance).map_or(Ok(()), Err)
}
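`advance_by` clamps to the remaining length and reports the undershoot instead of panicking. A nightly-only illustration (`Iterator::advance_by` is unstable behind `iter_advance_by` at the time of writing):

#![feature(iter_advance_by)]
use std::num::NonZero;

fn main() {
    let v = [1, 2, 3];
    let mut it = v.iter();
    assert!(it.advance_by(2).is_ok()); // skips 1 and 2
    assert_eq!(it.next(), Some(&3));
    // Nothing left: asking for 5 more falls short by exactly 5.
    assert_eq!(it.advance_by(5), Err(NonZero::new(5).unwrap()));
}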
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::all fn all<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if !f(x) {
return false;
}
}
true
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::any fn any<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if f(x) {
return true;
}
}
false
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::find fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
while let Some(x) = self.next() {
if predicate(&x) {
return Some(x);
}
}
None
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::fold fn fold<B, F>(self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
// this implementation consists of the following optimizations compared to the
// default implementation:
// - do-while loop, as is llvm's preferred loop shape,
// see https://releases.llvm.org/16.0.0/docs/LoopTerminology.html#more-canonical-loops
// - bumps an index instead of a pointer since the latter case inhibits
// some optimizations, see #111603
// - avoids Option wrapping/matching
if is_empty!(self) {
return init;
}
let mut acc = init;
let mut i = 0;
let len = len!(self);
loop {
// SAFETY: the loop iterates `i in 0..len`, which always is in bounds of
// the slice allocation
acc = f(acc, unsafe { & $( $mut_ )? *self.ptr.add(i).as_ptr() });
// SAFETY: `i` can't overflow since it'll only reach usize::MAX if the
// slice had that length, in which case we'll break out of the loop
// after the increment
i = unsafe { i.unchecked_add(1) };
if i == len {
break;
}
}
acc
}
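Behaviorally this is still ordinary `fold`; the index-bumping loop only changes codegen, not results:

fn main() {
    let v = [1, 2, 3, 4];
    // Same answer the default next()-driven fold would produce.
    assert_eq!(v.iter().fold(0, |acc, &x| acc + x), 10);
}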
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::for_each fn for_each<F>(mut self, mut f: F)
where
Self: Sized,
F: FnMut(Self::Item),
{
while let Some(x) = self.next() {
f(x);
}
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<$elem> {
// intentionally not using the helpers because this is
// one of the most mono'd things in the library.
let ptr = self.ptr;
let end_or_len = self.end_or_len;
// SAFETY: See inner comments. (For some reason having multiple
// blocks breaks inlining this -- if you can fix that please do!)
unsafe {
if T::IS_ZST {
let len = end_or_len.addr();
if len == 0 {
return None;
}
// SAFETY: just checked that it's not zero, so subtracting one
// cannot wrap. (Ideally this would be `checked_sub`, which
// does the same thing internally, but as of 2025-02 that
// doesn't optimize quite as small in MIR.)
self.end_or_len = without_provenance_mut(len.unchecked_sub(1));
} else {
// SAFETY: by type invariant, the `end_or_len` field is always
// non-null for a non-ZST pointee. (This transmute ensures we
// get `!nonnull` metadata on the load of the field.)
if ptr == crate::intrinsics::transmute::<$ptr, NonNull<T>>(end_or_len) {
return None;
}
// SAFETY: since it's not empty, per the check above, moving
// forward one keeps us inside the slice, and this is valid.
self.ptr = ptr.add(1);
}
// SAFETY: Now that we know it wasn't empty and we've moved past
// the first one (to avoid giving a duplicate `&mut` next time),
// we can give out a reference to it.
Some({ptr}.$into_ref())
}
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
if_zst!(mut self,
len => *len = 0,
end => self.ptr = *end,
);
return None;
}
// SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
unsafe {
self.post_inc_start(n);
Some(self.next_unchecked())
}
}
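`nth(n)` consumes `n + 1` elements, and the early branch above additionally empties the iterator when `n` is out of range:

fn main() {
    let v = [10, 20, 30];
    let mut it = v.iter();
    assert_eq!(it.nth(1), Some(&20)); // consumed 10 and 20
    assert_eq!(it.next(), Some(&30));

    let mut it = v.iter();
    assert_eq!(it.nth(9), None); // out of range: iterator is left empty
    assert_eq!(it.next(), None);
}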
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::position fn position<P>(&mut self, mut predicate: P) -> Option<usize> where
Self: Sized,
P: FnMut(Self::Item) -> bool,
{
let n = len!(self);
let mut i = 0;
while let Some(x) = self.next() {
if predicate(x) {
// SAFETY: we are guaranteed to be in bounds by the loop invariant:
// when `i >= n`, `self.next()` returns `None` and the loop breaks.
unsafe { assert_unchecked(i < n) };
return Some(i);
}
i += 1;
}
None
}
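The `assert_unchecked(i < n)` above only feeds the optimizer; observable behavior is the usual `position`, which consumes up to and including the match:

fn main() {
    let v = [3, 1, 4, 1, 5];
    let mut it = v.iter();
    assert_eq!(it.position(|&x| x == 4), Some(2));
    assert_eq!(it.next(), Some(&1)); // iteration resumes after the match
}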
<core::slice::iter::Iter<'a, T> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> $elem {
// SAFETY: The caller promised there's at least one more item.
unsafe {
self.post_inc_start(1).$into_ref()
}
}
<core::slice::iter::IterMut<'_, T> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
len!(self)
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
let advance = cmp::min(len!(self), n);
// SAFETY: By construction, `advance` does not exceed `self.len()`.
unsafe { self.post_inc_start(advance) };
NonZero::new(n - advance).map_or(Ok(()), Err)
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::all fn all<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if !f(x) {
return false;
}
}
true
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::any fn any<F>(&mut self, mut f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
while let Some(x) = self.next() {
if f(x) {
return true;
}
}
false
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::find fn find<P>(&mut self, mut predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
while let Some(x) = self.next() {
if predicate(&x) {
return Some(x);
}
}
None
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::fold fn fold<B, F>(self, init: B, mut f: F) -> B
where
F: FnMut(B, Self::Item) -> B,
{
// this implementation consists of the following optimizations compared to the
// default implementation:
// - do-while loop, as is llvm's preferred loop shape,
// see https://releases.llvm.org/16.0.0/docs/LoopTerminology.html#more-canonical-loops
// - bumps an index instead of a pointer since the latter case inhibits
// some optimizations, see #111603
// - avoids Option wrapping/matching
if is_empty!(self) {
return init;
}
let mut acc = init;
let mut i = 0;
let len = len!(self);
loop {
// SAFETY: the loop iterates `i in 0..len`, which always is in bounds of
// the slice allocation
acc = f(acc, unsafe { & $( $mut_ )? *self.ptr.add(i).as_ptr() });
// SAFETY: `i` can't overflow since it'll only reach usize::MAX if the
// slice had that length, in which case we'll break out of the loop
// after the increment
i = unsafe { i.unchecked_add(1) };
if i == len {
break;
}
}
acc
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::for_each fn for_each<F>(mut self, mut f: F)
where
Self: Sized,
F: FnMut(Self::Item),
{
while let Some(x) = self.next() {
f(x);
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<$elem> {
// intentionally not using the helpers because this is
// one of the most mono'd things in the library.
let ptr = self.ptr;
let end_or_len = self.end_or_len;
// SAFETY: See inner comments. (For some reason having multiple
// blocks breaks inlining this -- if you can fix that please do!)
unsafe {
if T::IS_ZST {
let len = end_or_len.addr();
if len == 0 {
return None;
}
// SAFETY: just checked that it's not zero, so subtracting one
// cannot wrap. (Ideally this would be `checked_sub`, which
// does the same thing internally, but as of 2025-02 that
// doesn't optimize quite as small in MIR.)
self.end_or_len = without_provenance_mut(len.unchecked_sub(1));
} else {
// SAFETY: by type invariant, the `end_or_len` field is always
// non-null for a non-ZST pointee. (This transmute ensures we
// get `!nonnull` metadata on the load of the field.)
if ptr == crate::intrinsics::transmute::<$ptr, NonNull<T>>(end_or_len) {
return None;
}
// SAFETY: since it's not empty, per the check above, moving
// forward one keeps us inside the slice, and this is valid.
self.ptr = ptr.add(1);
}
// SAFETY: Now that we know it wasn't empty and we've moved past
// the first one (to avoid giving a duplicate `&mut` next time),
// we can give out a reference to it.
Some({ptr}.$into_ref())
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::nth fn nth(&mut self, n: usize) -> Option<$elem> {
if n >= len!(self) {
// This iterator is now empty.
if_zst!(mut self,
len => *len = 0,
end => self.ptr = *end,
);
return None;
}
// SAFETY: We are in bounds. `post_inc_start` does the right thing even for ZSTs.
unsafe {
self.post_inc_start(n);
Some(self.next_unchecked())
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::position fn position<P>(&mut self, mut predicate: P) -> Option<usize> where
Self: Sized,
P: FnMut(Self::Item) -> bool,
{
let n = len!(self);
let mut i = 0;
while let Some(x) = self.next() {
if predicate(x) {
// SAFETY: we are guaranteed to be in bounds by the loop invariant:
// when `i >= n`, `self.next()` returns `None` and the loop breaks.
unsafe { assert_unchecked(i < n) };
return Some(i);
}
i += 1;
}
None
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> $elem {
// SAFETY: The caller promised there's at least one more item.
unsafe {
self.post_inc_start(1).$into_ref()
}
}
<core::slice::iter::Windows<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<&'a [T]> {
if self.size.get() > self.v.len() {
None
} else {
let ret = Some(&self.v[..self.size.get()]);
self.v = &self.v[1..];
ret
}
}
<core::slice::iter::Windows<'a, T> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.size.get() > self.v.len() {
(0, Some(0))
} else {
let size = self.v.len() - self.size.get() + 1;
(size, Some(size))
}
}
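`Windows` yields overlapping views that advance by one element, hence the `len - size + 1` count:

fn main() {
    let v = [1, 2, 3, 4];
    let mut it = v.windows(2);
    assert_eq!(it.size_hint(), (3, Some(3))); // 4 - 2 + 1
    assert_eq!(it.next(), Some(&[1, 2][..]));
    assert_eq!(it.next(), Some(&[2, 3][..])); // overlaps the previous window
    assert_eq!(it.next(), Some(&[3, 4][..]));
    assert_eq!(it.next(), None);
}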
<core::str::iter::Chars<'a> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<char> {
// SAFETY: `str` invariant says `self.iter` is a valid UTF-8 string and
// the resulting `ch` is a valid Unicode Scalar Value.
unsafe { next_code_point(&mut self.iter).map(|ch| char::from_u32_unchecked(ch)) }
}
<core::str::iter::Chars<'a> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.iter.len();
// Each `char` is one to four UTF-8 bytes, so at least `len.div_ceil(4)`
// and at most `len` chars remain. (The backing slice is at most
// `isize::MAX` bytes long, well below `usize::MAX`.)
(len.div_ceil(4), Some(len))
}
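The bounds fall straight out of UTF-8: every `char` is one to four bytes, so the byte length brackets the char count:

fn main() {
    let s = "héllo"; // 6 bytes: 'é' takes two
    assert_eq!(s.chars().size_hint(), (2, Some(6))); // 6.div_ceil(4) = 2
    assert_eq!(s.chars().count(), 5); // the true count sits in between
}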
<core::sync::atomic::AtomicU32 as core::convert::From<u32>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::AtomicU32 as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<core::sync::atomic::AtomicU64 as core::convert::From<u64>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::AtomicU64 as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<core::sync::atomic::AtomicU8 as core::convert::From<u8>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::AtomicU8 as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<core::sync::atomic::AtomicUsize as core::convert::From<usize>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::AtomicUsize as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<core::time::Duration as core::ops::arith::Add>::add fn add(self, rhs: Duration) -> Duration {
self.checked_add(rhs).expect("overflow when adding durations")
}
<core::time::Duration as core::ops::arith::Div<u32>>::div fn div(self, rhs: u32) -> Duration {
self.checked_div(rhs).expect("divide by zero error when dividing duration by scalar")
}
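Both operators above are checked-then-`expect`, so overflow and division by zero panic with those messages rather than wrapping; the non-panicking forms are the `checked_*` methods:

use std::time::Duration;

fn main() {
    let d = Duration::from_secs(1) + Duration::from_millis(500);
    assert_eq!(d, Duration::from_millis(1500));
    assert_eq!(d / 3, Duration::from_millis(500));
    assert_eq!(Duration::MAX.checked_add(Duration::from_nanos(1)), None);
    assert_eq!(d.checked_div(0), None); // `d / 0` would panic instead
}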
<f128 as core::default::Default>::default fn default() -> $t {
$v
}
<f128 as core::iter::traits::accum::Sum<&'a f128>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f128 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f128 as core::ops::arith::Add<&f128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f128 as core::ops::arith::AddAssign<&f128>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f128 as core::ops::arith::Div<&f128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f128 as core::ops::arith::DivAssign<&f128>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f128 as core::ops::arith::Mul<&f128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f128 as core::ops::arith::MulAssign<&f128>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f128 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f128 as core::ops::arith::Rem<&f128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f128 as core::ops::arith::RemAssign<&f128>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f128 as core::ops::arith::Sub<&f128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f128 as core::ops::arith::SubAssign<&f128>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<f16 as core::default::Default>::default fn default() -> $t {
$v
}
<f16 as core::iter::traits::accum::Sum<&'a f16>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f16 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f16 as core::ops::arith::Add<&f16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f16 as core::ops::arith::AddAssign<&f16>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f16 as core::ops::arith::Div<&f16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f16 as core::ops::arith::DivAssign<&f16>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f16 as core::ops::arith::Mul<&f16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f16 as core::ops::arith::MulAssign<&f16>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f16 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f16 as core::ops::arith::Rem<&f16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f16 as core::ops::arith::RemAssign<&f16>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f16 as core::ops::arith::Sub<&f16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f16 as core::ops::arith::SubAssign<&f16>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<f32 as core::default::Default>::default fn default() -> $t {
$v
}
<f32 as core::iter::traits::accum::Sum<&'a f32>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f32 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f32 as core::ops::arith::Add<&f32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f32 as core::ops::arith::AddAssign<&f32>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f32 as core::ops::arith::Div<&f32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f32 as core::ops::arith::DivAssign<&f32>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f32 as core::ops::arith::Mul<&f32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f32 as core::ops::arith::MulAssign<&f32>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f32 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f32 as core::ops::arith::Rem<&f32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f32 as core::ops::arith::RemAssign<&f32>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f32 as core::ops::arith::Sub<&f32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f32 as core::ops::arith::SubAssign<&f32>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<f64 as core::default::Default>::default fn default() -> $t {
$v
}
<f64 as core::iter::traits::accum::Sum<&'a f64>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<f64 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
-0.0,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
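The `-0.0` seed in the float `Sum` impls above (all generated from one macro) preserves IEEE sign-of-zero: a sum of nothing, or of only negative zeros, comes out as `-0.0`:

fn main() {
    let empty: f64 = std::iter::empty::<f64>().sum();
    assert_eq!(empty, 0.0); // -0.0 == 0.0 numerically...
    assert!(empty.is_sign_negative()); // ...but the sign survives
    let z: f64 = [-0.0f64, -0.0].iter().sum();
    assert!(z.is_sign_negative());
}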
<f64 as core::ops::arith::Add<&f64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f64 as core::ops::arith::AddAssign<&f64>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f64 as core::ops::arith::Div<&f64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f64 as core::ops::arith::DivAssign<&f64>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f64 as core::ops::arith::Mul<&f64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f64 as core::ops::arith::MulAssign<&f64>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f64 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f64 as core::ops::arith::Rem<&f64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f64 as core::ops::arith::RemAssign<&f64>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f64 as core::ops::arith::Sub<&f64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f64 as core::ops::arith::SubAssign<&f64>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i128 as core::default::Default>::default fn default() -> $t {
$v
}
<i128 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
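`disjoint_bitor`'s entire contract is that `assume`: the operands share no set bits, in which case `|` cannot carry and agrees with `+`. Safe code can only demonstrate the invariant, not call the internal helper:

fn main() {
    let a: i128 = 0b0011;
    let b: i128 = 0b0100;
    assert_eq!(a & b, 0); // the "disjoint" precondition
    assert_eq!(a | b, a + b); // with no carries, OR and ADD agree
}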
<i128 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_sub(n as Self)
}
<i128 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_add(n as Self)
}
<i128 as core::iter::traits::accum::Sum<&'a i128>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i128 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i128 as core::ops::arith::Add<&i128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i128 as core::ops::arith::AddAssign<&i128>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i128 as core::ops::arith::Div<&i128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i128 as core::ops::arith::DivAssign<&i128>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i128 as core::ops::arith::Mul<&i128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i128 as core::ops::arith::MulAssign<&i128>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i128 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i128 as core::ops::arith::Rem<&i128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i128 as core::ops::arith::RemAssign<&i128>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i128 as core::ops::arith::Sub<&i128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i128 as core::ops::arith::SubAssign<&i128>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i128 as core::ops::bit::BitAnd<&i128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i128 as core::ops::bit::BitAndAssign<&i128>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i128 as core::ops::bit::BitOr<&i128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i128 as core::ops::bit::BitOrAssign<&i128>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i128 as core::ops::bit::BitXor<&i128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i128 as core::ops::bit::BitXorAssign<&i128>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i128 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
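The long run of `Shl`/`Shr` impls above is two macros at work: one instantiates the shift for every integer RHS type, and a `forward_ref`-style macro adds the by-reference variants. In practice any integer, by value or by reference, works on the right:

fn main() {
    let x: i128 = 1;
    assert_eq!(x << 3u8, 8);
    assert_eq!(x << &3u64, 8); // by-reference RHS via the forwarded impl
    let mut y = x;
    y <<= 3usize;
    assert_eq!(y >> 2i16, 2);
}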
<i16 as core::default::Default>::default fn default() -> $t {
$v
}
<i16 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<i16 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::backward(120_i8, 200) == Some(-80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it exceeds the width of i8's entire range,
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
<i16 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it exceeds the width of i8's entire range,
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
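Worked through the `i8` instantiation these comments reference (the same macro generates the `i16` impls above): `200 as i8` is `-56`, so `-120 + (-56)` wraps to `80`, and `80 >= -120` confirms the true sum stayed in range. Nightly-only, since `Step` is unstable behind `step_trait`:

#![feature(step_trait)]
use std::iter::Step;

fn main() {
    assert_eq!(Step::forward_checked(-120_i8, 200), Some(80));
    assert_eq!(Step::backward_checked(120_i8, 200), Some(-80));
    // Stepping past the type's edge reports None instead of wrapping:
    assert_eq!(Step::forward_checked(100_i8, 200), None);
}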
<i16 as core::iter::traits::accum::Sum<&'a i16>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i16 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i16 as core::ops::arith::Add<&i16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i16 as core::ops::arith::AddAssign<&i16>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i16 as core::ops::arith::Div<&i16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i16 as core::ops::arith::DivAssign<&i16>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i16 as core::ops::arith::Mul<&i16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i16 as core::ops::arith::MulAssign<&i16>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i16 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i16 as core::ops::arith::Rem<&i16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i16 as core::ops::arith::RemAssign<&i16>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i16 as core::ops::arith::Sub<&i16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i16 as core::ops::arith::SubAssign<&i16>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i16 as core::ops::bit::BitAnd<&i16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i16 as core::ops::bit::BitAndAssign<&i16>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i16 as core::ops::bit::BitOr<&i16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i16 as core::ops::bit::BitOrAssign<&i16>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i16 as core::ops::bit::BitXor<&i16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i16 as core::ops::bit::BitXorAssign<&i16>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i16 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
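A quick sketch of the bitwise impls above; note that `Not` on a signed type flips every bit of the two's-complement representation:
fn main() {
    let a: i16 = 0b0110;
    let b: i16 = 0b0011;
    assert_eq!(a & b, 0b0010); // BitAnd
    assert_eq!(a | b, 0b0111); // BitOr
    assert_eq!(a ^ b, 0b0101); // BitXor
    assert_eq!(!0i16, -1);     // Not: all bits set is -1 in two's complement
}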
<i16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
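The per-type `Shl` impls above exist so the shift amount can be any integer type, independent of the shifted type. A shift amount at or beyond the bit width still overflows; the checked/wrapping methods make that explicit (a sketch):
fn main() {
    let x: i16 = 1;
    assert_eq!(x << 3u8, 8);    // Shl<u8>
    assert_eq!(x << 3i64, 8);   // Shl<i64>
    assert_eq!(x << 3usize, 8); // Shl<usize>
    // A 16-bit shift overflows i16: `x << 16` panics with debug assertions.
    assert_eq!(x.checked_shl(16), None);
    assert_eq!(x.wrapping_shl(16), 1); // amount is masked modulo the bit width
}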
<i16 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::default::Default>::default fn default() -> $t {
$v
}
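`$v` here is presumably the per-type zero literal; the observable behavior is:
fn main() {
    assert_eq!(i32::default(), 0);
    assert_eq!(<i32 as Default>::default(), 0);
}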
<i32 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
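`disjoint_bitor` is an internal fallback and not callable from user code; its contract can be restated on stable Rust as a checked helper (an illustrative stand-in, not core's API):
// When the operands share no set bits, `|` and `+` agree, which is what the
// `assume` above communicates to the backend. This sketch only checks it.
fn disjoint_or_checked(a: i32, b: i32) -> i32 {
    debug_assert_eq!(a & b, 0, "operands must have no bits in common");
    a | b
}

fn main() {
    let hi = 0x0F00;
    let lo = 0x000A;
    assert_eq!(disjoint_or_checked(hi, lo), 0x0F0A);
    assert_eq!(hi | lo, hi + lo); // the identity the precondition guarantees
}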
<i32 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::backward(80_i8, 200) == Some(-120_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it is larger than the width of i8's entire range,
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
<i32 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it is larger than the width of i8's entire range,
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
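The overflow test in both methods relies on monotonicity: after narrowing, n is at most one trip around the two's-complement ring, so the wrapping sum is at least `start` exactly when no overflow occurred. A standalone restatement for i32 (here `$u_narrower` would be u32; the helper name is illustrative):
fn forward_checked_i32(start: i32, n: usize) -> Option<i32> {
    match u32::try_from(n) {
        Ok(n) => {
            let wrapped = start.wrapping_add(n as i32);
            if wrapped >= start { Some(wrapped) } else { None }
        }
        Err(_) => None, // n wider than the whole i32 range: must overflow
    }
}

fn main() {
    assert_eq!(forward_checked_i32(10, 5), Some(15));
    assert_eq!(forward_checked_i32(i32::MAX, 1), None);
    // n out of u32 range is rejected up front (on a 64-bit target).
    assert_eq!(forward_checked_i32(0, u32::MAX as usize + 1), None);
}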
<i32 as core::iter::traits::accum::Sum<&'a i32>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i32 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i32 as core::ops::arith::Add<&i32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i32 as core::ops::arith::AddAssign<&i32>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i32 as core::ops::arith::Div<&i32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i32 as core::ops::arith::DivAssign<&i32>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i32 as core::ops::arith::Mul<&i32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i32 as core::ops::arith::MulAssign<&i32>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i32 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i32 as core::ops::arith::Rem<&i32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i32 as core::ops::arith::RemAssign<&i32>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i32 as core::ops::arith::Sub<&i32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i32 as core::ops::arith::SubAssign<&i32>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i32 as core::ops::bit::BitAnd<&i32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i32 as core::ops::bit::BitAndAssign<&i32>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i32 as core::ops::bit::BitOr<&i32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i32 as core::ops::bit::BitOrAssign<&i32>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i32 as core::ops::bit::BitXor<&i32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i32 as core::ops::bit::BitXorAssign<&i32>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i32 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::default::Default>::default fn default() -> $t {
$v
}
<i64 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<i64 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::backward(80_i8, 200) == Some(-120_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it is larger than the width of i8's entire range,
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
<i64 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it is larger than the width of i8's entire range,
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
<i64 as core::iter::traits::accum::Sum<&'a i64>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i64 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i64 as core::ops::arith::Add<&i64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i64 as core::ops::arith::AddAssign<&i64>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i64 as core::ops::arith::Div<&i64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i64 as core::ops::arith::DivAssign<&i64>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i64 as core::ops::arith::Mul<&i64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i64 as core::ops::arith::MulAssign<&i64>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i64 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
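`Neg` is plain `-self`, so `-i64::MIN` overflows (the minimum has no representable positive counterpart); debug builds panic, and the checked/wrapping forms spell out both outcomes:
fn main() {
    assert_eq!(-(5i64), -5);
    assert_eq!(i64::MIN.checked_neg(), None);       // overflow made explicit
    assert_eq!(i64::MIN.wrapping_neg(), i64::MIN);  // wraps back to itself
}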
<i64 as core::ops::arith::Rem<&i64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i64 as core::ops::arith::RemAssign<&i64>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i64 as core::ops::arith::Sub<&i64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i64 as core::ops::arith::SubAssign<&i64>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i64 as core::ops::bit::BitAnd<&i64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i64 as core::ops::bit::BitAndAssign<&i64>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i64 as core::ops::bit::BitOr<&i64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i64 as core::ops::bit::BitOrAssign<&i64>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i64 as core::ops::bit::BitXor<&i64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i64 as core::ops::bit::BitXorAssign<&i64>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i64 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::default::Default>::default fn default() -> $t {
$v
}
<i8 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<i8 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::backward(80_i8, 200) == Some(-120_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it is larger than the width of i8's entire range,
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
<i8 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it is larger than the width of i8's entire range,
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
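A concrete trace of the i8 examples in the comments above: 200 narrows into u8, reinterprets as -56_i8, and the wrapping arithmetic lands back in range in both directions:
fn main() {
    let n: u8 = 200;
    assert_eq!(n as i8, -56);
    assert_eq!((-120i8).wrapping_add(n as i8), 80); // forward case
    assert_eq!((80i8).wrapping_sub(n as i8), -120); // backward case
}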
<i8 as core::iter::traits::accum::Sum<&'a i8>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<i8 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
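Because the fold is annotated with `#[rustc_inherit_overflow_checks]`, `sum()` on a narrow type panics on overflow when the calling crate compiles with overflow checks (typically debug builds) and wraps otherwise. For a result that is defined either way, widen first or wrap explicitly (a sketch):
fn main() {
    let xs: [i8; 2] = [127, 1]; // xs.iter().sum::<i8>() would overflow i8
    let wide: i16 = xs.iter().map(|&x| x as i16).sum();
    assert_eq!(wide, 128);
    let wrapped = xs.iter().fold(0i8, |a, &b| a.wrapping_add(b));
    assert_eq!(wrapped, -128);
}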
<i8 as core::ops::arith::Add<&i8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i8 as core::ops::arith::AddAssign<&i8>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i8 as core::ops::arith::Div<&i8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i8 as core::ops::arith::DivAssign<&i8>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i8 as core::ops::arith::Mul<&i8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i8 as core::ops::arith::MulAssign<&i8>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i8 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i8 as core::ops::arith::Rem<&i8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i8 as core::ops::arith::RemAssign<&i8>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i8 as core::ops::arith::Sub<&i8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i8 as core::ops::arith::SubAssign<&i8>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i8 as core::ops::bit::BitAnd<&i8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i8 as core::ops::bit::BitAndAssign<&i8>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i8 as core::ops::bit::BitOr<&i8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i8 as core::ops::bit::BitOrAssign<&i8>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i8 as core::ops::bit::BitXor<&i8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i8 as core::ops::bit::BitXorAssign<&i8>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i8 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
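Note that, unlike the arithmetic traits, the shift traits are implemented for every integer right-hand-side type, so the shift amount's type need not match the shifted value's. A quick illustration:
fn main() {
    let x: i8 = 1;
    assert_eq!(x << 3u64, 8);        // Shl<u64> for i8
    assert_eq!(x << 3i16, 8);        // Shl<i16> for i8
    assert_eq!(-16i8 >> 2usize, -4); // Shr is arithmetic for signed types
}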
<i8 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::default::Default>::default fn default() -> $t {
$v
}
<isize as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
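The `assume` above encodes the caller's promise that the two operands share no set bits; under that precondition OR, XOR, and ADD all produce the same result, which is what the backend is being told it may exploit. An illustration with plain operators (the intrinsic itself is internal to core):
fn main() {
    let (hi, lo): (u16, u16) = (0xAB00, 0x00CD);
    assert_eq!(hi & lo, 0);       // the disjointness precondition
    assert_eq!(hi | lo, 0xABCD);
    assert_eq!(hi ^ lo, hi | lo); // OR == XOR when no bits overlap
    assert_eq!(hi + lo, hi | lo); // and == ADD, since no carries occur
}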
<isize as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::backward(127_i8, 200) == Some(-73_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_sub(n as Self);
if wrapped <= start {
Some(wrapped)
} else {
None // Subtraction overflowed
}
}
// If n is out of range of e.g. u8,
// then it is larger than the entire width of i8's range,
// so `any_i8 - n` necessarily overflows i8.
Err(_) => None,
}
}
<isize as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match $u_narrower::try_from(n) {
Ok(n) => {
// Wrapping handles cases like
// `Step::forward(-120_i8, 200) == Some(80_i8)`,
// even though 200 is out of range for i8.
let wrapped = start.wrapping_add(n as Self);
if wrapped >= start {
Some(wrapped)
} else {
None // Addition overflowed
}
}
// If n is out of range of e.g. u8,
// then it is larger than the entire width of i8's range,
// so `any_i8 + n` necessarily overflows i8.
Err(_) => None,
}
}
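A worked example of the signed wrapping trick in the two impls above, using i8 so the numbers stay small (the values come from the comments in the source):
fn main() {
    // forward: -120 + 200 == 80 fits in i8 even though 200 doesn't, so
    // the addition is done with the wrapped operand 200 as i8 == -56.
    let start: i8 = -120;
    let n: u8 = 200; // $u_narrower for i8 is u8
    let wrapped = start.wrapping_add(n as i8);
    assert_eq!(wrapped, 80);
    assert!(wrapped >= start); // no overflow, so Some(80)

    // backward: 127 - 200 == -73 fits in i8 the same way.
    let wrapped = 127i8.wrapping_sub(n as i8);
    assert_eq!(wrapped, -73);
    assert!(wrapped <= 127); // no overflow, so Some(-73)
}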
<isize as core::iter::traits::accum::Sum<&'a isize>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<isize as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
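In use, the two impls above let `sum()` consume either owned or borrowed items, and `#[rustc_inherit_overflow_checks]` ties the addition's overflow behavior to the build settings of the crate the code is ultimately compiled into rather than to how core itself was built. For example:
fn main() {
    let v: Vec<isize> = vec![1, 2, 3, 4];
    let by_ref: isize = v.iter().sum();      // uses Sum<&'a isize>
    let by_val: isize = v.into_iter().sum(); // uses Sum<isize>
    assert_eq!(by_ref, 10);
    assert_eq!(by_val, 10);
}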
<isize as core::ops::arith::Add<&isize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<isize as core::ops::arith::AddAssign<&isize>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<isize as core::ops::arith::Div<&isize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<isize as core::ops::arith::DivAssign<&isize>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<isize as core::ops::arith::Mul<&isize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<isize as core::ops::arith::MulAssign<&isize>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<isize as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<isize as core::ops::arith::Rem<&isize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<isize as core::ops::arith::RemAssign<&isize>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<isize as core::ops::arith::Sub<&isize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<isize as core::ops::arith::SubAssign<&isize>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<isize as core::ops::bit::BitAnd<&isize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<isize as core::ops::bit::BitAndAssign<&isize>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<isize as core::ops::bit::BitOr<&isize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<isize as core::ops::bit::BitOrAssign<&isize>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<isize as core::ops::bit::BitXor<&isize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<isize as core::ops::bit::BitXorAssign<&isize>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<isize as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<isize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<str as core::convert::AsMut<str>>::as_mut fn as_mut(&mut self) -> &mut str {
self
}
<u128 as core::default::Default>::default fn default() -> $t {
$v
}
<u128 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<u128 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_sub(n as Self)
}
<u128 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
start.checked_add(n as Self)
}
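Because u128 is at least as wide as usize on every supported target, `n as Self` is lossless here and plain checked arithmetic implements Step directly. For instance:
fn main() {
    let start: u128 = u128::MAX - 1;
    assert_eq!(start.checked_add(1), Some(u128::MAX));
    assert_eq!(start.checked_add(2), None); // forward_checked returns None here
    assert_eq!(5u128.checked_sub(7), None); // backward_checked past zero
}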
<u128 as core::iter::traits::accum::Sum<&'a u128>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u128 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u128 as core::ops::arith::Add<&u128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u128 as core::ops::arith::AddAssign<&u128>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u128 as core::ops::arith::Div<&u128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u128 as core::ops::arith::DivAssign<&u128>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u128 as core::ops::arith::Mul<&u128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u128 as core::ops::arith::MulAssign<&u128>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u128 as core::ops::arith::Rem<&u128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u128 as core::ops::arith::RemAssign<&u128>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u128 as core::ops::arith::Sub<&u128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u128 as core::ops::arith::SubAssign<&u128>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u128 as core::ops::bit::BitAnd<&u128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u128 as core::ops::bit::BitAndAssign<&u128>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u128 as core::ops::bit::BitOr<&u128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u128 as core::ops::bit::BitOrAssign<&u128>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u128 as core::ops::bit::BitXor<&u128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u128 as core::ops::bit::BitXorAssign<&u128>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u128 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::default::Default>::default fn default() -> $t {
$v
}
<u16 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<u16 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<u16 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
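For a type narrower than usize, the `try_from` conversion itself doubles as the overflow check: if n doesn't fit in u16, the stepped result can't fit either. A small sketch:
fn main() {
    // 70_000 doesn't fit in u16, so the conversion reports the overflow:
    assert!(u16::try_from(70_000usize).is_err());
    // In range: convert, then use ordinary checked arithmetic.
    let start: u16 = 100;
    let stepped = u16::try_from(50usize).ok().and_then(|n| start.checked_add(n));
    assert_eq!(stepped, Some(150));
}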
<u16 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
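Here the subtraction can't overflow (start <= end was just checked) and the cast to usize is lossless for a narrower type, so the size hint is exact. For instance:
fn main() {
    let (start, end): (u16, u16) = (10, 25);
    let steps = (end - start) as usize;
    assert_eq!((steps, Some(steps)), (15, Some(15)));
    // With start > end there are no steps, and the impl reports (0, None).
}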
<u16 as core::iter::traits::accum::Sum<&'a u16>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u16 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u16 as core::ops::arith::Add<&u16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u16 as core::ops::arith::AddAssign<&u16>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u16 as core::ops::arith::Div<&u16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u16 as core::ops::arith::DivAssign<&u16>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u16 as core::ops::arith::Mul<&u16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u16 as core::ops::arith::MulAssign<&u16>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u16 as core::ops::arith::Rem<&u16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u16 as core::ops::arith::RemAssign<&u16>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u16 as core::ops::arith::Sub<&u16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u16 as core::ops::arith::SubAssign<&u16>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u16 as core::ops::bit::BitAnd<&u16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u16 as core::ops::bit::BitAndAssign<&u16>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u16 as core::ops::bit::BitOr<&u16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u16 as core::ops::bit::BitOrAssign<&u16>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u16 as core::ops::bit::BitXor<&u16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u16 as core::ops::bit::BitXorAssign<&u16>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u16 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::default::Default>::default fn default() -> $t {
$v
}
<u32 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<u32 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<u32 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
<u32 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<u32 as core::iter::traits::accum::Sum<&'a u32>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u32 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u32 as core::ops::arith::Add<&u32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u32 as core::ops::arith::AddAssign<&u32>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u32 as core::ops::arith::Div<&u32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u32 as core::ops::arith::DivAssign<&u32>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u32 as core::ops::arith::Mul<&u32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u32 as core::ops::arith::MulAssign<&u32>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u32 as core::ops::arith::Rem<&u32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u32 as core::ops::arith::RemAssign<&u32>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u32 as core::ops::arith::Sub<&u32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u32 as core::ops::arith::SubAssign<&u32>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u32 as core::ops::bit::BitAnd<&u32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u32 as core::ops::bit::BitAndAssign<&u32>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u32 as core::ops::bit::BitOr<&u32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u32 as core::ops::bit::BitOrAssign<&u32>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u32 as core::ops::bit::BitXor<&u32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u32 as core::ops::bit::BitXorAssign<&u32>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u32 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
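The separate Shl impls above, one per right-hand-side integer type, are what allow a u32 to be shifted by a shift amount of any integer type rather than only by another u32. A small sketch with assumed example values:

fn main() {
    let x = 1u32;
    assert_eq!(x << 3u8, 8);    // Shl<u8>
    assert_eq!(x << 3i64, 8);   // Shl<i64>
    assert_eq!(x << 3usize, 8); // Shl<usize>
}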
<u32 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::default::Default>::default fn default() -> $t {
$v
}
<u64 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
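The disjoint_bitor trait above is an internal intrinsic fallback and not callable from user code; the following sketch illustrates its precondition using only stable operators. When no bit is set in both inputs, bitwise or and addition coincide, which is exactly the fact the assume communicates to the backend.

fn main() {
    let (hi, lo) = (0xFF00u64, 0x00FFu64);
    assert_eq!(hi & lo, 0);       // the "no bits in common" precondition
    assert_eq!(hi | lo, hi + lo); // with disjoint bits, `|` equals `+`
}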
<u64 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<u64 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
<u64 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
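Step is an unstable trait, but its steps_between surfaces through the stable size_hint of a numeric range, assuming the usual Range iterator implementation that delegates to it when start < end. A sketch with assumed example values:

fn main() {
    let r = 3u64..10;
    assert_eq!(r.size_hint(), (7, Some(7))); // start <= end: exact distance
    let empty = 10u64..3;
    assert_eq!(empty.size_hint(), (0, Some(0))); // inverted range reports empty
}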
<u64 as core::iter::traits::accum::Sum<&'a u64>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u64 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
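The two Sum impls above cover by-reference and by-value iterators; rustc_inherit_overflow_checks means the fold panics on overflow in debug builds and wraps in release builds, matching plain `+`. A short usage sketch with assumed values:

fn main() {
    let xs = [1u64, 2, 3];
    assert_eq!(xs.iter().sum::<u64>(), 6);      // Sum<&'a u64>
    assert_eq!(xs.into_iter().sum::<u64>(), 6); // Sum<u64>
}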
<u64 as core::ops::arith::Add<&u64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u64 as core::ops::arith::AddAssign<&u64>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u64 as core::ops::arith::Div<&u64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u64 as core::ops::arith::DivAssign<&u64>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u64 as core::ops::arith::Mul<&u64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u64 as core::ops::arith::MulAssign<&u64>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u64 as core::ops::arith::Rem<&u64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u64 as core::ops::arith::RemAssign<&u64>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u64 as core::ops::arith::Sub<&u64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u64 as core::ops::arith::SubAssign<&u64>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u64 as core::ops::bit::BitAnd<&u64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u64 as core::ops::bit::BitAndAssign<&u64>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u64 as core::ops::bit::BitOr<&u64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u64 as core::ops::bit::BitOrAssign<&u64>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u64 as core::ops::bit::BitXor<&u64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u64 as core::ops::bit::BitXorAssign<&u64>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u64 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::default::Default>::default fn default() -> $t {
$v
}
<u8 as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<u8 as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<u8 as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
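Since Step is unstable, the logic of forward_checked above can be replayed with plain library calls: convert the usize step count to the target type, then use checked arithmetic. A hypothetical standalone replica for u8 (the helper name is mine, not from the listing):

fn forward_checked_u8(start: u8, n: usize) -> Option<u8> {
    match u8::try_from(n) {
        Ok(n) => start.checked_add(n),
        Err(_) => None, // n exceeds u8::MAX, so start + n must overflow too
    }
}

fn main() {
    assert_eq!(forward_checked_u8(250, 5), Some(255));
    assert_eq!(forward_checked_u8(250, 6), None); // checked_add overflows
    assert_eq!(forward_checked_u8(0, 300), None); // try_from fails
}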
<u8 as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<u8 as core::iter::traits::accum::Sum<&'a u8>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u8 as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<u8 as core::ops::arith::Add<&u8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u8 as core::ops::arith::AddAssign<&u8>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u8 as core::ops::arith::Div<&u8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u8 as core::ops::arith::DivAssign<&u8>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u8 as core::ops::arith::Mul<&u8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u8 as core::ops::arith::MulAssign<&u8>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u8 as core::ops::arith::Rem<&u8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u8 as core::ops::arith::RemAssign<&u8>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u8 as core::ops::arith::Sub<&u8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u8 as core::ops::arith::SubAssign<&u8>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u8 as core::ops::bit::BitAnd<&u8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u8 as core::ops::bit::BitAndAssign<&u8>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u8 as core::ops::bit::BitOr<&u8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u8 as core::ops::bit::BitOrAssign<&u8>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u8 as core::ops::bit::BitXor<&u8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u8 as core::ops::bit::BitXorAssign<&u8>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u8 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::default::Default>::default fn default() -> $t {
$v
}
<usize as core::intrinsics::fallback::DisjointBitOr>::disjoint_bitor unsafe fn disjoint_bitor(self, other: Self) -> Self {
// Note that the assume here is required for UB detection in Miri!
// SAFETY: our precondition is that there are no bits in common,
// so this is just telling that to the backend.
unsafe { super::assume((self & other) == zero!($t)) };
self | other
}
<usize as core::iter::range::Step>::backward_checked fn backward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_sub(n),
Err(_) => None, // if n is out of range, `unsigned_start - n` is too
}
}
<usize as core::iter::range::Step>::forward_checked fn forward_checked(start: Self, n: usize) -> Option<Self> {
match Self::try_from(n) {
Ok(n) => start.checked_add(n),
Err(_) => None, // if n is out of range, `unsigned_start + n` is too
}
}
<usize as core::iter::range::Step>::steps_between fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
if *start <= *end {
// This relies on $u_narrower <= usize
let steps = (*end - *start) as usize;
(steps, Some(steps))
} else {
(0, None)
}
}
<usize as core::iter::traits::accum::Sum<&'a usize>>::sum fn sum<I: Iterator<Item=&'a Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<usize as core::iter::traits::accum::Sum>::sum fn sum<I: Iterator<Item=Self>>(iter: I) -> Self {
iter.fold(
$zero,
#[rustc_inherit_overflow_checks]
|a, b| a + b,
)
}
<usize as core::ops::arith::Add<&usize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<usize as core::ops::arith::AddAssign<&usize>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<usize as core::ops::arith::Div<&usize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<usize as core::ops::arith::DivAssign<&usize>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<usize as core::ops::arith::Mul<&usize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<usize as core::ops::arith::MulAssign<&usize>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<usize as core::ops::arith::Rem<&usize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<usize as core::ops::arith::RemAssign<&usize>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<usize as core::ops::arith::Sub<&usize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<usize as core::ops::arith::SubAssign<&usize>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<usize as core::ops::bit::BitAnd<&usize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<usize as core::ops::bit::BitAndAssign<&usize>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<usize as core::ops::bit::BitOr<&usize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<usize as core::ops::bit::BitOrAssign<&usize>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<usize as core::ops::bit::BitXor<&usize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<usize as core::ops::bit::BitXorAssign<&usize>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<usize as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<usize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&T> {
if self < slice.len() {
// SAFETY: `self` is checked to be in bounds.
unsafe { Some(slice_get_unchecked(slice, self)) }
} else {
None
}
}
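// A minimal usage sketch (not part of the library source) of the checked `get`
// above: out-of-bounds access yields `None` instead of a panic.
fn demo_slice_get() {
    let s = [10u8, 20, 30];
    assert_eq!(s.get(1), Some(&20)); // in bounds: Some(&T)
    assert_eq!(s.get(5), None); // out of bounds: None rather than a panic
}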
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
assert_unsafe_precondition!(
check_language_ub, // okay because of the `assume` below
"slice::get_unchecked requires that the index is within the slice",
(this: usize = self, len: usize = slice.len()) => this < len
);
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
// Use intrinsics::assume instead of hint::assert_unchecked so that we don't check the
// precondition of this function twice.
crate::intrinsics::assume(self < slice.len());
slice_get_unchecked(slice, self)
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked_mut requires that the index is within the slice",
(this: usize = self, len: usize = slice.len()) => this < len
);
// SAFETY: see comments for `get_unchecked` above.
unsafe { slice_get_unchecked(slice, self) }
}
<usize as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &T {
// N.B., use intrinsic indexing
&(*slice)[self]
}
<usize as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut T {
// N.B., use intrinsic indexing
&mut (*slice)[self]
}
core::alloc::layout::Layout::align pub const fn align(&self) -> usize {
self.align.as_usize()
}
core::alloc::layout::Layout::from_size_align_unchecked pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
assert_unsafe_precondition!(
check_library_ub,
"Layout::from_size_align_unchecked requires that align is a power of 2 \
and the rounded-up allocation size does not exceed isize::MAX",
(
size: usize = size,
align: usize = align,
) => Layout::is_size_align_valid(size, align)
);
// SAFETY: the caller is required to uphold the preconditions.
unsafe { Layout { size, align: mem::transmute(align) } }
}
core::alloc::layout::Layout::is_size_align_valid const fn is_size_align_valid(size: usize, align: usize) -> bool {
let Some(align) = Alignment::new(align) else { return false };
if size > Self::max_size_for_align(align) {
return false;
}
true
}
core::alloc::layout::Layout::max_size_for_align const fn max_size_for_align(align: Alignment) -> usize {
// (power-of-two implies align != 0.)
// Rounded up size is:
// size_rounded_up = (size + align - 1) & !(align - 1);
//
// We know from above that align != 0. If adding (align - 1)
// does not overflow, then rounding up will be fine.
//
// Conversely, &-masking with !(align - 1) will subtract off
// only low-order-bits. Thus if overflow occurs with the sum,
// the &-mask cannot subtract enough to undo that overflow.
//
// Above implies that checking for summation overflow is both
// necessary and sufficient.
// SAFETY: the maximum possible alignment is `isize::MAX + 1`,
// so the subtraction cannot overflow.
unsafe { unchecked_sub(isize::MAX as usize + 1, align.as_usize()) }
}
core::alloc::layout::Layout::new pub const fn new<T>() -> Self {
let (size, align) = size_align::<T>();
// SAFETY: if the type is instantiated, rustc already ensures that its
// layout is valid. Use the unchecked constructor to avoid inserting a
// panicking codepath that needs to be optimized out.
unsafe { Layout::from_size_align_unchecked(size, align) }
}
core::alloc::layout::Layout::size pub const fn size(&self) -> usize {
self.size
}
core::alloc::layout::size_align const fn size_align<T>() -> (usize, usize) {
(size_of::<T>(), align_of::<T>())
}
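// A minimal sketch (not part of the library source) of the `Layout` accessors
// above; the concrete size/align values assume a target where `u64` is
// 8-byte aligned.
fn demo_layout() {
    use core::alloc::Layout;
    let l = Layout::new::<u64>();
    assert_eq!(l.size(), 8);
    assert_eq!(l.align(), 8);
    // The checked constructor rejects a non-power-of-two alignment:
    assert!(Layout::from_size_align(8, 3).is_err());
}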
core::any::TypeId::of pub const fn of<T: ?Sized + 'static>() -> TypeId {
const { intrinsics::type_id::<T>() }
}
core::any::type_name pub const fn type_name<T: ?Sized>() -> &'static str {
const { intrinsics::type_name::<T>() }
}
core::any::type_name_of_val pub const fn type_name_of_val<T: ?Sized>(_val: &T) -> &'static str {
type_name::<T>()
}
core::array::<impl [T; N]>::as_mut_slice pub const fn as_mut_slice(&mut self) -> &mut [T] {
self
}
core::array::<impl [T; N]>::as_slice pub const fn as_slice(&self) -> &[T] {
self
}
core::array::<impl [T; N]>::map pub fn map<F, U>(self, f: F) -> [U; N]
where
F: FnMut(T) -> U,
{
self.try_map(NeverShortCircuit::wrap_mut_1(f)).0
}
core::array::<impl [T; N]>::try_map pub fn try_map<R>(self, f: impl FnMut(T) -> R) -> ChangeOutputType<R, [R::Output; N]>
where
R: Try<Residual: Residual<[R::Output; N]>>,
{
drain_array_with(self, |iter| try_from_trusted_iterator(iter.map(f)))
}
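// Usage sketch (not part of the library source) for `map` above. `try_map` is
// gated behind the unstable `array_try_map` feature, so only `map` is shown.
fn demo_array_map() {
    let doubled = [1, 2, 3].map(|x| x * 2);
    assert_eq!(doubled, [2, 4, 6]);
}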
core::array::<impl core::clone::Clone for [T; N]>::clone fn clone(&self) -> Self {
SpecArrayClone::clone(self)
}
core::array::<impl core::convert::AsRef<[T]> for [T; N]>::as_ref fn as_ref(&self) -> &[T] {
&self[..]
}
core::array::<impl core::convert::TryFrom<&'a [T]> for &'a [T; N]>::try_from fn try_from(slice: &'a [T]) -> Result<&'a [T; N], TryFromSliceError> {
slice.as_array().ok_or(TryFromSliceError(()))
}
core::array::<impl core::convert::TryFrom<&'a mut [T]> for &'a mut [T; N]>::try_from fn try_from(slice: &'a mut [T]) -> Result<&'a mut [T; N], TryFromSliceError> {
slice.as_mut_array().ok_or(TryFromSliceError(()))
}
core::array::<impl core::convert::TryFrom<&[T]> for [T; N]>::try_from fn try_from(slice: &[T]) -> Result<[T; N], TryFromSliceError> {
<&Self>::try_from(slice).copied()
}
core::array::<impl core::convert::TryFrom<&mut [T]> for [T; N]>::try_from fn try_from(slice: &mut [T]) -> Result<[T; N], TryFromSliceError> {
<Self>::try_from(&*slice)
}
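// Usage sketch (not part of the library source) for the slice-to-array
// `TryFrom` impls above: the conversion succeeds only on an exact length match.
fn demo_try_from(bytes: &[u8]) -> Option<u32> {
    let arr: [u8; 4] = bytes.try_into().ok()?; // Err if bytes.len() != 4
    Some(u32::from_le_bytes(arr))
}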
core::array::<impl core::iter::traits::collect::IntoIterator for &'a [T; N]>::into_iter fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
core::array::<impl core::iter::traits::collect::IntoIterator for &'a mut [T; N]>::into_iter fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
core::array::<impl core::ops::index::Index<I> for [T; N]>::index fn index(&self, index: I) -> &Self::Output {
Index::index(self as &[T], index)
}
core::array::<impl core::ops::index::IndexMut<I> for [T; N]>::index_mut fn index_mut(&mut self, index: I) -> &mut Self::Output {
IndexMut::index_mut(self as &mut [T], index)
}
core::array::Guard::<'_, T>::push_unchecked pub(crate) unsafe fn push_unchecked(&mut self, item: T) {
// SAFETY: If `initialized` was correct before and the caller does not
// invoke this method more than N times then writes will be in-bounds
// and slots will not be initialized more than once.
unsafe {
self.array_mut.get_unchecked_mut(self.initialized).write(item);
self.initialized = self.initialized.unchecked_add(1);
}
}
core::array::drain::drain_array_with pub(crate) fn drain_array_with<T, R, const N: usize>(
array: [T; N],
func: impl for<'a> FnOnce(Drain<'a, T>) -> R,
) -> R {
let mut array = ManuallyDrop::new(array);
// SAFETY: Now that the local won't drop it, it's ok to construct the `Drain` which will.
let drain = Drain(array.iter_mut());
func(drain)
}
core::array::equality::<impl core::cmp::PartialEq<&[U]> for [T; N]>::eq fn eq(&self, other: &&[U]) -> bool {
*self == **other
}
core::array::equality::<impl core::cmp::PartialEq<&[U]> for [T; N]>::ne fn ne(&self, other: &&[U]) -> bool {
*self != **other
}
core::array::equality::<impl core::cmp::PartialEq<&mut [U]> for [T; N]>::eq fn eq(&self, other: &&mut [U]) -> bool {
*self == **other
}
core::array::equality::<impl core::cmp::PartialEq<&mut [U]> for [T; N]>::ne fn ne(&self, other: &&mut [U]) -> bool {
*self != **other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &[T]>::eq fn eq(&self, other: &[U; N]) -> bool {
**self == *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &[T]>::ne fn ne(&self, other: &[U; N]) -> bool {
**self != *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &mut [T]>::eq fn eq(&self, other: &[U; N]) -> bool {
**self == *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &mut [T]>::ne fn ne(&self, other: &[U; N]) -> bool {
**self != *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T; N]>::eq fn eq(&self, other: &[U; N]) -> bool {
SpecArrayEq::spec_eq(self, other)
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T; N]>::ne fn ne(&self, other: &[U; N]) -> bool {
SpecArrayEq::spec_ne(self, other)
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T]>::eq fn eq(&self, other: &[U; N]) -> bool {
match self.as_array::<N>() {
Some(b) => *b == *other,
None => false,
}
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T]>::ne fn ne(&self, other: &[U; N]) -> bool {
match self.as_array::<N>() {
Some(b) => *b != *other,
None => true,
}
}
core::array::equality::<impl core::cmp::PartialEq<[U]> for [T; N]>::eq fn eq(&self, other: &[U]) -> bool {
match other.as_array::<N>() {
Some(b) => *self == *b,
None => false,
}
}
core::array::equality::<impl core::cmp::PartialEq<[U]> for [T; N]>::ne fn ne(&self, other: &[U]) -> bool {
match other.as_array::<N>() {
Some(b) => *self != *b,
None => true,
}
}
core::array::from_fn pub fn from_fn<T, const N: usize, F>(f: F) -> [T; N]
where
F: FnMut(usize) -> T,
{
try_from_fn(NeverShortCircuit::wrap_mut_1(f)).0
}
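// Usage sketch (not part of the library source) for `from_fn` above; the
// closure receives each index 0..N in order.
fn demo_from_fn() {
    let squares: [usize; 4] = core::array::from_fn(|i| i * i);
    assert_eq!(squares, [0, 1, 4, 9]);
}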
core::array::from_mut pub const fn from_mut<T>(s: &mut T) -> &mut [T; 1] {
// SAFETY: Converting `&mut T` to `&mut [T; 1]` is sound.
unsafe { &mut *(s as *mut T).cast::<[T; 1]>() }
}
core::array::from_ref pub const fn from_ref<T>(s: &T) -> &[T; 1] {
// SAFETY: Converting `&T` to `&[T; 1]` is sound.
unsafe { &*(s as *const T).cast::<[T; 1]>() }
}
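// Usage sketch (not part of the library source) for `from_ref`/`from_mut`
// above; both are pure pointer casts, so no data is moved or copied.
fn demo_from_ref() {
    let x = 5i32;
    let arr: &[i32; 1] = core::array::from_ref(&x);
    assert_eq!(arr[0], 5);
}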
core::array::from_trusted_iterator fn from_trusted_iterator<T, const N: usize>(iter: impl UncheckedIterator<Item = T>) -> [T; N] {
try_from_trusted_iterator(iter.map(NeverShortCircuit)).0
}
core::array::iter::<impl core::iter::traits::collect::IntoIterator for [T; N]>::into_iter fn into_iter(self) -> Self::IntoIter {
// SAFETY: The transmute here is actually safe. The docs of `MaybeUninit`
// promise:
//
// > `MaybeUninit<T>` is guaranteed to have the same size and alignment
// > as `T`.
//
// The docs even show a transmute from an array of `MaybeUninit<T>` to
// an array of `T`.
//
// With that, this initialization satisfies the invariants.
//
// FIXME: If normal `transmute` ever gets smart enough to allow this
// directly, use it instead of `transmute_unchecked`.
let data: [MaybeUninit<T>; N] = unsafe { transmute_unchecked(self) };
    // SAFETY: The original array was entirely initialized and the alive
// range we're passing here represents that fact.
let inner = unsafe { InnerSized::new_unchecked(IndexRange::zero_to(N), data) };
IntoIter { inner: ManuallyDrop::new(inner) }
}
core::array::iter::IntoIter::<T, N>::unsize fn unsize(&self) -> &InnerUnsized<T> {
self.inner.deref()
}
core::array::iter::IntoIter::<T, N>::unsize_mut fn unsize_mut(&mut self) -> &mut InnerUnsized<T> {
self.inner.deref_mut()
}
core::array::iter::iter_inner::PolymorphicIter::<DATA>::len pub(super) const fn len(&self) -> usize {
self.alive.len()
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>; N]>::empty pub(super) const fn empty() -> Self {
Self { alive: IndexRange::zero_to(0), data: [const { MaybeUninit::uninit() }; N] }
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>; N]>::new_unchecked pub(super) const unsafe fn new_unchecked(alive: IndexRange, data: [MaybeUninit<T>; N]) -> Self {
Self { alive, data }
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::advance_by pub(super) fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
// This also moves the start, which marks them as conceptually "dropped",
// so if anything goes bad then our drop impl won't double-free them.
let range_to_drop = self.alive.take_prefix(n);
let remaining = n - range_to_drop.len();
// SAFETY: These elements are currently initialized, so it's fine to drop them.
unsafe {
let slice = self.data.get_unchecked_mut(range_to_drop);
slice.assume_init_drop();
}
NonZero::new(remaining).map_or(Ok(()), Err)
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::as_slice pub(super) fn as_slice(&self) -> &[T] {
// SAFETY: We know that all elements within `alive` are properly initialized.
unsafe {
let slice = self.data.get_unchecked(self.alive.clone());
slice.assume_init_ref()
}
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::fold pub(super) fn fold<B>(&mut self, init: B, f: impl FnMut(B, T) -> B) -> B {
self.try_fold(init, NeverShortCircuit::wrap_mut_2(f)).0
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::next pub(super) fn next(&mut self) -> Option<T> {
// Get the next index from the front.
//
// Increasing `alive.start` by 1 maintains the invariant regarding
// `alive`. However, due to this change, for a short time, the alive
// zone is not `data[alive]` anymore, but `data[idx..alive.end]`.
self.alive.next().map(|idx| {
// Read the element from the array.
// SAFETY: `idx` is an index into the former "alive" region of the
// array. Reading this element means that `data[idx]` is regarded as
// dead now (i.e. do not touch). As `idx` was the start of the
// alive-zone, the alive zone is now `data[alive]` again, restoring
// all invariants.
unsafe { self.data.get_unchecked(idx).assume_init_read() }
})
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::size_hint pub(super) fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::try_fold pub(super) fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
F: FnMut(B, T) -> R,
R: Try<Output = B>,
{
// `alive` is an `IndexRange`, not an arbitrary iterator, so we can
// trust that its `try_fold` isn't going to do something weird like
// call the fold-er multiple times for the same index.
let data = &mut self.data;
self.alive.try_fold(init, move |accum, idx| {
// SAFETY: `idx` has been removed from the alive range, so we're not
        // going to drop it (even if `f` panics) and thus it's ok to give
// out ownership of that item to `f` to handle.
let elem = unsafe { data.get_unchecked(idx).assume_init_read() };
f(accum, elem)
})
}
core::array::try_from_fn pub fn try_from_fn<R, const N: usize, F>(cb: F) -> ChangeOutputType<R, [R::Output; N]>
where
F: FnMut(usize) -> R,
R: Try,
R::Residual: Residual<[R::Output; N]>,
{
let mut array = [const { MaybeUninit::uninit() }; N];
match try_from_fn_erased(&mut array, cb) {
ControlFlow::Break(r) => FromResidual::from_residual(r),
ControlFlow::Continue(()) => {
// SAFETY: All elements of the array were populated.
try { unsafe { MaybeUninit::array_assume_init(array) } }
}
}
}
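// Sketch (not part of the library source) of `try_from_fn` above; it is gated
// behind the unstable `array_try_from_fn` feature on nightly. With
// `R = Option<T>` the construction short-circuits on the first `None`.
fn demo_try_from_fn() {
    // requires #![feature(array_try_from_fn)] on nightly
    let ok: Option<[usize; 3]> = core::array::try_from_fn(|i| i.checked_add(1));
    assert_eq!(ok, Some([1, 2, 3]));
    let overflow: Option<[usize; 3]> = core::array::try_from_fn(|i| usize::MAX.checked_add(i));
    assert_eq!(overflow, None); // stops at i == 1, drops the element from i == 0
}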
core::array::try_from_fn_erased fn try_from_fn_erased<T, R>(
buffer: &mut [MaybeUninit<T>],
mut generator: impl FnMut(usize) -> R,
) -> ControlFlow<R::Residual>
where
R: Try<Output = T>,
{
let mut guard = Guard { array_mut: buffer, initialized: 0 };
while guard.initialized < guard.array_mut.len() {
let item = generator(guard.initialized).branch()?;
// SAFETY: The loop condition ensures we have space to push the item
unsafe { guard.push_unchecked(item) };
}
mem::forget(guard);
ControlFlow::Continue(())
}
core::array::try_from_trusted_iterator fn try_from_trusted_iterator<T, R, const N: usize>(
iter: impl UncheckedIterator<Item = R>,
) -> ChangeOutputType<R, [T; N]>
where
R: Try<Output = T>,
R::Residual: Residual<[T; N]>,
{
assert!(iter.size_hint().0 >= N);
fn next<T>(mut iter: impl UncheckedIterator<Item = T>) -> impl FnMut(usize) -> T {
move |_| {
// SAFETY: We know that `from_fn` will call this at most N times,
// and we checked to ensure that we have at least that many items.
unsafe { iter.next_unchecked() }
}
}
try_from_fn(next(iter))
}
core::array::try_from_trusted_iterator::next fn next<T>(mut iter: impl UncheckedIterator<Item = T>) -> impl FnMut(usize) -> T {
move |_| {
// SAFETY: We know that `from_fn` will call this at most N times,
// and we checked to ensure that we have at least that many items.
unsafe { iter.next_unchecked() }
}
}
core::bool::<impl bool>::ok_or pub fn ok_or<E>(self, err: E) -> Result<(), E> {
if self { Ok(()) } else { Err(err) }
}
core::bool::<impl bool>::ok_or_else pub fn ok_or_else<E, F: FnOnce() -> E>(self, f: F) -> Result<(), E> {
if self { Ok(()) } else { Err(f()) }
}
core::bool::<impl bool>::then pub fn then<T, F: FnOnce() -> T>(self, f: F) -> Option<T> {
if self { Some(f()) } else { None }
}
core::bool::<impl bool>::then_some pub fn then_some<T>(self, t: T) -> Option<T> {
if self { Some(t) } else { None }
}
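// Usage sketch (not part of the library source) for the `bool` helpers above;
// `then`/`then_some` are stable, while `ok_or`/`ok_or_else` remain unstable at
// the time of writing.
fn demo_bool() {
    assert_eq!(true.then(|| 42), Some(42));
    assert_eq!(false.then_some(42), None);
}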
core::cell::BorrowRef::<'b>::new const fn new(borrow: &'b Cell<BorrowCounter>) -> Option<BorrowRef<'b>> {
let b = borrow.get().wrapping_add(1);
if !is_reading(b) {
// Incrementing borrow can result in a non-reading value (<= 0) in these cases:
// 1. It was < 0, i.e. there are writing borrows, so we can't allow a read borrow
// due to Rust's reference aliasing rules
// 2. It was isize::MAX (the max amount of reading borrows) and it overflowed
// into isize::MIN (the max amount of writing borrows) so we can't allow
// an additional read borrow because isize can't represent so many read borrows
// (this can only happen if you mem::forget more than a small constant amount of
// `Ref`s, which is not good practice)
None
} else {
// Incrementing borrow can result in a reading value (> 0) in these cases:
// 1. It was = 0, i.e. it wasn't borrowed, and we are taking the first read borrow
// 2. It was > 0 and < isize::MAX, i.e. there were read borrows, and isize
// is large enough to represent having one more read borrow
borrow.replace(b);
Some(BorrowRef { borrow })
}
}
core::cell::BorrowRefMut::<'b>::new const fn new(borrow: &'b Cell<BorrowCounter>) -> Option<BorrowRefMut<'b>> {
// NOTE: Unlike BorrowRefMut::clone, new is called to create the initial
// mutable reference, and so there must currently be no existing
// references. Thus, while clone increments the mutable refcount, here
// we explicitly only allow going from UNUSED to UNUSED - 1.
match borrow.get() {
UNUSED => {
borrow.replace(UNUSED - 1);
Some(BorrowRefMut { borrow })
}
_ => None,
}
}
core::cell::Cell::<T>::get pub const fn get(&self) -> T {
// SAFETY: This can cause data races if called from a separate thread,
// but `Cell` is `!Sync` so this won't happen.
unsafe { *self.value.get() }
}
core::cell::Cell::<T>::new pub const fn new(value: T) -> Cell<T> {
Cell { value: UnsafeCell::new(value) }
}
core::cell::Cell::<T>::replace pub const fn replace(&self, val: T) -> T {
// SAFETY: This can cause data races if called from a separate thread,
// but `Cell` is `!Sync` so this won't happen.
mem::replace(unsafe { &mut *self.value.get() }, val)
}
core::cell::Cell::<T>::set pub const fn set(&self, val: T)
where
T: [const] Destruct,
{
self.replace(val);
}
core::cell::RefCell::<T>::borrow_mut pub const fn borrow_mut(&self) -> RefMut<'_, T> {
match self.try_borrow_mut() {
Ok(b) => b,
Err(err) => panic_already_borrowed(err),
}
}
core::cell::RefCell::<T>::new pub const fn new(value: T) -> RefCell<T> {
RefCell {
value: UnsafeCell::new(value),
borrow: Cell::new(UNUSED),
#[cfg(feature = "debug_refcell")]
borrowed_at: Cell::new(None),
}
}
core::cell::RefCell::<T>::replace pub const fn replace(&self, t: T) -> T {
mem::replace(&mut self.borrow_mut(), t)
}
core::cell::RefCell::<T>::try_borrow pub const fn try_borrow(&self) -> Result<Ref<'_, T>, BorrowError> {
match BorrowRef::new(&self.borrow) {
Some(b) => {
#[cfg(feature = "debug_refcell")]
{
// `borrowed_at` is always the *first* active borrow
if b.borrow.get() == 1 {
self.borrowed_at.replace(Some(crate::panic::Location::caller()));
}
}
// SAFETY: `BorrowRef` ensures that there is only immutable access
// to the value while borrowed.
let value = unsafe { NonNull::new_unchecked(self.value.get()) };
Ok(Ref { value, borrow: b })
}
None => Err(BorrowError {
// If a borrow occurred, then we must already have an outstanding borrow,
// so `borrowed_at` will be `Some`
#[cfg(feature = "debug_refcell")]
location: self.borrowed_at.get().unwrap(),
}),
}
}
core::cell::RefCell::<T>::try_borrow_mut pub const fn try_borrow_mut(&self) -> Result<RefMut<'_, T>, BorrowMutError> {
match BorrowRefMut::new(&self.borrow) {
Some(b) => {
#[cfg(feature = "debug_refcell")]
{
self.borrowed_at.replace(Some(crate::panic::Location::caller()));
}
// SAFETY: `BorrowRefMut` guarantees unique access.
let value = unsafe { NonNull::new_unchecked(self.value.get()) };
Ok(RefMut { value, borrow: b, marker: PhantomData })
}
None => Err(BorrowMutError {
// If a borrow occurred, then we must already have an outstanding borrow,
// so `borrowed_at` will be `Some`
#[cfg(feature = "debug_refcell")]
location: self.borrowed_at.get().unwrap(),
}),
}
}
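// A sketch (not part of the library source) of the borrow-counter protocol
// implemented above: a live shared borrow makes `try_borrow_mut` fail, and
// dropping it restores the UNUSED state.
fn demo_refcell() {
    use core::cell::RefCell;
    let cell = RefCell::new(1);
    let r = cell.try_borrow().unwrap(); // counter: UNUSED -> 1
    assert!(cell.try_borrow_mut().is_err()); // writer refused while a reader exists
    drop(r); // counter: 1 -> UNUSED
    assert!(cell.try_borrow_mut().is_ok());
}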
core::cell::UnsafeCell::<T>::get pub const fn get(&self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
    // #[repr(transparent)]. This exploits std's special status; there is
// no guarantee for user code that this will work in future versions of the compiler!
self as *const UnsafeCell<T> as *const T as *mut T
}
core::cell::UnsafeCell::<T>::get_mut pub const fn get_mut(&mut self) -> &mut T {
&mut self.value
}
core::cell::UnsafeCell::<T>::into_inner pub const fn into_inner(self) -> T {
self.value
}
core::cell::UnsafeCell::<T>::new pub const fn new(value: T) -> UnsafeCell<T> {
UnsafeCell { value }
}
core::cell::is_reading const fn is_reading(x: BorrowCounter) -> bool {
x > UNUSED
}
core::cell::is_writing const fn is_writing(x: BorrowCounter) -> bool {
x < UNUSED
}
core::cell::panic_already_borrowed const fn panic_already_borrowed(err: BorrowMutError) -> ! {
const_panic!(
"RefCell already borrowed",
"{err}",
err: BorrowMutError = err,
)
}
core::cell::panic_already_borrowed::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
#[noinline]
if const #[track_caller] #[inline] { // Inline this, to prevent codegen
$crate::panic!($const_msg)
} else #[track_caller] { // Do not inline this, it makes perf worse
$crate::panic!($runtime_msg)
}
)
}
core::cell::panic_already_borrowed::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::cell::panic_already_mutably_borrowed::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
#[noinline]
if const #[track_caller] #[inline] { // Inline this, to prevent codegen
$crate::panic!($const_msg)
} else #[track_caller] { // Do not inline this, it makes perf worse
$crate::panic!($runtime_msg)
}
)
}
core::cell::panic_already_mutably_borrowed::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::char::convert::char_try_from_u32 const fn char_try_from_u32(i: u32) -> Result<char, CharTryFromError> {
// This is an optimized version of the check
// (i > MAX as u32) || (i >= 0xD800 && i <= 0xDFFF),
// which can also be written as
// i >= 0x110000 || (i >= 0xD800 && i < 0xE000).
//
// The XOR with 0xD800 permutes the ranges such that 0xD800..0xE000 is
// mapped to 0x0000..0x0800, while keeping all the high bits outside 0xFFFF the same.
// In particular, numbers >= 0x110000 stay in this range.
//
// Subtracting 0x800 causes 0x0000..0x0800 to wrap, meaning that a single
// unsigned comparison against 0x110000 - 0x800 will detect both the wrapped
// surrogate range as well as the numbers originally larger than 0x110000.
//
if (i ^ 0xD800).wrapping_sub(0x800) >= 0x110000 - 0x800 {
Err(CharTryFromError(()))
} else {
// SAFETY: checked that it's a legal unicode value
Ok(unsafe { transmute(i) })
}
}
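// A worked check (not part of the library source) of the XOR/wrapping_sub
// trick above, via the public `char::from_u32` front end.
fn demo_char_ranges() {
    // Surrogate: 0xD800 ^ 0xD800 == 0; wrapping_sub(0x800) wraps to a huge
    // value, so the range test rejects it.
    assert!(char::from_u32(0xD800).is_none());
    // Too large: the XOR leaves the high bits of 0x110000 intact, so the
    // subtraction still lands at or above 0x110000 - 0x800.
    assert!(char::from_u32(0x11_0000).is_none());
    // Ordinary scalar values pass through.
    assert_eq!(char::from_u32(0x61), Some('a'));
}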
core::char::convert::from_u32_unchecked pub(super) const unsafe fn from_u32_unchecked(i: u32) -> char {
// SAFETY: the caller must guarantee that `i` is a valid char value.
unsafe {
assert_unsafe_precondition!(
check_language_ub,
"invalid value for `char`",
(i: u32 = i) => char_try_from_u32(i).is_ok()
);
transmute(i)
}
}
core::char::methods::<impl char>::from_u32_unchecked pub const unsafe fn from_u32_unchecked(i: u32) -> char {
// SAFETY: the safety contract must be upheld by the caller.
unsafe { super::convert::from_u32_unchecked(i) }
}
core::char::methods::<impl char>::len_utf8 pub const fn len_utf8(self) -> usize {
len_utf8(self as u32)
}
core::char::methods::<impl char>::to_digit pub const fn to_digit(self, radix: u32) -> Option<u32> {
assert!(
radix >= 2 && radix <= 36,
"to_digit: invalid radix -- radix must be in the range 2 to 36 inclusive"
);
// check radix to remove letter handling code when radix is a known constant
let value = if self > '9' && radix > 10 {
// mask to convert ASCII letters to uppercase
const TO_UPPERCASE_MASK: u32 = !0b0010_0000;
// Converts an ASCII letter to its corresponding integer value:
// A-Z => 10-35, a-z => 10-35. Other characters produce values >= 36.
//
// Add Overflow Safety:
// By applying the mask after the subtraction, the first addendum is
// constrained such that it never exceeds u32::MAX - 0x20.
((self as u32).wrapping_sub('A' as u32) & TO_UPPERCASE_MASK) + 10
} else {
// convert digit to value, non-digits wrap to values > 36
(self as u32).wrapping_sub('0' as u32)
};
// FIXME(const-hack): once then_some is const fn, use it here
if value < radix { Some(value) } else { None }
}
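// Usage sketch (not part of the library source) for `to_digit` above,
// exercising both the digit path and the masked letter path.
fn demo_to_digit() {
    assert_eq!('7'.to_digit(10), Some(7));
    assert_eq!('f'.to_digit(16), Some(15)); // lowercase goes through the mask
    assert_eq!('F'.to_digit(16), Some(15));
    assert_eq!('g'.to_digit(16), None); // value 16 is not below the radix
}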
core::char::methods::len_utf8 const fn len_utf8(code: u32) -> usize {
match code {
..MAX_ONE_B => 1,
..MAX_TWO_B => 2,
..MAX_THREE_B => 3,
_ => 4,
}
}
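// The four match arms above are the UTF-8 length classes; a quick check
// (not part of the library source) with one scalar value per class.
fn demo_len_utf8() {
    assert_eq!('a'.len_utf8(), 1); // ASCII
    assert_eq!('ß'.len_utf8(), 2); // U+00DF
    assert_eq!('ह'.len_utf8(), 3); // U+0939
    assert_eq!('🦀'.len_utf8(), 4); // U+1F980
}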
core::clone::Clone::clone_from fn clone_from(&mut self, source: &Self)
where
Self: [const] Destruct,
{
*self = source.clone()
}
core::clone::impls::<impl core::clone::Clone for &T>::clone fn clone(&self) -> Self {
self
}
core::clone::impls::<impl core::clone::Clone for bool>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for char>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f128>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f16>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f32>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f64>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i128>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i16>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i32>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i64>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i8>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for isize>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u128>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u16>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u32>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u64>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u8>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for usize>::clone fn clone(&self) -> Self {
*self
}
core::cmp::Ord::clamp fn clamp(self, min: Self, max: Self) -> Self
where
Self: Sized + [const] Destruct,
{
assert!(min <= max);
if self < min {
min
} else if self > max {
max
} else {
self
}
}
core::cmp::Ord::max fn max(self, other: Self) -> Self
where
Self: Sized + [const] Destruct,
{
if other < self { self } else { other }
}
core::cmp::Ord::min fn min(self, other: Self) -> Self
where
Self: Sized + [const] Destruct,
{
if other < self { other } else { self }
}
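// Usage sketch (not part of the library source) for the `Ord` defaults above;
// note that `clamp` panics if `min > max`.
fn demo_ord() {
    assert_eq!(5.clamp(1, 3), 3);
    assert_eq!(2.max(7), 7);
    assert_eq!(2.min(7), 2);
}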
core::cmp::Ordering::as_raw const fn as_raw(self) -> i8 {
// FIXME(const-hack): just use `PartialOrd` against `Equal` once that's const
crate::intrinsics::discriminant_value(&self)
}
core::cmp::Ordering::is_eq pub const fn is_eq(self) -> bool {
// All the `is_*` methods are implemented as comparisons against zero
// to follow how clang's libcxx implements their equivalents in
// <https://github.com/llvm/llvm-project/blob/60486292b79885b7800b082754153202bef5b1f0/libcxx/include/__compare/is_eq.h#L23-L28>
self.as_raw() == 0
}
core::cmp::Ordering::is_ge pub const fn is_ge(self) -> bool {
self.as_raw() >= 0
}
core::cmp::Ordering::is_gt pub const fn is_gt(self) -> bool {
self.as_raw() > 0
}
core::cmp::Ordering::is_le pub const fn is_le(self) -> bool {
self.as_raw() <= 0
}
core::cmp::Ordering::is_lt pub const fn is_lt(self) -> bool {
self.as_raw() < 0
}
core::cmp::Ordering::is_ne pub const fn is_ne(self) -> bool {
self.as_raw() != 0
}
core::cmp::Ordering::reverse pub const fn reverse(self) -> Ordering {
match self {
Less => Greater,
Equal => Equal,
Greater => Less,
}
}
core::cmp::Ordering::then pub const fn then(self, other: Ordering) -> Ordering {
match self {
Equal => other,
_ => self,
}
}
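// Usage sketch (not part of the library source) for `reverse` and `then`
// above; `then` is the lexicographic tie-break combinator.
fn demo_ordering() {
    use core::cmp::Ordering::{Equal, Greater, Less};
    assert_eq!(Less.reverse(), Greater);
    assert_eq!(Equal.then(Less), Less); // tie broken by the second comparison
    assert_eq!(Greater.then(Less), Greater); // first comparison already decided
}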
core::cmp::PartialEq::ne fn ne(&self, other: &Rhs) -> bool {
!self.eq(other)
}
core::cmp::PartialOrd::__chaining_ge fn __chaining_ge(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_ge)
}
core::cmp::PartialOrd::__chaining_gt fn __chaining_gt(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_gt)
}
core::cmp::PartialOrd::__chaining_le fn __chaining_le(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_le)
}
core::cmp::PartialOrd::__chaining_lt fn __chaining_lt(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_lt)
}
core::cmp::PartialOrd::ge fn ge(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_ge)
}
core::cmp::PartialOrd::gt fn gt(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_gt)
}
core::cmp::PartialOrd::le fn le(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_le)
}
core::cmp::PartialOrd::lt fn lt(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_lt)
}
core::cmp::default_chaining_impl const fn default_chaining_impl<T, U>(
lhs: &T,
rhs: &U,
p: impl [const] FnOnce(Ordering) -> bool + [const] Destruct,
) -> ControlFlow<bool>
where
T: [const] PartialOrd<U> + PointeeSized,
U: PointeeSized,
{
    // It's important that this only calls `partial_cmp` once, not `eq` and then
    // one of the relational operators. We don't want to `bcmp`-then-`memcmp` a
    // `String`, for example, or similarly for other data structures (#108157).
match <T as PartialOrd<U>>::partial_cmp(lhs, rhs) {
Some(Equal) => ControlFlow::Continue(()),
Some(c) => ControlFlow::Break(p(c)),
None => ControlFlow::Break(false),
}
}
core::cmp::impls::<impl core::cmp::Ord for ()>::cmp fn cmp(&self, _other: &()) -> Ordering {
Equal
}
core::cmp::impls::<impl core::cmp::Ord for bool>::clamp fn clamp(self, min: bool, max: bool) -> bool {
assert!(min <= max);
self.max(min).min(max)
}
core::cmp::impls::<impl core::cmp::Ord for bool>::max fn max(self, other: bool) -> bool {
self | other
}
core::cmp::impls::<impl core::cmp::Ord for bool>::min fn min(self, other: bool) -> bool {
self & other
}
core::cmp::impls::<impl core::cmp::Ord for char>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i128>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i16>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i32>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i64>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i8>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for isize>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u128>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u16>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u32>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u64>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u8>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for usize>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq for ()>::eq fn eq(&self, _other: &()) -> bool {
true
}
core::cmp::impls::<impl core::cmp::PartialEq for ()>::ne fn ne(&self, _other: &()) -> bool {
false
}
core::cmp::impls::<impl core::cmp::PartialEq for bool>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for bool>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for char>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for char>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f128>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f128>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f16>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f16>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f32>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f32>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f64>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f64>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i128>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i128>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i16>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i16>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i32>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i32>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i64>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i64>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i8>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i8>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for isize>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for isize>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u128>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u128>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u16>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u16>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u32>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u32>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u64>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u64>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u8>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u8>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for usize>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &A>::eq fn eq(&self, other: &&B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &A>::ne fn ne(&self, other: &&B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &mut A>::eq fn eq(&self, other: &&B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &mut A>::ne fn ne(&self, other: &&B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &A>::eq fn eq(&self, other: &&mut B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &A>::ne fn ne(&self, other: &&mut B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &mut A>::eq fn eq(&self, other: &&mut B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &mut A>::ne fn ne(&self, other: &&mut B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd for ()>::partial_cmp fn partial_cmp(&self, _: &()) -> Option<Ordering> {
Some(Equal)
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::partial_cmp fn partial_cmp(&self, other: &bool) -> Option<Ordering> {
Some(self.cmp(other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
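// The (<=, >=) truth table above returns `None` exactly when either operand
// is NaN; a quick check (not part of the library source).
fn demo_float_partial_cmp() {
    use core::cmp::Ordering;
    assert_eq!(1.0f64.partial_cmp(&2.0), Some(Ordering::Less));
    assert_eq!(f64::NAN.partial_cmp(&1.0), None); // the (false, false) row
}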
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::__chaining_ge fn __chaining_ge(&self, other: &&B) -> ControlFlow<bool> {
PartialOrd::__chaining_ge(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::__chaining_gt fn __chaining_gt(&self, other: &&B) -> ControlFlow<bool> {
PartialOrd::__chaining_gt(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::__chaining_le fn __chaining_le(&self, other: &&B) -> ControlFlow<bool> {
PartialOrd::__chaining_le(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::__chaining_lt fn __chaining_lt(&self, other: &&B) -> ControlFlow<bool> {
PartialOrd::__chaining_lt(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::ge fn ge(&self, other: &&B) -> bool {
PartialOrd::ge(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::gt fn gt(&self, other: &&B) -> bool {
PartialOrd::gt(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::le fn le(&self, other: &&B) -> bool {
PartialOrd::le(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::lt fn lt(&self, other: &&B) -> bool {
PartialOrd::lt(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd<&B> for &A>::partial_cmp fn partial_cmp(&self, other: &&B) -> Option<Ordering> {
PartialOrd::partial_cmp(*self, *other)
}
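The `__chaining_*` helpers above exist so that derived `PartialOrd` implementations can compare multi-field types lexicographically: each field returns `Continue(())` on a tie so the next field can decide, or `Break(bool)` with the final answer. They are internal and not directly callable; a sketch of the observable behavior through a derive (illustrative addition, not part of the listed sources):

fn main() {
    #[derive(PartialEq, PartialOrd)]
    struct Version(u32, u32);
    // Field 0 ties (Continue), so field 1 decides (Break).
    assert!(Version(1, 2) < Version(1, 3));
    // Field 0 already decides; field 1 is never inspected.
    assert!(Version(2, 0) > Version(1, 9));
}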
core::cmp::max pub const fn max<T: [const] Ord + [const] Destruct>(v1: T, v2: T) -> T {
v1.max(v2)
}
core::cmp::max_by pub const fn max_by<T: [const] Destruct, F: [const] FnOnce(&T, &T) -> Ordering>(
v1: T,
v2: T,
compare: F,
) -> T {
if compare(&v1, &v2).is_gt() { v1 } else { v2 }
}
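Note that `max_by` returns `v2` when the comparison is `Equal`; a minimal sketch of that tie-breaking (illustrative addition using only stable APIs):

fn main() {
    let a = ("first", 1);
    let b = ("second", 1);
    // On a tie, `max_by` keeps its second argument.
    let picked = core::cmp::max_by(a, b, |x, y| x.1.cmp(&y.1));
    assert_eq!(picked.0, "second");
}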
core::cmp::min pub const fn min<T: [const] Ord + [const] Destruct>(v1: T, v2: T) -> T {
v1.min(v2)
}
core::convert::identity pub const fn identity<T>(x: T) -> T {
x
}
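`identity` mainly serves as a ready-made no-op closure, for example (illustrative addition):

fn main() {
    // `filter_map(identity)` flattens an iterator of Options.
    let flat: Vec<i32> = vec![Some(1), None, Some(3)]
        .into_iter()
        .filter_map(core::convert::identity)
        .collect();
    assert_eq!(flat, [1, 3]);
}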
core::convert::num::<impl core::convert::From<bool> for u128>::from fn from(small: $Small) -> Self {
small as Self
}
core::convert::num::<impl core::convert::From<bool> for u16>::from fn from(small: $Small) -> Self {
small as Self
}
core::convert::num::<impl core::convert::From<bool> for u32>::from fn from(small: $Small) -> Self {
small as Self
}
core::convert::num::<impl core::convert::From<bool> for u64>::from fn from(small: $Small) -> Self {
small as Self
}
core::convert::num::<impl core::convert::From<bool> for u8>::from fn from(small: $Small) -> Self {
small as Self
}
core::convert::num::<impl core::convert::From<bool> for usize>::from fn from(small: $Small) -> Self {
small as Self
}
core::convert::num::<impl core::convert::From<u32> for u128>::from fn from(small: $Small) -> Self {
small as Self
}
core::convert::num::<impl core::convert::From<u32> for u64>::from fn from(small: $Small) -> Self {
small as Self
}
core::convert::num::<impl core::convert::From<u64> for u128>::from fn from(small: $Small) -> Self {
small as Self
}
core::convert::num::<impl core::convert::TryFrom<u128> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u64>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u16> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u32> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u32> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<i128> for usize>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
let min = Self::MIN as $source;
let max = Self::MAX as $source;
if u < min || u > max {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<u128> for usize>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<u32> for usize>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<u64> for usize>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u128>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u64>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
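All of the narrowing `try_from` impls above share the same shape: succeed when the value fits in the destination, report `TryFromIntError` otherwise. A quick check (illustrative addition):

fn main() {
    assert_eq!(u8::try_from(200u32), Ok(200u8));
    // 300 does not fit in u8, so the range check fails.
    assert!(u8::try_from(300u32).is_err());
}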
core::f32::<impl f32>::abs pub const fn abs(self) -> f32 {
intrinsics::fabsf32(self)
}
core::f32::<impl f32>::copysign pub const fn copysign(self, sign: f32) -> f32 {
intrinsics::copysignf32(self, sign)
}
core::f32::<impl f32>::from_bits pub const fn from_bits(v: u32) -> Self {
// It turns out the safety issues with sNaN were overblown! Hooray!
// SAFETY: `u32` is a plain old datatype so we can always transmute from it.
unsafe { mem::transmute(v) }
}
core::f32::<impl f32>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; 4]) -> Self {
Self::from_bits(u32::from_le_bytes(bytes))
}
core::f32::<impl f32>::is_nan pub const fn is_nan(self) -> bool {
self != self
}
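`is_nan` relies on NaN being the only value for which `self != self` holds under IEEE 754 comparison (illustrative addition):

fn main() {
    assert!(f32::NAN.is_nan());
    assert!(!1.0f32.is_nan());
    // The same property means NaN compares unequal to itself directly.
    assert!(f32::NAN != f32::NAN);
}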
core::f32::<impl f32>::max pub const fn max(self, other: f32) -> f32 {
intrinsics::maxnumf32(self, other)
}
core::f32::<impl f32>::min pub const fn min(self, other: f32) -> f32 {
intrinsics::minnumf32(self, other)
}
core::f32::<impl f32>::signum pub const fn signum(self) -> f32 {
if self.is_nan() { Self::NAN } else { 1.0_f32.copysign(self) }
}
core::f32::<impl f32>::to_bits pub const fn to_bits(self) -> u32 {
// SAFETY: `u32` is a plain old datatype so we can always transmute to it.
unsafe { mem::transmute(self) }
}
core::f32::<impl f32>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; 4] {
self.to_bits().to_le_bytes()
}
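The bit- and byte-level conversions above round-trip exactly, since they only reinterpret the IEEE 754 encoding (illustrative addition):

fn main() {
    let x = 1.5f32;
    assert_eq!(f32::from_le_bytes(x.to_le_bytes()), x);
    assert_eq!(f32::from_bits(x.to_bits()), x);
}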
core::f64::<impl f64>::abs pub const fn abs(self) -> f64 {
intrinsics::fabsf64(self)
}
core::f64::<impl f64>::from_bits pub const fn from_bits(v: u64) -> Self {
// It turns out the safety issues with sNaN were overblown! Hooray!
// SAFETY: `u64` is a plain old datatype so we can always transmute from it.
unsafe { mem::transmute(v) }
}
core::f64::<impl f64>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; 8]) -> Self {
Self::from_bits(u64::from_le_bytes(bytes))
}
core::f64::<impl f64>::to_bits pub const fn to_bits(self) -> u64 {
// SAFETY: `u64` is a plain old datatype so we can always transmute to it.
unsafe { mem::transmute(self) }
}
core::f64::<impl f64>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; 8] {
self.to_bits().to_le_bytes()
}
core::hint::assert_unchecked pub const unsafe fn assert_unchecked(cond: bool) {
// SAFETY: The caller promised `cond` is true.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"hint::assert_unchecked must never be called when the condition is false",
(cond: bool = cond) => cond,
);
crate::intrinsics::assume(cond);
}
}
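A sketch of intended use (illustrative addition; the caller must actually guarantee the condition, or behavior is undefined):

fn main() {
    let v = vec![1, 2, 3];
    let i = 1usize;
    // SAFETY: `i` is in bounds for `v`, so the promised condition holds.
    unsafe { core::hint::assert_unchecked(i < v.len()) };
    // The optimizer may now elide the bounds check in the indexing below.
    assert_eq!(v[i], 2);
}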
core::intrinsics::likely pub const fn likely(b: bool) -> bool {
if b {
true
} else {
cold_path();
false
}
}
core::intrinsics::ptr_guaranteed_cmp pub const fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8 {
(ptr == other) as u8
}
core::intrinsics::rotate_left pub const fn rotate_left<T: [const] fallback::FunnelShift>(x: T, shift: u32) -> T {
// Make sure to call the intrinsic for `funnel_shl`, not the fallback impl.
// SAFETY: we modulo `shift` so that the result is definitely less than the size of
// `T` in bits.
unsafe { unchecked_funnel_shl(x, x, shift % (mem::size_of::<T>() as u32 * 8)) }
}
core::intrinsics::rotate_right pub const fn rotate_right<T: [const] fallback::FunnelShift>(x: T, shift: u32) -> T {
// Make sure to call the intrinsic for `funnel_shr`, not the fallback impl.
// SAFETY: we modulo `shift` so that the result is definitely less than the size of
// `T` in bits.
unsafe { unchecked_funnel_shr(x, x, shift % (mem::size_of::<T>() as u32 * 8)) }
}
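The public integer methods `rotate_left`/`rotate_right` are thin wrappers over these intrinsics; because the shift is reduced modulo the bit width, rotating by the full width is a no-op (illustrative addition):

fn main() {
    assert_eq!(0b1000_0001u8.rotate_left(1), 0b0000_0011);
    assert_eq!(0b1000_0001u8.rotate_right(1), 0b1100_0000);
    // The shift is taken mod 8 for u8, so rotating by 8 returns the input.
    assert_eq!(0xABu8.rotate_left(8), 0xAB);
}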
core::intrinsics::typed_swap_nonoverlapping pub const unsafe fn typed_swap_nonoverlapping<T>(x: *mut T, y: *mut T) {
// SAFETY: The caller provided single non-overlapping items behind
// pointers, so swapping them with `count: 1` is fine.
unsafe { ptr::swap_nonoverlapping(x, y, 1) };
}
core::intrinsics::unlikely pub const fn unlikely(b: bool) -> bool {
if b {
cold_path();
true
} else {
false
}
}
core::iter::adapters::cloned::Cloned::<I>::new pub(in crate::iter) fn new(it: I) -> Cloned<I> {
Cloned { it }
}
core::iter::adapters::cloned::clone_try_fold fn clone_try_fold<T: Clone, Acc, R>(mut f: impl FnMut(Acc, T) -> R) -> impl FnMut(Acc, &T) -> R {
move |acc, elt| f(acc, elt.clone())
}
core::iter::adapters::enumerate::Enumerate::<I>::new pub(in crate::iter) fn new(iter: I) -> Enumerate<I> {
Enumerate { iter, count: 0 }
}
core::iter::adapters::filter::Filter::<I, P>::new pub(in crate::iter) fn new(iter: I, predicate: P) -> Filter<I, P> {
Filter { iter, predicate }
}
core::iter::adapters::filter::filter_fold fn filter_fold<T, Acc>(
mut predicate: impl FnMut(&T) -> bool,
mut fold: impl FnMut(Acc, T) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
move |acc, item| if predicate(&item) { fold(acc, item) } else { acc }
}
core::iter::adapters::filter::filter_try_fold fn filter_try_fold<'a, T, Acc, R: Try<Output = Acc>>(
predicate: &'a mut impl FnMut(&T) -> bool,
mut fold: impl FnMut(Acc, T) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
move |acc, item| if predicate(&item) { fold(acc, item) } else { try { acc } }
}
core::iter::adapters::map::Map::<I, F>::new pub(in crate::iter) fn new(iter: I, f: F) -> Map<I, F> {
Map { iter, f }
}
core::iter::adapters::map::map_fold fn map_fold<T, B, Acc>(
mut f: impl FnMut(T) -> B,
mut g: impl FnMut(Acc, B) -> Acc,
) -> impl FnMut(Acc, T) -> Acc {
move |acc, elt| g(acc, f(elt))
}
core::iter::adapters::map::map_try_fold fn map_try_fold<'a, T, B, Acc, R>(
f: &'a mut impl FnMut(T) -> B,
mut g: impl FnMut(Acc, B) -> R + 'a,
) -> impl FnMut(Acc, T) -> R + 'a {
move |acc, elt| g(acc, f(elt))
}
core::iter::adapters::rev::Rev::<T>::new pub(in crate::iter) fn new(iter: T) -> Rev<T> {
Rev { iter }
}
core::iter::adapters::skip::Skip::<I>::new pub(in crate::iter) fn new(iter: I, n: usize) -> Skip<I> {
Skip { iter, n }
}
core::iter::adapters::step_by::StepBy::<I>::new pub(in crate::iter) fn new(iter: I, step: usize) -> StepBy<I> {
assert!(step != 0);
let iter = <I as SpecRangeSetup<I>>::setup(iter, step);
StepBy { iter, step_minus_one: step - 1, first_take: true }
}
core::iter::adapters::step_by::StepBy::<I>::original_step fn original_step(&self) -> NonZero<usize> {
// SAFETY: By type invariant, `step_minus_one` cannot be `MAX`, which
// means the addition cannot overflow and the result cannot be zero.
unsafe { NonZero::new_unchecked(intrinsics::unchecked_add(self.step_minus_one, 1)) }
}
core::iter::adapters::take::Take::<I>::new pub(in crate::iter) fn new(iter: I, n: usize) -> Take<I> {
Take { iter, n }
}
core::iter::adapters::zip::Zip::<A, B>::new pub(in crate::iter) fn new(a: A, b: B) -> Zip<A, B> {
ZipImpl::new(a, b)
}
core::iter::adapters::zip::zip pub fn zip<A, B>(a: A, b: B) -> Zip<A::IntoIter, B::IntoIter>
where
A: IntoIterator,
B: IntoIterator,
{
ZipImpl::new(a.into_iter(), b.into_iter())
}
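`iter::zip(a, b)` is equivalent to `a.into_iter().zip(b)` and stops at the shorter input (illustrative addition):

fn main() {
    let pairs: Vec<_> = core::iter::zip([1, 2, 3], ["a", "b"]).collect();
    assert_eq!(pairs, [(1, "a"), (2, "b")]);
}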
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::Range<A>>::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.spec_advance_back_by(n)
}
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::Range<A>>::next_back fn next_back(&mut self) -> Option<A> {
self.spec_next_back()
}
core::iter::range::<impl core::iter::traits::double_ended::DoubleEndedIterator for core::ops::range::Range<A>>::nth_back fn nth_back(&mut self, n: usize) -> Option<A> {
self.spec_nth_back(n)
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::advance_by fn advance_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
self.spec_advance_by(n)
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::next fn next(&mut self) -> Option<A> {
self.spec_next()
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::nth fn nth(&mut self, n: usize) -> Option<A> {
self.spec_nth(n)
}
core::iter::range::<impl core::iter::traits::iterator::Iterator for core::ops::range::Range<A>>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
if self.start < self.end {
Step::steps_between(&self.start, &self.end)
} else {
(0, Some(0))
}
}
core::iter::sources::from_fn::from_fn pub fn from_fn<T, F>(f: F) -> FromFn<F>
where
F: FnMut() -> Option<T>,
{
FromFn(f)
}
core::iter::traits::collect::Extend::extend_one fn extend_one(&mut self, item: A) {
self.extend(Some(item));
}
core::iter::traits::collect::Extend::extend_one_unchecked unsafe fn extend_one_unchecked(&mut self, item: A)
where
Self: Sized,
{
self.extend_one(item);
}
core::iter::traits::collect::Extend::extend_reserve fn extend_reserve(&mut self, additional: usize) {
let _ = additional;
}
core::iter::traits::double_ended::DoubleEndedIterator::advance_back_by fn advance_back_by(&mut self, n: usize) -> Result<(), NonZero<usize>> {
for i in 0..n {
if self.next_back().is_none() {
// SAFETY: `i` is always less than `n`.
return Err(unsafe { NonZero::new_unchecked(n - i) });
}
}
Ok(())
}
core::iter::traits::double_ended::DoubleEndedIterator::rfind fn rfind<P>(&mut self, predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
#[inline]
fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
move |(), x| {
if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
}
}
self.try_rfold((), check(predicate)).break_value()
}
core::iter::traits::double_ended::DoubleEndedIterator::rfind::check fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
move |(), x| {
if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
}
}
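`rfind` drives `try_rfold`, so the search runs from the back and yields the last match (illustrative addition):

fn main() {
    let v = [1, 2, 3, 2];
    assert_eq!(v.iter().rfind(|&&x| x == 2), Some(&2));
    // `rposition` confirms the element at index 3 is the one found first.
    assert_eq!(v.iter().rposition(|&x| x == 2), Some(3));
}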
core::iter::traits::double_ended::DoubleEndedIterator::rfold fn rfold<B, F>(mut self, init: B, mut f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
let mut accum = init;
while let Some(x) = self.next_back() {
accum = f(accum, x);
}
accum
}
core::iter::traits::double_ended::DoubleEndedIterator::try_rfold fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let mut accum = init;
while let Some(x) = self.next_back() {
accum = f(accum, x)?;
}
try { accum }
}
core::iter::traits::exact_size::ExactSizeIterator::is_empty fn is_empty(&self) -> bool {
self.len() == 0
}
core::iter::traits::iterator::Iterator::all fn all<F>(&mut self, f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
#[inline]
fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
move |(), x| {
if f(x) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) }
}
}
self.try_fold((), check(f)) == ControlFlow::Continue(())
}
core::iter::traits::iterator::Iterator::all::check fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
move |(), x| {
if f(x) { ControlFlow::Continue(()) } else { ControlFlow::Break(()) }
}
}
core::iter::traits::iterator::Iterator::any fn any<F>(&mut self, f: F) -> bool
where
Self: Sized,
F: FnMut(Self::Item) -> bool,
{
#[inline]
fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
move |(), x| {
if f(x) { ControlFlow::Break(()) } else { ControlFlow::Continue(()) }
}
}
self.try_fold((), check(f)) == ControlFlow::Break(())
}
core::iter::traits::iterator::Iterator::any::check fn check<T>(mut f: impl FnMut(T) -> bool) -> impl FnMut((), T) -> ControlFlow<()> {
move |(), x| {
if f(x) { ControlFlow::Break(()) } else { ControlFlow::Continue(()) }
}
}
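Both `all` and `any` short-circuit via `try_fold`: `all` breaks on the first failure, `any` on the first match, leaving the iterator usable afterwards (illustrative addition):

fn main() {
    let mut it = [1, 2, 3, 4].into_iter();
    assert!(it.any(|x| x == 2));
    // `any` consumed elements only up to and including the match.
    assert_eq!(it.next(), Some(3));
}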
core::iter::traits::iterator::Iterator::cloned fn cloned<'a, T>(self) -> Cloned<Self>
where
T: Clone + 'a,
Self: Sized + Iterator<Item = &'a T>,
{
Cloned::new(self)
}
core::iter::traits::iterator::Iterator::enumerate fn enumerate(self) -> Enumerate<Self>
where
Self: Sized,
{
Enumerate::new(self)
}
core::iter::traits::iterator::Iterator::filter fn filter<P>(self, predicate: P) -> Filter<Self, P>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
Filter::new(self, predicate)
}
core::iter::traits::iterator::Iterator::find fn find<P>(&mut self, predicate: P) -> Option<Self::Item>
where
Self: Sized,
P: FnMut(&Self::Item) -> bool,
{
#[inline]
fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
move |(), x| {
if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
}
}
self.try_fold((), check(predicate)).break_value()
}
core::iter::traits::iterator::Iterator::find::check fn check<T>(mut predicate: impl FnMut(&T) -> bool) -> impl FnMut((), T) -> ControlFlow<T> {
move |(), x| {
if predicate(&x) { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
}
}
core::iter::traits::iterator::Iterator::fold fn fold<B, F>(mut self, init: B, mut f: F) -> B
where
Self: Sized,
F: FnMut(B, Self::Item) -> B,
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x);
}
accum
}
core::iter::traits::iterator::Iterator::for_each fn for_each<F>(self, f: F)
where
Self: Sized,
F: FnMut(Self::Item),
{
#[inline]
fn call<T>(mut f: impl FnMut(T)) -> impl FnMut((), T) {
move |(), item| f(item)
}
self.fold((), call(f));
}
core::iter::traits::iterator::Iterator::for_each::call fn call<T>(mut f: impl FnMut(T)) -> impl FnMut((), T) {
move |(), item| f(item)
}
core::iter::traits::iterator::Iterator::map fn map<B, F>(self, f: F) -> Map<Self, F>
where
Self: Sized,
F: FnMut(Self::Item) -> B,
{
Map::new(self, f)
}
core::iter::traits::iterator::Iterator::max_by fn max_by<F>(self, compare: F) -> Option<Self::Item>
where
Self: Sized,
F: FnMut(&Self::Item, &Self::Item) -> Ordering,
{
#[inline]
fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
move |x, y| cmp::max_by(x, y, &mut compare)
}
self.reduce(fold(compare))
}
core::iter::traits::iterator::Iterator::max_by::fold fn fold<T>(mut compare: impl FnMut(&T, &T) -> Ordering) -> impl FnMut(T, T) -> T {
move |x, y| cmp::max_by(x, y, &mut compare)
}
core::iter::traits::iterator::Iterator::nth fn nth(&mut self, n: usize) -> Option<Self::Item> {
self.advance_by(n).ok()?;
self.next()
}
core::iter::traits::iterator::Iterator::position fn position<P>(&mut self, predicate: P) -> Option<usize>
where
Self: Sized,
P: FnMut(Self::Item) -> bool,
{
#[inline]
fn check<'a, T>(
mut predicate: impl FnMut(T) -> bool + 'a,
acc: &'a mut usize,
) -> impl FnMut((), T) -> ControlFlow<usize, ()> + 'a {
#[rustc_inherit_overflow_checks]
move |_, x| {
if predicate(x) {
ControlFlow::Break(*acc)
} else {
*acc += 1;
ControlFlow::Continue(())
}
}
}
let mut acc = 0;
self.try_fold((), check(predicate, &mut acc)).break_value()
}
core::iter::traits::iterator::Iterator::position::check fn check<'a, T>(
mut predicate: impl FnMut(T) -> bool + 'a,
acc: &'a mut usize,
) -> impl FnMut((), T) -> ControlFlow<usize, ()> + 'a {
#[rustc_inherit_overflow_checks]
move |_, x| {
if predicate(x) {
ControlFlow::Break(*acc)
} else {
*acc += 1;
ControlFlow::Continue(())
}
}
}
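`position` threads the running index through `acc` and breaks with it on the first match (illustrative addition):

fn main() {
    let v = [10, 20, 30];
    assert_eq!(v.iter().position(|&x| x == 20), Some(1));
    assert_eq!(v.iter().position(|&x| x == 99), None);
}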
core::iter::traits::iterator::Iterator::reduce fn reduce<F>(mut self, f: F) -> Option<Self::Item>
where
Self: Sized,
F: FnMut(Self::Item, Self::Item) -> Self::Item,
{
let first = self.next()?;
Some(self.fold(first, f))
}
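`reduce` seeds the fold with the first element, which is why an empty iterator yields `None` rather than requiring an initial value (illustrative addition):

fn main() {
    assert_eq!([1, 2, 3].into_iter().reduce(|a, b| a + b), Some(6));
    assert_eq!(core::iter::empty::<i32>().reduce(|a, b| a + b), None);
}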
core::iter::traits::iterator::Iterator::rev fn rev(self) -> Rev<Self>
where
Self: Sized + DoubleEndedIterator,
{
Rev::new(self)
}
core::iter::traits::iterator::Iterator::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
(0, None)
}
core::iter::traits::iterator::Iterator::skip fn skip(self, n: usize) -> Skip<Self>
where
Self: Sized,
{
Skip::new(self, n)
}
core::iter::traits::iterator::Iterator::step_by fn step_by(self, step: usize) -> StepBy<Self>
where
Self: Sized,
{
StepBy::new(self, step)
}
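`step_by(n)` always yields the first element, then every `n`-th one after it (illustrative addition):

fn main() {
    let taken: Vec<_> = (0..7).step_by(2).collect();
    assert_eq!(taken, [0, 2, 4, 6]);
}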
core::iter::traits::iterator::Iterator::sum fn sum<S>(self) -> S
where
Self: Sized,
S: Sum<Self::Item>,
{
Sum::sum(self)
}
core::iter::traits::iterator::Iterator::take fn take(self, n: usize) -> Take<Self>
where
Self: Sized,
{
Take::new(self, n)
}
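`skip` lazily drops a prefix and `take` caps the remaining length, so the two compose into a window (illustrative addition):

fn main() {
    let window: Vec<_> = (0..10).skip(2).take(3).collect();
    assert_eq!(window, [2, 3, 4]);
}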
core::iter::traits::iterator::Iterator::try_fold fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
where
Self: Sized,
F: FnMut(B, Self::Item) -> R,
R: Try<Output = B>,
{
let mut accum = init;
while let Some(x) = self.next() {
accum = f(accum, x)?;
}
try { accum }
}
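`try_fold` threads a fallible accumulator through `?`; the first short-circuit stops the loop (illustrative addition):

fn main() {
    let ok = [1u8, 2, 3].into_iter().try_fold(0u8, u8::checked_add);
    assert_eq!(ok, Some(6));
    // 200 + 100 overflows u8, so the fold stops with None.
    let overflow = [200u8, 100].into_iter().try_fold(0u8, u8::checked_add);
    assert_eq!(overflow, None);
}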
core::iter::traits::iterator::Iterator::zip fn zip<U>(self, other: U) -> Zip<Self, U::IntoIter>
where
Self: Sized,
U: IntoIterator,
{
Zip::new(self, other.into_iter())
}
core::mem::align_of pub const fn align_of<T>() -> usize {
<T as SizedTypeProperties>::ALIGN
}
core::mem::align_of_val pub const fn align_of_val<T: ?Sized>(val: &T) -> usize {
// SAFETY: val is a reference, so it's a valid raw pointer
unsafe { intrinsics::align_of_val(val) }
}
core::mem::drop pub const fn drop<T>(_x: T)
where
T: [const] Destruct,
{
}
core::mem::forget pub const fn forget<T>(t: T) {
let _ = ManuallyDrop::new(t);
}
core::mem::manually_drop::ManuallyDrop::<T>::drop pub const unsafe fn drop(slot: &mut ManuallyDrop<T>)
where
T: [const] Destruct,
{
// SAFETY: we are dropping the value pointed to by a mutable reference
// which is guaranteed to be valid for writes.
// It is up to the caller to make sure that `slot` isn't dropped again.
unsafe { ptr::drop_in_place(&mut slot.value) }
}
core::mem::manually_drop::ManuallyDrop::<T>::into_inner pub const fn into_inner(slot: ManuallyDrop<T>) -> T {
slot.value
}
core::mem::manually_drop::ManuallyDrop::<T>::new pub const fn new(value: T) -> ManuallyDrop<T> {
ManuallyDrop { value }
}
core::mem::manually_drop::ManuallyDrop::<T>::take pub const unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
// SAFETY: we are reading from a reference, which is guaranteed
// to be valid for reads.
unsafe { ptr::read(&slot.value) }
}
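`ManuallyDrop` suppresses the destructor until the value is extracted again; `into_inner` is the safe way out (illustrative addition):

fn main() {
    use core::mem::ManuallyDrop;
    let slot = ManuallyDrop::new(String::from("kept alive"));
    // No drop happens while wrapped; `into_inner` returns ownership,
    // and the String then drops normally at the end of scope.
    let s = ManuallyDrop::into_inner(slot);
    assert_eq!(s, "kept alive");
}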
core::mem::maybe_uninit::MaybeUninit::<T>::array_assume_init pub const unsafe fn array_assume_init<const N: usize>(array: [Self; N]) -> [T; N] {
// SAFETY:
// * The caller guarantees that all elements of the array are initialized
// * `MaybeUninit<T>` and T are guaranteed to have the same layout
// * `MaybeUninit` does not drop, so there are no double-frees
// And thus the conversion is safe
unsafe {
intrinsics::assert_inhabited::<[T; N]>();
intrinsics::transmute_unchecked(array)
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_bytes pub const fn as_bytes(&self) -> &[MaybeUninit<u8>] {
// SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
unsafe {
slice::from_raw_parts(self.as_ptr().cast::<MaybeUninit<u8>>(), super::size_of::<T>())
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_bytes_mut pub const fn as_bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
// SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
unsafe {
slice::from_raw_parts_mut(
self.as_mut_ptr().cast::<MaybeUninit<u8>>(),
super::size_of::<T>(),
)
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_mut_ptr pub const fn as_mut_ptr(&mut self) -> *mut T {
// `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
self as *mut _ as *mut T
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_ptr pub const fn as_ptr(&self) -> *const T {
// `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
self as *const _ as *const T
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init pub const unsafe fn assume_init(self) -> T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
// We do this via a raw ptr read instead of `ManuallyDrop::into_inner` so that there's
// no trace of `ManuallyDrop` in Miri's error messages here.
(&raw const self.value).cast::<T>().read()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_drop pub const unsafe fn assume_init_drop(&mut self)
where
T: [const] Destruct,
{
// SAFETY: the caller must guarantee that `self` is initialized and
// satisfies all invariants of `T`.
// Dropping the value in place is safe if that is the case.
unsafe { ptr::drop_in_place(self.as_mut_ptr()) }
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_mut pub const unsafe fn assume_init_mut(&mut self) -> &mut T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
&mut *self.as_mut_ptr()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_read pub const unsafe fn assume_init_read(&self) -> T {
// SAFETY: the caller must guarantee that `self` is initialized.
// Reading from `self.as_ptr()` is safe since `self` should be initialized.
unsafe {
intrinsics::assert_inhabited::<T>();
self.as_ptr().read()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_ref pub const unsafe fn assume_init_ref(&self) -> &T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
&*self.as_ptr()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::new pub const fn new(val: T) -> MaybeUninit<T> {
MaybeUninit { value: ManuallyDrop::new(val) }
}
core::mem::maybe_uninit::MaybeUninit::<T>::slice_as_mut_ptr pub const fn slice_as_mut_ptr(this: &mut [MaybeUninit<T>]) -> *mut T {
this.as_mut_ptr() as *mut T
}
core::mem::maybe_uninit::MaybeUninit::<T>::slice_as_ptr pub const fn slice_as_ptr(this: &[MaybeUninit<T>]) -> *const T {
this.as_ptr() as *const T
}
core::mem::maybe_uninit::MaybeUninit::<T>::uninit pub const fn uninit() -> MaybeUninit<T> {
MaybeUninit { uninit: () }
}
core::mem::maybe_uninit::MaybeUninit::<T>::write pub const fn write(&mut self, val: T) -> &mut T {
*self = MaybeUninit::new(val);
// SAFETY: We just initialized this value.
unsafe { self.assume_init_mut() }
}
core::mem::maybe_uninit::MaybeUninit::<T>::zeroed pub const fn zeroed() -> MaybeUninit<T> {
let mut u = MaybeUninit::<T>::uninit();
// SAFETY: `u.as_mut_ptr()` points to allocated memory.
unsafe { u.as_mut_ptr().write_bytes(0u8, 1) };
u
}
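The usual safe pattern pairs `write` with `assume_init`, since `write` both initializes the slot and hands back a usable reference (illustrative addition):

fn main() {
    use core::mem::MaybeUninit;
    let mut slot = MaybeUninit::<u32>::uninit();
    *slot.write(5) += 1;
    // SAFETY: `write` just initialized the slot.
    let value = unsafe { slot.assume_init() };
    assert_eq!(value, 6);
}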
core::mem::maybe_uninit::MaybeUninit::<[T; N]>::transpose pub const fn transpose(self) -> [MaybeUninit<T>; N] {
// SAFETY: T and MaybeUninit<T> have the same layout
unsafe { intrinsics::transmute_unchecked(self) }
}
core::mem::needs_drop pub const fn needs_drop<T: ?Sized>() -> bool {
const { intrinsics::needs_drop::<T>() }
}
core::mem::replace pub const fn replace<T>(dest: &mut T, src: T) -> T {
// It may be tempting to use `swap` to avoid `unsafe` here. Don't!
// The compiler optimizes the implementation below to two `memcpy`s
// while `swap` would require at least three. See PR#83022 for details.
// SAFETY: We read from `dest` but directly write `src` into it afterwards,
// such that the old value is not duplicated. Nothing is dropped and
// nothing here can panic.
unsafe {
// Ideally we wouldn't use the intrinsics here, but going through the
// `ptr` methods introduces two unnecessary UbChecks, so until we can
// remove those for pointers that come from references, this uses the
// intrinsics instead so this stays very cheap in MIR (and debug).
let result = crate::intrinsics::read_via_copy(dest);
crate::intrinsics::write_via_move(dest, src);
result
}
}
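`replace` moves the new value in and the old value out, which is exactly how `take` below is built (`replace(dest, T::default())`). Illustrative addition:

fn main() {
    let mut v = vec![1, 2];
    let old = core::mem::replace(&mut v, vec![3]);
    assert_eq!(old, [1, 2]);
    assert_eq!(v, [3]);
}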
core::mem::size_of pub const fn size_of<T>() -> usize {
<T as SizedTypeProperties>::SIZE
}
core::mem::size_of_val pub const fn size_of_val<T: ?Sized>(val: &T) -> usize {
// SAFETY: `val` is a reference, so it's a valid raw pointer
unsafe { intrinsics::size_of_val(val) }
}
core::mem::size_of_val_raw pub const unsafe fn size_of_val_raw<T: ?Sized>(val: *const T) -> usize {
// SAFETY: the caller must provide a valid raw pointer
unsafe { intrinsics::size_of_val(val) }
}
core::mem::swap pub const fn swap<T>(x: &mut T, y: &mut T) {
// SAFETY: `&mut` guarantees these are typed readable and writable
// as well as non-overlapping.
unsafe { intrinsics::typed_swap_nonoverlapping(x, y) }
}
core::mem::take pub const fn take<T: [const] Default>(dest: &mut T) -> T {
replace(dest, T::default())
}
core::mem::transmute_copy pub const unsafe fn transmute_copy<Src, Dst>(src: &Src) -> Dst {
assert!(
size_of::<Src>() >= size_of::<Dst>(),
"cannot transmute_copy if Dst is larger than Src"
);
// If Dst has a higher alignment requirement, src might not be suitably aligned.
if align_of::<Dst>() > align_of::<Src>() {
// SAFETY: `src` is a reference which is guaranteed to be valid for reads.
// The caller must guarantee that the actual transmutation is safe.
unsafe { ptr::read_unaligned(src as *const Src as *const Dst) }
} else {
// SAFETY: `src` is a reference which is guaranteed to be valid for reads.
// We just checked that `src as *const Dst` was properly aligned.
// The caller must guarantee that the actual transmutation is safe.
unsafe { ptr::read(src as *const Src as *const Dst) }
}
}
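A minimal sketch of `transmute_copy` (illustrative addition; the caller must guarantee the reinterpretation is valid):

fn main() {
    let bytes = [0x01u8, 0x00, 0x00, 0x00];
    // SAFETY: `u32` is not larger than `[u8; 4]` and every bit pattern is a
    // valid u32. `Dst` is more aligned than `Src`, so the unaligned read
    // path above is taken.
    let n: u32 = unsafe { core::mem::transmute_copy(&bytes) };
    assert_eq!(n, u32::from_ne_bytes(bytes));
}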
core::mem::zeroed pub const unsafe fn zeroed<T>() -> T {
// SAFETY: the caller must guarantee that an all-zero value is valid for `T`.
unsafe {
intrinsics::assert_zero_valid::<T>();
MaybeUninit::zeroed().assume_init()
}
}
core::num::<impl i128>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i128>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i128>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i128>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i128>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i128>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i128>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i128>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i128>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i128>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i128>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i128>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i128>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i128>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i128>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i128>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i128>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i128>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i128>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
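The checked/overflowing/wrapping families above agree on the wrapped result and differ only in how overflow is reported; `wrapping_abs` and `unsigned_abs` make the `MIN` edge case explicit (illustrative addition):

fn main() {
    assert_eq!(i128::MAX.checked_add(1), None);
    assert_eq!(i128::MAX.overflowing_add(1), (i128::MIN, true));
    assert_eq!(i128::MAX.wrapping_add(1), i128::MIN);
    // `-MIN` overflows, so `wrapping_abs` maps MIN to itself...
    assert_eq!(i128::MIN.wrapping_abs(), i128::MIN);
    // ...while `unsigned_abs` is total: MIN maps to 2^127.
    assert_eq!(i128::MIN.unsigned_abs(), 1u128 << 127);
}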
core::num::<impl i16>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i16>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i16>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i16>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i16>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i16>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i16>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i16>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i16>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i16>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i16>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i16>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i16>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i16>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i16>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i16>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i16>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i16>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i16>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i32>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i32>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i32>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i32>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i32>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i32>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i32>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i32>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i32>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i32>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i32>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i32>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i32>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i32>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i32>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i32>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i32>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i32>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i32>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i64>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i64>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i64>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i64>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i64>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i64>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i64>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i64>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i64>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i64>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i64>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i64>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i64>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i64>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i64>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i64>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i64>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i64>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i64>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i8>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl i8>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i8>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i8>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl i8>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i8>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i8>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i8>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i8>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i8>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i8>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl i8>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i8>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i8>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i8>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i8>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i8>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl i8>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i8>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl isize>::abs pub const fn abs(self) -> Self {
// Note that the #[rustc_inherit_overflow_checks] and #[inline]
// above mean that the overflow semantics of the subtraction
// depend on the crate we're being called from.
if self.is_negative() {
-self
} else {
self
}
}
core::num::<impl isize>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_add(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl isize>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl isize>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_sub(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl isize>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl isize>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl isize>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl isize>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl isize>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl isize>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl isize>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl isize>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl isize>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl isize>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl isize>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl isize>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl isize>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl isize>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl isize>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u128>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
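The size_of branch above is macro-generated: for u128 it is statically false and only the compare-and-subtract path survives, while the u8 instantiation takes the signed-widening path that enables the psadbw trick. Usage is the same either way; a minimal sketch:

fn main() {
    assert_eq!(100u128.abs_diff(80), 20);
    assert_eq!(80u128.abs_diff(100), 20); // operand order doesn't matter
    assert_eq!(10u8.abs_diff(200), 190);  // u8 takes the widening branch
}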
core::num::<impl u128>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
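A brief sketch of the behavior the comment describes; in particular `.checked_add(1)` is the pattern that benefits from the compare-against-MAX shape:

fn main() {
    assert_eq!(5u128.checked_add(1), Some(6));
    assert_eq!(u128::MAX.checked_add(1), None); // reduces to a check against MAX
}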
core::num::<impl u128>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u128>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
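The manual `self < rhs` test doubles as the overflow check, so the success path can use the unchecked subtraction. The observable behavior, as a sketch:

fn main() {
    assert_eq!(5u128.checked_sub(3), Some(2));
    assert_eq!(3u128.checked_sub(5), None); // self < rhs: underflow reported
}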
core::num::<impl u128>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u128>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
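div_ceil rounds the quotient up exactly when the remainder is nonzero; `d + 1` cannot overflow there, since a nonzero remainder implies rhs >= 2 and hence d <= Self::MAX / 2. A quick sketch:

fn main() {
    assert_eq!(7u128.div_ceil(4), 2); // 7 = 1*4 + 3, so round up
    assert_eq!(8u128.div_ceil(4), 2); // exact division: no adjustment
    assert_eq!(0u128.div_ceil(4), 0);
}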
core::num::<impl u128>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`; the condition
// above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`,
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations, the latency of the
// multiplication can still be hidden by issuing it before its result is
// needed. On a modern out-of-order CPU the multiplication is slower than
// the surrounding instructions, so starting it early lets the CPU spend
// the intervening cycles on other computation and pick up the product later.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
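from_str_radix (just below) is a thin wrapper over this parser, which makes the error cases easy to exercise. An illustrative sketch, not from the listing:

fn main() {
    assert_eq!(u128::from_str_radix("ff", 16), Ok(255));
    assert_eq!(u128::from_str_radix("+42", 10), Ok(42));
    assert!(u128::from_str_radix("-1", 10).is_err()); // sign rejected: unsigned type
    assert!(u128::from_str_radix("", 10).is_err());   // IntErrorKind::Empty
}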
core::num::<impl u128>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u128>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u128>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u128>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u128>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u128>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u128>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u128>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
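Two small predicates with easy-to-miss edge cases: rhs == 0 divides only 0, and 0 is not a power of two since it has no set bits. A sketch (is_multiple_of assumes a toolchain where that method is stable):

fn main() {
    assert!(12u128.is_multiple_of(3));
    assert!(0u128.is_multiple_of(0));   // the only value 0 divides
    assert!(!5u128.is_multiple_of(0));
    assert!(64u128.is_power_of_two());  // exactly one set bit
    assert!(!0u128.is_power_of_two());
    assert!(!6u128.is_power_of_two());  // two set bits
}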
core::num::<impl u128>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u128>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
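The XOR works because casting a negative rhs to unsigned adds 2^128, so the unsigned addition wraps exactly when the true sum is still in range; a wrap and a negative rhs therefore cancel out. Concretely, as a sketch:

fn main() {
    assert_eq!(10u128.overflowing_add_signed(-3), (7, false)); // wrapped, rhs < 0: fine
    assert_eq!(2u128.overflowing_add_signed(-3), (u128::MAX, true)); // true sum is -1
    assert_eq!(u128::MAX.overflowing_add_signed(1), (0, true));
}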
core::num::<impl u128>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u128>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u128>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u128>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
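Rotation moves bits circularly, with the count taken modulo the bit width. A minimal sketch:

fn main() {
    let x: u128 = 1;
    assert_eq!(x.rotate_right(1), 1u128 << 127); // the low bit wraps to the top
    assert_eq!(x.rotate_left(128), x);           // count is reduced mod 128
}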
core::num::<impl u128>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u128>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u128>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
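The saturating family clamps at the type's bounds instead of wrapping or failing. A minimal sketch:

fn main() {
    assert_eq!(u128::MAX.saturating_add(1), u128::MAX);
    assert_eq!(0u128.saturating_sub(1), 0);
    assert_eq!(u128::MAX.saturating_mul(2), u128::MAX); // via checked_mul, per above
}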
core::num::<impl u128>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u128>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u128>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u128>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u128>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u128>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
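The to_*/from_* pairs above compose swap_bytes with a native-endian transmute, so conversions round-trip losslessly on any host. A minimal sketch:

fn main() {
    let n: u128 = 0x0102_0304;
    let be = n.to_be_bytes();
    assert_eq!(&be[12..], &[1, 2, 3, 4]);       // most-significant bytes first
    assert_eq!(u128::from_be_bytes(be), n);     // lossless round-trip
    assert_eq!(n.swap_bytes().swap_bytes(), n); // double swap is the identity
}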
core::num::<impl u128>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
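count_ones and trailing_zeros lower directly to the ctpop and cttz intrinsics; note that the all-zero input yields the full bit width. A minimal sketch:

fn main() {
    assert_eq!(0b1011_0000u128.count_ones(), 3);
    assert_eq!(0b1011_0000u128.trailing_zeros(), 4);
    assert_eq!(0u128.trailing_zeros(), 128); // no set bit anywhere
}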
core::num::<impl u128>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u128>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
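The unchecked pair shifts the overflow check to the caller; the assert_unsafe_precondition above only fires in builds with UB checks compiled in. A caller-side sketch of the intended contract (assuming a toolchain where the unchecked_* methods are stable):

fn main() {
    let (a, b) = (10u128, 7u128);
    // SAFETY: a >= b was just verified, so the subtraction cannot overflow.
    let d = if a >= b { unsafe { a.unchecked_sub(b) } } else { 0 };
    assert_eq!(d, 3);
}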
core::num::<impl u128>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u128>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u16>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u16>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u16>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u16>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u16>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u16>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u16>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`; the condition
// above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`,
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations, the latency of the
// multiplication can still be hidden by issuing it before its result is
// needed. On a modern out-of-order CPU the multiplication is slower than
// the surrounding instructions, so starting it early lets the CPU spend
// the intervening cycles on other computation and pick up the product later.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
core::num::<impl u16>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u16>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u16>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u16>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u16>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u16>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u16>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u16>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u16>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u16>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl u16>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u16>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u16>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u16>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl u16>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u16>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u16>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl u16>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u16>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u16>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u16>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u16>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u16>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u16>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl u16>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u16>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u16>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u16>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u32>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u32>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u32>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u32>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u32>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u32>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u32>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`; the condition
// above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`,
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations, the latency of the
// multiplication can still be hidden by issuing it before its result is
// needed. On a modern out-of-order CPU the multiplication is slower than
// the surrounding instructions, so starting it early lets the CPU spend
// the intervening cycles on other computation and pick up the product later.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
core::num::<impl u32>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u32>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u32>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u32>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u32>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u32>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u32>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u32>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u32>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u32>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl u32>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u32>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u32>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u32>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl u32>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u32>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u32>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl u32>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u32>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u32>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u32>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u32>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u32>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u32>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl u32>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u32>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u32>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u32>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u64>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u64>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u64>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u64>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u64>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u64>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u64>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`; the condition
// above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`,
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations, the latency of the
// multiplication can still be hidden by issuing it before its result is
// needed. On a modern out-of-order CPU the multiplication is slower than
// the surrounding instructions, so starting it early lets the CPU spend
// the intervening cycles on other computation and pick up the product later.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
core::num::<impl u64>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u64>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u64>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u64>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u64>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u64>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u64>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u64>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u64>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u64>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl u64>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u64>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u64>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u64>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl u64>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u64>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u64>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl u64>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u64>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u64>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u64>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u64>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u64>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u64>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl u64>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u64>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u64>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u64>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u8>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u8>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u8>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u8>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u8>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u8>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u8>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`; the condition
// above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`,
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use these optimisations, the latency of the
// multiplication can still be hidden by issuing it before its result is
// needed. On a modern out-of-order CPU the multiplication is slower than
// the surrounding instructions, so starting it early lets the CPU spend
// the intervening cycles on other computation and pick up the product later.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
core::num::<impl u8>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl u8>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl u8>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u8>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u8>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u8>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
core::num::<impl u8>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u8>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u8>::is_utf8_char_boundary pub(crate) const fn is_utf8_char_boundary(self) -> bool {
// This is bit magic equivalent to: b < 128 || b >= 192
(self as i8) >= -0x40
}
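The signed comparison keys off the top two bits: bytes 0x80..=0xBF (UTF-8 continuation bytes) are exactly the i8 range -128..=-65. Since the method is pub(crate), this sketch re-derives the expression and brute-forces the equivalence:

fn main() {
    for b in 0u8..=255 {
        let magic = (b as i8) >= -0x40;   // -0x40 is 0xC0 reinterpreted as i8
        let plain = b < 128 || b >= 192;  // ASCII byte or a UTF-8 leading byte
        assert_eq!(magic, plain, "mismatch at byte {b}");
    }
}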
core::num::<impl u8>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u8>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
core::num::<impl u8>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u8>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u8>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl u8>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl u8>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl u8>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl u8>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl u8>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl u8>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl u8>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl u8>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u8>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u8>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u8>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl u8>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u8>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u8>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl u8>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl usize>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl usize>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl usize>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl usize>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl usize>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl usize>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl usize>::from_ascii_radix pub const fn from_ascii_radix(src: &[u8], radix: u32) -> Result<$int_ty, ParseIntError> {
use self::IntErrorKind::*;
use self::ParseIntError as PIE;
if 2 > radix || radix > 36 {
from_ascii_radix_panic(radix);
}
if src.is_empty() {
return Err(PIE { kind: Empty });
}
#[allow(unused_comparisons)]
let is_signed_ty = 0 > <$int_ty>::MIN;
let (is_positive, mut digits) = match src {
[b'+' | b'-'] => {
return Err(PIE { kind: InvalidDigit });
}
[b'+', rest @ ..] => (true, rest),
[b'-', rest @ ..] if is_signed_ty => (false, rest),
_ => (true, src),
};
let mut result = 0;
macro_rules! unwrap_or_PIE {
($option:expr, $kind:ident) => {
match $option {
Some(value) => value,
None => return Err(PIE { kind: $kind }),
}
};
}
if can_not_overflow::<$int_ty>(radix, is_signed_ty, digits) {
// If the len of the str is short compared to the range of the type
// we are parsing into, then we can be certain that an overflow will not occur.
// The exact bound is `radix.pow(digits.len()) - 1 <= T::MAX`; the condition
// above is a faster (conservative) approximation of it.
//
// Consider radix 16 as it has the highest information density per digit and will thus overflow the earliest:
// `u8::MAX` is `ff` - any str of len 2 is guaranteed to not overflow.
// `i8::MAX` is `7f` - only a str of len 1 is guaranteed to not overflow.
macro_rules! run_unchecked_loop {
($unchecked_additive_op:tt) => {{
while let [c, rest @ ..] = digits {
result = result * (radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit);
result = result $unchecked_additive_op (x as $int_ty);
digits = rest;
}
}};
}
if is_positive {
run_unchecked_loop!(+)
} else {
run_unchecked_loop!(-)
};
} else {
macro_rules! run_checked_loop {
($checked_additive_op:ident, $overflow_err:ident) => {{
while let [c, rest @ ..] = digits {
// When `radix` is passed in as a literal, rather than doing a slow `imul`
// the compiler can use shifts if `radix` can be expressed as a
// sum of powers of 2 (x*10 can be written as x*8 + x*2).
// When the compiler can't use those optimisations, the latency of the
// multiplication can still be hidden by issuing it before its result is
// needed: on a modern out-of-order CPU the multiply is slower than the
// surrounding instructions, so starting it first lets the CPU spend the
// intervening cycles on the other work and pick up the product once it
// is ready.
let mul = result.checked_mul(radix as $int_ty);
let x = unwrap_or_PIE!((*c as char).to_digit(radix), InvalidDigit) as $int_ty;
result = unwrap_or_PIE!(mul, $overflow_err);
result = unwrap_or_PIE!(<$int_ty>::$checked_additive_op(result, x), $overflow_err);
digits = rest;
}
}};
}
if is_positive {
run_checked_loop!(checked_add, PosOverflow)
} else {
run_checked_loop!(checked_sub, NegOverflow)
};
}
Ok(result)
}
core::num::<impl usize>::from_be pub const fn from_be(x: Self) -> Self {
#[cfg(target_endian = "big")]
{
x
}
#[cfg(not(target_endian = "big"))]
{
x.swap_bytes()
}
}
core::num::<impl usize>::from_be_bytes pub const fn from_be_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_be(Self::from_ne_bytes(bytes))
}
core::num::<impl usize>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl usize>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl usize>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl usize>::from_str_radix pub const fn from_str_radix(src: &str, radix: u32) -> Result<$int_ty, ParseIntError> {
<$int_ty>::from_ascii_radix(src.as_bytes(), radix)
}
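Editor's illustration (sketch of the parsing rules implemented above; helper name hypothetical):
fn _from_str_radix_example() {
    assert_eq!(usize::from_str_radix("2a", 16), Ok(42));
    assert_eq!(usize::from_str_radix("+101", 2), Ok(5));
    assert!(usize::from_str_radix("", 10).is_err());   // Empty input
    assert!(usize::from_str_radix("-1", 10).is_err()); // '-' is rejected for unsigned types
}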
core::num::<impl usize>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl usize>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
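Editor's illustration (sketch; assumes a toolchain where `is_multiple_of` is stable, and the helper name is hypothetical):
fn _divisibility_examples() {
    assert!(6_usize.is_multiple_of(3));
    assert!(0_usize.is_multiple_of(0));  // only 0 is a multiple of 0
    assert!(!5_usize.is_multiple_of(0));
    assert!(8_usize.is_power_of_two());  // exactly one bit set
    assert!(!0_usize.is_power_of_two()); // zero has no set bits, so count_ones() != 1
}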
core::num::<impl usize>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl usize>::overflowing_add_signed pub const fn overflowing_add_signed(self, rhs: $SignedT) -> (Self, bool) {
let (res, overflowed) = self.overflowing_add(rhs as Self);
(res, overflowed ^ (rhs < 0))
}
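Editor's illustration of the XOR trick above (sketch; helper name hypothetical): a negative `rhs` becomes a huge unsigned value, so the wrapping add "overflows" exactly when the signed addition does not, and XOR-ing with `rhs < 0` flips the flag back.
fn _overflowing_add_signed_example() {
    assert_eq!(5_usize.overflowing_add_signed(-3), (2, false));
    // Going below zero genuinely overflows an unsigned type.
    assert_eq!(0_usize.overflowing_add_signed(-1), (usize::MAX, true));
}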
core::num::<impl usize>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl usize>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl usize>::repeat_u8 pub(crate) const fn repeat_u8(x: u8) -> usize {
usize::from_ne_bytes([x; size_of::<usize>()])
}
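Editor's illustration (sketch; `repeat_u8` itself is crate-private, so this uses the public equivalent on `u32` for a fixed width):
fn _byte_broadcast_example() {
    // All bytes are equal, so the result is independent of endianness.
    assert_eq!(u32::from_ne_bytes([0xAB; 4]), 0xABAB_ABAB);
}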
core::num::<impl usize>::rotate_left pub const fn rotate_left(self, n: u32) -> Self {
return intrinsics::rotate_left(self, n);
}
core::num::<impl usize>::rotate_right pub const fn rotate_right(self, n: u32) -> Self {
return intrinsics::rotate_right(self, n);
}
core::num::<impl usize>::saturating_add pub const fn saturating_add(self, rhs: Self) -> Self {
intrinsics::saturating_add(self, rhs)
}
core::num::<impl usize>::saturating_mul pub const fn saturating_mul(self, rhs: Self) -> Self {
match self.checked_mul(rhs) {
Some(x) => x,
None => Self::MAX,
}
}
core::num::<impl usize>::saturating_sub pub const fn saturating_sub(self, rhs: Self) -> Self {
intrinsics::saturating_sub(self, rhs)
}
core::num::<impl usize>::swap_bytes pub const fn swap_bytes(self) -> Self {
intrinsics::bswap(self as $ActualT) as Self
}
core::num::<impl usize>::to_be pub const fn to_be(self) -> Self { // or not to be?
#[cfg(target_endian = "big")]
{
self
}
#[cfg(not(target_endian = "big"))]
{
self.swap_bytes()
}
}
core::num::<impl usize>::to_be_bytes pub const fn to_be_bytes(self) -> [u8; size_of::<Self>()] {
self.to_be().to_ne_bytes()
}
core::num::<impl usize>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl usize>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl usize>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
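Editor's illustration of the endianness round-trips above (sketch on `u32` for a fixed width; helper name hypothetical):
fn _endian_roundtrip() {
    let n: u32 = 0x1234_5678;
    assert_eq!(n.to_be_bytes(), [0x12, 0x34, 0x56, 0x78]); // most significant byte first
    assert_eq!(n.to_le_bytes(), [0x78, 0x56, 0x34, 0x12]); // least significant byte first
    assert_eq!(u32::from_be_bytes(n.to_be_bytes()), n);
}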
core::num::<impl usize>::trailing_zeros pub const fn trailing_zeros(self) -> u32 {
return intrinsics::cttz(self);
}
core::num::<impl usize>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl usize>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl usize>::wrapping_add pub const fn wrapping_add(self, rhs: Self) -> Self {
intrinsics::wrapping_add(self, rhs)
}
core::num::<impl usize>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::can_not_overflow pub const fn can_not_overflow<T>(radix: u32, is_signed_ty: bool, digits: &[u8]) -> bool {
radix <= 16 && digits.len() <= size_of::<T>() * 2 - is_signed_ty as usize
}
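Editor's illustration of the bound (sketch; helper name hypothetical): for unsigned `u8` the check allows `size_of::<u8>() * 2 == 2` digits at radix <= 16.
fn _can_not_overflow_bound() {
    assert_eq!(u8::from_str_radix("ff", 16), Ok(255)); // 2 hex digits: at most u8::MAX
    assert!(u8::from_str_radix("100", 16).is_err());   // 3 digits may overflow: checked path
}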
core::num::from_ascii_radix_panic const fn from_ascii_radix_panic(radix: u32) -> ! {
const_panic!(
"from_ascii_radix: radix must lie in the range `[2, 36]`",
"from_ascii_radix: radix must lie in the range `[2, 36]` - found {radix}",
radix: u32 = radix,
)
}
core::num::from_ascii_radix_panic::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
#[noinline]
if const #[track_caller] #[inline] { // Inline this, to prevent codegen
$crate::panic!($const_msg)
} else #[track_caller] { // Do not inline this, it makes perf worse
$crate::panic!($runtime_msg)
}
)
}
core::num::from_ascii_radix_panic::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::num::niche_types::Nanoseconds::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::Nanoseconds::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroCharInner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroCharInner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroI128Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI128Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroI16Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI16Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroI32Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI32Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroI64Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI64Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroI8Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroI8Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroIsizeInner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroIsizeInner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroU128Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU128Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroU16Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU16Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroU32Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU32Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroU64Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU64Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroU8Inner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroU8Inner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::NonZeroUsizeInner::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::NonZeroUsizeInner::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::niche_types::UsizeNoHighBit::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::UsizeNoHighBit::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::num::nonzero::<impl core::ops::arith::Div<core::num::nonzero::NonZero<usize>> for usize>::div fn div(self, other: NonZero<$Int>) -> $Int {
// SAFETY: Division by zero is checked because `other` is non-zero,
// and MIN/-1 is checked because `self` is an unsigned int.
unsafe { intrinsics::unchecked_div(self, other.get()) }
}
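Editor's illustration (sketch; helper name hypothetical): because the divisor is statically non-zero, this division can never panic.
fn _div_nonzero_example() {
    use core::num::NonZero;
    let d = NonZero::new(4_usize).unwrap();
    let n: usize = 13;
    assert_eq!(n / d, 3);
}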
core::num::nonzero::NonZero::<T>::get pub const fn get(self) -> T {
// Rustc can set range metadata only if it loads `self` from
// memory somewhere. If the value of `self` came from a by-value argument
// of some not-inlined function, LLVM doesn't have range metadata
// to understand that the value cannot be zero.
//
// Using the transmute `assume`s the range at runtime.
//
// Even once LLVM supports `!range` metadata for function arguments
// (see <https://github.com/llvm/llvm-project/issues/76628>), this can't
// be `.0` because MCP#807 bans field-projecting into `scalar_valid_range`
// types, and it arguably wouldn't want to be anyway because if this is
// MIR-inlined, there's no opportunity to put that argument metadata anywhere.
//
// The good answer here will eventually be pattern types, which will hopefully
// allow it to go back to `.0`, maybe with a cast of some sort.
//
// SAFETY: `ZeroablePrimitive` guarantees that the size and bit validity
// of `.0` is such that this transmute is sound.
unsafe { intrinsics::transmute_unchecked(self) }
}
core::num::nonzero::NonZero::<T>::new pub const fn new(n: T) -> Option<Self> {
// SAFETY: Memory layout optimization guarantees that `Option<NonZero<T>>` has
// the same layout and size as `T`, with `0` representing `None`.
unsafe { intrinsics::transmute_unchecked(n) }
}
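Editor's illustration (sketch; helper name hypothetical): the `0`-as-`None` layout described in the comment is observable through `size_of`.
fn _nonzero_niche_example() {
    use core::num::NonZero;
    assert!(NonZero::<usize>::new(0).is_none());
    assert_eq!(NonZero::new(8_usize).unwrap().get(), 8);
    // The zero niche keeps `Option<NonZero<usize>>` pointer-sized:
    assert_eq!(core::mem::size_of::<Option<NonZero<usize>>>(), core::mem::size_of::<usize>());
}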
core::ops::control_flow::ControlFlow::<B, C>::break_ok pub const fn break_ok(self) -> Result<B, C> {
match self {
ControlFlow::Continue(c) => Err(c),
ControlFlow::Break(b) => Ok(b),
}
}
core::ops::control_flow::ControlFlow::<B, C>::break_value pub const fn break_value(self) -> Option<B>
where
Self: [const] Destruct,
{
match self {
ControlFlow::Continue(..) => None,
ControlFlow::Break(x) => Some(x),
}
}
core::ops::control_flow::ControlFlow::<B, C>::continue_ok pub const fn continue_ok(self) -> Result<C, B> {
match self {
ControlFlow::Continue(c) => Ok(c),
ControlFlow::Break(b) => Err(b),
}
}
core::ops::control_flow::ControlFlow::<B, C>::continue_value pub const fn continue_value(self) -> Option<C>
where
Self: [const] Destruct,
{
match self {
ControlFlow::Continue(x) => Some(x),
ControlFlow::Break(..) => None,
}
}
core::ops::control_flow::ControlFlow::<B, C>::is_break pub const fn is_break(&self) -> bool {
matches!(*self, ControlFlow::Break(_))
}
core::ops::control_flow::ControlFlow::<B, C>::is_continue pub const fn is_continue(&self) -> bool {
matches!(*self, ControlFlow::Continue(_))
}
core::ops::control_flow::ControlFlow::<B, C>::map_break pub const fn map_break<T, F>(self, f: F) -> ControlFlow<T, C>
where
F: [const] FnOnce(B) -> T + [const] Destruct,
{
match self {
ControlFlow::Continue(x) => ControlFlow::Continue(x),
ControlFlow::Break(x) => ControlFlow::Break(f(x)),
}
}
core::ops::control_flow::ControlFlow::<B, C>::map_continue pub const fn map_continue<T, F>(self, f: F) -> ControlFlow<B, T>
where
F: [const] FnOnce(C) -> T + [const] Destruct,
{
match self {
ControlFlow::Continue(x) => ControlFlow::Continue(f(x)),
ControlFlow::Break(x) => ControlFlow::Break(x),
}
}
core::ops::control_flow::ControlFlow::<R, <R as core::ops::try_trait::Try>::Output>::from_try pub(crate) fn from_try(r: R) -> Self {
match R::branch(r) {
ControlFlow::Continue(v) => ControlFlow::Continue(v),
ControlFlow::Break(v) => ControlFlow::Break(R::from_residual(v)),
}
}
core::ops::control_flow::ControlFlow::<R, <R as core::ops::try_trait::Try>::Output>::into_try pub(crate) fn into_try(self) -> R {
match self {
ControlFlow::Continue(v) => R::from_output(v),
ControlFlow::Break(v) => v,
}
}
core::ops::control_flow::ControlFlow::<T, T>::into_value pub const fn into_value(self) -> T {
match self {
ControlFlow::Continue(x) | ControlFlow::Break(x) => x,
}
}
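Editor's illustration (sketch assuming a toolchain where `map_break`/`break_value` are stable; helper name hypothetical):
fn _control_flow_example() {
    use core::ops::ControlFlow;
    let flow: ControlFlow<i32, &str> = ControlFlow::Break(3);
    assert!(flow.is_break());
    assert_eq!(flow.map_break(|b| b * 2).break_value(), Some(6));
}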
core::ops::function::impls::<impl core::ops::function::Fn<A> for &F>::call extern "rust-call" fn call(&self, args: A) -> F::Output {
(**self).call(args)
}
core::ops::function::impls::<impl core::ops::function::FnMut<A> for &F>::call_mut extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
(**self).call(args)
}
core::ops::function::impls::<impl core::ops::function::FnMut<A> for &mut F>::call_mut extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
(*self).call_mut(args)
}
core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once extern "rust-call" fn call_once(self, args: A) -> F::Output {
(*self).call(args)
}
core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &mut F>::call_once extern "rust-call" fn call_once(self, args: A) -> F::Output {
(*self).call_mut(args)
}
core::ops::index_range::IndexRange::assume_range const fn assume_range(&self) {
// SAFETY: This is the type invariant
unsafe { crate::hint::assert_unchecked(self.start <= self.end) }
}
core::ops::index_range::IndexRange::end pub(crate) const fn end(&self) -> usize {
self.end
}
core::ops::index_range::IndexRange::len pub(crate) const fn len(&self) -> usize {
// SAFETY: By invariant, this cannot wrap
// Using the intrinsic because a UB check here impedes LLVM optimization. (#131563)
unsafe { crate::intrinsics::unchecked_sub(self.end, self.start) }
}
core::ops::index_range::IndexRange::new_unchecked pub(crate) const unsafe fn new_unchecked(start: usize, end: usize) -> Self {
ub_checks::assert_unsafe_precondition!(
check_library_ub,
"IndexRange::new_unchecked requires `start <= end`",
(start: usize = start, end: usize = end) => start <= end,
);
IndexRange { start, end }
}
core::ops::index_range::IndexRange::next_unchecked const unsafe fn next_unchecked(&mut self) -> usize {
debug_assert!(self.start < self.end);
let value = self.start;
// SAFETY: The range isn't empty, so this cannot overflow
self.start = unsafe { value.unchecked_add(1) };
value
}
core::ops::index_range::IndexRange::start pub(crate) const fn start(&self) -> usize {
self.start
}
core::ops::index_range::IndexRange::take_prefix pub(crate) fn take_prefix(&mut self, n: usize) -> Self {
let mid = if n <= self.len() {
// SAFETY: We just checked that this will be between start and end,
// and thus the addition cannot overflow.
// Using the intrinsic avoids a superfluous UB check.
unsafe { crate::intrinsics::unchecked_add(self.start, n) }
} else {
self.end
};
let prefix = Self { start: self.start, end: mid };
self.start = mid;
prefix
}
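Editor's sketch (a hypothetical standalone analogue over `Range<usize>`, for illustration only; `IndexRange` itself is crate-private):
fn _take_prefix_analogue(r: &mut core::ops::Range<usize>, n: usize) -> core::ops::Range<usize> {
    // Clamp the split point to the available length, then keep the suffix.
    let mid = if n <= r.len() { r.start + n } else { r.end };
    let prefix = r.start..mid;
    r.start = mid;
    prefix
}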
core::ops::index_range::IndexRange::zero_to pub(crate) const fn zero_to(end: usize) -> Self {
IndexRange { start: 0, end }
}
core::ops::range::Bound::<&T>::cloned pub const fn cloned(self) -> Bound<T>
where
T: [const] Clone,
{
match self {
Bound::Unbounded => Bound::Unbounded,
Bound::Included(x) => Bound::Included(x.clone()),
Bound::Excluded(x) => Bound::Excluded(x.clone()),
}
}
core::ops::range::Bound::<&T>::copied pub fn copied(self) -> Bound<T> {
match self {
Bound::Unbounded => Bound::Unbounded,
Bound::Included(x) => Bound::Included(*x),
Bound::Excluded(x) => Bound::Excluded(*x),
}
}
core::ops::range::Bound::<T>::as_mut pub const fn as_mut(&mut self) -> Bound<&mut T> {
match *self {
Included(ref mut x) => Included(x),
Excluded(ref mut x) => Excluded(x),
Unbounded => Unbounded,
}
}
core::ops::range::Bound::<T>::as_ref pub const fn as_ref(&self) -> Bound<&T> {
match *self {
Included(ref x) => Included(x),
Excluded(ref x) => Excluded(x),
Unbounded => Unbounded,
}
}
core::ops::range::Bound::<T>::map pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Bound<U> {
match self {
Unbounded => Unbounded,
Included(x) => Included(f(x)),
Excluded(x) => Excluded(f(x)),
}
}
core::ops::range::Range::<Idx>::contains pub const fn contains<U>(&self, item: &U) -> bool
where
Idx: [const] PartialOrd<U>,
U: ?Sized + [const] PartialOrd<Idx>,
{
<Self as RangeBounds<Idx>>::contains(self, item)
}
core::ops::range::RangeBounds::contains fn contains<U>(&self, item: &U) -> bool
where
T: [const] PartialOrd<U>,
U: ?Sized + [const] PartialOrd<T>,
{
(match self.start_bound() {
Included(start) => start <= item,
Excluded(start) => start < item,
Unbounded => true,
}) && (match self.end_bound() {
Included(end) => item <= end,
Excluded(end) => item < end,
Unbounded => true,
})
}
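Editor's illustration of the bound handling (sketch; helper name hypothetical):
fn _range_contains_examples() {
    assert!((1..5).contains(&4));
    assert!(!(1..5).contains(&5)); // `Excluded` end bound
    assert!((1..=5).contains(&5)); // `Included` end bound
}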
core::ops::range::RangeInclusive::<Idx>::contains pub const fn contains<U>(&self, item: &U) -> bool
where
Idx: [const] PartialOrd<U>,
U: ?Sized + [const] PartialOrd<Idx>,
{
<Self as RangeBounds<Idx>>::contains(self, item)
}
core::ops::range::RangeInclusive::<Idx>::end pub const fn end(&self) -> &Idx {
&self.end
}
core::ops::range::RangeInclusive::<Idx>::into_inner pub const fn into_inner(self) -> (Idx, Idx) {
(self.start, self.end)
}
core::ops::range::RangeInclusive::<Idx>::new pub const fn new(start: Idx, end: Idx) -> Self {
Self { start, end, exhausted: false }
}
core::ops::range::RangeInclusive::<Idx>::start pub const fn start(&self) -> &Idx {
&self.start
}
core::ops::range::RangeInclusive::<usize>::into_slice_range pub(crate) const fn into_slice_range(self) -> Range<usize> {
// If we're not exhausted, we want to simply slice `start..end + 1`.
// If we are exhausted, then slicing with `end + 1..end + 1` gives us an
// empty range that is still subject to bounds-checks for that endpoint.
let exclusive_end = self.end + 1;
let start = if self.exhausted { exclusive_end } else { self.start };
start..exclusive_end
}
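Editor's illustration (sketch; helper name hypothetical): the `exhausted` handling is observable when indexing a slice with a fully-iterated `RangeInclusive`.
fn _exhausted_inclusive_range_slices_empty() {
    let v = [10, 20, 30];
    let mut r = 0..=2;
    assert_eq!(v[r.clone()], [10, 20, 30]); // fresh range: the whole slice
    r.by_ref().for_each(drop);              // exhaust the iterator
    assert!(v[r].is_empty());               // exhausted range: an empty, in-bounds slice
}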
core::ops::try_trait::NeverShortCircuit::<T>::wrap_mut_1 pub(crate) fn wrap_mut_1<A>(
mut f: impl FnMut(A) -> T,
) -> impl FnMut(A) -> NeverShortCircuit<T> {
move |a| NeverShortCircuit(f(a))
}
core::ops::try_trait::NeverShortCircuit::<T>::wrap_mut_2 pub(crate) fn wrap_mut_2<A, B>(mut f: impl FnMut(A, B) -> T) -> impl FnMut(A, B) -> Self {
move |a, b| NeverShortCircuit(f(a, b))
}
core::ops::try_trait::residual_into_try_type pub fn residual_into_try_type<R: Residual<O>, O>(r: R) -> <R as Residual<O>>::TryType {
FromResidual::from_residual(r)
}
core::option::Option::<&T>::cloned pub fn cloned(self) -> Option<T>
where
T: Clone,
{
match self {
Some(t) => Some(t.clone()),
None => None,
}
}
core::option::Option::<&T>::copied pub const fn copied(self) -> Option<T>
where
T: Copy,
{
// FIXME(const-hack): this implementation, which sidesteps using `Option::map` since it's not const
// ready yet, should be reverted when possible to avoid code repetition
match self {
Some(&v) => Some(v),
None => None,
}
}
core::option::Option::<&mut T>::cloned pub fn cloned(self) -> Option<T>
where
T: Clone,
{
match self {
Some(t) => Some(t.clone()),
None => None,
}
}
core::option::Option::<&mut T>::copied pub const fn copied(self) -> Option<T>
where
T: Copy,
{
match self {
Some(&mut t) => Some(t),
None => None,
}
}
core::option::Option::<(T, U)>::unzip pub fn unzip(self) -> (Option<T>, Option<U>) {
match self {
Some((a, b)) => (Some(a), Some(b)),
None => (None, None),
}
}
core::option::Option::<T>::and pub const fn and<U>(self, optb: Option<U>) -> Option<U>
where
T: [const] Destruct,
U: [const] Destruct,
{
match self {
Some(_) => optb,
None => None,
}
}
core::option::Option::<T>::and_then pub const fn and_then<U, F>(self, f: F) -> Option<U>
where
F: [const] FnOnce(T) -> Option<U> + [const] Destruct,
{
match self {
Some(x) => f(x),
None => None,
}
}
core::option::Option::<T>::as_deref pub const fn as_deref(&self) -> Option<&T::Target>
where
T: [const] Deref,
{
self.as_ref().map(Deref::deref)
}
core::option::Option::<T>::as_deref_mut pub const fn as_deref_mut(&mut self) -> Option<&mut T::Target>
where
T: [const] DerefMut,
{
self.as_mut().map(DerefMut::deref_mut)
}
core::option::Option::<T>::as_mut pub const fn as_mut(&mut self) -> Option<&mut T> {
match *self {
Some(ref mut x) => Some(x),
None => None,
}
}
core::option::Option::<T>::as_ref pub const fn as_ref(&self) -> Option<&T> {
match *self {
Some(ref x) => Some(x),
None => None,
}
}
core::option::Option::<T>::expect pub const fn expect(
self,
#[cfg(not(feature = "ferrocene_certified"))] msg: &str,
#[cfg(feature = "ferrocene_certified")] msg: &'static str,
) -> T {
match self {
Some(val) => val,
#[cfg(not(feature = "ferrocene_certified"))]
None => expect_failed(msg),
#[cfg(feature = "ferrocene_certified")]
None => panic(msg),
}
}
core::option::Option::<T>::filter pub const fn filter<P>(self, predicate: P) -> Self
where
P: [const] FnOnce(&T) -> bool + [const] Destruct,
T: [const] Destruct,
{
if let Some(x) = self {
if predicate(&x) {
return Some(x);
}
}
None
}
core::option::Option::<T>::inspect pub const fn inspect<F>(self, f: F) -> Self
where
F: [const] FnOnce(&T) + [const] Destruct,
{
if let Some(ref x) = self {
f(x);
}
self
}
core::option::Option::<T>::is_none pub const fn is_none(&self) -> bool {
!self.is_some()
}
core::option::Option::<T>::is_none_or pub const fn is_none_or(self, f: impl [const] FnOnce(T) -> bool + [const] Destruct) -> bool {
match self {
None => true,
Some(x) => f(x),
}
}
core::option::Option::<T>::is_some pub const fn is_some(&self) -> bool {
matches!(*self, Some(_))
}
core::option::Option::<T>::is_some_and pub const fn is_some_and(self, f: impl [const] FnOnce(T) -> bool + [const] Destruct) -> bool {
match self {
None => false,
Some(x) => f(x),
}
}
core::option::Option::<T>::iter pub fn iter(&self) -> Iter<'_, T> {
Iter { inner: Item { opt: self.as_ref() } }
}
core::option::Option::<T>::iter_mut pub fn iter_mut(&mut self) -> IterMut<'_, T> {
IterMut { inner: Item { opt: self.as_mut() } }
}
core::option::Option::<T>::len const fn len(&self) -> usize {
// Using the intrinsic avoids emitting a branch to get the 0 or 1.
let discriminant: isize = crate::intrinsics::discriminant_value(self);
discriminant as usize
}
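Editor's illustration (sketch; helper name hypothetical): this internal `len` is what backs the exact-size iterators returned by `iter`/`iter_mut`.
fn _option_iter_is_exact_size() {
    assert_eq!(Some(7).iter().len(), 1);
    assert_eq!(None::<i32>.iter().len(), 0);
}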
core::option::Option::<T>::map pub const fn map<U, F>(self, f: F) -> Option<U>
where
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Some(x) => Some(f(x)),
None => None,
}
}
core::option::Option::<T>::map_or pub const fn map_or<U, F>(self, default: U, f: F) -> U
where
F: [const] FnOnce(T) -> U + [const] Destruct,
U: [const] Destruct,
{
match self {
Some(t) => f(t),
None => default,
}
}
core::option::Option::<T>::map_or_default pub const fn map_or_default<U, F>(self, f: F) -> U
where
U: [const] Default,
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Some(t) => f(t),
None => U::default(),
}
}
core::option::Option::<T>::map_or_else pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
where
D: [const] FnOnce() -> U + [const] Destruct,
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Some(t) => f(t),
None => default(),
}
}
core::option::Option::<T>::ok_or pub const fn ok_or<E: [const] Destruct>(self, err: E) -> Result<T, E> {
match self {
Some(v) => Ok(v),
None => Err(err),
}
}
core::option::Option::<T>::ok_or_else pub const fn ok_or_else<E, F>(self, err: F) -> Result<T, E>
where
F: [const] FnOnce() -> E + [const] Destruct,
{
match self {
Some(v) => Ok(v),
None => Err(err()),
}
}
core::option::Option::<T>::or pub const fn or(self, optb: Option<T>) -> Option<T>
where
T: [const] Destruct,
{
match self {
x @ Some(_) => x,
None => optb,
}
}
core::option::Option::<T>::or_else pub const fn or_else<F>(self, f: F) -> Option<T>
where
F: [const] FnOnce() -> Option<T> + [const] Destruct,
//FIXME(const_hack): this `T: [const] Destruct` is unnecessary, but even precise live drops can't tell
// no value of type `T` gets dropped here
T: [const] Destruct,
{
match self {
x @ Some(_) => x,
None => f(),
}
}
core::option::Option::<T>::reduce pub fn reduce<U, R, F>(self, other: Option<U>, f: F) -> Option<R>
where
T: Into<R>,
U: Into<R>,
F: FnOnce(T, U) -> R,
{
match (self, other) {
(Some(a), Some(b)) => Some(f(a, b)),
(Some(a), _) => Some(a.into()),
(_, Some(b)) => Some(b.into()),
_ => None,
}
}
core::option::Option::<T>::replace pub const fn replace(&mut self, value: T) -> Option<T> {
mem::replace(self, Some(value))
}
core::option::Option::<T>::take pub const fn take(&mut self) -> Option<T> {
// FIXME(const-hack) replace `mem::replace` by `mem::take` when the latter is const ready
mem::replace(self, None)
}
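Editor's illustration (sketch; helper name hypothetical):
fn _take_replace_examples() {
    let mut slot = Some(2);
    assert_eq!(slot.take(), Some(2));  // moves the value out, leaving `None`
    assert_eq!(slot, None);
    assert_eq!(slot.replace(5), None); // installs a new value, returning the old one
    assert_eq!(slot, Some(5));
}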
core::option::Option::<T>::unwrap pub const fn unwrap(self) -> T {
match self {
Some(val) => val,
None => unwrap_failed(),
}
}
core::option::Option::<T>::unwrap_or pub const fn unwrap_or(self, default: T) -> T
where
T: [const] Destruct,
{
match self {
Some(x) => x,
None => default,
}
}
core::option::Option::<T>::unwrap_or_default pub const fn unwrap_or_default(self) -> T
where
T: [const] Default,
{
match self {
Some(x) => x,
None => T::default(),
}
}
core::option::Option::<T>::unwrap_or_else pub const fn unwrap_or_else<F>(self, f: F) -> T
where
F: [const] FnOnce() -> T + [const] Destruct,
{
match self {
Some(x) => x,
None => f(),
}
}
core::option::Option::<T>::xor pub const fn xor(self, optb: Option<T>) -> Option<T>
where
T: [const] Destruct,
{
match (self, optb) {
(a @ Some(_), None) => a,
(None, b @ Some(_)) => b,
_ => None,
}
}
core::option::Option::<T>::zip pub const fn zip<U>(self, other: Option<U>) -> Option<(T, U)>
where
T: [const] Destruct,
U: [const] Destruct,
{
match (self, other) {
(Some(a), Some(b)) => Some((a, b)),
_ => None,
}
}
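Editor's illustration (sketch; helper name hypothetical):
fn _xor_zip_examples() {
    assert_eq!(Some(1).xor(None::<i32>), Some(1)); // exactly one operand is `Some`
    assert_eq!(Some(1).xor(Some(2)), None);        // both engaged: `None`
    assert_eq!(Some(1).zip(Some("a")), Some((1, "a")));
    assert_eq!(Some(1).zip(None::<&str>), None);
}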
core::option::Option::<core::option::Option<T>>::flatten pub const fn flatten(self) -> Option<T> {
// FIXME(const-hack): could be written with `and_then`
match self {
Some(inner) => inner,
None => None,
}
}
core::option::Option::<core::result::Result<T, E>>::transpose pub const fn transpose(self) -> Result<Option<T>, E> {
match self {
Some(Ok(x)) => Ok(Some(x)),
Some(Err(e)) => Err(e),
None => Ok(None),
}
}
core::option::unwrap_failed const fn unwrap_failed() -> ! {
panic("called `Option::unwrap()` on a `None` value")
}
core::panic::location::Location::<'a>::caller pub const fn caller() -> &'static Location<'static> {
crate::intrinsics::caller_location()
}
core::panic::location::Location::<'a>::column pub const fn column(&self) -> u32 {
self.col
}
core::panic::location::Location::<'a>::line pub const fn line(&self) -> u32 {
self.line
}
core::panic::panic_info::PanicInfo::<'a>::location pub fn location(&self) -> Option<&Location<'_>> {
// NOTE: If this is changed to sometimes return None,
// deal with that case in std::panicking::default_hook and core::panicking::panic_fmt.
Some(&self.location)
}
core::panicking::panic pub const fn panic(expr: &'static str) -> ! {
// Use Arguments::from_str instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::from_str may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
// However, this optimization only works for `'static` strings: `from_str` also makes this
// message return `Some` from `Arguments::as_str`, which means it can become part of the panic
// payload without any allocation or copying. Shorter-lived strings would become invalid as
// stack frames get popped during unwinding, and couldn't be directly referenced from the
// payload.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::from_str(expr));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&expr)
}
core::panicking::panic_nounwind_fmt::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::align_offset pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
assume, cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl,
unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
};
/// Calculate multiplicative modular inverse of `x` modulo `m`.
///
/// This implementation is tailored for `align_offset` and has the following preconditions:
///
/// * `m` is a power-of-two;
/// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
///
/// Implementation of this function shall not panic. Ever.
#[inline]
const unsafe fn mod_inv(x: usize, m: usize) -> usize {
/// Multiplicative modular inverse table modulo 2⁴ = 16.
///
/// Note that this table does not contain values for which the inverse does not exist
/// (i.e., for `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
/// Modulo for which the `INV_TABLE_MOD_16` is intended.
const INV_TABLE_MOD: usize = 16;
// SAFETY: `m` is required to be a power-of-two, hence non-zero.
let m_minus_one = unsafe { unchecked_sub(m, 1) };
let mut inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
let mut mod_gate = INV_TABLE_MOD;
// We iterate "up" using the following formula:
//
// $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
//
// This step needs to be applied repeatedly, at least until `2²ⁿ ≥ m`, at which point we can
// finally reduce the computation to our desired `m` by taking `inverse mod m`.
//
// This computation is `O(log log m)`, which is to say that on 64-bit machines this loop
// will always finish in at most 4 iterations.
loop {
// y = y * (2 - xy) mod n
//
// Note that we use wrapping operations here intentionally – the original formula
// uses e.g. subtraction `mod n`. It is entirely fine to do them modulo the
// machine word (i.e. wrapping) instead, because we take the result `mod n`
// at the end anyway.
if mod_gate >= m {
break;
}
inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
let (new_gate, overflow) = mul_with_overflow(mod_gate, mod_gate);
if overflow {
break;
}
mod_gate = new_gate;
}
inverse & m_minus_one
}
let stride = size_of::<T>();
let addr: usize = p.addr();
// SAFETY: `a` is a power-of-two, therefore non-zero.
let a_minus_one = unsafe { unchecked_sub(a, 1) };
if stride == 0 {
// SPECIAL_CASE: handle 0-sized types. No matter how many times we step, the address will
// stay the same, so no offset will be able to align the pointer unless it is already
// aligned. This branch _will_ be optimized out as `stride` is known at compile-time.
let p_mod_a = addr & a_minus_one;
return if p_mod_a == 0 { 0 } else { usize::MAX };
}
// SAFETY: `stride == 0` case has been handled by the special case above.
let a_mod_stride = unsafe { unchecked_rem(a, stride) };
if a_mod_stride == 0 {
// SPECIAL_CASE: In cases where the `a` is divisible by `stride`, byte offset to align a
// pointer can be computed more simply through `-p (mod a)`. In the off-chance the byte
// offset is not a multiple of `stride`, the input pointer was misaligned and no pointer
// offset will be able to produce a `p` aligned to the specified `a`.
//
// The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
// like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
// redistributes operations around the load-bearing, but pessimizing `and` instruction
// sufficiently for LLVM to be able to utilize the various optimizations it knows about.
//
// LLVM handles the branch here particularly nicely. If this branch needs to be evaluated
// at runtime, it will produce a mask `if addr_mod_stride == 0 { 0 } else { usize::MAX }`
// in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
// computation produces.
let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
let byte_offset = wrapping_sub(aligned_address, addr);
// FIXME: Remove the assume after <https://github.com/llvm/llvm-project/issues/62502>
// SAFETY: Masking by `-a` can only affect the low bits, and thus cannot have reduced
// the value by more than `a-1`, so even though the intermediate values might have
// wrapped, the byte_offset is always in `[0, a)`.
unsafe { assume(byte_offset < a) };
// SAFETY: `stride == 0` case has been handled by the special case above.
let addr_mod_stride = unsafe { unchecked_rem(addr, stride) };
return if addr_mod_stride == 0 {
// SAFETY: `stride` is non-zero. This is guaranteed to divide exactly as well, because
// addr has been verified to be aligned to the original type’s alignment requirements.
unsafe { exact_div(byte_offset, stride) }
} else {
usize::MAX
};
}
// GENERAL_CASE: From here on we’re handling the very general case where `addr` may be
// misaligned, there isn’t an obvious relationship between `stride` and `a` that we can take an
// advantage of, etc. This case produces machine code that isn’t particularly high quality,
// compared to the special cases above. The code produced here is still within the realm of
// miracles, given the situations this case has to deal with.
// SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
// FIXME(const-hack) replace with min
let gcdpow = unsafe {
let x = cttz_nonzero(stride);
let y = cttz_nonzero(a);
if x < y { x } else { y }
};
// SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a `usize`.
let gcd = unsafe { unchecked_shl(1usize, gcdpow) };
// SAFETY: gcd is always greater or equal to 1.
if addr & unsafe { unchecked_sub(gcd, 1) } == 0 {
// This branch solves for the following linear congruence equation:
//
// ` p + so = 0 mod a `
//
// `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the
// requested alignment.
//
// With `g = gcd(a, s)`, and the above condition asserting that `p` is also divisible by
// `g`, we can denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to:
//
// ` p' + s'o = 0 mod a' `
// ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
//
// The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the
// second term is "how does incrementing `p` by `s` bytes change the relative alignment of
// `p`" (again divided by `g`). Division by `g` is necessary to make the inverse well
// formed if `a` and `s` are not co-prime.
//
// Furthermore, the result produced by this solution is not "minimal", so it is necessary
// to take the result `o mod lcm(s, a)`. This `lcm(s, a)` is the same as `a'`.
// SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
// `a`.
let a2 = unsafe { unchecked_shr(a, gcdpow) };
// SAFETY: `a2` is non-zero. Shifting `a` by `gcdpow` cannot shift out any of the set bits
// in `a` (of which it has exactly one).
let a2minus1 = unsafe { unchecked_sub(a2, 1) };
// SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
// `a`.
let s2 = unsafe { unchecked_shr(stride & a_minus_one, gcdpow) };
// SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
// `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will
// always be strictly greater than `(p % a) >> gcdpow`.
let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(addr & a_minus_one, gcdpow)) };
// SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2`
// because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`.
return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1;
}
// Cannot be aligned at all.
usize::MAX
}
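Editor's illustration (sketch; helper name hypothetical; the documented contract permits `usize::MAX` even when alignment is reachable, so the `off < 8` assertion reflects this runtime implementation rather than a guarantee):
fn _align_offset_usage() {
    let buf = [0u8; 64];
    let p = buf.as_ptr();
    let off = p.align_offset(8);
    assert!(off < 8); // stride 1: an aligned address is always reachable
    assert_eq!(p.wrapping_add(off) as usize % 8, 0);
}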
core::ptr::align_offset::mod_inv const unsafe fn mod_inv(x: usize, m: usize) -> usize {
/// Multiplicative modular inverse table modulo 2⁴ = 16.
///
/// Note that this table does not contain values for which the inverse does not exist
/// (i.e., for `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
/// Modulo for which the `INV_TABLE_MOD_16` is intended.
const INV_TABLE_MOD: usize = 16;
// SAFETY: `m` is required to be a power-of-two, hence non-zero.
let m_minus_one = unsafe { unchecked_sub(m, 1) };
let mut inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
let mut mod_gate = INV_TABLE_MOD;
// We iterate "up" using the following formula:
//
// $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
//
// This step needs to be applied repeatedly, at least until `2²ⁿ ≥ m`, at which point we can
// finally reduce the computation to our desired `m` by taking `inverse mod m`.
//
// This computation is `O(log log m)`, which is to say that on 64-bit machines this loop
// will always finish in at most 4 iterations.
loop {
// y = y * (2 - xy) mod n
//
// Note that we use wrapping operations here intentionally – the original formula
// uses e.g. subtraction `mod n`. It is entirely fine to do them modulo the
// machine word (i.e. wrapping) instead, because we take the result `mod n`
// at the end anyway.
if mod_gate >= m {
break;
}
inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
let (new_gate, overflow) = mul_with_overflow(mod_gate, mod_gate);
if overflow {
break;
}
mod_gate = new_gate;
}
inverse & m_minus_one
}
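Editor's worked example of the lifting step (sketch; helper name hypothetical): from the table, 3⁻¹ mod 16 is 11, and one application of y' = y·(2 − x·y) lifts it from modulus 2⁴ to 2⁸.
fn _mod_inv_worked_example() {
    assert_eq!((3_usize * 11) % 16, 1); // 3 * 11 = 33 ≡ 1 (mod 16)
    let y = 11_usize.wrapping_mul(2_usize.wrapping_sub(3 * 11)) % 256;
    assert_eq!(y, 171);
    assert_eq!((3 * y) % 256, 1); // 3 * 171 = 513 ≡ 1 (mod 256)
}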
core::ptr::alignment::Alignment::as_usize pub const fn as_usize(self) -> usize {
self.0 as usize
}
core::ptr::alignment::Alignment::new pub const fn new(align: usize) -> Option<Self> {
if align.is_power_of_two() {
// SAFETY: Just checked it only has one bit set
Some(unsafe { Self::new_unchecked(align) })
} else {
None
}
}
core::ptr::alignment::Alignment::new_unchecked pub const unsafe fn new_unchecked(align: usize) -> Self {
assert_unsafe_precondition!(
check_language_ub,
"Alignment::new_unchecked requires a power of two",
(align: usize = align) => align.is_power_of_two()
);
// SAFETY: By precondition, this must be a power of two, and
// our variants encompass all possible powers of two.
unsafe { mem::transmute::<usize, Alignment>(align) }
}
core::ptr::const_ptr::<impl *const T>::add pub const unsafe fn add(self, count: usize) -> Self
where
T: Sized,
{
#[cfg(debug_assertions)]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
#[cfg(debug_assertions)] // Expensive, and doesn't catch much in the wild.
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::add requires that the address calculation does not overflow",
(
this: *const () = self as *const (),
count: usize = count,
size: usize = size_of::<T>(),
) => runtime_add_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::const_ptr::<impl *const T>::add::runtime_add_nowrap const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
core::ptr::const_ptr::<impl *const T>::add::runtime_add_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::addr pub fn addr(self) -> usize {
// A pointer-to-integer transmute currently has exactly the right semantics: it returns the
// address without exposing the provenance. Note that this is *not* a stable guarantee about
// transmute semantics, it relies on sysroot crates having special status.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
unsafe { mem::transmute(self.cast::<()>()) }
}
core::ptr::const_ptr::<impl *const T>::byte_add pub const unsafe fn byte_add(self, count: usize) -> Self {
// SAFETY: the caller must uphold the safety contract for `add`.
unsafe { self.cast::<u8>().add(count).with_metadata_of(self) }
}
core::ptr::const_ptr::<impl *const T>::cast pub const fn cast<U>(self) -> *const U {
self as _
}
core::ptr::const_ptr::<impl *const T>::cast_array pub const fn cast_array<const N: usize>(self) -> *const [T; N] {
self.cast()
}
core::ptr::const_ptr::<impl *const T>::is_aligned_to pub fn is_aligned_to(self, align: usize) -> bool {
if !align.is_power_of_two() {
panic!("is_aligned_to: align is not a power-of-two");
}
self.addr() & (align - 1) == 0
}
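Editor's illustration of the mask check (sketch; helper name hypothetical): for a power-of-two `align`, `align - 1` masks the low bits, which must all be zero for an aligned address.
fn _alignment_mask_example() {
    let addr = 0x1000_usize;
    assert_eq!(addr & (8 - 1), 0);       // 0x1000 is 8-byte aligned
    assert_ne!((addr + 4) & (8 - 1), 0); // 0x1004 is not
}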
core::ptr::const_ptr::<impl *const T>::is_null pub const fn is_null(self) -> bool {
// Compare via a cast to a thin pointer, so fat pointers are only
// considering their "data" part for null-ness.
let ptr = self as *const u8;
const_eval_select!(
@capture { ptr: *const u8 } -> bool:
// This use of `const_raw_ptr_comparison` has been explicitly blessed by t-lang.
if const #[rustc_allow_const_fn_unstable(const_raw_ptr_comparison)] {
match (ptr).guaranteed_eq(null_mut()) {
Some(res) => res,
// To remain maximally conservative, we stop execution when we don't
// know whether the pointer is null or not.
// We can *not* return `false` here, that would be unsound in `NonNull::new`!
None => panic!("null-ness of this pointer cannot be determined in const context"),
}
} else {
ptr.addr() == 0
}
)
}
core::ptr::const_ptr::<impl *const T>::is_null::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::offset pub const unsafe fn offset(self, count: isize) -> *const T
where
T: Sized,
{
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_offset_nowrap(this: *const (), count: isize, size: usize) -> bool {
// We can use const_eval_select here because this is only for UB checks.
const_eval_select!(
@capture { this: *const (), count: isize, size: usize } -> bool:
if const {
true
} else {
// `size` is the size of a Rust type, so we know that
// `size <= isize::MAX` and thus `as` cast here is not lossy.
let Some(byte_offset) = count.checked_mul(size as isize) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add_signed(byte_offset);
!overflow
}
)
}
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::offset requires the address calculation to not overflow",
(
this: *const () = self as *const (),
count: isize = count,
size: usize = size_of::<T>(),
) => runtime_offset_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::const_ptr::<impl *const T>::offset::runtime_offset_nowrap const fn runtime_offset_nowrap(this: *const (), count: isize, size: usize) -> bool {
// We can use const_eval_select here because this is only for UB checks.
const_eval_select!(
@capture { this: *const (), count: isize, size: usize } -> bool:
if const {
true
} else {
// `size` is the size of a Rust type, so we know that
// `size <= isize::MAX` and thus `as` cast here is not lossy.
let Some(byte_offset) = count.checked_mul(size as isize) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add_signed(byte_offset);
!overflow
}
)
}
core::ptr::const_ptr::<impl *const T>::offset::runtime_offset_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned pub const unsafe fn offset_from_unsigned(self, origin: *const T) -> usize
where
T: Sized,
{
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_ptr_ge(this: *const (), origin: *const ()) -> bool {
const_eval_select!(
@capture { this: *const (), origin: *const () } -> bool:
if const {
true
} else {
this >= origin
}
)
}
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::offset_from_unsigned requires `self >= origin`",
(
this: *const () = self as *const (),
origin: *const () = origin as *const (),
) => runtime_ptr_ge(this, origin)
);
let pointee_size = size_of::<T>();
assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
// SAFETY: the caller must uphold the safety contract for `ptr_offset_from_unsigned`.
unsafe { intrinsics::ptr_offset_from_unsigned(self, origin) }
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned::runtime_ptr_ge const fn runtime_ptr_ge(this: *const (), origin: *const ()) -> bool {
const_eval_select!(
@capture { this: *const (), origin: *const () } -> bool:
if const {
true
} else {
this >= origin
}
)
}
core::ptr::const_ptr::<impl *const T>::offset_from_unsigned::runtime_ptr_ge::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::read pub const unsafe fn read(self) -> T
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `read`.
unsafe { read(self) }
}
core::ptr::const_ptr::<impl *const T>::try_cast_aligned pub fn try_cast_aligned<U>(self) -> Option<*const U> {
if self.is_aligned_to(align_of::<U>()) { Some(self.cast()) } else { None }
}
core::ptr::const_ptr::<impl *const T>::with_metadata_of pub const fn with_metadata_of<U>(self, meta: *const U) -> *const U
where
U: PointeeSized,
{
from_raw_parts::<U>(self as *const (), metadata(meta))
}
core::ptr::const_ptr::<impl *const [T]>::as_array pub const fn as_array<const N: usize>(self) -> Option<*const [T; N]> {
if self.len() == N {
let me = self.as_ptr() as *const [T; N];
Some(me)
} else {
None
}
}
core::ptr::const_ptr::<impl *const [T]>::as_ptr pub const fn as_ptr(self) -> *const T {
self as *const T
}
core::ptr::const_ptr::<impl *const [T]>::is_empty pub const fn is_empty(self) -> bool {
self.len() == 0
}
core::ptr::const_ptr::<impl *const [T]>::len pub const fn len(self) -> usize {
metadata(self)
}
core::ptr::const_ptr::<impl core::cmp::PartialEq for *const T>::eq fn eq(&self, other: &*const T) -> bool {
*self == *other
}
core::ptr::copy pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
// SAFETY: the safety contract for `copy` must be upheld by the caller.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::copy requires that both pointer arguments are aligned and non-null",
(
src: *const () = src as *const (),
dst: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
zero_size: bool = T::IS_ZST || count == 0,
) =>
ub_checks::maybe_is_aligned_and_not_null(src, align, zero_size)
&& ub_checks::maybe_is_aligned_and_not_null(dst, align, zero_size)
);
crate::intrinsics::copy(src, dst, count)
}
}
core::ptr::copy_nonoverlapping pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::copy_nonoverlapping requires that both pointer arguments are aligned and non-null \
and the specified memory ranges do not overlap",
(
src: *const () = src as *const (),
dst: *mut () = dst as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
count: usize = count,
) => {
let zero_size = count == 0 || size == 0;
ub_checks::maybe_is_aligned_and_not_null(src, align, zero_size)
&& ub_checks::maybe_is_aligned_and_not_null(dst, align, zero_size)
&& ub_checks::maybe_is_nonoverlapping(src, dst, size, count)
}
);
// SAFETY: the safety contract for `copy_nonoverlapping` must be
// upheld by the caller.
unsafe { crate::intrinsics::copy_nonoverlapping(src, dst, count) }
}
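Editor's illustration (sketch; helper name hypothetical):
fn _copy_nonoverlapping_usage() {
    let src = [1_u8, 2, 3, 4];
    let mut dst = [0_u8; 4];
    // SAFETY: both pointers are valid and aligned for four `u8`s, and the two
    // arrays are distinct locals, so the ranges cannot overlap.
    unsafe { core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), 4) };
    assert_eq!(dst, src);
}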
core::ptr::from_ref pub const fn from_ref<T: PointeeSized>(r: &T) -> *const T {
r
}
core::ptr::metadata::from_raw_parts pub const fn from_raw_parts<T: PointeeSized>(
data_pointer: *const impl Thin,
metadata: <T as Pointee>::Metadata,
) -> *const T {
aggregate_raw_ptr(data_pointer, metadata)
}
core::ptr::metadata::from_raw_parts_mut pub const fn from_raw_parts_mut<T: PointeeSized>(
data_pointer: *mut impl Thin,
metadata: <T as Pointee>::Metadata,
) -> *mut T {
aggregate_raw_ptr(data_pointer, metadata)
}
core::ptr::metadata::metadata pub const fn metadata<T: PointeeSized>(ptr: *const T) -> <T as Pointee>::Metadata {
ptr_metadata(ptr)
}
core::ptr::mut_ptr::<impl *mut T>::add pub const unsafe fn add(self, count: usize) -> Self
where
T: Sized,
{
#[cfg(debug_assertions)]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
#[cfg(debug_assertions)] // Expensive, and doesn't catch much in the wild.
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::add requires that the address calculation does not overflow",
(
this: *const () = self as *const (),
count: usize = count,
size: usize = size_of::<T>(),
) => runtime_add_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::mut_ptr::<impl *mut T>::add::runtime_add_nowrap const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
core::ptr::mut_ptr::<impl *mut T>::add::runtime_add_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::mut_ptr::<impl *mut T>::addr pub fn addr(self) -> usize {
// A pointer-to-integer transmute currently has exactly the right semantics: it returns the
// address without exposing the provenance. Note that this is *not* a stable guarantee about
// transmute semantics, it relies on sysroot crates having special status.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
unsafe { mem::transmute(self.cast::<()>()) }
}
core::ptr::mut_ptr::<impl *mut T>::as_mut pub const unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
// SAFETY: the caller must guarantee that `self` is valid for
// a mutable reference if it isn't null.
if self.is_null() { None } else { unsafe { Some(&mut *self) } }
}
core::ptr::mut_ptr::<impl *mut T>::cast pub const fn cast<U>(self) -> *mut U {
self as _
}
core::ptr::mut_ptr::<impl *mut T>::cast_array pub const fn cast_array<const N: usize>(self) -> *mut [T; N] {
self.cast()
}
core::ptr::mut_ptr::<impl *mut T>::cast_const pub const fn cast_const(self) -> *const T {
self as _
}
core::ptr::mut_ptr::<impl *mut T>::is_null pub const fn is_null(self) -> bool {
self.cast_const().is_null()
}
core::ptr::mut_ptr::<impl *mut T>::offset pub const unsafe fn offset(self, count: isize) -> *mut T
where
T: Sized,
{
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_offset_nowrap(this: *const (), count: isize, size: usize) -> bool {
// We can use const_eval_select here because this is only for UB checks.
const_eval_select!(
@capture { this: *const (), count: isize, size: usize } -> bool:
if const {
true
} else {
// `size` is the size of a Rust type, so we know that
// `size <= isize::MAX` and thus `as` cast here is not lossy.
let Some(byte_offset) = count.checked_mul(size as isize) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add_signed(byte_offset);
!overflow
}
)
}
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::offset requires the address calculation to not overflow",
(
this: *const () = self as *const (),
count: isize = count,
size: usize = size_of::<T>(),
) => runtime_offset_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
// The obtained pointer is valid for writes since the caller must
// guarantee that it points to the same allocation as `self`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::mut_ptr::<impl *mut T>::offset::runtime_offset_nowrap const fn runtime_offset_nowrap(this: *const (), count: isize, size: usize) -> bool {
// We can use const_eval_select here because this is only for UB checks.
const_eval_select!(
@capture { this: *const (), count: isize, size: usize } -> bool:
if const {
true
} else {
// `size` is the size of a Rust type, so we know that
// `size <= isize::MAX` and thus `as` cast here is not lossy.
let Some(byte_offset) = count.checked_mul(size as isize) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add_signed(byte_offset);
!overflow
}
)
}
core::ptr::mut_ptr::<impl *mut T>::offset::runtime_offset_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::mut_ptr::<impl *mut T>::offset_from_unsigned pub const unsafe fn offset_from_unsigned(self, origin: *const T) -> usize
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `offset_from_unsigned`.
unsafe { (self as *const T).offset_from_unsigned(origin) }
}
core::ptr::mut_ptr::<impl *mut T>::read pub const unsafe fn read(self) -> T
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `read`.
unsafe { read(self) }
}
core::ptr::mut_ptr::<impl *mut T>::replace pub const unsafe fn replace(self, src: T) -> T
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `replace`.
unsafe { replace(self, src) }
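A short usage sketch (editor's addition): `replace` moves `src` into the place and hands back the previous value without dropping it:

    use std::ptr;

    fn main() {
        let mut slot = String::from("old");
        // SAFETY: `slot` is aligned, initialized, and valid for reads and
        // writes; `src` is a by-value argument, so no overlap is possible.
        let previous = unsafe { ptr::replace(&mut slot, String::from("new")) };
        assert_eq!(previous, "old");
        assert_eq!(slot, "new");
    }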
}
core::ptr::mut_ptr::<impl *mut T>::sub::runtime_sub_nowrap const fn runtime_sub_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
byte_offset <= (isize::MAX as usize) && this.addr() >= byte_offset
}
)
}
core::ptr::mut_ptr::<impl *mut T>::sub::runtime_sub_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::mut_ptr::<impl *mut T>::wrapping_add pub const fn wrapping_add(self, count: usize) -> Self
where
T: Sized,
{
self.wrapping_offset(count as isize)
}
core::ptr::mut_ptr::<impl *mut T>::wrapping_offset pub const fn wrapping_offset(self, count: isize) -> *mut T
where
T: Sized,
{
// SAFETY: the `arith_offset` intrinsic has no prerequisites to be called.
unsafe { intrinsics::arith_offset(self, count) as *mut T }
}
core::ptr::mut_ptr::<impl *mut T>::write pub const unsafe fn write(self, val: T)
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `write`.
unsafe { write(self, val) }
}
core::ptr::mut_ptr::<impl *mut T>::write_bytes pub const unsafe fn write_bytes(self, val: u8, count: usize)
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `write_bytes`.
unsafe { write_bytes(self, val, count) }
}
core::ptr::mut_ptr::<impl *mut [T]>::as_mut_array pub const fn as_mut_array<const N: usize>(self) -> Option<*mut [T; N]> {
if self.len() == N {
let me = self.as_mut_ptr() as *mut [T; N];
Some(me)
} else {
None
}
}
core::ptr::mut_ptr::<impl *mut [T]>::as_mut_ptr pub const fn as_mut_ptr(self) -> *mut T {
self as *mut T
}
core::ptr::mut_ptr::<impl *mut [T]>::is_empty pub const fn is_empty(self) -> bool {
self.len() == 0
}
core::ptr::mut_ptr::<impl *mut [T]>::len pub const fn len(self) -> usize {
metadata(self)
}
core::ptr::mut_ptr::<impl *mut [T]>::split_at_mut pub unsafe fn split_at_mut(self, mid: usize) -> (*mut [T], *mut [T]) {
assert!(mid <= self.len());
// SAFETY: The assert above is only a safety-net as long as `self.len()` is correct.
// The actual safety requirements of this function are the same as for `split_at_mut_unchecked`.
unsafe { self.split_at_mut_unchecked(mid) }
}
core::ptr::mut_ptr::<impl *mut [T]>::split_at_mut_unchecked pub unsafe fn split_at_mut_unchecked(self, mid: usize) -> (*mut [T], *mut [T]) {
let len = self.len();
let ptr = self.as_mut_ptr();
// SAFETY: Caller must pass a valid pointer and an index that is in-bounds.
let tail = unsafe { ptr.add(mid) };
(
crate::ptr::slice_from_raw_parts_mut(ptr, mid),
crate::ptr::slice_from_raw_parts_mut(tail, len - mid),
)
}
core::ptr::mut_ptr::<impl core::cmp::PartialEq for *mut T>::eq fn eq(&self, other: &*mut T) -> bool {
*self == *other
}
core::ptr::non_null::NonNull::<T>::add pub const unsafe fn add(self, count: usize) -> Self
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `offset`.
// Additionally, the safety contract of `offset` guarantees that the resulting
// pointer points to an allocation; there can't be an allocation at null, so it's
// safe to construct `NonNull`.
unsafe { NonNull { pointer: intrinsics::offset(self.as_ptr(), count) } }
}
core::ptr::non_null::NonNull::<T>::as_mut pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a mutable reference.
unsafe { &mut *self.as_ptr() }
}
core::ptr::non_null::NonNull::<T>::as_ptr pub const fn as_ptr(self) -> *mut T {
// This is a transmute for the same reasons as `NonZero::get`.
// SAFETY: `NonNull` is `transparent` over a `*const T`, and `*const T`
// and `*mut T` have the same layout, so transitively we can transmute
// our `NonNull` to a `*mut T` directly.
unsafe { mem::transmute::<Self, *mut T>(self) }
}
core::ptr::non_null::NonNull::<T>::as_ref pub const unsafe fn as_ref<'a>(&self) -> &'a T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a reference.
// `cast_const` avoids a mutable raw pointer deref.
unsafe { &*self.as_ptr().cast_const() }
}
core::ptr::non_null::NonNull::<T>::cast pub const fn cast<U>(self) -> NonNull<U> {
// SAFETY: `self` is a `NonNull` pointer which is necessarily non-null
unsafe { NonNull { pointer: self.as_ptr() as *mut U } }
}
core::ptr::non_null::NonNull::<T>::from_mut pub const fn from_mut(r: &mut T) -> Self {
// SAFETY: A mutable reference cannot be null.
unsafe { NonNull { pointer: r as *mut T } }
}
core::ptr::non_null::NonNull::<T>::from_ref pub const fn from_ref(r: &T) -> Self {
// SAFETY: A reference cannot be null.
unsafe { NonNull { pointer: r as *const T } }
}
core::ptr::non_null::NonNull::<T>::new_unchecked pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
// SAFETY: the caller must guarantee that `ptr` is non-null.
unsafe {
assert_unsafe_precondition!(
check_language_ub,
"NonNull::new_unchecked requires that the pointer is non-null",
(ptr: *mut () = ptr as *mut ()) => !ptr.is_null()
);
NonNull { pointer: ptr as _ }
}
}
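The checked constructor `NonNull::new` performs the null test that `new_unchecked` only asserts under UB checks; a small sketch of the two entry points:

    use std::ptr::NonNull;

    fn main() {
        let mut value = 5i32;
        // Reference-derived pointers are never null, so this succeeds.
        let nn = NonNull::new(&mut value as *mut i32).expect("non-null");
        assert_eq!(unsafe { *nn.as_ptr() }, 5);
        // A null pointer is rejected by the checked constructor.
        assert!(NonNull::new(std::ptr::null_mut::<i32>()).is_none());
    }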
core::ptr::non_null::NonNull::<T>::offset_from_unsigned pub const unsafe fn offset_from_unsigned(self, subtracted: NonNull<T>) -> usize
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `offset_from_unsigned`.
unsafe { self.as_ptr().offset_from_unsigned(subtracted.as_ptr()) }
}
core::ptr::nullpub const fn null<T: PointeeSized + Thin>() -> *const T {
from_raw_parts(without_provenance::<()>(0), ())
}
core::ptr::null_mutpub const fn null_mut<T: PointeeSized + Thin>() -> *mut T {
from_raw_parts_mut(without_provenance_mut::<()>(0), ())
}
core::ptr::readpub const unsafe fn read<T>(src: *const T) -> T {
// It would be semantically correct to implement this via `copy_nonoverlapping`
// and `MaybeUninit`, as was done before PR #109035. Calling `assume_init`
// provides enough information to know that this is a typed operation.
// However, as of March 2023 the compiler was not capable of taking advantage
// of that information. Thus, the implementation here switched to an intrinsic,
// which lowers to `_0 = *src` in MIR, to address a few issues:
//
// - Using `MaybeUninit::assume_init` after a `copy_nonoverlapping` was not
// turning the untyped copy into a typed load. As such, the generated
// `load` in LLVM didn't get various metadata, such as `!range` (#73258),
// `!nonnull`, and `!noundef`, resulting in poorer optimization.
// - Going through the extra local resulted in multiple extra copies, even
// in optimized MIR. (Ignoring StorageLive/Dead, the intrinsic is one
// MIR statement, while the previous implementation was eight.) LLVM
// could sometimes optimize them away, but because `read` is at the core
// of so many things, not having them in the first place improves what we
// hand off to the backend. For example, `mem::replace::<Big>` previously
// emitted 4 `alloca` and 6 `memcpy`s, but is now 1 `alloca` and 3 `memcpy`s.
// - In general, this approach keeps us from getting any more bugs (like
// #106369) that boil down to "`read(p)` is worse than `*p`", as this
// makes them look identical to the backend (or other MIR consumers).
//
// Future enhancements to MIR optimizations might well allow this to return
// to the previous implementation, rather than using an intrinsic.
// SAFETY: the caller must guarantee that `src` is valid for reads.
unsafe {
#[cfg(debug_assertions)] // Too expensive to always enable (for now?)
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::read requires that the pointer argument is aligned and non-null",
(
addr: *const () = src as *const (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
crate::intrinsics::read_via_copy(src)
}
}
core::ptr::read_unalignedpub const unsafe fn read_unaligned<T>(src: *const T) -> T {
let mut tmp = MaybeUninit::<T>::uninit();
// SAFETY: the caller must guarantee that `src` is valid for reads.
// `src` cannot overlap `tmp` because `tmp` was just allocated on
// the stack as a separate allocation.
//
// Also, since the copy below writes a fully valid value into `tmp`, it is
// guaranteed to be properly initialized by the time `assume_init` runs.
unsafe {
copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, size_of::<T>());
tmp.assume_init()
}
}
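A sketch of why the byte-wise copy matters: reading a `u32` that starts at an odd offset would violate `read`'s alignment precondition, while `read_unaligned` is fine:

    use std::ptr;

    fn main() {
        // A u32 stored at offset 1, i.e. (almost certainly) misaligned.
        let bytes = [0u8, 0x78, 0x56, 0x34, 0x12];
        // SAFETY: offsets 1..5 are in bounds; alignment does not matter
        // because `read_unaligned` copies byte-by-byte.
        let v = unsafe { ptr::read_unaligned(bytes.as_ptr().add(1) as *const u32) };
        assert_eq!(v, u32::from_ne_bytes([0x78, 0x56, 0x34, 0x12]));
    }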
core::ptr::read_volatilepub unsafe fn read_volatile<T>(src: *const T) -> T {
// SAFETY: the caller must uphold the safety contract for `volatile_load`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::read_volatile requires that the pointer argument is aligned",
(
addr: *const () = src as *const (),
align: usize = align_of::<T>(),
) => ub_checks::maybe_is_aligned(addr, align)
);
intrinsics::volatile_load(src)
}
}
core::ptr::replacepub const unsafe fn replace<T>(dst: *mut T, src: T) -> T {
// SAFETY: the caller must guarantee that `dst` is valid to be
// cast to a mutable reference (valid for writes, aligned, initialized),
// and cannot overlap `src` since `dst` must point to a distinct
// allocation.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::replace requires that the pointer argument is aligned and non-null",
(
addr: *const () = dst as *const (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
mem::replace(&mut *dst, src)
}
}
core::ptr::slice_from_raw_partspub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
from_raw_parts(data, len)
}
core::ptr::slice_from_raw_parts_mutpub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
from_raw_parts_mut(data, len)
}
core::ptr::swappub const unsafe fn swap<T>(x: *mut T, y: *mut T) {
// Give ourselves some scratch space to work with.
// We do not have to worry about drops: `MaybeUninit` does nothing when dropped.
let mut tmp = MaybeUninit::<T>::uninit();
// Perform the swap
// SAFETY: the caller must guarantee that `x` and `y` are
// valid for writes and properly aligned. `tmp` cannot be
// overlapping either `x` or `y` because `tmp` was just allocated
// on the stack as a separate allocation.
unsafe {
copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
copy(y, x, 1); // `x` and `y` may overlap
copy_nonoverlapping(tmp.as_ptr(), y, 1);
}
}
core::ptr::swap_chunkfn swap_chunk<const N: usize>(x: &mut MaybeUninit<[u8; N]>, y: &mut MaybeUninit<[u8; N]>) {
let a = *x;
let b = *y;
*x = b;
*y = a;
}
core::ptr::swap_nonoverlappingpub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
ub_checks::assert_unsafe_precondition!(
check_library_ub,
"ptr::swap_nonoverlapping requires that both pointer arguments are aligned and non-null \
and the specified memory ranges do not overlap",
(
x: *mut () = x as *mut (),
y: *mut () = y as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
count: usize = count,
) => {
let zero_size = size == 0 || count == 0;
ub_checks::maybe_is_aligned_and_not_null(x, align, zero_size)
&& ub_checks::maybe_is_aligned_and_not_null(y, align, zero_size)
&& ub_checks::maybe_is_nonoverlapping(x, y, size, count)
}
);
const_eval_select!(
@capture[T] { x: *mut T, y: *mut T, count: usize }:
if const {
// At compile-time we don't need all the special code below.
// SAFETY: Same preconditions as this function
unsafe { swap_nonoverlapping_const(x, y, count) }
} else {
// Going through a slice here helps codegen know the size fits in `isize`
let slice = slice_from_raw_parts_mut(x, count);
// SAFETY: This is all readable from the pointer, meaning it's one
// allocation, and thus cannot be more than isize::MAX bytes.
let bytes = unsafe { mem::size_of_val_raw::<[T]>(slice) };
if let Some(bytes) = NonZero::new(bytes) {
// SAFETY: These are the same ranges, just expressed in a different
// type, so they're still non-overlapping.
unsafe { swap_nonoverlapping_bytes(x.cast(), y.cast(), bytes) };
}
}
)
}
core::ptr::swap_nonoverlapping::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::swap_nonoverlapping_bytesunsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, bytes: NonZero<usize>) {
// Same as `swap_nonoverlapping::<[u8; N]>`.
unsafe fn swap_nonoverlapping_chunks<const N: usize>(
x: *mut MaybeUninit<[u8; N]>,
y: *mut MaybeUninit<[u8; N]>,
chunks: NonZero<usize>,
) {
let chunks = chunks.get();
for i in 0..chunks {
// SAFETY: i is in [0, chunks) so the adds and dereferences are in-bounds.
unsafe { swap_chunk(&mut *x.add(i), &mut *y.add(i)) };
}
}
// Same as `swap_nonoverlapping_bytes`, but accepts at most 1+2+4=7 bytes
#[inline]
unsafe fn swap_nonoverlapping_short(x: *mut u8, y: *mut u8, bytes: NonZero<usize>) {
// Tail handling for auto-vectorized code sometimes has element-at-a-time behaviour,
// see <https://github.com/rust-lang/rust/issues/134946>.
// By swapping as different sizes, rather than as a loop over bytes,
// we make sure not to end up with, say, seven byte-at-a-time copies.
let bytes = bytes.get();
let mut i = 0;
macro_rules! swap_prefix {
($($n:literal)+) => {$(
if (bytes & $n) != 0 {
// SAFETY: `i` can only have the same bits set as those in bytes,
// so these `add`s are in-bounds of `bytes`. But the bit for
// `$n` hasn't been set yet, so the `$n` bytes that `swap_chunk`
// will read and write are within the usable range.
unsafe { swap_chunk::<$n>(&mut*x.add(i).cast(), &mut*y.add(i).cast()) };
i |= $n;
}
)+};
}
swap_prefix!(4 2 1);
#[cfg(not(feature = "ferrocene_certified"))]
debug_assert_eq!(i, bytes);
#[cfg(feature = "ferrocene_certified")]
debug_assert!(i == bytes);
}
const CHUNK_SIZE: usize = size_of::<*const ()>();
let bytes = bytes.get();
let chunks = bytes / CHUNK_SIZE;
let tail = bytes % CHUNK_SIZE;
if let Some(chunks) = NonZero::new(chunks) {
// SAFETY: this is bytes/CHUNK_SIZE*CHUNK_SIZE bytes, which is <= bytes,
// so it's within the range of our non-overlapping bytes.
unsafe { swap_nonoverlapping_chunks::<CHUNK_SIZE>(x.cast(), y.cast(), chunks) };
}
if let Some(tail) = NonZero::new(tail) {
const { assert!(CHUNK_SIZE <= 8) };
let delta = chunks * CHUNK_SIZE;
// SAFETY: the tail length is below `CHUNK_SIZE` because of the remainder,
// and CHUNK_SIZE is at most 8 by the const assert, so tail <= 7
unsafe { swap_nonoverlapping_short(x.add(delta), y.add(delta), tail) };
}
}
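A sketch of the chunk/tail arithmetic above; the concrete numbers assume an 8-byte pointer (`CHUNK_SIZE` is `size_of::<*const ()>()`):

    fn main() {
        const CHUNK_SIZE: usize = std::mem::size_of::<*const ()>();
        let bytes = 29usize;
        let chunks = bytes / CHUNK_SIZE; // whole pointer-sized swaps
        let tail = bytes % CHUNK_SIZE; // at most CHUNK_SIZE - 1 bytes
        assert_eq!(chunks * CHUNK_SIZE + tail, bytes);
        if CHUNK_SIZE == 8 {
            // 29 = 3 * 8 + 5; the 5-byte tail is handled as a 4-byte
            // then a 1-byte swap by `swap_nonoverlapping_short`.
            assert_eq!((chunks, tail), (3, 5));
        }
    }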
core::ptr::swap_nonoverlapping_bytes::swap_nonoverlapping_chunks unsafe fn swap_nonoverlapping_chunks<const N: usize>(
x: *mut MaybeUninit<[u8; N]>,
y: *mut MaybeUninit<[u8; N]>,
chunks: NonZero<usize>,
) {
let chunks = chunks.get();
for i in 0..chunks {
// SAFETY: i is in [0, chunks) so the adds and dereferences are in-bounds.
unsafe { swap_chunk(&mut *x.add(i), &mut *y.add(i)) };
}
}
core::ptr::swap_nonoverlapping_bytes::swap_nonoverlapping_short unsafe fn swap_nonoverlapping_short(x: *mut u8, y: *mut u8, bytes: NonZero<usize>) {
// Tail handling for auto-vectorized code sometimes has element-at-a-time behaviour,
// see <https://github.com/rust-lang/rust/issues/134946>.
// By swapping as different sizes, rather than as a loop over bytes,
// we make sure not to end up with, say, seven byte-at-a-time copies.
let bytes = bytes.get();
let mut i = 0;
macro_rules! swap_prefix {
($($n:literal)+) => {$(
if (bytes & $n) != 0 {
// SAFETY: `i` can only have the same bits set as those in bytes,
// so these `add`s are in-bounds of `bytes`. But the bit for
// `$n` hasn't been set yet, so the `$n` bytes that `swap_chunk`
// will read and write are within the usable range.
unsafe { swap_chunk::<$n>(&mut*x.add(i).cast(), &mut*y.add(i).cast()) };
i |= $n;
}
)+};
}
swap_prefix!(4 2 1);
#[cfg(not(feature = "ferrocene_certified"))]
debug_assert_eq!(i, bytes);
#[cfg(feature = "ferrocene_certified")]
debug_assert!(i == bytes);
}
core::ptr::without_provenancepub const fn without_provenance<T>(addr: usize) -> *const T {
without_provenance_mut(addr)
}
core::ptr::without_provenance_mutpub const fn without_provenance_mut<T>(addr: usize) -> *mut T {
// An int-to-pointer transmute currently has exactly the intended semantics: it creates a
// pointer without provenance. Note that this is *not* a stable guarantee about transmute
// semantics, it relies on sysroot crates having special status.
// SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that
// pointer).
unsafe { mem::transmute(addr) }
}
core::ptr::writepub const unsafe fn write<T>(dst: *mut T, src: T) {
// Semantically, it would be fine for this to be implemented as a
// `copy_nonoverlapping` and appropriate drop suppression of `src`.
// However, implementing via that currently produces more MIR than is ideal.
// Using an intrinsic keeps it down to just the simple `*dst = move src` in
// MIR (11 statements shorter, at the time of writing), and also allows
// `src` to stay an SSA value in codegen_ssa, rather than a memory one.
// SAFETY: the caller must guarantee that `dst` is valid for writes.
// `dst` cannot overlap `src` because the caller has mutable access
// to `dst` while `src` is owned by this function.
unsafe {
#[cfg(debug_assertions)] // Too expensive to always enable (for now?)
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write requires that the pointer argument is aligned and non-null",
(
addr: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
intrinsics::write_via_move(dst, src)
}
}
core::ptr::write_bytespub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
// SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write_bytes requires that the destination pointer is aligned and non-null",
(
addr: *const () = dst as *const (),
align: usize = align_of::<T>(),
zero_size: bool = T::IS_ZST || count == 0,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, zero_size)
);
crate::intrinsics::write_bytes(dst, val, count)
}
}
core::ptr::write_volatilepub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
// SAFETY: the caller must uphold the safety contract for `volatile_store`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write_volatile requires that the pointer argument is aligned",
(
addr: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
) => ub_checks::maybe_is_aligned(addr, align)
);
intrinsics::volatile_store(dst, src);
}
}
core::result::Result::<&T, E>::cloned pub fn cloned(self) -> Result<T, E>
where
T: Clone,
{
self.map(|t| t.clone())
}
core::result::Result::<&T, E>::copied pub const fn copied(self) -> Result<T, E>
where
T: Copy,
{
// FIXME(const-hack): this implementation, which sidesteps using `Result::map` since it's
// not const-ready yet, should be reverted when possible to avoid code repetition
match self {
Ok(&v) => Ok(v),
Err(e) => Err(e),
}
}
core::result::Result::<&mut T, E>::cloned pub fn cloned(self) -> Result<T, E>
where
T: Clone,
{
self.map(|t| t.clone())
}
core::result::Result::<&mut T, E>::copied pub const fn copied(self) -> Result<T, E>
where
T: Copy,
{
// FIXME(const-hack): this implementation, which sidesteps using `Result::map` since it's
// not const-ready yet, should be reverted when possible to avoid code repetition
match self {
Ok(&mut v) => Ok(v),
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::and pub const fn and<U>(self, res: Result<U, E>) -> Result<U, E>
where
T: [const] Destruct,
E: [const] Destruct,
U: [const] Destruct,
{
match self {
Ok(_) => res,
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::and_then pub const fn and_then<U, F>(self, op: F) -> Result<U, E>
where
F: [const] FnOnce(T) -> Result<U, E> + [const] Destruct,
{
match self {
Ok(t) => op(t),
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::as_deref pub const fn as_deref(&self) -> Result<&T::Target, &E>
where
T: [const] Deref,
{
self.as_ref().map(Deref::deref)
}
core::result::Result::<T, E>::as_deref_mut pub const fn as_deref_mut(&mut self) -> Result<&mut T::Target, &mut E>
where
T: [const] DerefMut,
{
self.as_mut().map(DerefMut::deref_mut)
}
core::result::Result::<T, E>::as_mut pub const fn as_mut(&mut self) -> Result<&mut T, &mut E> {
match *self {
Ok(ref mut x) => Ok(x),
Err(ref mut x) => Err(x),
}
}
core::result::Result::<T, E>::as_ref pub const fn as_ref(&self) -> Result<&T, &E> {
match *self {
Ok(ref x) => Ok(x),
Err(ref x) => Err(x),
}
}
core::result::Result::<T, E>::err pub const fn err(self) -> Option<E>
where
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(_) => None,
Err(x) => Some(x),
}
}
core::result::Result::<T, E>::inspect pub const fn inspect<F>(self, f: F) -> Self
where
F: [const] FnOnce(&T) + [const] Destruct,
{
if let Ok(ref t) = self {
f(t);
}
self
}
core::result::Result::<T, E>::inspect_err pub const fn inspect_err<F>(self, f: F) -> Self
where
F: [const] FnOnce(&E) + [const] Destruct,
{
if let Err(ref e) = self {
f(e);
}
self
}
core::result::Result::<T, E>::is_err pub const fn is_err(&self) -> bool {
!self.is_ok()
}
core::result::Result::<T, E>::is_err_and pub const fn is_err_and<F>(self, f: F) -> bool
where
F: [const] FnOnce(E) -> bool + [const] Destruct,
E: [const] Destruct,
T: [const] Destruct,
{
match self {
Ok(_) => false,
Err(e) => f(e),
}
}
core::result::Result::<T, E>::is_ok pub const fn is_ok(&self) -> bool {
matches!(*self, Ok(_))
}
core::result::Result::<T, E>::is_ok_and pub const fn is_ok_and<F>(self, f: F) -> bool
where
F: [const] FnOnce(T) -> bool + [const] Destruct,
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Err(_) => false,
Ok(x) => f(x),
}
}
core::result::Result::<T, E>::map pub const fn map<U, F>(self, op: F) -> Result<U, E>
where
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Ok(t) => Ok(op(t)),
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::map_err pub const fn map_err<F, O>(self, op: O) -> Result<T, F>
where
O: [const] FnOnce(E) -> F + [const] Destruct,
{
match self {
Ok(t) => Ok(t),
Err(e) => Err(op(e)),
}
}
core::result::Result::<T, E>::map_or pub const fn map_or<U, F>(self, default: U, f: F) -> U
where
F: [const] FnOnce(T) -> U + [const] Destruct,
T: [const] Destruct,
E: [const] Destruct,
U: [const] Destruct,
{
match self {
Ok(t) => f(t),
Err(_) => default,
}
}
core::result::Result::<T, E>::map_or_default pub const fn map_or_default<U, F>(self, f: F) -> U
where
F: [const] FnOnce(T) -> U + [const] Destruct,
U: [const] Default,
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(t) => f(t),
Err(_) => U::default(),
}
}
core::result::Result::<T, E>::map_or_else pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
where
D: [const] FnOnce(E) -> U + [const] Destruct,
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Ok(t) => f(t),
Err(e) => default(e),
}
}
core::result::Result::<T, E>::ok pub const fn ok(self) -> Option<T>
where
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(x) => Some(x),
Err(_) => None,
}
}
core::result::Result::<T, E>::or pub const fn or<F>(self, res: Result<T, F>) -> Result<T, F>
where
T: [const] Destruct,
E: [const] Destruct,
F: [const] Destruct,
{
match self {
Ok(v) => Ok(v),
Err(_) => res,
}
}
core::result::Result::<T, E>::or_else pub const fn or_else<F, O>(self, op: O) -> Result<T, F>
where
O: [const] FnOnce(E) -> Result<T, F> + [const] Destruct,
{
match self {
Ok(t) => Ok(t),
Err(e) => op(e),
}
}
core::result::Result::<T, E>::unwrap_or pub const fn unwrap_or(self, default: T) -> T
where
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(t) => t,
Err(_) => default,
}
}
core::result::Result::<T, E>::unwrap_or_default pub const fn unwrap_or_default(self) -> T
where
T: [const] Default + [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(x) => x,
Err(_) => Default::default(),
}
}
core::result::Result::<T, E>::unwrap_or_else pub const fn unwrap_or_else<F>(self, op: F) -> T
where
F: [const] FnOnce(E) -> T + [const] Destruct,
{
match self {
Ok(t) => t,
Err(e) => op(e),
}
}
core::result::Result::<core::option::Option<T>, E>::transpose pub const fn transpose(self) -> Option<Result<T, E>> {
match self {
Ok(Some(x)) => Some(Ok(x)),
Ok(None) => None,
Err(e) => Some(Err(e)),
}
}
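A short sketch of the three cases of `transpose`:

    fn main() {
        let a: Result<Option<i32>, &str> = Ok(Some(5));
        let b: Result<Option<i32>, &str> = Ok(None);
        let c: Result<Option<i32>, &str> = Err("boom");
        assert_eq!(a.transpose(), Some(Ok(5)));
        assert_eq!(b.transpose(), None); // the only case that drops the wrapper
        assert_eq!(c.transpose(), Some(Err("boom")));
    }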
core::slice::<impl [T]>::align_to pub unsafe fn align_to<U>(&self) -> (&[T], &[U], &[T]) {
// Note that most of this function will be constant-evaluated.
if U::IS_ZST || T::IS_ZST {
// Handle ZSTs specially, which is to say: don't handle them at all.
return (self, &[], &[]);
}
// First, find the point at which to split between the first and second slice. Easy with
// ptr.align_offset.
let ptr = self.as_ptr();
// SAFETY: See the `align_to_mut` method for the detailed safety comment.
let offset = unsafe { crate::ptr::align_offset(ptr, align_of::<U>()) };
if offset > self.len() {
(self, &[], &[])
} else {
let (left, rest) = self.split_at(offset);
let (us_len, ts_len) = rest.align_to_offsets::<U>();
// Inform Miri that we want to consider the "middle" pointer to be suitably aligned.
#[cfg(miri)]
crate::intrinsics::miri_promise_symbolic_alignment(
rest.as_ptr().cast(),
align_of::<U>(),
);
// SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
// since the caller guarantees that we can transmute `T` to `U` safely.
unsafe {
(
left,
from_raw_parts(rest.as_ptr() as *const U, us_len),
from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
)
}
}
}
core::slice::<impl [T]>::as_array pub const fn as_array<const N: usize>(&self) -> Option<&[T; N]> {
if self.len() == N {
let ptr = self.as_ptr().cast_array();
// SAFETY: The underlying array of a slice can be reinterpreted as an actual array `[T; N]` if `N` is not greater than the slice's length.
let me = unsafe { &*ptr };
Some(me)
} else {
None
}
}
core::slice::<impl [T]>::as_chunks pub const fn as_chunks<const N: usize>(&self) -> (&[[T; N]], &[T]) {
assert!(N != 0, "chunk size must be non-zero");
let len_rounded_down = self.len() / N * N;
// SAFETY: The rounded-down value is always the same or smaller than the
// original length, and thus must be in-bounds of the slice.
let (multiple_of_n, remainder) = unsafe { self.split_at_unchecked(len_rounded_down) };
// SAFETY: We already panicked for zero, and ensured by construction
// that the length of the subslice is a multiple of N.
let array_slice = unsafe { multiple_of_n.as_chunks_unchecked() };
(array_slice, remainder)
}
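A sketch of the rounding-down split (assuming a toolchain where `as_chunks` is stable):

    fn main() {
        let data = [1u8, 2, 3, 4, 5, 6, 7];
        let (chunks, remainder) = data.as_chunks::<3>();
        // 7 / 3 * 3 == 6 elements become whole chunks; one element is left.
        assert_eq!(chunks, &[[1, 2, 3], [4, 5, 6]]);
        assert_eq!(remainder, &[7]);
    }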
core::slice::<impl [T]>::as_chunks_unchecked pub const unsafe fn as_chunks_unchecked<const N: usize>(&self) -> &[[T; N]] {
assert_unsafe_precondition!(
check_language_ub,
"slice::as_chunks_unchecked requires `N != 0` and the slice to split exactly into `N`-element chunks",
(n: usize = N, len: usize = self.len()) => n != 0 && len.is_multiple_of(n),
);
// SAFETY: Caller must guarantee that `N` is nonzero and exactly divides the slice length
let new_len = unsafe { exact_div(self.len(), N) };
// SAFETY: We cast a slice of `new_len * N` elements into
// a slice of `new_len` many `N` elements chunks.
unsafe { from_raw_parts(self.as_ptr().cast(), new_len) }
}
core::slice::<impl [T]>::as_mut_array pub const fn as_mut_array<const N: usize>(&mut self) -> Option<&mut [T; N]> {
if self.len() == N {
let ptr = self.as_mut_ptr().cast_array();
// SAFETY: The underlying array of a slice can be reinterpreted as an actual array `[T; N]` if `N` is not greater than the slice's length.
let me = unsafe { &mut *ptr };
Some(me)
} else {
None
}
}
core::slice::<impl [T]>::as_mut_ptr pub const fn as_mut_ptr(&mut self) -> *mut T {
self as *mut [T] as *mut T
}
core::slice::<impl [T]>::as_ptr pub const fn as_ptr(&self) -> *const T {
self as *const [T] as *const T
}
core::slice::<impl [T]>::chunks pub const fn chunks(&self, chunk_size: usize) -> Chunks<'_, T> {
assert!(chunk_size != 0, "chunk size must be non-zero");
Chunks::new(self, chunk_size)
}
core::slice::<impl [T]>::chunks_exact pub const fn chunks_exact(&self, chunk_size: usize) -> ChunksExact<'_, T> {
assert!(chunk_size != 0, "chunk size must be non-zero");
ChunksExact::new(self, chunk_size)
}
core::slice::<impl [T]>::chunks_exact_mut pub const fn chunks_exact_mut(&mut self, chunk_size: usize) -> ChunksExactMut<'_, T> {
assert!(chunk_size != 0, "chunk size must be non-zero");
ChunksExactMut::new(self, chunk_size)
}
core::slice::<impl [T]>::chunks_mut pub const fn chunks_mut(&mut self, chunk_size: usize) -> ChunksMut<'_, T> {
assert!(chunk_size != 0, "chunk size must be non-zero");
ChunksMut::new(self, chunk_size)
}
core::slice::<impl [T]>::copy_from_slice pub const fn copy_from_slice(&mut self, src: &[T])
where
T: Copy,
{
// SAFETY: `T` implements `Copy`.
unsafe { copy_from_slice_impl(self, src) }
}
core::slice::<impl [T]>::fill pub fn fill(&mut self, value: T)
where
T: Clone,
{
specialize::SpecFill::spec_fill(self, value);
}
core::slice::<impl [T]>::first pub const fn first(&self) -> Option<&T> {
if let [first, ..] = self { Some(first) } else { None }
}
core::slice::<impl [T]>::first_chunk pub const fn first_chunk<const N: usize>(&self) -> Option<&[T; N]> {
if self.len() < N {
None
} else {
// SAFETY: We explicitly check for the correct number of elements,
// and do not let the reference outlive the slice.
Some(unsafe { &*(self.as_ptr().cast_array()) })
}
}
core::slice::<impl [T]>::first_chunk_mut pub const fn first_chunk_mut<const N: usize>(&mut self) -> Option<&mut [T; N]> {
if self.len() < N {
None
} else {
// SAFETY: We explicitly check for the correct number of elements,
// do not let the reference outlive the slice,
// and require exclusive access to the entire slice to mutate the chunk.
Some(unsafe { &mut *(self.as_mut_ptr().cast_array()) })
}
}
core::slice::<impl [T]>::first_mut pub const fn first_mut(&mut self) -> Option<&mut T> {
if let [first, ..] = self { Some(first) } else { None }
}
core::slice::<impl [T]>::get pub const fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: [const] SliceIndex<Self>,
{
index.get(self)
}
core::slice::<impl [T]>::get_mut pub const fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: [const] SliceIndex<Self>,
{
index.get_mut(self)
}
core::slice::<impl [T]>::get_unchecked pub const unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
I: [const] SliceIndex<Self>,
{
// SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
// the slice is dereferenceable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &*index.get_unchecked(self) }
}
core::slice::<impl [T]>::get_unchecked_mut pub const unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
I: [const] SliceIndex<Self>,
{
// SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
// the slice is dereferenceable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &mut *index.get_unchecked_mut(self) }
}
core::slice::<impl [T]>::is_empty pub const fn is_empty(&self) -> bool {
self.len() == 0
}
core::slice::<impl [T]>::iter pub const fn iter(&self) -> Iter<'_, T> {
Iter::new(self)
}
core::slice::<impl [T]>::iter_mut pub const fn iter_mut(&mut self) -> IterMut<'_, T> {
IterMut::new(self)
}
core::slice::<impl [T]>::last pub const fn last(&self) -> Option<&T> {
if let [.., last] = self { Some(last) } else { None }
}
core::slice::<impl [T]>::last_mut pub const fn last_mut(&mut self) -> Option<&mut T> {
if let [.., last] = self { Some(last) } else { None }
}
core::slice::<impl [T]>::rotate_left pub const fn rotate_left(&mut self, mid: usize) {
assert!(mid <= self.len());
let k = self.len() - mid;
let p = self.as_mut_ptr();
// SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
// valid for reading and writing, as required by `ptr_rotate`.
unsafe {
rotate::ptr_rotate(mid, p.add(mid), k);
}
}
core::slice::<impl [T]>::rotate_right pub const fn rotate_right(&mut self, k: usize) {
assert!(k <= self.len());
let mid = self.len() - k;
let p = self.as_mut_ptr();
// SAFETY: The range `[p.add(mid) - mid, p.add(mid) + k)` is trivially
// valid for reading and writing, as required by `ptr_rotate`.
unsafe {
rotate::ptr_rotate(mid, p.add(mid), k);
}
}
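A behavioural sketch: `rotate_right(k)` computes `mid = len - k` and reuses the same `ptr_rotate`, so it is `rotate_left(len - k)`:

    fn main() {
        let mut v = ['a', 'b', 'c', 'd', 'e'];
        v.rotate_left(2);
        assert_eq!(v, ['c', 'd', 'e', 'a', 'b']);
        v.rotate_right(2); // equivalent to rotate_left(5 - 2), undoing the above
        assert_eq!(v, ['a', 'b', 'c', 'd', 'e']);
    }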
core::slice::<impl [T]>::split_at_checked pub const fn split_at_checked(&self, mid: usize) -> Option<(&[T], &[T])> {
if mid <= self.len() {
// SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
// fulfills the requirements of `split_at_unchecked`.
Some(unsafe { self.split_at_unchecked(mid) })
} else {
None
}
}
core::slice::<impl [T]>::split_at_mut_checked pub const fn split_at_mut_checked(&mut self, mid: usize) -> Option<(&mut [T], &mut [T])> {
if mid <= self.len() {
// SAFETY: `[ptr; mid]` and `[mid; len]` are inside `self`, which
// fulfills the requirements of `split_at_unchecked`.
Some(unsafe { self.split_at_mut_unchecked(mid) })
} else {
None
}
}
core::slice::<impl [T]>::split_at_mut_unchecked pub const unsafe fn split_at_mut_unchecked(&mut self, mid: usize) -> (&mut [T], &mut [T]) {
let len = self.len();
let ptr = self.as_mut_ptr();
assert_unsafe_precondition!(
check_library_ub,
"slice::split_at_mut_unchecked requires the index to be within the slice",
(mid: usize = mid, len: usize = len) => mid <= len,
);
// SAFETY: Caller has to check that `0 <= mid <= self.len()`.
//
// `[ptr; mid]` and `[mid; len]` are not overlapping, so returning a mutable reference
// is fine.
unsafe {
(
from_raw_parts_mut(ptr, mid),
from_raw_parts_mut(ptr.add(mid), unchecked_sub(len, mid)),
)
}
}
core::slice::<impl [T]>::split_at_unchecked pub const unsafe fn split_at_unchecked(&self, mid: usize) -> (&[T], &[T]) {
// FIXME(const-hack): the const function `from_raw_parts` is used to make this
// function const; previously the implementation used
// `(self.get_unchecked(..mid), self.get_unchecked(mid..))`
let len = self.len();
let ptr = self.as_ptr();
assert_unsafe_precondition!(
check_library_ub,
"slice::split_at_unchecked requires the index to be within the slice",
(mid: usize = mid, len: usize = len) => mid <= len,
);
// SAFETY: Caller has to check that `0 <= mid <= self.len()`
unsafe { (from_raw_parts(ptr, mid), from_raw_parts(ptr.add(mid), unchecked_sub(len, mid))) }
}
core::slice::<impl [T]>::split_first pub const fn split_first(&self) -> Option<(&T, &[T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
core::slice::<impl [T]>::split_first_mut pub const fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
core::slice::<impl [T]>::split_last pub const fn split_last(&self) -> Option<(&T, &[T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
core::slice::<impl [T]>::split_last_mut pub const fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
core::slice::<impl [T]>::starts_with pub fn starts_with(&self, needle: &[T]) -> bool
where
T: PartialEq,
{
let n = needle.len();
self.len() >= n && needle == &self[..n]
}
core::slice::<impl [T]>::swap pub const fn swap(&mut self, a: usize, b: usize) {
// FIXME: use swap_unchecked here (https://github.com/rust-lang/rust/pull/88540#issuecomment-944344343)
// Can't take two mutable loans from one slice, so instead use raw pointers.
let pa = &raw mut self[a];
let pb = &raw mut self[b];
// SAFETY: `pa` and `pb` have been created from safe mutable references and refer
// to elements in the slice and therefore are guaranteed to be valid and aligned.
// Note that accessing the elements behind `a` and `b` is checked and will
// panic when out of bounds.
unsafe {
ptr::swap(pa, pb);
}
}
core::slice::<impl [T]>::windows pub const fn windows(&self, size: usize) -> Windows<'_, T> {
let size = NonZero::new(size).expect("window size must be non-zero");
Windows::new(self, size)
}
core::slice::cmp::<impl core::cmp::PartialEq<[U]> for [T]>::eq fn eq(&self, other: &[U]) -> bool {
SlicePartialEq::equal(self, other)
}
core::slice::cmp::<impl core::cmp::PartialEq<[U]> for [T]>::ne fn ne(&self, other: &[U]) -> bool {
SlicePartialEq::not_equal(self, other)
}
core::slice::cmp::SlicePartialEq::not_equal fn not_equal(&self, other: &[B]) -> bool {
!self.equal(other)
}
core::slice::copy_from_slice_implconst unsafe fn copy_from_slice_impl<T: Clone>(dest: &mut [T], src: &[T]) {
// The panic code path was put into a cold function to not bloat the
// call site.
#[cfg_attr(not(panic = "immediate-abort"), inline(never), cold)]
#[cfg_attr(panic = "immediate-abort", inline)]
#[track_caller]
const fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
const_panic!(
"copy_from_slice: source slice length does not match destination slice length",
"copy_from_slice: source slice length ({src_len}) does not match destination slice length ({dst_len})",
src_len: usize,
dst_len: usize,
)
}
if dest.len() != src.len() {
len_mismatch_fail(dest.len(), src.len());
}
// SAFETY: `dest` is valid for `dest.len()` elements by definition, and `src` was
// checked to have the same length. The slices cannot overlap because
// mutable references are exclusive.
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), dest.as_mut_ptr(), dest.len());
}
}
core::slice::copy_from_slice_impl::len_mismatch_fail const fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
const_panic!(
"copy_from_slice: source slice length does not match destination slice length",
"copy_from_slice: source slice length ({src_len}) does not match destination slice length ({dst_len})",
src_len: usize,
dst_len: usize,
)
}
core::slice::copy_from_slice_impl::len_mismatch_fail::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
#[noinline]
if const #[track_caller] #[inline] { // Inline this, to prevent codegen
$crate::panic!($const_msg)
} else #[track_caller] { // Do not inline this, it makes perf worse
$crate::panic!($runtime_msg)
}
)
}
core::slice::copy_from_slice_impl::len_mismatch_fail::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::index::<impl core::ops::index::Index<I> for [T]>::index fn index(&self, index: I) -> &I::Output {
index.index(self)
}
core::slice::index::<impl core::ops::index::IndexMut<I> for [T]>::index_mut fn index_mut(&mut self, index: I) -> &mut I::Output {
index.index_mut(self)
}
core::slice::index::get_offset_len_mut_noubcheckconst unsafe fn get_offset_len_mut_noubcheck<T>(
ptr: *mut [T],
offset: usize,
len: usize,
) -> *mut [T] {
let ptr = ptr as *mut T;
// SAFETY: The caller already checked these preconditions
let ptr = unsafe { crate::intrinsics::offset(ptr, offset) };
crate::intrinsics::aggregate_raw_ptr(ptr, len)
}
core::slice::index::get_offset_len_noubcheckconst unsafe fn get_offset_len_noubcheck<T>(
ptr: *const [T],
offset: usize,
len: usize,
) -> *const [T] {
let ptr = ptr as *const T;
// SAFETY: The caller already checked these preconditions
let ptr = unsafe { crate::intrinsics::offset(ptr, offset) };
crate::intrinsics::aggregate_raw_ptr(ptr, len)
}
core::slice::index::into_rangepub(crate) const fn into_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> Option<ops::Range<usize>> {
use ops::Bound;
let start = match start {
Bound::Included(start) => start,
Bound::Excluded(start) => start.checked_add(1)?,
Bound::Unbounded => 0,
};
let end = match end {
Bound::Included(end) => end.checked_add(1)?,
Bound::Excluded(end) => end,
Bound::Unbounded => len,
};
// Don't bother with checking `start < end` and `end <= len`
// since these checks are handled by `Range` impls
Some(start..end)
}
core::slice::index::into_range_uncheckedpub(crate) const fn into_range_unchecked(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
use ops::Bound;
let start = match start {
Bound::Included(i) => i,
Bound::Excluded(i) => i + 1,
Bound::Unbounded => 0,
};
let end = match end {
Bound::Included(i) => i + 1,
Bound::Excluded(i) => i,
Bound::Unbounded => len,
};
start..end
}
core::slice::index::slice_index_failconst fn slice_index_fail(start: usize, end: usize, len: usize) -> ! {
if start > len {
const_panic!(
"slice start index is out of range for slice",
"range start index {start} out of range for slice of length {len}",
start: usize,
len: usize,
)
}
if end > len {
const_panic!(
"slice end index is out of range for slice",
"range end index {end} out of range for slice of length {len}",
end: usize,
len: usize,
)
}
if start > end {
const_panic!(
"slice index start is larger than end",
"slice index starts at {start} but ends at {end}",
start: usize,
end: usize,
)
}
// Only reachable if the range was a `RangeInclusive` or a
// `RangeToInclusive`, with `end == len`.
const_panic!(
"slice end index is out of range for slice",
"range end index {end} out of range for slice of length {len}",
end: usize,
len: usize,
)
}
core::slice::index::slice_index_fail::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
#[noinline]
if const #[track_caller] #[inline] { // Inline this, to prevent codegen
$crate::panic!($const_msg)
} else #[track_caller] { // Do not inline this, it makes perf worse
$crate::panic!($runtime_msg)
}
)
}
core::slice::index::slice_index_fail::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::iter::<impl core::iter::traits::collect::IntoIterator for &'a [T]>::into_iter fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
core::slice::iter::<impl core::iter::traits::collect::IntoIterator for &'a mut [T]>::into_iter fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
core::slice::iter::Chunks::<'a, T>::new pub(super) const fn new(slice: &'a [T], size: usize) -> Self {
Self { v: slice, chunk_size: size }
}
core::slice::iter::ChunksExact::<'a, T>::new pub(super) const fn new(slice: &'a [T], chunk_size: usize) -> Self {
let rem = slice.len() % chunk_size;
let fst_len = slice.len() - rem;
// SAFETY: 0 <= fst_len <= slice.len() by construction above
let (fst, snd) = unsafe { slice.split_at_unchecked(fst_len) };
Self { v: fst, rem: snd, chunk_size }
}
core::slice::iter::ChunksExact::<'a, T>::remainder pub fn remainder(&self) -> &'a [T] {
self.rem
}
core::slice::iter::ChunksExactMut::<'a, T>::into_remainder pub fn into_remainder(self) -> &'a mut [T] {
self.rem
}
core::slice::iter::ChunksExactMut::<'a, T>::new pub(super) const fn new(slice: &'a mut [T], chunk_size: usize) -> Self {
let rem = slice.len() % chunk_size;
let fst_len = slice.len() - rem;
// SAFETY: 0 <= fst_len <= slice.len() by construction above
let (fst, snd) = unsafe { slice.split_at_mut_unchecked(fst_len) };
Self { v: fst, rem: snd, chunk_size, _marker: PhantomData }
}
core::slice::iter::ChunksMut::<'a, T>::new pub(super) const fn new(slice: &'a mut [T], size: usize) -> Self {
Self { v: slice, chunk_size: size, _marker: PhantomData }
}
core::slice::iter::Iter::<'a, T>::as_slice pub fn as_slice(&self) -> &'a [T] {
self.make_slice()
}
core::slice::iter::Iter::<'a, T>::make_slice fn make_slice(&self) -> &'a [T] {
// SAFETY: the iterator was created from a slice with pointer
// `self.ptr` and length `len!(self)`. This guarantees that all
// the prerequisites for `from_raw_parts` are fulfilled.
unsafe { from_raw_parts(self.ptr.as_ptr(), len!(self)) }
}
core::slice::iter::Iter::<'a, T>::new pub(super) const fn new(slice: &'a [T]) -> Self {
let len = slice.len();
let ptr: NonNull<T> = NonNull::from_ref(slice).cast();
// SAFETY: Similar to `IterMut::new`.
unsafe {
let end_or_len =
if T::IS_ZST { without_provenance(len) } else { ptr.as_ptr().add(len) };
Self { ptr, end_or_len, _marker: PhantomData }
}
}
core::slice::iter::Iter::<'a, T>::post_inc_start unsafe fn post_inc_start(&mut self, offset: usize) -> NonNull<T> {
let old = self.ptr;
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// so this new pointer is inside `self` and thus guaranteed to be non-null.
unsafe {
if_zst!(mut self,
// Using the intrinsic directly avoids emitting a UbCheck
len => *len = crate::intrinsics::unchecked_sub(*len, offset),
_end => self.ptr = self.ptr.add(offset),
);
}
old
}
core::slice::iter::IterMut::<'a, T>::as_mut_slice pub fn as_mut_slice(&mut self) -> &mut [T] {
// SAFETY: the iterator was created from a mutable slice with pointer
// `self.ptr` and length `len!(self)`. This guarantees that all the prerequisites
// for `from_raw_parts_mut` are fulfilled.
unsafe { from_raw_parts_mut(self.ptr.as_ptr(), len!(self)) }
}
core::slice::iter::IterMut::<'a, T>::make_slice fn make_slice(&self) -> &'a [T] {
// SAFETY: the iterator was created from a slice with pointer
// `self.ptr` and length `len!(self)`. This guarantees that all
// the prerequisites for `from_raw_parts` are fulfilled.
unsafe { from_raw_parts(self.ptr.as_ptr(), len!(self)) }
}
core::slice::iter::IterMut::<'a, T>::new pub(super) const fn new(slice: &'a mut [T]) -> Self {
let len = slice.len();
let ptr: NonNull<T> = NonNull::from_mut(slice).cast();
// SAFETY: There are several things here:
//
// `ptr` has been obtained from `slice`, which is a valid mutable
// reference, thus it is non-null and safe to use and pass to
// `NonNull::new_unchecked`.
//
// Adding `slice.len()` to the starting pointer gives a pointer
// at the end of `slice`. `end` will never be dereferenced, only checked
// for direct pointer equality with `ptr` to check if the iterator is
// done.
//
// In the case of a ZST, the end pointer is just the length. It's never
// used as a pointer at all, and thus it's fine to have no provenance.
//
// See the `next_unchecked!` and `is_empty!` macros as well as the
// `post_inc_start` method for more information.
unsafe {
let end_or_len =
if T::IS_ZST { without_provenance_mut(len) } else { ptr.as_ptr().add(len) };
Self { ptr, end_or_len, _marker: PhantomData }
}
}
core::slice::iter::IterMut::<'a, T>::post_inc_start unsafe fn post_inc_start(&mut self, offset: usize) -> NonNull<T> {
let old = self.ptr;
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// so this new pointer is inside `self` and thus guaranteed to be non-null.
unsafe {
if_zst!(mut self,
// Using the intrinsic directly avoids emitting a UbCheck
len => *len = crate::intrinsics::unchecked_sub(*len, offset),
_end => self.ptr = self.ptr.add(offset),
);
}
old
}
core::slice::iter::Windows::<'a, T>::new pub(super) const fn new(slice: &'a [T], size: NonZero<usize>) -> Self {
Self { v: slice, size }
}
core::slice::raw::from_mutpub const fn from_mut<T>(s: &mut T) -> &mut [T] {
array::from_mut(s)
}
core::slice::raw::from_raw_partspub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"slice::from_raw_parts requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`",
(
data: *mut () = data as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
len: usize = len,
) =>
ub_checks::maybe_is_aligned_and_not_null(data, align, false)
&& ub_checks::is_valid_allocation_size(size, len)
);
&*ptr::slice_from_raw_parts(data, len)
}
}
core::slice::raw::from_raw_parts_mutpub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"slice::from_raw_parts_mut requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`",
(
data: *mut () = data as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
len: usize = len,
) =>
ub_checks::maybe_is_aligned_and_not_null(data, align, false)
&& ub_checks::is_valid_allocation_size(size, len)
);
&mut *ptr::slice_from_raw_parts_mut(data, len)
}
}
core::slice::raw::from_refpub const fn from_ref<T>(s: &T) -> &[T] {
array::from_ref(s)
}
core::slice::rotate::const_minconst fn const_min(left: usize, right: usize) -> usize {
if right < left { right } else { left }
}
core::slice::rotate::ptr_rotate_gcdconst unsafe fn ptr_rotate_gcd<T>(left: usize, mid: *mut T, right: usize) {
// Algorithm 2
// Microbenchmarks indicate that the average performance for random shifts is better all
// the way until about `left + right == 32`, but the worst case performance breaks even
// around 16. 24 was chosen as middle ground. If the size of `T` is larger than 4
// `usize`s, this algorithm also outperforms other algorithms.
// SAFETY: callers must ensure `mid - left` is valid for reading and writing.
let x = unsafe { mid.sub(left) };
// beginning of first round
// SAFETY: see previous comment.
let mut tmp: T = unsafe { x.read() };
let mut i = right;
// `gcd` can be found beforehand by calculating `gcd(left + right, right)`,
// but it is faster to do one loop which calculates the gcd as a side effect,
// then do the rest of the chunk
let mut gcd = right;
// benchmarks reveal that it is faster to swap temporaries all the way through instead
// of reading one temporary once, copying backwards, and then writing that temporary at
// the very end. This is possibly due to the fact that swapping or replacing temporaries
// uses only one memory address in the loop instead of needing to manage two.
loop {
// [long-safety-expl]
// SAFETY: callers must ensure `[mid-left, mid+right)` is valid for reading and
// writing.
//
// - `i` starts at `right` so `mid-left <= x+i = x+right = mid-left+right < mid+right`
// - `i <= left+right-1` is always true
// - if `i < left`, `right` is added so `i < left+right` and on the next
// iteration `left` is removed from `i` so it doesn't go further
// - if `i >= left`, `left` is removed immediately and so it doesn't go further.
// - overflows cannot happen for `i` since the function's safety contract asks for
// `mid+right-1 = x+left+right-1` to be valid for writing
// - underflows cannot happen because `i` must be greater than or equal to `left` for
// a subtraction of `left` to happen.
//
// So `x+i` is valid for reading and writing if the caller respected the contract
tmp = unsafe { x.add(i).replace(tmp) };
// instead of incrementing `i` and then checking if it is outside the bounds, we
// check if `i` will go outside the bounds on the next increment. This prevents
// any wrapping of pointers or `usize`.
if i >= left {
i -= left;
if i == 0 {
// end of first round
// SAFETY: tmp has been read from a valid source and x is valid for writing
// according to the caller.
unsafe { x.write(tmp) };
break;
}
// this conditional must be here if `left + right >= 15`
if i < gcd {
gcd = i;
}
} else {
i += right;
}
}
// finish the chunk with more rounds
// FIXME(const-hack): Use `for start in 1..gcd` when available in const
let mut start = 1;
while start < gcd {
// SAFETY: `gcd` is at most equal to `right` so all values in `1..gcd` are valid for
// reading and writing as per the function's safety contract, see [long-safety-expl]
// above
tmp = unsafe { x.add(start).read() };
// [safety-expl-addition]
//
// Here `start < gcd` and `gcd = gcd(left+right, right) = gcd(left, right) <= left`
// (as `left > 0`), so `start < left` and thus `i = start + right < left+right`,
// so `x+i = mid-left+i` is always valid for reading and writing
// according to the function's safety contract.
i = start + right;
loop {
// SAFETY: see [long-safety-expl] and [safety-expl-addition]
tmp = unsafe { x.add(i).replace(tmp) };
if i >= left {
i -= left;
if i == start {
// SAFETY: see [long-safety-expl] and [safety-expl-addition]
unsafe { x.add(start).write(tmp) };
break;
}
} else {
i += right;
}
}
start += 1;
}
}
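An editor's illustration of the cycle structure the gcd algorithm exploits: rotating n elements by a shift decomposes the permutation into gcd(n, shift) cycles, each handled once by one of the `start < gcd` rounds above:

    fn main() {
        // 6 elements rotated by 4 (left = 2, right = 4).
        let (n, shift) = (6usize, 4usize);
        let mut visited = [false; 6];
        let mut cycles = 0;
        for start in 0..n {
            if visited[start] {
                continue;
            }
            cycles += 1;
            // Follow the cycle i -> (i + shift) % n until it closes.
            let mut i = start;
            while !visited[i] {
                visited[i] = true;
                i = (i + shift) % n;
            }
        }
        assert_eq!(cycles, 2); // == gcd(6, 4)
    }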
core::slice::rotate::ptr_rotate_memmoveconst unsafe fn ptr_rotate_memmove<T>(left: usize, mid: *mut T, right: usize) {
// The `[T; 0]` here is to ensure this is appropriately aligned for T
let mut rawarray = MaybeUninit::<(BufType, [T; 0])>::uninit();
let buf = rawarray.as_mut_ptr() as *mut T;
// SAFETY: `mid-left <= mid-left+right < mid+right`
let dim = unsafe { mid.sub(left).add(right) };
if left <= right {
// SAFETY:
//
// 1) The `if` condition about the sizes ensures `[mid-left; left]` will fit in
// `buf` without overflow and `buf` was created just above and so cannot be
// overlapped with any value of `[mid-left; left]`
// 2) [mid-left, mid+right) are all valid for reading and writing and we don't care
// about overlaps here.
// 3) The `if` condition about `left <= right` ensures writing `left` elements to
// `dim = mid-left+right` is valid because:
// - `buf` is valid and `left` elements were written in it in 1)
// - `dim+left = mid-left+right+left = mid+right` and we write `[dim, dim+left)`
unsafe {
// 1)
ptr::copy_nonoverlapping(mid.sub(left), buf, left);
// 2)
ptr::copy(mid, mid.sub(left), right);
// 3)
ptr::copy_nonoverlapping(buf, dim, left);
}
} else {
// SAFETY: same reasoning as above but with `left` and `right` reversed
unsafe {
ptr::copy_nonoverlapping(mid, buf, right);
ptr::copy(mid.sub(left), dim, left);
ptr::copy_nonoverlapping(buf, mid.sub(left), right);
}
}
}
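// --- Illustrative example (not library source; `memmove_rotate_demo` is a hypothetical
// name). The net effect of `ptr_rotate_memmove` is ordinary rotation: the smaller side
// is buffered, the larger side is shifted with a single `memmove`-style copy, and the
// buffer is copied to the far end. The safe `rotate_left` shows the same result.
fn memmove_rotate_demo() {
    let mut v = ['a', 'b', 'c', 'd', 'e'];
    // left = 2 elements before `mid`, right = 3 elements after it.
    v.rotate_left(2);
    assert_eq!(v, ['c', 'd', 'e', 'a', 'b']);
}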
core::slice::rotate::ptr_rotate_swap const unsafe fn ptr_rotate_swap<T>(mut left: usize, mut mid: *mut T, mut right: usize) {
loop {
if left >= right {
// Algorithm 3
// There is an alternate way of swapping that involves finding where the last swap
// of this algorithm would be, and swapping using that last chunk instead of swapping
// adjacent chunks like this algorithm is doing, but this way is still faster.
loop {
// SAFETY:
// `left >= right` so `[mid-right, mid+right)` is valid for reading and writing
// Subtracting `right` from `mid` each turn is counterbalanced by the addition and
// check after it.
unsafe {
ptr::swap_nonoverlapping(mid.sub(right), mid, right);
mid = mid.sub(right);
}
left -= right;
if left < right {
break;
}
}
} else {
// Algorithm 3, `left < right`
loop {
// SAFETY: `[mid-left, mid+left)` is valid for reading and writing because
// `left < right` so `mid+left < mid+right`.
// Adding `left` to `mid` each turn is counterbalanced by the subtraction and check
// after it.
unsafe {
ptr::swap_nonoverlapping(mid.sub(left), mid, left);
mid = mid.add(left);
}
right -= left;
if right < left {
break;
}
}
}
if (right == 0) || (left == 0) {
return;
}
}
}
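// --- Illustrative sketch (not library source; the names are hypothetical). A safe
// mirror of `ptr_rotate_swap`: repeatedly swap the smaller block past `mid`, shrinking
// the larger side, until one side reaches zero. Assumes `left > 0`, `right > 0`, and a
// slice of exactly `left + right` elements, as the pointer version's callers ensure.
fn swap_rotate_demo(v: &mut [u32], mut left: usize, mut right: usize) {
    let mut mid = left; // index of the split point between the two blocks
    loop {
        if left >= right {
            loop {
                // swap the `right` elements just before `mid` with the `right` after it
                for k in 0..right {
                    v.swap(mid - right + k, mid + k);
                }
                mid -= right;
                left -= right;
                if left < right {
                    break;
                }
            }
        } else {
            loop {
                for k in 0..left {
                    v.swap(mid - left + k, mid + k);
                }
                mid += left;
                right -= left;
                if right < left {
                    break;
                }
            }
        }
        if right == 0 || left == 0 {
            return;
        }
    }
}
// Usage: rotating `[1, 2, 3, 4, 5]` with `left = 3`, `right = 2` matches `rotate_left(3)`.
fn swap_rotate_demo_usage() {
    let mut v = [1, 2, 3, 4, 5];
    swap_rotate_demo(&mut v, 3, 2);
    assert_eq!(v, [4, 5, 1, 2, 3]);
}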
core::str::<impl core::convert::AsRef<[u8]> for str>::as_ref fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
core::str::<impl core::default::Default for &str>::default fn default() -> Self {
""
}
core::str::<impl str>::as_bytes pub const fn as_bytes(&self) -> &[u8] {
// SAFETY: const sound because we transmute two types with the same layout
unsafe { mem::transmute(self) }
}
core::str::<impl str>::as_bytes_mut pub const unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
// SAFETY: the cast from `&str` to `&[u8]` is safe since `str`
// has the same layout as `&[u8]` (only std can make this guarantee).
// The pointer dereference is safe since it comes from a mutable reference which
// is guaranteed to be valid for writes.
unsafe { &mut *(self as *mut str as *mut [u8]) }
}
core::str::<impl str>::as_mut_ptr pub const fn as_mut_ptr(&mut self) -> *mut u8 {
self as *mut str as *mut u8
}
core::str::<impl str>::as_ptr pub const fn as_ptr(&self) -> *const u8 {
self as *const str as *const u8
}
core::str::<impl str>::as_str pub const fn as_str(&self) -> &str {
self
}
core::str::<impl str>::chars pub fn chars(&self) -> Chars<'_> {
Chars { iter: self.as_bytes().iter() }
}
core::str::<impl str>::is_char_boundary pub const fn is_char_boundary(&self, index: usize) -> bool {
// 0 is always ok.
// Test for 0 explicitly so that it can optimize out the check
// easily and skip reading string data for that case.
// Note that optimizing `self.get(..index)` relies on this.
if index == 0 {
return true;
}
if index >= self.len() {
// For `true` we have two options:
//
// - index == self.len()
// Empty strings are valid, so return true
// - index > self.len()
// In this case return false
//
// The check is placed exactly here, because it improves generated
// code on higher opt-levels. See PR #84751 for more details.
index == self.len()
} else {
self.as_bytes()[index].is_utf8_char_boundary()
}
}
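// --- Usage sketch (illustrative, not library source). Only the first byte of each
// encoded char is a boundary, and one-past-the-end is accepted.
fn char_boundary_demo() {
    let s = "héllo"; // 'é' occupies bytes 1 and 2
    assert!(s.is_char_boundary(0));
    assert!(s.is_char_boundary(1));
    assert!(!s.is_char_boundary(2)); // middle of 'é'
    assert!(s.is_char_boundary(s.len())); // index == len is allowed
    assert!(!s.is_char_boundary(s.len() + 1)); // out of range
}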
core::str::<impl str>::is_empty pub const fn is_empty(&self) -> bool {
self.len() == 0
}
core::str::<impl str>::len pub const fn len(&self) -> usize {
self.as_bytes().len()
}
core::str::<impl str>::parse pub fn parse<F: FromStr>(&self) -> Result<F, F::Err> {
FromStr::from_str(self)
}
core::str::<impl str>::starts_with pub fn starts_with<P: Pattern>(&self, pat: P) -> bool {
pat.is_prefix_of(self)
}
core::str::converts::from_utf8 pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
// FIXME(const-hack): This should use `?` again, once it's `const`
match run_utf8_validation(v) {
Ok(_) => {
// SAFETY: validation succeeded.
Ok(unsafe { from_utf8_unchecked(v) })
}
Err(err) => Err(err),
}
}
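// --- Usage sketch (illustrative, not library source). Validation runs once; on
// failure, `Utf8Error::valid_up_to` reports how many leading bytes were valid.
fn from_utf8_demo() {
    assert_eq!(core::str::from_utf8(&[0x68, 0x69]), Ok("hi"));
    // 0xFF can never occur in UTF-8, so validation fails at offset 0.
    assert_eq!(core::str::from_utf8(&[0xFF]).unwrap_err().valid_up_to(), 0);
}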
core::str::converts::from_utf8_unchecked pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
// SAFETY: the caller must guarantee that the bytes `v` are valid UTF-8.
// Also relies on `&str` and `&[u8]` having the same layout.
unsafe { mem::transmute(v) }
}
core::str::converts::from_utf8_unchecked_mut pub const unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
// SAFETY: the caller must guarantee that the bytes `v`
// are valid UTF-8, thus the cast to `*mut str` is safe.
// Also, the pointer dereference is safe because that pointer
// comes from a reference which is guaranteed to be valid for writes.
unsafe { &mut *(v as *mut [u8] as *mut str) }
}
core::str::error::Utf8Error::valid_up_to pub const fn valid_up_to(&self) -> usize {
self.valid_up_to
}
core::str::pattern::TwoWaySearcher::byteset_contains fn byteset_contains(&self, byte: u8) -> bool {
(self.byteset >> ((byte & 0x3f) as usize)) & 1 != 0
}
core::str::pattern::TwoWaySearcher::byteset_create fn byteset_create(bytes: &[u8]) -> u64 {
bytes.iter().fold(0, |a, &b| (1 << (b & 0x3f)) | a)
}
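// --- Illustrative note (not library source). The "byteset" is a 64-bit approximate
// set: each byte maps to one of 64 bits via its low 6 bits, so lookups can report
// false positives but never false negatives, which is all the skip heuristic needs.
fn byteset_demo() {
    let set: u64 = b"ab".iter().fold(0, |a, &b| (1 << (b & 0x3f)) | a);
    let contains = |byte: u8| (set >> ((byte & 0x3f) as usize)) & 1 != 0;
    assert!(contains(b'a'));
    assert!(!contains(b'c'));
    assert!(contains(b'!')); // false positive: 0x21 & 0x3f == 0x61 & 0x3f
}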
core::str::pattern::TwoWaySearcher::maximal_suffix fn maximal_suffix(arr: &[u8], order_greater: bool) -> (usize, usize) {
let mut left = 0; // Corresponds to i in the paper
let mut right = 1; // Corresponds to j in the paper
let mut offset = 0; // Corresponds to k in the paper, but starting at 0
// to match 0-based indexing.
let mut period = 1; // Corresponds to p in the paper
while let Some(&a) = arr.get(right + offset) {
// `left` will be in bounds when `right` is.
let b = arr[left + offset];
if (a < b && !order_greater) || (a > b && order_greater) {
// Suffix is smaller, period is entire prefix so far.
right += offset + 1;
offset = 0;
period = right - left;
} else if a == b {
// Advance through repetition of the current period.
if offset + 1 == period {
right += offset + 1;
offset = 0;
} else {
offset += 1;
}
} else {
// Suffix is larger, start over from current location.
left = right;
right += 1;
offset = 0;
period = 1;
}
}
(left, period)
}
core::str::pattern::TwoWaySearcher::new fn new(needle: &[u8], end: usize) -> TwoWaySearcher {
let (crit_pos_false, period_false) = TwoWaySearcher::maximal_suffix(needle, false);
let (crit_pos_true, period_true) = TwoWaySearcher::maximal_suffix(needle, true);
let (crit_pos, period) = if crit_pos_false > crit_pos_true {
(crit_pos_false, period_false)
} else {
(crit_pos_true, period_true)
};
// A particularly readable explanation of what's going on here can be found
// in Crochemore and Rytter's book "Text Algorithms", ch 13. Specifically
// see the code for "Algorithm CP" on p. 323.
//
// What's going on is we have some critical factorization (u, v) of the
// needle, and we want to determine whether u is a suffix of
// &v[..period]. If it is, we use "Algorithm CP1". Otherwise we use
// "Algorithm CP2", which is optimized for when the period of the needle
// is large.
if needle[..crit_pos] == needle[period..period + crit_pos] {
// short period case -- the period is exact
// compute a separate critical factorization for the reversed needle
// x = u' v' where |v'| < period(x).
//
// This is sped up by the period being known already.
// Note that a case like x = "acba" may be factored exactly forwards
// (crit_pos = 1, period = 3) while being factored with approximate
// period in reverse (crit_pos = 2, period = 2). We use the given
// reverse factorization but keep the exact period.
let crit_pos_back = needle.len()
- cmp::max(
TwoWaySearcher::reverse_maximal_suffix(needle, period, false),
TwoWaySearcher::reverse_maximal_suffix(needle, period, true),
);
TwoWaySearcher {
crit_pos,
crit_pos_back,
period,
byteset: Self::byteset_create(&needle[..period]),
position: 0,
end,
memory: 0,
memory_back: needle.len(),
}
} else {
// long period case -- we have an approximation to the actual period,
// and don't use memoization.
//
// Approximate the period by lower bound max(|u|, |v|) + 1.
// The critical factorization is efficient to use for both forward and
// reverse search.
TwoWaySearcher {
crit_pos,
crit_pos_back: crit_pos,
period: cmp::max(crit_pos, needle.len() - crit_pos) + 1,
byteset: Self::byteset_create(needle),
position: 0,
end,
memory: usize::MAX, // Dummy value to signify that the period is long
memory_back: usize::MAX,
}
}
}
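// --- Usage sketch (illustrative, not library source). The Two-Way searcher that
// `new` constructs is what ultimately powers ordinary substring search on `str`.
fn two_way_demo() {
    let haystack = "abacabadabacaba";
    assert_eq!(haystack.find("abad"), Some(4));
    assert!(haystack.contains("acab"));
}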
core::str::traits::<impl core::ops::index::Index<I> for str>::index fn index(&self, index: I) -> &I::Output {
index.index(self)
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_unchecked unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
let slice = slice as *const [u8];
assert_unsafe_precondition!(
// We'd like to check that the bounds are on char boundaries,
// but there's not really a way to do so without reading
// behind the pointer, which has aliasing implications.
// It's also not possible to move this check up to
// `str::get_unchecked` without adding a special function
// to `SliceIndex` just for this.
check_library_ub,
"str::get_unchecked requires that the range is within the string slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len,
);
// SAFETY: the caller guarantees that `self` is in bounds of `slice`
// which satisfies all the conditions for `add`.
unsafe {
let new_len = unchecked_sub(self.end, self.start);
ptr::slice_from_raw_parts(slice.as_ptr().add(self.start), new_len) as *const str
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::Range<usize>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
let slice = slice as *mut [u8];
assert_unsafe_precondition!(
check_library_ub,
"str::get_unchecked_mut requires that the range is within the string slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len,
);
// SAFETY: see comments for `get_unchecked`.
unsafe {
let new_len = unchecked_sub(self.end, self.start);
ptr::slice_from_raw_parts_mut(slice.as_mut_ptr().add(self.start), new_len) as *mut str
}
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeFrom<usize>>::get_unchecked unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
let len = (slice as *const [u8]).len();
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (self.start..len).get_unchecked(slice) }
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::get_unchecked unsafe fn get_unchecked(self, slice: *const str) -> *const Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..self.end).get_unchecked(slice) }
}
core::str::traits::<impl core::slice::index::SliceIndex<str> for core::ops::range::RangeTo<usize>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut str) -> *mut Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..self.end).get_unchecked_mut(slice) }
}
core::str::validations::contains_nonascii const fn contains_nonascii(x: usize) -> bool {
(x & NONASCII_MASK) != 0
}
core::str::validations::next_code_point pub unsafe fn next_code_point<'a, I: Iterator<Item = &'a u8>>(bytes: &mut I) -> Option<u32> {
// Decode UTF-8
let x = *bytes.next()?;
if x < 128 {
return Some(x as u32);
}
// Multibyte case follows
// Decode from a byte combination out of: [[[x y] z] w]
// NOTE: Performance is sensitive to the exact formulation here
let init = utf8_first_byte(x, 2);
// SAFETY: `bytes` produces a UTF-8-like string,
// so the iterator must produce a value here.
let y = unsafe { *bytes.next().unwrap_unchecked() };
let mut ch = utf8_acc_cont_byte(init, y);
if x >= 0xE0 {
// [[x y z] w] case
// 5th bit in 0xE0 .. 0xEF is always clear, so `init` is still valid
// SAFETY: `bytes` produces a UTF-8-like string,
// so the iterator must produce a value here.
let z = unsafe { *bytes.next().unwrap_unchecked() };
let y_z = utf8_acc_cont_byte((y & CONT_MASK) as u32, z);
ch = init << 12 | y_z;
if x >= 0xF0 {
// [x y z w] case
// use only the lower 3 bits of `init`
// SAFETY: `bytes` produces a UTF-8-like string,
// so the iterator must produce a value here.
let w = unsafe { *bytes.next().unwrap_unchecked() };
ch = (init & 7) << 18 | utf8_acc_cont_byte(y_z, w);
}
}
Some(ch)
}
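// --- Worked example (illustrative, not library source): decoding '€' (U+20AC, bytes
// E2 82 AC) with the helpers above, where CONT_MASK is 0b0011_1111:
//   init = utf8_first_byte(0xE2, 2)                           = 0xE2 & 0x1F      = 0x02
//   y_z  = utf8_acc_cont_byte((0x82 & CONT_MASK) as u32, 0xAC) = 0x02 << 6 | 0x2C = 0xAC
//   ch   = init << 12 | y_z                                   = 0x2000 | 0xAC    = 0x20AC
fn next_code_point_demo() {
    assert_eq!("€".as_bytes(), &[0xE2u8, 0x82, 0xAC]);
    assert_eq!("€".chars().next().unwrap() as u32, 0x20AC);
}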
core::str::validations::run_utf8_validation::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::str::validations::utf8_acc_cont_byte const fn utf8_acc_cont_byte(ch: u32, byte: u8) -> u32 {
(ch << 6) | (byte & CONT_MASK) as u32
}
core::str::validations::utf8_char_width pub const fn utf8_char_width(b: u8) -> usize {
UTF8_CHAR_WIDTH[b as usize] as usize
}
core::str::validations::utf8_first_byte const fn utf8_first_byte(byte: u8, width: u32) -> u32 {
(byte & (0x7F >> width)) as u32
}
core::str::validations::utf8_is_cont_byte pub(super) const fn utf8_is_cont_byte(byte: u8) -> bool {
(byte as i8) < -64
}
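// --- Illustrative note (not library source). Continuation bytes look like
// 0b10xx_xxxx (0x80..=0xBF); reinterpreted as i8 that range is -128..=-65, so a
// single signed comparison identifies them.
fn cont_byte_demo() {
    let is_cont = |byte: u8| (byte as i8) < -64;
    assert!(is_cont(0x82)); // 0x82 as i8 == -126
    assert!(!is_cont(0xE2)); // leading byte of a 3-byte sequence (-30 as i8)
    assert!(!is_cont(0x41)); // ASCII 'A'
}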
core::sync::atomic::AtomicBool::fetch_and pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
}
core::sync::atomic::AtomicBool::fetch_or pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
}
core::sync::atomic::AtomicBool::load pub fn load(&self, order: Ordering) -> bool {
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe { atomic_load(self.v.get(), order) != 0 }
}
core::sync::atomic::AtomicBool::new pub const fn new(v: bool) -> AtomicBool {
AtomicBool { v: UnsafeCell::new(v as u8) }
}
core::sync::atomic::AtomicBool::store pub fn store(&self, val: bool, order: Ordering) {
// SAFETY: any data races are prevented by atomic intrinsics and the raw
// pointer passed in is valid because we got it from a reference.
unsafe {
atomic_store(self.v.get(), val as u8, order);
}
}
core::sync::atomic::AtomicU32::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
}
core::sync::atomic::AtomicU32::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
}
core::sync::atomic::AtomicU32::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
}
}
core::sync::atomic::AtomicU32::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
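// --- Usage sketch (illustrative, not library source). `fetch_update` retries the
// read-modify-write until the CAS succeeds or the closure declines with `None`; on
// success it returns the previous value, on refusal the current one.
fn fetch_update_demo() {
    use core::sync::atomic::{AtomicU32, Ordering};
    let x = AtomicU32::new(7);
    let capped_inc = |v: u32| if v < 8 { Some(v + 1) } else { None };
    assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, capped_inc), Ok(7));
    assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, capped_inc), Err(8));
    assert_eq!(x.load(Ordering::SeqCst), 8);
}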
core::sync::atomic::AtomicU32::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::AtomicU32::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::AtomicU32::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::AtomicU32::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
self.v.get_mut()
}
core::sync::atomic::AtomicU32::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::AtomicU32::into_inner pub const fn into_inner(self) -> $int_type {
self.v.into_inner()
}
core::sync::atomic::AtomicU32::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.v.get(), order) }
}
core::sync::atomic::AtomicU32::new pub const fn new(v: $int_type) -> Self {
Self {v: UnsafeCell::new(v)}
}
core::sync::atomic::AtomicU32::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.v.get(), val, order); }
}
core::sync::atomic::AtomicU32::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
// FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
// when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
self.fetch_update(set_order, fetch_order, f)
}
core::sync::atomic::AtomicU32::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
core::sync::atomic::AtomicU64::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
}
core::sync::atomic::AtomicU64::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
}
core::sync::atomic::AtomicU64::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
}
}
core::sync::atomic::AtomicU64::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU64::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU64::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU64::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU64::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU64::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU64::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU64::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
core::sync::atomic::AtomicU64::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU64::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::AtomicU64::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::AtomicU64::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::AtomicU64::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
self.v.get_mut()
}
core::sync::atomic::AtomicU64::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::AtomicU64::into_inner pub const fn into_inner(self) -> $int_type {
self.v.into_inner()
}
core::sync::atomic::AtomicU64::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.v.get(), order) }
}
core::sync::atomic::AtomicU64::new pub const fn new(v: $int_type) -> Self {
Self {v: UnsafeCell::new(v)}
}
core::sync::atomic::AtomicU64::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.v.get(), val, order); }
}
core::sync::atomic::AtomicU64::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU64::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
// FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
// when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
self.fetch_update(set_order, fetch_order, f)
}
core::sync::atomic::AtomicU64::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
core::sync::atomic::AtomicU8::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
}
core::sync::atomic::AtomicU8::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
}
core::sync::atomic::AtomicU8::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
}
}
core::sync::atomic::AtomicU8::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU8::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU8::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU8::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU8::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU8::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU8::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU8::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
core::sync::atomic::AtomicU8::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU8::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::AtomicU8::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::AtomicU8::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::AtomicU8::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
self.v.get_mut()
}
core::sync::atomic::AtomicU8::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::AtomicU8::into_inner pub const fn into_inner(self) -> $int_type {
self.v.into_inner()
}
core::sync::atomic::AtomicU8::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.v.get(), order) }
}
core::sync::atomic::AtomicU8::new pub const fn new(v: $int_type) -> Self {
Self {v: UnsafeCell::new(v)}
}
core::sync::atomic::AtomicU8::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.v.get(), val, order); }
}
core::sync::atomic::AtomicU8::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU8::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
// FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
// when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
self.fetch_update(set_order, fetch_order, f)
}
core::sync::atomic::AtomicU8::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
core::sync::atomic::AtomicUsize::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
}
core::sync::atomic::AtomicUsize::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
}
core::sync::atomic::AtomicUsize::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
}
}
core::sync::atomic::AtomicUsize::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.v.get(), val, order) }
}
core::sync::atomic::AtomicUsize::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get(), val, order) }
}
core::sync::atomic::AtomicUsize::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicUsize::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicUsize::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.v.get(), val, order) }
}
core::sync::atomic::AtomicUsize::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get(), val, order) }
}
core::sync::atomic::AtomicUsize::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.v.get(), val, order) }
}
core::sync::atomic::AtomicUsize::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
core::sync::atomic::AtomicUsize::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.v.get(), val, order) }
}
core::sync::atomic::AtomicUsize::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::AtomicUsize::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::AtomicUsize::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::AtomicUsize::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
self.v.get_mut()
}
core::sync::atomic::AtomicUsize::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::AtomicUsize::into_inner pub const fn into_inner(self) -> $int_type {
self.v.into_inner()
}
core::sync::atomic::AtomicUsize::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.v.get(), order) }
}
core::sync::atomic::AtomicUsize::new pub const fn new(v: $int_type) -> Self {
Self {v: UnsafeCell::new(v)}
}
core::sync::atomic::AtomicUsize::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.v.get(), val, order); }
}
core::sync::atomic::AtomicUsize::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get(), val, order) }
}
core::sync::atomic::AtomicUsize::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
// FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
// when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
self.fetch_update(set_order, fetch_order, f)
}
core::sync::atomic::AtomicUsize::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
core::sync::atomic::atomic_add unsafe fn atomic_add<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_add`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xadd::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xadd::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xadd::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xadd::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xadd::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_and unsafe fn atomic_and<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_and`
unsafe {
match order {
Relaxed => intrinsics::atomic_and::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_and::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_and::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_and::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_and::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_compare_exchange pub unsafe fn atomic_compare_exchange<T: Copy>(
dst: *mut T,
old: T,
new: T,
success: Ordering,
failure: Ordering,
) -> Result<T, T> {
// SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
}
(Relaxed, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
}
(Relaxed, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
}
(Acquire, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
}
(Acquire, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
}
(Acquire, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
}
(Release, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
}
(Release, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
}
(Release, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
}
(AcqRel, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
}
(AcqRel, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
}
(AcqRel, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
}
(SeqCst, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
}
(SeqCst, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
}
(SeqCst, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
}
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
}
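// --- Usage sketch (illustrative, not library source) for the public wrapper. The
// failure ordering may not be `Release` or `AcqRel` (those pairs hit the `panic!`
// arms above); on failure the `Err` carries the value actually found.
fn compare_exchange_demo() {
    use core::sync::atomic::{AtomicUsize, Ordering};
    let x = AtomicUsize::new(5);
    assert_eq!(x.compare_exchange(5, 10, Ordering::AcqRel, Ordering::Acquire), Ok(5));
    assert_eq!(x.compare_exchange(5, 11, Ordering::AcqRel, Ordering::Acquire), Err(10));
}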
core::sync::atomic::atomic_compare_exchange_weak unsafe fn atomic_compare_exchange_weak<T: Copy>(
dst: *mut T,
old: T,
new: T,
success: Ordering,
failure: Ordering,
) -> Result<T, T> {
// SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
}
(Relaxed, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
}
(Relaxed, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
}
(Acquire, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
}
(Acquire, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
}
(Acquire, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
}
(Release, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
}
(Release, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
}
(Release, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
}
(AcqRel, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
}
(AcqRel, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
}
(AcqRel, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
}
(SeqCst, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
}
(SeqCst, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
}
(SeqCst, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
}
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
}
core::sync::atomic::atomic_load unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_load`.
unsafe {
match order {
Relaxed => intrinsics::atomic_load::<T, { AO::Relaxed }>(dst),
Acquire => intrinsics::atomic_load::<T, { AO::Acquire }>(dst),
SeqCst => intrinsics::atomic_load::<T, { AO::SeqCst }>(dst),
Release => panic!("there is no such thing as a release load"),
AcqRel => panic!("there is no such thing as an acquire-release load"),
}
}
}
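// --- Usage sketch (illustrative, not library source). Loads accept only `Relaxed`,
// `Acquire`, or `SeqCst`; stores only `Relaxed`, `Release`, or `SeqCst`. The other
// orderings hit the `panic!` arms in `atomic_load`/`atomic_store`.
fn load_store_ordering_demo() {
    use core::sync::atomic::{AtomicU8, Ordering};
    let x = AtomicU8::new(1);
    x.store(2, Ordering::Release);
    assert_eq!(x.load(Ordering::Acquire), 2);
}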
core::sync::atomic::atomic_nand unsafe fn atomic_nand<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_nand`
unsafe {
match order {
Relaxed => intrinsics::atomic_nand::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_nand::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_nand::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_nand::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_nand::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_or unsafe fn atomic_or<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_or`
unsafe {
match order {
SeqCst => intrinsics::atomic_or::<T, U, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_or::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_or::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_or::<T, U, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_or::<T, U, { AO::Relaxed }>(dst, val),
}
}
}
core::sync::atomic::atomic_store unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
// SAFETY: the caller must uphold the safety contract for `atomic_store`.
unsafe {
match order {
Relaxed => intrinsics::atomic_store::<T, { AO::Relaxed }>(dst, val),
Release => intrinsics::atomic_store::<T, { AO::Release }>(dst, val),
SeqCst => intrinsics::atomic_store::<T, { AO::SeqCst }>(dst, val),
Acquire => panic!("there is no such thing as an acquire store"),
AcqRel => panic!("there is no such thing as an acquire-release store"),
}
}
}
core::sync::atomic::atomic_sub unsafe fn atomic_sub<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_sub`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xsub::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xsub::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xsub::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xsub::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xsub::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_swap unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_swap`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xchg::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xchg::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xchg::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xchg::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xchg::<T, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_umax unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_umax`
unsafe {
match order {
Relaxed => intrinsics::atomic_umax::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_umax::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_umax::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_umax::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_umax::<T, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_umin unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_umin`
unsafe {
match order {
Relaxed => intrinsics::atomic_umin::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_umin::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_umin::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_umin::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_umin::<T, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_xor unsafe fn atomic_xor<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_xor`
unsafe {
match order {
SeqCst => intrinsics::atomic_xor::<T, U, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_xor::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xor::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xor::<T, U, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_xor::<T, U, { AO::Relaxed }>(dst, val),
}
}
}
core::time::Duration::as_micros pub const fn as_micros(&self) -> u128 {
self.secs as u128 * MICROS_PER_SEC as u128
+ (self.nanos.as_inner() / NANOS_PER_MICRO) as u128
}
core::time::Duration::as_millis pub const fn as_millis(&self) -> u128 {
self.secs as u128 * MILLIS_PER_SEC as u128
+ (self.nanos.as_inner() / NANOS_PER_MILLI) as u128
}
core::time::Duration::as_millis_f32 pub const fn as_millis_f32(&self) -> f32 {
(self.secs as f32) * (MILLIS_PER_SEC as f32)
+ (self.nanos.as_inner() as f32) / (NANOS_PER_MILLI as f32)
}
core::time::Duration::as_millis_f64 pub const fn as_millis_f64(&self) -> f64 {
(self.secs as f64) * (MILLIS_PER_SEC as f64)
+ (self.nanos.as_inner() as f64) / (NANOS_PER_MILLI as f64)
}
core::time::Duration::as_nanos pub const fn as_nanos(&self) -> u128 {
self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos.as_inner() as u128
}
core::time::Duration::as_secs pub const fn as_secs(&self) -> u64 {
self.secs
}
core::time::Duration::as_secs_f32 pub const fn as_secs_f32(&self) -> f32 {
(self.secs as f32) + (self.nanos.as_inner() as f32) / (NANOS_PER_SEC as f32)
}
core::time::Duration::as_secs_f64 pub const fn as_secs_f64(&self) -> f64 {
(self.secs as f64) + (self.nanos.as_inner() as f64) / (NANOS_PER_SEC as f64)
}
core::time::Duration::checked_div pub const fn checked_div(self, rhs: u32) -> Option<Duration> {
if rhs != 0 {
let (secs, extra_secs) = (self.secs / (rhs as u64), self.secs % (rhs as u64));
let (mut nanos, extra_nanos) =
(self.nanos.as_inner() / rhs, self.nanos.as_inner() % rhs);
nanos +=
((extra_secs * (NANOS_PER_SEC as u64) + extra_nanos as u64) / (rhs as u64)) as u32;
debug_assert!(nanos < NANOS_PER_SEC);
Some(Duration::new(secs, nanos))
} else {
None
}
}
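// --- Worked example (illustrative, not library source): 1.5s divided by 3. The
// whole-second division leaves `extra_secs = 1`, contributing
// `(1 * 1_000_000_000 + 2) / 3 = 333_333_334` nanoseconds on top of
// `500_000_000 / 3 = 166_666_666`, for exactly 0.5s.
fn checked_div_demo() {
    use core::time::Duration;
    let d = Duration::new(1, 500_000_000);
    assert_eq!(d.checked_div(3), Some(Duration::new(0, 500_000_000)));
    assert_eq!(d.checked_div(0), None);
}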
core::time::Duration::div_duration_f32 pub const fn div_duration_f32(self, rhs: Duration) -> f32 {
let self_nanos =
(self.secs as f32) * (NANOS_PER_SEC as f32) + (self.nanos.as_inner() as f32);
let rhs_nanos = (rhs.secs as f32) * (NANOS_PER_SEC as f32) + (rhs.nanos.as_inner() as f32);
self_nanos / rhs_nanos
}
core::time::Duration::div_duration_f64 pub const fn div_duration_f64(self, rhs: Duration) -> f64 {
let self_nanos =
(self.secs as f64) * (NANOS_PER_SEC as f64) + (self.nanos.as_inner() as f64);
let rhs_nanos = (rhs.secs as f64) * (NANOS_PER_SEC as f64) + (rhs.nanos.as_inner() as f64);
self_nanos / rhs_nanos
}
core::time::Duration::from_days pub const fn from_days(days: u64) -> Duration {
if days > u64::MAX / (SECS_PER_MINUTE * MINS_PER_HOUR * HOURS_PER_DAY) {
panic!("overflow in Duration::from_days");
}
Duration::from_secs(days * MINS_PER_HOUR * SECS_PER_MINUTE * HOURS_PER_DAY)
}
core::time::Duration::from_hours pub const fn from_hours(hours: u64) -> Duration {
if hours > u64::MAX / (SECS_PER_MINUTE * MINS_PER_HOUR) {
panic!("overflow in Duration::from_hours");
}
Duration::from_secs(hours * MINS_PER_HOUR * SECS_PER_MINUTE)
}
core::time::Duration::from_micros pub const fn from_micros(micros: u64) -> Duration {
let secs = micros / MICROS_PER_SEC;
let subsec_micros = (micros % MICROS_PER_SEC) as u32;
// SAFETY: (x % 1_000_000) * 1_000 < 1_000_000_000
// => x % 1_000_000 < 1_000_000
let subsec_nanos = unsafe { Nanoseconds::new_unchecked(subsec_micros * NANOS_PER_MICRO) };
Duration { secs, nanos: subsec_nanos }
}
core::time::Duration::from_millis pub const fn from_millis(millis: u64) -> Duration {
let secs = millis / MILLIS_PER_SEC;
let subsec_millis = (millis % MILLIS_PER_SEC) as u32;
// SAFETY: (x % 1_000) * 1_000_000 < 1_000_000_000
// => x % 1_000 < 1_000
let subsec_nanos = unsafe { Nanoseconds::new_unchecked(subsec_millis * NANOS_PER_MILLI) };
Duration { secs, nanos: subsec_nanos }
}
core::time::Duration::from_mins pub const fn from_mins(mins: u64) -> Duration {
if mins > u64::MAX / SECS_PER_MINUTE {
panic!("overflow in Duration::from_mins");
}
Duration::from_secs(mins * SECS_PER_MINUTE)
}
core::time::Duration::from_nanos pub const fn from_nanos(nanos: u64) -> Duration {
const NANOS_PER_SEC: u64 = self::NANOS_PER_SEC as u64;
let secs = nanos / NANOS_PER_SEC;
let subsec_nanos = (nanos % NANOS_PER_SEC) as u32;
// SAFETY: x % 1_000_000_000 < 1_000_000_000
let subsec_nanos = unsafe { Nanoseconds::new_unchecked(subsec_nanos) };
Duration { secs, nanos: subsec_nanos }
}
core::time::Duration::from_secs pub const fn from_secs(secs: u64) -> Duration {
Duration { secs, nanos: Nanoseconds::ZERO }
}
core::time::Duration::from_weeks pub const fn from_weeks(weeks: u64) -> Duration {
if weeks > u64::MAX / (SECS_PER_MINUTE * MINS_PER_HOUR * HOURS_PER_DAY * DAYS_PER_WEEK) {
panic!("overflow in Duration::from_weeks");
}
Duration::from_secs(weeks * MINS_PER_HOUR * SECS_PER_MINUTE * HOURS_PER_DAY * DAYS_PER_WEEK)
}
core::time::Duration::is_zero pub const fn is_zero(&self) -> bool {
self.secs == 0 && self.nanos.as_inner() == 0
}
core::time::Duration::new pub const fn new(secs: u64, nanos: u32) -> Duration {
if nanos < NANOS_PER_SEC {
// SAFETY: nanos < NANOS_PER_SEC, therefore nanos is within the valid range
Duration { secs, nanos: unsafe { Nanoseconds::new_unchecked(nanos) } }
} else {
let secs = secs
.checked_add((nanos / NANOS_PER_SEC) as u64)
.expect("overflow in Duration::new");
let nanos = nanos % NANOS_PER_SEC;
// SAFETY: nanos % NANOS_PER_SEC < NANOS_PER_SEC, therefore nanos is within the valid range
Duration { secs, nanos: unsafe { Nanoseconds::new_unchecked(nanos) } }
}
}
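// --- Usage sketch (illustrative, not library source). `new` normalizes excess
// nanoseconds into whole seconds, panicking only if the seconds addition overflows.
fn duration_new_demo() {
    use core::time::Duration;
    let d = Duration::new(1, 1_500_000_000);
    assert_eq!(d.as_secs(), 2);
    assert_eq!(d.subsec_nanos(), 500_000_000);
}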
core::time::Duration::subsec_micros pub const fn subsec_micros(&self) -> u32 {
self.nanos.as_inner() / NANOS_PER_MICRO
}
core::time::Duration::subsec_millis pub const fn subsec_millis(&self) -> u32 {
self.nanos.as_inner() / NANOS_PER_MILLI
}
core::time::Duration::subsec_nanos pub const fn subsec_nanos(&self) -> u32 {
self.nanos.as_inner()
}
core::tuple::<impl core::cmp::PartialEq for (A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (B, A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (B, A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (C, B, A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (C, B, A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (D, C, B, A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (D, C, B, A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (E, D, C, B, A, Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (E, D, C, B, A, Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (T,)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (T,)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
core::tuple::<impl core::cmp::PartialEq for (Z, Y, X, W, V, U, T)>::eq fn eq(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} == other.${index()} )&&+
}
core::tuple::<impl core::cmp::PartialEq for (Z, Y, X, W, V, U, T)>::ne fn ne(&self, other: &($($T,)+)) -> bool {
$( ${ignore($T)} self.${index()} != other.${index()} )||+
}
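All of the impls above are generated by one macro over tuple arities; a minimal hand-written sketch of the 2-tuple expansion (the tuple2_eq/tuple2_ne helper names are illustrative, not part of core):
// Each field is compared positionally: `eq` chains the comparisons with
// `&&`, `ne` chains them with `||`, mirroring the macro repetitions above.
fn tuple2_eq<U: PartialEq, T: PartialEq>(a: &(U, T), b: &(U, T)) -> bool {
    a.0 == b.0 && a.1 == b.1
}
fn tuple2_ne<U: PartialEq, T: PartialEq>(a: &(U, T), b: &(U, T)) -> bool {
    a.0 != b.0 || a.1 != b.1
}
fn main() {
    assert!(tuple2_eq(&(1, "x"), &(1, "x")));
    assert!(tuple2_ne(&(1, "x"), &(2, "x")));
}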
core::ub_checks::check_language_ub pub(crate) const fn check_language_ub() -> bool {
// Only used for UB checks so we may const_eval_select.
intrinsics::ub_checks()
&& const_eval_select!(
@capture { } -> bool:
if const {
// Always disable UB checks.
false
} else {
// Disable UB checks in Miri.
!cfg!(miri)
}
)
}
core::ub_checks::check_language_ub::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
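A minimal stable-Rust model of the selection above, with the intrinsic and evaluation context replaced by plain booleans (the _model name and its parameters are illustrative assumptions, not core API):
fn check_language_ub_model(ub_checks: bool, is_const: bool, is_miri: bool) -> bool {
    // const evaluation detects UB itself, so the extra check is disabled there;
    // at runtime it stays enabled unless Miri (which has its own UB detection) runs.
    ub_checks && if is_const { false } else { !is_miri }
}
fn main() {
    assert!(!check_language_ub_model(true, true, false)); // const eval: disabled
    assert!(check_language_ub_model(true, false, false)); // runtime: enabled
    assert!(!check_language_ub_model(true, false, true)); // under Miri: disabled
}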
core::ub_checks::is_valid_allocation_size pub(crate) const fn is_valid_allocation_size(size: usize, len: usize) -> bool {
let max_len = if size == 0 { usize::MAX } else { isize::MAX as usize / size };
len <= max_len
}
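The bound reflects that no Rust allocation may exceed isize::MAX bytes, while zero-sized elements permit any length; a self-contained check (copying the body locally, since the function is crate-private):
const fn is_valid_allocation_size(size: usize, len: usize) -> bool {
    let max_len = if size == 0 { usize::MAX } else { isize::MAX as usize / size };
    len <= max_len
}
fn main() {
    assert!(is_valid_allocation_size(0, usize::MAX));                   // ZST: any length
    assert!(is_valid_allocation_size(4, isize::MAX as usize / 4));      // u32-sized: at the limit
    assert!(!is_valid_allocation_size(4, isize::MAX as usize / 4 + 1)); // one past the limit
}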
core::ub_checks::maybe_is_aligned pub(crate) const fn maybe_is_aligned(ptr: *const (), align: usize) -> bool {
// This is just for safety checks so we can const_eval_select.
const_eval_select!(
@capture { ptr: *const (), align: usize } -> bool:
if const {
true
} else {
ptr.is_aligned_to(align)
}
)
}
core::ub_checks::maybe_is_aligned::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
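At runtime the check reduces to an address computation; a stable-Rust approximation of the non-const branch, assuming align is a nonzero power of two (is_aligned_to itself is unstable):
fn main() {
    let x = 0u64;
    let p = core::ptr::addr_of!(x).cast::<()>();
    // An address is aligned to `align` exactly when it is a multiple of it.
    let is_aligned = |ptr: *const (), align: usize| ptr.addr() % align == 0;
    assert!(is_aligned(p, core::mem::align_of::<u64>()));
}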
core::ub_checks::maybe_is_aligned_and_not_null pub(crate) const fn maybe_is_aligned_and_not_null(
ptr: *const (),
align: usize,
is_zst: bool,
) -> bool {
// This is just for safety checks so we can const_eval_select.
maybe_is_aligned(ptr, align) && (is_zst || !ptr.is_null())
}
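The is_zst escape hatch exists because zero-sized accesses never dereference memory, so even a null pointer is acceptable for them; a small model (the closure is illustrative, not core's code):
fn main() {
    let aligned_nonnull = |ptr: *const (), align: usize, is_zst: bool| {
        ptr.addr() % align == 0 && (is_zst || !ptr.is_null())
    };
    let null = core::ptr::null::<()>();
    assert!(aligned_nonnull(null, 1, true));   // ZST: null is tolerated
    assert!(!aligned_nonnull(null, 1, false)); // non-ZST: null is rejected
}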
core::ub_checks::maybe_is_nonoverlapping pub(crate) const fn maybe_is_nonoverlapping(
src: *const (),
dst: *const (),
size: usize,
count: usize,
) -> bool {
// This is just for safety checks so we can const_eval_select.
const_eval_select!(
@capture { src: *const (), dst: *const (), size: usize, count: usize } -> bool:
if const {
true
} else {
let src_usize = src.addr();
let dst_usize = dst.addr();
let Some(size) = size.checked_mul(count) else {
crate::panicking::panic_nounwind(
"is_nonoverlapping: `size_of::<T>() * count` overflows a usize",
)
};
let diff = src_usize.abs_diff(dst_usize);
// If the absolute distance between the ptrs is at least as big as the size of the buffer,
// they do not overlap.
diff >= size
}
)
}
core::ub_checks::maybe_is_nonoverlapping::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
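The heart of the runtime branch is the distance test: two regions of size * count bytes are disjoint exactly when their start addresses differ by at least that many bytes. A simplified model that elides the checked_mul overflow panic:
fn main() {
    // Two 16-byte regions (four u32-sized elements each).
    let nonoverlapping = |src: usize, dst: usize, size: usize, count: usize| {
        src.abs_diff(dst) >= size * count
    };
    assert!(nonoverlapping(0x1000, 0x1010, 4, 4));  // adjacent regions: disjoint
    assert!(!nonoverlapping(0x1000, 0x100C, 4, 4)); // distance 12 < 16: overlap
}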
<core::any::TypeId as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive_const(Clone))]
<core::array::iter::IntoIter<T, N> as core::clone::Clone>::clone #[derive(Clone)]
<core::convert::Infallible as core::clone::Clone>::clone fn clone(&self) -> Infallible {
match *self {}
}
<core::convert::Infallible as core::cmp::PartialEq>::eq fn eq(&self, _: &Infallible) -> bool {
match *self {}
}
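Both bodies rely on the uninhabited-type idiom: Infallible has no values, so an exhaustive match with no arms type-checks at any result type and is statically unreachable. A small demonstration (the parse helper is illustrative):
fn parse(s: &str) -> Result<usize, core::convert::Infallible> {
    Ok(s.len())
}
fn main() {
    let n = match parse("abc") {
        Ok(n) => n,
        Err(e) => match e {}, // no arms needed: this branch cannot be taken
    };
    assert_eq!(n, 3);
}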
<core::intrinsics::AtomicOrdering as core::cmp::Eq>::assert_receiver_is_total_eq #[cfg_attr(feature = "ferrocene_certified", derive(ConstParamTy, PartialEq, Eq))]
<core::intrinsics::AtomicOrdering as core::cmp::PartialEq>::eq #[cfg_attr(feature = "ferrocene_certified", derive(ConstParamTy, PartialEq, Eq))]
<core::iter::adapters::cloned::Cloned<I> as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Clone))]
<core::iter::adapters::filter::Filter<I, P> as core::clone::Clone>::clone #[derive(Clone)]
<core::iter::adapters::map::Map<I, F> as core::clone::Clone>::clone #[derive(Clone)]
<core::iter::adapters::zip::Zip<A, B> as core::clone::Clone>::clone #[derive(Clone)]
<core::iter::sources::from_fn::FromFn<F> as core::clone::Clone>::clone #[derive(Clone)]
<core::mem::manually_drop::ManuallyDrop<T> as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone, PartialEq))]
<core::mem::manually_drop::ManuallyDrop<T> as core::cmp::PartialEq>::eq #[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone, PartialEq))]
<core::num::niche_types::Nanoseconds as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::Nanoseconds as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroCharInner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroCharInner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI128Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI128Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI16Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI16Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI32Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI32Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI64Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI64Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI8Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroI8Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroIsizeInner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroIsizeInner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU128Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU128Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU16Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU16Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU32Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU32Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU64Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU64Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU8Inner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroU8Inner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroUsizeInner as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::NonZeroUsizeInner as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::num::niche_types::UsizeNoHighBit as core::clone::Clone>::clone #[derive(Clone, Copy, Eq)]
<core::num::niche_types::UsizeNoHighBit as core::cmp::Eq>::assert_receiver_is_total_eq #[derive(Clone, Copy, Eq)]
<core::ops::control_flow::ControlFlow<B, C> as core::clone::Clone>::clone #[derive_const(Clone, PartialEq, Eq)]
<core::ops::control_flow::ControlFlow<B, C> as core::cmp::Eq>::assert_receiver_is_total_eq #[derive_const(Clone, PartialEq, Eq)]
<core::ops::control_flow::ControlFlow<B, C> as core::cmp::PartialEq>::eq #[derive_const(Clone, PartialEq, Eq)]
<core::ops::index_range::IndexRange as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive_const(Clone, PartialEq))]
<core::ops::index_range::IndexRange as core::cmp::PartialEq>::eq #[cfg_attr(feature = "ferrocene_certified", derive_const(Clone, PartialEq))]
<core::ops::range::Range<Idx> as core::clone::Clone>::clone #[derive_const(Clone, Default, PartialEq)] // not Copy -- see #27186
<core::ops::range::Range<Idx> as core::cmp::PartialEq>::eq #[derive_const(Clone, Default, PartialEq)] // not Copy -- see #27186
<core::ops::range::Range<Idx> as core::default::Default>::default #[derive_const(Clone, Default, PartialEq)] // not Copy -- see #27186
<core::ops::range::RangeFrom<Idx> as core::clone::Clone>::clone #[derive_const(Clone, PartialEq)] // not Copy -- see #27186
<core::ops::range::RangeFrom<Idx> as core::cmp::PartialEq>::eq #[derive_const(Clone, PartialEq)] // not Copy -- see #27186
<core::ops::try_trait::NeverShortCircuit<T> as core::ops::try_trait::FromResidual>::from_residual fn from_residual(never: NeverShortCircuitResidual) -> Self {
match never {}
}
<core::ptr::alignment::Alignment as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone))]
<core::ptr::alignment::AlignmentEnum as core::clone::Clone>::clone #[derive(Copy, Clone)]
<core::sync::atomic::Ordering as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone))]
<core::time::Duration as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Clone, Copy, PartialEq, PartialOrd))]
<core::time::Duration as core::cmp::PartialEq>::eq #[cfg_attr(feature = "ferrocene_certified", derive(Clone, Copy, PartialEq, PartialOrd))]
<core::time::Duration as core::cmp::PartialOrd>::partial_cmp #[cfg_attr(feature = "ferrocene_certified", derive(Clone, Copy, PartialEq, PartialOrd))]
core::cmp::Eq::assert_receiver_is_total_eq fn assert_receiver_is_total_eq(&self) {}
core::cmp::impls::<impl core::cmp::Ord for !>::cmp fn cmp(&self, _: &!) -> Ordering {
*self
}
core::cmp::impls::<impl core::cmp::PartialEq for !>::eq fn eq(&self, _: &!) -> bool {
*self
}
core::cmp::impls::<impl core::cmp::PartialOrd for !>::partial_cmp fn partial_cmp(&self, _: &!) -> Option<Ordering> {
*self
}
core::hint::unreachable_unchecked pub const unsafe fn unreachable_unchecked() -> ! {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"hint::unreachable_unchecked must never be reached",
() => false
);
// SAFETY: the safety contract for `intrinsics::unreachable` must
// be upheld by the caller.
unsafe { intrinsics::unreachable() }
}
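A typical sound use of this public API: the caller promises a branch is impossible, letting the optimizer drop it. The surrounding match is an illustrative example, not taken from core:
fn main() {
    let v = Some(3u32);
    let x = match v {
        Some(n) => n,
        // SAFETY: `v` was constructed as `Some` immediately above,
        // so this arm can never execute.
        None => unsafe { core::hint::unreachable_unchecked() },
    };
    assert_eq!(x, 3);
}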
core::panic::panic_info::PanicInfo::<'a>::new pub(crate) fn new(message: &'a PanicFmt<'a>, location: &'a Location<'a>) -> Self {
PanicInfo { message, location }
}
core::panicking::panic_fmt::panic_impl fn panic_impl(pi: &PanicInfo<'_>) -> !;
core::panicking::panic_nounwind_fmt::runtime::panic_impl fn panic_impl(pi: &PanicInfo<'_>) -> !;
core::ptr::drop_in_place pub const unsafe fn drop_in_place<T: PointeeSized>(to_drop: *mut T)
where
T: [const] Destruct,
{
// Code here does not matter - this is replaced by the
// real drop glue by the compiler.
// SAFETY: see comment above
unsafe { drop_in_place(to_drop) }
}
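A typical use of this public API, dropping a value in place without freeing its storage; ManuallyDrop keeps the compiler from running the destructor a second time:
use core::mem::ManuallyDrop;
fn main() {
    let mut s = ManuallyDrop::new(String::from("hello"));
    // SAFETY: `*s` is valid and properly aligned, and the String is
    // never touched again after being dropped here.
    unsafe { core::ptr::drop_in_place(&mut *s) };
}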