<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::size_hint default fn size_hint(&self) -> (usize, Option<usize>) {
let (a_lower, a_upper) = self.a.size_hint();
let (b_lower, b_upper) = self.b.size_hint();
let lower = cmp::min(a_lower, b_lower);
let upper = match (a_upper, b_upper) {
(Some(x), Some(y)) => Some(cmp::min(x, y)),
(Some(x), None) => Some(x),
(None, Some(y)) => Some(y),
(None, None) => None,
};
(lower, upper)
}
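A minimal sketch of how this bound combination surfaces through the public `Iterator::zip` API; the function name and the concrete arrays are illustrative.
fn zip_size_hint_demo() {
    let a = [1, 2, 3].iter(); // size_hint: (3, Some(3))
    let b = [10, 20].iter();  // size_hint: (2, Some(2))
    // Both the lower and the upper bound are the minimum of the two inputs.
    assert_eq!(a.zip(b).size_hint(), (2, Some(2)));
}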
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
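The body above is the expansion of the unsafe-precondition checking machinery. The following standalone sketch, with an illustrative function name, parameters, and message, shows the same check-then-panic shape outside the macro.
const fn sketch_precondition_check(offset: usize, len: usize) {
    if !(offset <= len) {
        // The real expansion calls `panic_nounwind*`; a plain panic stands in here.
        panic!("unsafe precondition(s) violated: offset must not exceed len");
    }
}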
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut T> {
if self < slice.len() {
// SAFETY: `self` is checked to be in bounds.
unsafe { Some(slice_get_unchecked(slice, self)) }
} else {
None
}
}
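Usage sketch for the bounds-checked lookup above, through the public `slice::get_mut` API; the helper name and data are illustrative.
fn get_mut_demo() {
    let mut data = [1, 2, 3];
    if let Some(x) = data.get_mut(1) {
        *x = 20;
    }
    assert_eq!(data, [1, 20, 3]);
    // An out-of-bounds index takes the `None` branch instead of panicking.
    assert!(data.get_mut(3).is_none());
}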
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::alloc::layout::Layout::from_size_align_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::cmp::impls::<impl core::cmp::Ord for bool>::cmp fn cmp(&self, other: &bool) -> Ordering {
// Casting to i8's and converting the difference to an Ordering generates
// more optimal assembly.
// See <https://github.com/rust-lang/rust/issues/66780> for more info.
match (*self as i8) - (*other as i8) {
-1 => Less,
0 => Equal,
1 => Greater,
#[ferrocene::annotation(
"This match arm cannot be covered because it is unreachable. See the safety comment below."
)]
// SAFETY: bool as i8 returns 0 or 1, so the difference can't be anything else
_ => unsafe { unreachable_unchecked() },
}
}
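A small demonstration of the cast trick used above: the difference of two `bool`s cast to `i8` can only be -1, 0, or 1, which maps directly onto `Less`, `Equal`, and `Greater`. The function name is illustrative.
fn bool_cmp_demo() {
    use core::cmp::Ordering;
    assert_eq!((false as i8) - (true as i8), -1);
    assert_eq!(false.cmp(&true), Ordering::Less);
    assert_eq!(true.cmp(&true), Ordering::Equal);
    assert_eq!(true.cmp(&false), Ordering::Greater);
}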
core::hint::unreachable_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u128>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u128>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u16>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u16>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u32>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u32>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u64>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u64>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u8>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl u8>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl usize>::unchecked_add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::<impl usize>::unchecked_sub::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::num::niche_types::Nanoseconds::new pub const fn new(val: $int) -> Option<Self> {
if (val as $uint) >= ($low as $uint) && (val as $uint) <= ($high as $uint) {
// SAFETY: just checked the inclusive range
Some(unsafe { $name(val) })
} else {
None
}
}
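A self-contained sketch of the range-checked constructor pattern this macro generates; `Nanos` and its bounds are illustrative stand-ins, not the real `Nanoseconds` niche type.
struct Nanos(u32);

impl Nanos {
    const fn new(val: u32) -> Option<Self> {
        // Assumed range for nanoseconds within a second: 0..=999_999_999.
        if val <= 999_999_999 { Some(Nanos(val)) } else { None }
    }
}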
core::ops::index_range::IndexRange::new_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::option::Option::<T>::unwrap_unchecked pub const unsafe fn unwrap_unchecked(self) -> T {
match self {
Some(val) => val,
#[ferrocene::annotation(
"This line cannot be covered as reaching `unreachable_unchecked` is undefined behavior."
)]
// SAFETY: the safety contract must be upheld by the caller.
None => unsafe { hint::unreachable_unchecked() },
}
}
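Usage sketch: the unsafe call is only sound when the caller has already established that the value is `Some`, as the check below does. The function name is illustrative.
fn unwrap_unchecked_demo(opt: Option<u32>) -> u32 {
    if opt.is_some() {
        // SAFETY: just checked that `opt` is `Some`.
        unsafe { opt.unwrap_unchecked() }
    } else {
        0
    }
}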
core::panicking::panic_bounds_check fn panic_bounds_check(index: usize, len: usize) -> ! {
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
}
panic!("index out of bounds: the len is {len} but the index is {index}")
}
core::panicking::panic_fmt pub const fn panic_fmt(fmt: PanicFmt<'_>) -> ! {
#[ferrocene::annotation(
"The `immediate-abort` behavior is not certified, we only support `abort`."
)]
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
};
// NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
// that gets resolved to the `#[panic_handler]` function.
unsafe extern "Rust" {
#[lang = "panic_impl"]
fn panic_impl(pi: &PanicInfo<'_>) -> !;
}
#[cfg(not(feature = "ferrocene_certified"))]
let pi = PanicInfo::new(
&fmt,
Location::caller(),
/* can_unwind */ true,
/* force_no_backtrace */ false,
);
#[cfg(feature = "ferrocene_certified")]
let pi = PanicInfo::new(&fmt);
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
unsafe { panic_impl(&pi) }
}
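A minimal sketch of the `#[panic_handler]` function that the `panic_impl` lang item above resolves to; it belongs in the root of a `#![no_std]` binary and is illustrative only (a real handler would report the panic or reset the device rather than spin).
#[panic_handler]
fn panic(_info: &core::panic::PanicInfo<'_>) -> ! {
    // Illustrative: spin forever instead of unwinding or aborting.
    loop {}
}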
core::ptr::alignment::Alignment::new_unchecked::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::const_ptr::<impl *const T>::add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::const_ptr::<impl *const T>::align_offset pub fn align_offset(self, align: usize) -> usize
where
T: Sized,
{
if !align.is_power_of_two() {
panic!("align_offset: align is not a power-of-two");
}
// SAFETY: `align` has been checked to be a power of 2 above
let ret = unsafe { align_offset(self, align) };
// Inform Miri that we want to consider the resulting pointer to be suitably aligned.
#[cfg(miri)]
if ret != usize::MAX {
intrinsics::miri_promise_symbolic_alignment(self.wrapping_add(ret).cast(), align);
}
ret
}
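Usage sketch for `align_offset`: adding the returned offset to the pointer yields an address aligned for the requested power-of-two alignment, or `usize::MAX` if no suitable offset exists. The helper name is illustrative.
fn align_offset_demo(buf: &[u8]) -> usize {
    let ptr = buf.as_ptr();
    let off = ptr.align_offset(core::mem::align_of::<u32>());
    if off != usize::MAX {
        debug_assert!((ptr as usize).wrapping_add(off) % core::mem::align_of::<u32>() == 0);
    }
    off
}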
core::ptr::const_ptr::<impl *const T>::guaranteed_eq pub const fn guaranteed_eq(self, other: *const T) -> Option<bool>
where
T: Sized,
{
match intrinsics::ptr_guaranteed_cmp(self, other) {
#[ferrocene::annotation(
"This cannot be reached in runtime code so it cannot be covered."
)]
2 => None,
other => Some(other == 1),
}
}
core::ptr::copy::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::copy_nonoverlapping::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::mut_ptr::<impl *mut T>::add::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::read::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::read_volatile::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::write::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::write_bytes::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::ptr::write_volatile::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::result::Result::<T, E>::unwrap_err_unchecked pub unsafe fn unwrap_err_unchecked(self) -> E {
match self {
#[ferrocene::annotation(
"This line cannot be covered as reaching `unreachable_unchecked` is undefined behavior"
)]
// SAFETY: the safety contract must be upheld by the caller.
Ok(_) => unsafe { hint::unreachable_unchecked() },
Err(e) => e,
}
}
core::result::Result::<T, E>::unwrap_unchecked pub unsafe fn unwrap_unchecked(self) -> T {
match self {
Ok(t) => t,
#[ferrocene::annotation(
"This line cannot be covered as reaching `unreachable_unchecked` is undefined behavior"
)]
// SAFETY: the safety contract must be upheld by the caller.
Err(_) => unsafe { hint::unreachable_unchecked() },
}
}
core::slice::index::into_slice_range pub(crate) fn into_slice_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
let end = match end {
ops::Bound::Included(end) if end >= len => slice_index_fail(0, end, len),
// Cannot overflow because `end < len` implies `end < usize::MAX`.
ops::Bound::Included(end) => end + 1,
ops::Bound::Excluded(end) if end > len => slice_index_fail(0, end, len),
ops::Bound::Excluded(end) => end,
ops::Bound::Unbounded => len,
};
let start = match start {
ops::Bound::Excluded(start) if start >= end => slice_index_fail(start, end, len),
// Cannot overflow because `start < end` implies `start < usize::MAX`.
ops::Bound::Excluded(start) => start + 1,
ops::Bound::Included(start) if start > end => slice_index_fail(start, end, len),
ops::Bound::Included(start) => start,
ops::Bound::Unbounded => 0,
};
start..end
}
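Usage sketch: public slicing with a `(Bound, Bound)` pair goes through a conversion like the one above, producing a half-open `Range<usize>`. The function name is illustrative.
fn bound_slicing_demo() {
    use core::ops::Bound;
    let v = [10, 20, 30, 40];
    // (Included(1), Excluded(3)) resolves to the range 1..3.
    assert_eq!(&v[(Bound::Included(1), Bound::Excluded(3))], &[20, 30]);
    // An included end bound is converted with `end + 1`, here to 0..3.
    assert_eq!(&v[(Bound::Unbounded, Bound::Included(2))], &[10, 20, 30]);
}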
core::slice::raw::from_raw_parts::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::slice::raw::from_raw_parts_mut::precondition_check const fn precondition_check($($name:$ty),*) {
if !$e {
let msg = concat!("unsafe precondition(s) violated: ", $message,
"\n\nThis indicates a bug in the program. \
This Undefined Behavior check is optional, and cannot be relied on for safety.");
// blocked on fmt::Arguments
#[cfg(not(feature = "ferrocene_certified"))]
::core::panicking::panic_nounwind_fmt(::core::fmt::Arguments::new_const(&[msg]), false);
#[cfg(feature = "ferrocene_certified")]
::core::panicking::panic_nounwind(msg);
}
}
core::str::validations::run_utf8_validation pub(super) const fn run_utf8_validation(v: &[u8]) -> Result<(), Utf8Error> {
let mut index = 0;
let len = v.len();
const USIZE_BYTES: usize = size_of::<usize>();
let ascii_block_size = 2 * USIZE_BYTES;
let blocks_end = if len >= ascii_block_size { len - ascii_block_size + 1 } else { 0 };
// Below, we safely fall back to a slower codepath if the offset is `usize::MAX`,
// so the end-to-end behavior is the same at compiletime and runtime.
let align = const_eval_select!(
@capture { v: &[u8] } -> usize:
if const {
usize::MAX
} else {
v.as_ptr().align_offset(USIZE_BYTES)
}
);
while index < len {
let old_offset = index;
macro_rules! err {
($error_len: expr) => {
return Err(Utf8Error { valid_up_to: old_offset, error_len: $error_len })
};
}
macro_rules! next {
() => {{
index += 1;
// we needed data, but there was none: error!
if index >= len {
err!(None)
}
v[index]
}};
}
let first = v[index];
if first >= 128 {
let w = utf8_char_width(first);
// 2-byte encoding is for codepoints \u{0080} to \u{07ff}
// first C2 80 last DF BF
// 3-byte encoding is for codepoints \u{0800} to \u{ffff}
// first E0 A0 80 last EF BF BF
// excluding surrogate codepoints \u{d800} to \u{dfff}
// ED A0 80 to ED BF BF
// 4-byte encoding is for codepoints \u{10000} to \u{10ffff}
// first F0 90 80 80 last F4 8F BF BF
//
// Use the UTF-8 syntax from the RFC
//
// https://tools.ietf.org/html/rfc3629
// UTF8-1 = %x00-7F
// UTF8-2 = %xC2-DF UTF8-tail
// UTF8-3 = %xE0 %xA0-BF UTF8-tail / %xE1-EC 2( UTF8-tail ) /
// %xED %x80-9F UTF8-tail / %xEE-EF 2( UTF8-tail )
// UTF8-4 = %xF0 %x90-BF 2( UTF8-tail ) / %xF1-F3 3( UTF8-tail ) /
// %xF4 %x80-8F 2( UTF8-tail )
match w {
2 => {
if next!() as i8 >= -64 {
err!(Some(1))
}
}
3 => {
match (first, next!()) {
(0xE0, 0xA0..=0xBF)
| (0xE1..=0xEC, 0x80..=0xBF)
| (0xED, 0x80..=0x9F)
| (0xEE..=0xEF, 0x80..=0xBF) => {}
_ => err!(Some(1)),
}
if next!() as i8 >= -64 {
err!(Some(2))
}
}
4 => {
match (first, next!()) {
(0xF0, 0x90..=0xBF) | (0xF1..=0xF3, 0x80..=0xBF) | (0xF4, 0x80..=0x8F) => {}
_ => err!(Some(1)),
}
if next!() as i8 >= -64 {
err!(Some(2))
}
if next!() as i8 >= -64 {
err!(Some(3))
}
}
_ => err!(Some(1)),
}
index += 1;
} else {
// Ascii case, try to skip forward quickly.
// When the pointer is aligned, read 2 words of data per iteration
// until we find a word containing a non-ascii byte.
if align != usize::MAX && align.wrapping_sub(index).is_multiple_of(USIZE_BYTES) {
let ptr = v.as_ptr();
while index < blocks_end {
// SAFETY: since `align - index` and `ascii_block_size` are
// multiples of `USIZE_BYTES`, `block = ptr.add(index)` is
// always aligned with a `usize` so it's safe to dereference
// both `block` and `block.add(1)`.
unsafe {
let block = ptr.add(index) as *const usize;
// break if there is a nonascii byte
let zu = contains_nonascii(*block);
let zv = contains_nonascii(*block.add(1));
if zu || zv {
break;
}
}
index += ascii_block_size;
}
// step from the point where the wordwise loop stopped
while index < len && v[index] < 128 {
index += 1;
}
} else {
index += 1;
}
}
}
Ok(())
}
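Usage sketch: this validation backs `str::from_utf8`; a well-formed multi-byte sequence is accepted, while a stray continuation byte reports how many leading bytes were valid. The function name is illustrative.
fn utf8_validation_demo() {
    // U+20AC '€' encoded as three bytes.
    assert!(core::str::from_utf8(&[0xE2, 0x82, 0xAC]).is_ok());
    // ASCII 'a' followed by a lone continuation byte.
    let err = core::str::from_utf8(&[0x61, 0x80]).unwrap_err();
    assert_eq!(err.valid_up_to(), 1);
}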
<&mut I as core::iter::traits::exact_size::ExactSizeIterator>::is_empty fn is_empty(&self) -> bool {
(**self).is_empty()
}
<core::any::TypeId as core::cmp::PartialEq>::eq::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&*get_offset_len_noubcheck(slice, self.start(), self.len())) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&mut *get_offset_len_mut_noubcheck(slice, self.start(), self.len())) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*get_offset_len_noubcheck(slice, self.start(), self.len()) }
} else {
slice_index_fail(self.start(), self.end(), slice.len())
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.end() <= slice.len() {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start(), self.len()) }
} else {
slice_index_fail(self.start(), self.end(), slice.len())
}
}
core::clone::impls::<impl core::clone::Clone for !>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for *const T>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for *mut T>::clone fn clone(&self) -> Self {
*self
}
core::intrinsics::assume pub const unsafe fn assume(b: bool) {
if !b {
// SAFETY: the caller must guarantee the argument is never `false`
unsafe { unreachable() }
}
}
core::intrinsics::cold_path pub const fn cold_path() {}
core::intrinsics::const_make_global pub const unsafe fn const_make_global(ptr: *mut u8) -> *const u8 {
// const eval overrides this function; at runtime, it is a NOP.
ptr
}
core::intrinsics::type_id_eq pub const fn type_id_eq(a: crate::any::TypeId, b: crate::any::TypeId) -> bool {
a.data == b.data
}
core::intrinsics::ub_checks pub const fn ub_checks() -> bool {
cfg!(ub_checks)
}
core::panicking::panic_cannot_unwind fn panic_cannot_unwind() -> ! {
// Keep the text in sync with `UnwindTerminateReason::as_str` in `rustc_middle`.
panic_nounwind("panic in a function that cannot unwind")
}
core::panicking::panic_const::panic_const_add_overflow pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_fn_resumed pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_fn_resumed_drop pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_fn_resumed_panic pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_gen_fn_resumed pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_gen_fn_resumed_drop pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_async_gen_fn_resumed_panic pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_coroutine_resumed pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_coroutine_resumed_drop pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_coroutine_resumed_panic pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_div_by_zero pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_div_overflow pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_gen_fn_none pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_gen_fn_none_drop pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_gen_fn_none_panic pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_mul_overflow pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_neg_overflow pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_rem_by_zero pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_rem_overflow pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_shl_overflow pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_shr_overflow pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_const::panic_const_sub_overflow pub const fn $lang() -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[$message]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&$message);
}
core::panicking::panic_nounwind pub const fn panic_nounwind(expr: &'static str) -> ! {
#[cfg(not(feature = "ferrocene_certified"))]
panic_nounwind_fmt(fmt::Arguments::new_const(&[expr]), /* force_no_backtrace */ false);
#[cfg(feature = "ferrocene_certified")]
panic_nounwind_fmt(&expr, /* force_no_backtrace */ false);
}
core::panicking::panic_nounwind_fmt pub const fn panic_nounwind_fmt(fmt: PanicFmt<'_>, _force_no_backtrace: bool) -> ! {
const_eval_select!(
@capture { fmt: PanicFmt<'_>, _force_no_backtrace: bool } -> !:
if const #[track_caller] {
// We don't unwind anyway at compile-time so we can call the regular `panic_fmt`.
panic_fmt(fmt)
} else #[track_caller] {
if cfg!(panic = "immediate-abort") {
super::intrinsics::abort()
}
// NOTE This function never crosses the FFI boundary; it's a Rust-to-Rust call
// that gets resolved to the `#[panic_handler]` function.
unsafe extern "Rust" {
#[lang = "panic_impl"]
fn panic_impl(pi: &PanicInfo<'_>) -> !;
}
// PanicInfo with the `can_unwind` flag set to false forces an abort.
#[cfg(not(feature = "ferrocene_certified"))]
let pi = PanicInfo::new(
&fmt,
Location::caller(),
/* can_unwind */ false,
_force_no_backtrace,
);
#[cfg(feature = "ferrocene_certified")]
let pi = PanicInfo::new(&fmt);
// SAFETY: `panic_impl` is defined in safe Rust code and thus is safe to call.
unsafe { panic_impl(&pi) }
}
)
}
core::panicking::panic_nounwind_fmt::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::const_ptr::<impl *const T>::add::runtime_add_nowrap::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::const_ptr::<impl *const T>::is_null::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ptr::mut_ptr::<impl *mut T>::add::runtime_add_nowrap::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::slice::<impl [T]>::copy_from_slice::len_mismatch_fail::do_panic::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::slice::<impl [T]>::len pub const fn len(&self) -> usize {
ptr::metadata(self)
}
core::slice::index::slice_index_fail::do_panic::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::str::error::Utf8Error::error_len pub const fn error_len(&self) -> Option<usize> {
// FIXME(const-hack): This should become `map` again, once it's `const`
match self.error_len {
Some(len) => Some(len as usize),
None => None,
}
}
core::str::validations::run_utf8_validation::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ub_checks::check_language_ub::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ub_checks::maybe_is_aligned::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
core::ub_checks::maybe_is_nonoverlapping::compiletime const fn compiletime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
// Don't warn if one of the arguments is unused.
$(let _ = $arg;)*
$compiletime
}
<&T as core::convert::AsRef<U>>::as_ref fn as_ref(&self) -> &U {
<T as AsRef<U>>::as_ref(*self)
}
<&T as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
self
}
<&bool as core::ops::bit::BitAnd<&bool>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&bool as core::ops::bit::BitAnd<bool>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&bool as core::ops::bit::BitOr<&bool>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&bool as core::ops::bit::BitOr<bool>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&bool as core::ops::bit::BitXor<&bool>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&bool as core::ops::bit::BitXor<bool>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&bool as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f128 as core::ops::arith::Add<&f128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Add<f128>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Div<&f128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Div<f128>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Mul<&f128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Mul<f128>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f128 as core::ops::arith::Rem<&f128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Rem<f128>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f128 as core::ops::arith::Sub<&f128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f128 as core::ops::arith::Sub<f128>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Add<&f16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Add<f16>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Div<&f16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Div<f16>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Mul<&f16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Mul<f16>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f16 as core::ops::arith::Rem<&f16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Rem<f16>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f16 as core::ops::arith::Sub<&f16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f16 as core::ops::arith::Sub<f16>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Add<&f32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Add<f32>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Div<&f32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Div<f32>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Mul<&f32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Mul<f32>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f32 as core::ops::arith::Rem<&f32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Rem<f32>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f32 as core::ops::arith::Sub<&f32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f32 as core::ops::arith::Sub<f32>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Add<&f64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Add<f64>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Div<&f64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Div<f64>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Mul<&f64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Mul<f64>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&f64 as core::ops::arith::Rem<&f64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Rem<f64>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&f64 as core::ops::arith::Sub<&f64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&f64 as core::ops::arith::Sub<f64>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Add<&i128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Add<i128>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Div<&i128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Div<i128>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Mul<&i128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Mul<i128>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i128 as core::ops::arith::Rem<&i128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Rem<i128>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::arith::Sub<&i128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::arith::Sub<i128>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::BitAnd<&i128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::BitAnd<i128>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::BitOr<&i128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::BitOr<i128>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::BitXor<&i128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::BitXor<i128>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i128 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i128 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
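The `Shl`/`Shr` entries for `&i128` enumerate every integer type, and every reference to an integer type, as the shift-amount operand; each body again just dereferences and forwards to the by-value shift. A minimal usage check of that observation, assuming nothing beyond the impls listed above:

```rust
fn main() {
    let x: i128 = 1;

    // Shift amount may be any integer type, by value or by reference;
    // the forwarding impls above dereference and delegate in each case.
    let by_u8: i128 = &x << 3u8;
    let by_i32: i128 = &x << 3i32;
    let by_ref_usize: i128 = &x << &3usize;

    assert_eq!(by_u8, 8);
    assert_eq!(by_i32, 8);
    assert_eq!(by_ref_usize, 8);
}
```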
<&i16 as core::ops::arith::Add<&i16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Add<i16>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Div<&i16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Div<i16>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Mul<&i16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Mul<i16>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i16 as core::ops::arith::Rem<&i16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Rem<i16>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::arith::Sub<&i16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::arith::Sub<i16>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::BitAnd<&i16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::BitAnd<i16>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::BitOr<&i16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::BitOr<i16>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::BitXor<&i16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::BitXor<i16>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i16 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i16 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Add<&i32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Add<i32>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Div<&i32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Div<i32>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Mul<&i32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Mul<i32>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i32 as core::ops::arith::Rem<&i32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Rem<i32>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::arith::Sub<&i32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::arith::Sub<i32>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::BitAnd<&i32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::BitAnd<i32>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::BitOr<&i32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::BitOr<i32>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::BitXor<&i32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::BitXor<i32>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i32 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i32 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Add<&i64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Add<i64>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Div<&i64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Div<i64>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Mul<&i64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Mul<i64>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i64 as core::ops::arith::Rem<&i64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Rem<i64>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::arith::Sub<&i64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::arith::Sub<i64>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::BitAnd<&i64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::BitAnd<i64>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::BitOr<&i64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::BitOr<i64>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::BitXor<&i64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::BitXor<i64>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i64 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i64 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Add<&i8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Add<i8>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Div<&i8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Div<i8>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Mul<&i8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Mul<i8>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i8 as core::ops::arith::Rem<&i8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Rem<i8>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::arith::Sub<&i8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::arith::Sub<i8>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::BitAnd<&i8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::BitAnd<i8>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::BitOr<&i8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::BitOr<i8>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::BitXor<&i8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::BitXor<i8>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&i8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&i8 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&i8 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Add<&isize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Add<isize>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Div<&isize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Div<isize>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Mul<&isize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Mul<isize>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Neg>::neg fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&isize as core::ops::arith::Rem<&isize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Rem<isize>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::arith::Sub<&isize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::arith::Sub<isize>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::BitAnd<&isize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::BitAnd<isize>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::BitOr<&isize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::BitOr<isize>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::BitXor<&isize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::BitXor<isize>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&isize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&isize as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&isize as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&mut I as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
(**self).len()
}
<&mut I as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<I::Item> {
(**self).next()
}
<&mut I as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
(**self).size_hint()
}
<&mut T as core::convert::AsMut<U>>::as_mut fn as_mut(&mut self) -> &mut U {
(*self).as_mut()
}
<&mut T as core::convert::AsRef<U>>::as_ref fn as_ref(&self) -> &U {
<T as AsRef<U>>::as_ref(*self)
}
<&mut T as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
self
}
<&mut T as core::ops::deref::DerefMut>::deref_mut fn deref_mut(&mut self) -> &mut T {
self
}
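The `&mut I` and `&mut T` entries above are a different delegation pattern: a mutable reference implements `Iterator`, `ExactSizeIterator`, `AsRef`/`AsMut`, and `Deref`/`DerefMut` by forwarding through `(**self)` or `*self` to the underlying value. A short usage sketch of the iterator side, assuming only the delegating bodies listed above: passing `&mut iter` to an adapter drives the original iterator without consuming it.

```rust
fn main() {
    let mut iter = [1, 2, 3, 4].into_iter();

    // `&mut I: Iterator` (the `(**self).next()` body above) lets an adapter
    // borrow the iterator instead of taking ownership of it.
    let first_two: Vec<i32> = (&mut iter).take(2).collect();
    assert_eq!(first_two, vec![1, 2]);

    // `&mut I: ExactSizeIterator` delegates `len` the same way, so the
    // remaining length is still observable through the original iterator.
    assert_eq!(iter.len(), 2);
    assert_eq!(iter.next(), Some(3));
}
```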
<&u128 as core::ops::arith::Add<&u128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Add<u128>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Div<&u128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Div<u128>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Mul<&u128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Mul<u128>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Rem<&u128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Rem<u128>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::arith::Sub<&u128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::arith::Sub<u128>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::BitAnd<&u128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::BitAnd<u128>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::BitOr<&u128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::BitOr<u128>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::BitXor<&u128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::BitXor<u128>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u128 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u128 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
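The entries above are the by-reference forwarding impls for shifts on u128: the shift amount may be any primitive integer type, passed by value or by reference, and each impl simply dereferences and delegates to the by-value operator. A minimal usage sketch of that behavior (ordinary standard-library semantics, shown only for illustration):
let x: u128 = 1;
assert_eq!(&x << 5u8, 32);    // <&u128 as Shl<u8>>::shl
assert_eq!(&x << &5i32, 32);  // <&u128 as Shl<&i32>>::shl
assert_eq!(&x >> 0usize, 1);  // <&u128 as Shr<usize>>::shr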
<&u16 as core::ops::arith::Add<&u16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Add<u16>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Div<&u16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Div<u16>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Mul<&u16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Mul<u16>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Rem<&u16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Rem<u16>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::arith::Sub<&u16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::arith::Sub<u16>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::BitAnd<&u16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::BitAnd<u16>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::BitOr<&u16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::BitOr<u16>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::BitXor<&u16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::BitXor<u16>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u16 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u16 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Add<&u32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Add<u32>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Div<&u32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Div<u32>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Mul<&u32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Mul<u32>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Rem<&u32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Rem<u32>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::arith::Sub<&u32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::arith::Sub<u32>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::BitAnd<&u32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::BitAnd<u32>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::BitOr<&u32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::BitOr<u32>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::BitXor<&u32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::BitXor<u32>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u32 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u32 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Add<&u64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Add<u64>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Div<&u64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Div<u64>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Mul<&u64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Mul<u64>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Rem<&u64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Rem<u64>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::arith::Sub<&u64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::arith::Sub<u64>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::BitAnd<&u64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::BitAnd<u64>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::BitOr<&u64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::BitOr<u64>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::BitXor<&u64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::BitXor<u64>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u64 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u64 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Add<&u8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Add<u8>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Div<&u8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Div<u8>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Mul<&u8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Mul<u8>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Rem<&u8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Rem<u8>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::arith::Sub<&u8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::arith::Sub<u8>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::BitAnd<&u8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::BitAnd<u8>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::BitOr<&u8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::BitOr<u8>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::BitXor<&u8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::BitXor<u8>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&u8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&u8 as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&u8 as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Add<&usize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Add<usize>>::add fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Div<&usize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Div<usize>>::div fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Mul<&usize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Mul<usize>>::mul fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Rem<&usize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Rem<usize>>::rem fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::arith::Sub<&usize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::arith::Sub<usize>>::sub fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::BitAnd<&usize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::BitAnd<usize>>::bitand fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::BitOr<&usize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::BitOr<usize>>::bitor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::BitXor<&usize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::BitXor<usize>>::bitxor fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Not>::not fn $method(self) -> <$t as $imp>::Output {
$imp::$method(*self)
}
<&usize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shl<i128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<i8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<isize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u128>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u16>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u32>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u64>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<u8>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shl<usize>>::shl fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, *other)
}
<&usize as core::ops::bit::Shr<i128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<i8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<isize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u128>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u16>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u32>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u64>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<u8>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
<&usize as core::ops::bit::Shr<usize>>::shr fn $method(self, other: $u) -> <$t as $imp<$u>>::Output {
$imp::$method(*self, other)
}
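Taken together, the forwarding impls listed for &u16 through &usize let the arithmetic, bitwise, and shift operators take their operands by reference in any of the listed combinations, always delegating to the by-value impl after dereferencing. A short illustrative sketch:
let a: usize = 6;
let b: usize = 2;
assert_eq!(&a + &b, 8);   // <&usize as Add<&usize>>::add
assert_eq!(&a + b, 8);    // <&usize as Add<usize>>::add
assert_eq!(&a & &b, 2);   // <&usize as BitAnd<&usize>>::bitand
assert_eq!(!&a, !6usize); // <&usize as Not>::not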
<() as core::default::Default>::default fn default() -> $t {
$v
}
<(core::ops::range::Bound<T>, core::ops::range::Bound<T>) as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
self
}
<(core::ops::range::Bound<T>, core::ops::range::Bound<T>) as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
match *self {
(_, Included(ref end)) => Included(end),
(_, Excluded(ref end)) => Excluded(end),
(_, Unbounded) => Unbounded,
}
}
<(core::ops::range::Bound<T>, core::ops::range::Bound<T>) as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
match *self {
(Included(ref start), _) => Included(start),
(Excluded(ref start), _) => Excluded(start),
(Unbounded, _) => Unbounded,
}
}
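These two impls are what let a plain (Bound<T>, Bound<T>) tuple be passed wherever a RangeBounds<T> is expected. A minimal sketch of that behavior:
use std::ops::{Bound, RangeBounds};

let bounds = (Bound::Excluded(1), Bound::Included(4));
assert_eq!(bounds.start_bound(), Bound::Excluded(&1));
assert_eq!(bounds.end_bound(), Bound::Included(&4));
assert!(bounds.contains(&3)); // provided method built on the two bounds above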
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&Self::Output> {
into_range(slice.len(), self)?.get(slice)
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut Self::Output> {
into_range(slice.len(), self)?.get_mut(slice)
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { into_range_unchecked(slice.len(), self).get_unchecked(slice) }
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut Self::Output {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { into_range_unchecked(slice.len(), self).get_unchecked_mut(slice) }
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &Self::Output {
into_slice_range(slice.len(), self).index(slice)
}
<(core::ops::range::Bound<usize>, core::ops::range::Bound<usize>) as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut Self::Output {
into_slice_range(slice.len(), self).index_mut(slice)
}
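Because (Bound<usize>, Bound<usize>) implements SliceIndex<[T]>, a bounds tuple can be used directly with slice indexing and get. A usage sketch (this indexing form should be available on any reasonably recent stable toolchain):
use std::ops::Bound;

let v = [10, 20, 30, 40];
assert_eq!(&v[(Bound::Excluded(0), Bound::Included(2))], &[20, 30]);
assert_eq!(v.get((Bound::Included(2), Bound::Unbounded)), Some(&v[2..]));
assert!(v.get((Bound::Included(5), Bound::Unbounded)).is_none()); // out of range: None, no panic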
<I as core::iter::traits::collect::IntoIterator>::into_iter fn into_iter(self) -> I {
self
}
<T as core::array::SpecArrayClone>::clone default fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
from_trusted_iterator(array.iter().cloned())
}
<T as core::array::SpecArrayClone>::clone fn clone<const N: usize>(array: &[T; N]) -> [T; N] {
*array
}
<T as core::array::equality::SpecArrayEq<Other, N>>::spec_eq default fn spec_eq(a: &[Self; N], b: &[Other; N]) -> bool {
a[..] == b[..]
}
<T as core::array::equality::SpecArrayEq<Other, N>>::spec_ne default fn spec_ne(a: &[Self; N], b: &[Other; N]) -> bool {
a[..] != b[..]
}
<T as core::convert::From<T>>::from fn from(t: T) -> T {
t
}
<T as core::convert::Into<U>>::into fn into(self) -> U {
U::from(self)
}
<T as core::convert::TryFrom<U>>::try_from fn try_from(value: U) -> Result<Self, Self::Error> {
Ok(U::into(value))
}
<T as core::convert::TryInto<U>>::try_into fn try_into(self) -> Result<U, U::Error> {
U::try_from(self)
}
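These four blanket impls tie the conversion traits together: From<T> for T is the reflexive case, Into is derived from From, and the TryFrom/TryInto blankets wrap an infallible conversion in Ok. A sketch of the resulting call patterns:
use std::convert::TryInto;

let n = u64::from(7u32);                  // a concrete From impl
let m: u64 = 7u32.into();                 // blanket Into, routed through that From
let k: Result<u64, _> = 7u32.try_into();  // blanket TryInto/TryFrom, always Ok here
assert_eq!((n, m, k), (7, 7, Ok(7)));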
<[A] as core::slice::cmp::SlicePartialEq<B>>::equal default fn equal(&self, other: &[B]) -> bool {
if self.len() != other.len() {
return false;
}
// Implemented as explicit indexing rather
// than zipped iterators for performance reasons.
// See PR https://github.com/rust-lang/rust/pull/116846
// FIXME(const_hack): make this a `for idx in 0..self.len()` loop.
let mut idx = 0;
while idx < self.len() {
// bounds checks are optimized away
if self[idx] != other[idx] {
return false;
}
idx += 1;
}
true
}
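This is the generic fallback behind == on slices; element types with a byte-wise comparable representation get a specialized path elsewhere, but the observable behavior is a length check followed by element-by-element comparison. For illustration:
let a = [1u32, 2, 3];
let b = [1u32, 2, 4];
assert!(a[..] == a[..]);  // same length, all elements equal
assert!(a[..] != b[..]);  // stops at the first mismatching element
assert!(a[..1] != b[..]); // different lengths compare unequal immediately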
<[T] as core::convert::AsMut<[T]>>::as_mut fn as_mut(&mut self) -> &mut [T] {
self
}
<[core::mem::maybe_uninit::MaybeUninit<T>; N] as core::array::iter::iter_inner::PartialDrop>::partial_drop unsafe fn partial_drop(&mut self, alive: IndexRange) {
let slice: &mut [MaybeUninit<T>] = self;
// SAFETY: Initialized elements in the array are also initialized in the slice.
unsafe { slice.partial_drop(alive) }
}
<[core::mem::maybe_uninit::MaybeUninit<T>; N]>::transpose pub const fn transpose(self) -> MaybeUninit<[T; N]> {
// SAFETY: T and MaybeUninit<T> have the same layout
unsafe { intrinsics::transmute_unchecked(self) }
}
<[core::mem::maybe_uninit::MaybeUninit<T>] as core::array::iter::iter_inner::PartialDrop>::partial_drop unsafe fn partial_drop(&mut self, alive: IndexRange) {
// SAFETY: We know that all elements within `alive` are properly initialized.
unsafe { self.get_unchecked_mut(alive).assume_init_drop() }
}
<[core::mem::maybe_uninit::MaybeUninit<T>]>::assume_init_drop pub const unsafe fn assume_init_drop(&mut self)
where
T: [const] Destruct,
{
if !self.is_empty() {
// SAFETY: the caller must guarantee that every element of `self`
// is initialized and satisfies all invariants of `T`.
// Dropping the value in place is safe if that is the case.
unsafe { ptr::drop_in_place(self as *mut [MaybeUninit<T>] as *mut [T]) }
}
}
<[core::mem::maybe_uninit::MaybeUninit<T>]>::assume_init_ref pub const unsafe fn assume_init_ref(&self) -> &[T] {
// SAFETY: casting `self` to a `*const [T]` is safe since the caller guarantees that
// `self` is initialized, and `MaybeUninit` is guaranteed to have the same layout as `T`.
// The pointer obtained is valid since it refers to memory owned by `self`, which is a
// reference and thus guaranteed to be valid for reads.
unsafe { &*(self as *const Self as *const [T]) }
}
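The slice-level assume_init_drop/assume_init_ref helpers above back the array iterator machinery and are not stable public API at the time of writing, but the per-value MaybeUninit methods express the same contract. A minimal sketch using the stable per-value counterparts:
use std::mem::MaybeUninit;

let mut slot: MaybeUninit<String> = MaybeUninit::uninit();
slot.write(String::from("hello"));
// SAFETY: `slot` was initialized by the `write` above.
let view: &String = unsafe { slot.assume_init_ref() };
assert_eq!(view, "hello");
// SAFETY: still initialized; take ownership so the String is dropped normally.
let owned = unsafe { slot.assume_init() };
drop(owned);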
<bool as core::default::Default>::default fn default() -> $t {
$v
}
<bool as core::ops::bit::BitAnd<&bool>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<bool as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<bool as core::ops::bit::BitAndAssign<&bool>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<bool as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<bool as core::ops::bit::BitOr<&bool>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<bool as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<bool as core::ops::bit::BitOrAssign<&bool>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<bool as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<bool as core::ops::bit::BitXor<&bool>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<bool as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<bool as core::ops::bit::BitXorAssign<&bool>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<bool as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<bool as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<char as core::default::Default>::default fn default() -> $t {
$v
}
<core::any::TypeId as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
#[cfg(miri)]
return crate::intrinsics::type_id_eq(*self, *other);
#[cfg(not(miri))]
{
let this = self;
crate::intrinsics::const_eval_select!(
@capture { this: &TypeId, other: &TypeId } -> bool:
if const {
crate::intrinsics::type_id_eq(*this, *other)
} else {
// Ideally we would just invoke `type_id_eq` unconditionally here,
// but we do not MIR-inline intrinsics, because backends may want
// to override them (and Miri does!). As a result, MIR opts do not
// clean up this call sufficiently for LLVM to turn repeated
// `TypeId` comparisons against one specific `TypeId` into
// a lookup table.
// SAFETY: We know that at runtime none of the bits have provenance and all bits
// are initialized. So we can just convert the whole thing to a `u128` and compare that.
unsafe {
crate::mem::transmute::<_, u128>(*this) == crate::mem::transmute::<_, u128>(*other)
}
}
)
}
}
<core::any::TypeId as core::cmp::PartialEq>::eq::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
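Whichever branch the const_eval_select takes, the observable contract of TypeId equality is the same: two TypeIds compare equal exactly when they come from the same type. For example:
use std::any::TypeId;

assert_eq!(TypeId::of::<u32>(), TypeId::of::<u32>());
assert_ne!(TypeId::of::<u32>(), TypeId::of::<String>());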
<core::array::Guard<'_, T> as core::ops::drop::Drop>::drop fn drop(&mut self) {
debug_assert!(self.initialized <= self.array_mut.len());
// SAFETY: this slice will contain only initialized objects.
unsafe {
self.array_mut.get_unchecked_mut(..self.initialized).assume_init_drop();
}
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
self.unsize_mut().next()
}
<core::array::iter::IntoIter<T, N> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.unsize().size_hint()
}
<core::array::iter::IntoIter<T, N> as core::ops::drop::Drop>::drop fn drop(&mut self) {
if crate::mem::needs_drop::<T>() {
// SAFETY: This is the only place where we drop this field.
unsafe { ManuallyDrop::drop(&mut self.inner) }
}
}
<core::array::iter::iter_inner::PolymorphicIter<DATA> as core::ops::drop::Drop>::drop fn drop(&mut self) {
// SAFETY: by our type invariant `self.alive` is exactly the initialized
// items, and this is drop so nothing can use the items afterwards.
unsafe { self.data.partial_drop(self.alive.clone()) }
}
<core::array::iter::iter_inner::PolymorphicIter<[core::mem::maybe_uninit::MaybeUninit<T>; N]> as core::clone::Clone>::clone fn clone(&self) -> Self {
// Note, we don't really need to match the exact same alive range, so
// we can just clone into offset 0 regardless of where `self` is.
let mut new = Self::empty();
fn clone_into_new<U: Clone>(
source: &PolymorphicIter<[MaybeUninit<U>]>,
target: &mut PolymorphicIter<[MaybeUninit<U>]>,
) {
// Clone all alive elements.
for (src, dst) in iter::zip(source.as_slice(), &mut target.data) {
// Write a clone into the new array, then update its alive range.
// If cloning panics, we'll correctly drop the previous items.
dst.write(src.clone());
// This addition cannot overflow as we're iterating a slice,
// the length of which always fits in usize.
target.alive = IndexRange::zero_to(target.alive.end() + 1);
}
}
clone_into_new(self, &mut new);
new
}
<core::array::iter::iter_inner::PolymorphicIter<[core::mem::maybe_uninit::MaybeUninit<T>; N]> as core::clone::Clone>::clone::clone_into_new fn clone_into_new<U: Clone>(
source: &PolymorphicIter<[MaybeUninit<U>]>,
target: &mut PolymorphicIter<[MaybeUninit<U>]>,
) {
// Clone all alive elements.
for (src, dst) in iter::zip(source.as_slice(), &mut target.data) {
// Write a clone into the new array, then update its alive range.
// If cloning panics, we'll correctly drop the previous items.
dst.write(src.clone());
// This addition cannot overflow as we're iterating a slice,
// the length of which always fits in usize.
target.alive = IndexRange::zero_to(target.alive.end() + 1);
}
}
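This is the machinery behind Clone for core::array::IntoIter: only the still-alive elements are cloned, into a fresh iterator that starts at offset 0. An illustrative sketch (using the edition-2021 by-value into_iter on arrays):
let mut it = [String::from("a"), String::from("b")].into_iter();
it.next(); // consume "a"; only "b" remains alive
let copy = it.clone(); // clones just the remaining element
assert_eq!(copy.collect::<Vec<_>>(), vec![String::from("b")]);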
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<T> {
self.it.next().cloned()
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.it.size_hint()
}
<core::iter::adapters::cloned::Cloned<I> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> T {
// SAFETY: `Cloned` is 1:1 with the inner iterator, so if the caller promised
// that there's an element left, the inner iterator has one too.
let item = unsafe { self.it.next_unchecked() };
item.clone()
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<B> {
self.iter.next().map(&mut self.f)
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
<core::iter::adapters::map::Map<I, F> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> B {
// SAFETY: `Map` is 1:1 with the inner iterator, so if the caller promised
// that there's an element left, the inner iterator has one too.
let item = unsafe { self.iter.next_unchecked() };
(self.f)(item)
}
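Illustrative sketch (not part of the listed sources): both Cloned and Map are 1:1 adapters, so their size_hint is the inner iterator's hint passed through unchanged.
fn main() {
    let v = vec![1, 2, 3];
    let cloned = v.iter().cloned();
    assert_eq!(cloned.size_hint(), (3, Some(3)));
    let mapped = v.iter().map(|x| x * 2);
    assert_eq!(mapped.size_hint(), (3, Some(3)));
    assert_eq!(mapped.collect::<Vec<_>>(), vec![2, 4, 6]);
}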
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::new default fn new(a: A, b: B) -> Self {
Zip {
a,
b,
index: 0, // unused
len: 0, // unused
}
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::adapters::zip::ZipImpl<A, B>>::next default fn next(&mut self) -> Option<(A::Item, B::Item)> {
let x = self.a.next()?;
let y = self.b.next()?;
Some((x, y))
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<Self::Item> {
ZipImpl::next(self)
}
<core::iter::adapters::zip::Zip<A, B> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
ZipImpl::size_hint(self)
}
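Illustrative sketch (not part of the listed sources): Zip yields pairs until the shorter input is exhausted, and its size_hint is the element-wise minimum of both sides.
fn main() {
    let a = [1, 2, 3];
    let b = ["x", "y"];
    let zipped = a.iter().zip(b.iter());
    assert_eq!(zipped.size_hint(), (2, Some(2)));
    assert_eq!(zipped.count(), 2); // stops when `b` runs out
}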
<core::mem::manually_drop::ManuallyDrop<T> as core::ops::deref::Deref>::deref fn deref(&self) -> &T {
&self.value
}
<core::mem::manually_drop::ManuallyDrop<T> as core::ops::deref::DerefMut>::deref_mut fn deref_mut(&mut self) -> &mut T {
&mut self.value
}
<core::mem::maybe_uninit::MaybeUninit<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
// Not calling `T::clone()`, we cannot know if we are initialized enough for that.
*self
}
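Illustrative sketch (not part of the listed sources): this Clone impl only exists for T: Copy, so cloning is a plain bitwise copy and never runs user code on possibly uninitialized data.
use core::mem::MaybeUninit;
fn main() {
    let a = MaybeUninit::new(42u32);
    let b = a.clone(); // bitwise copy of the (possibly uninitialized) bytes
    // SAFETY: `b` was copied from a value that is known to be initialized.
    assert_eq!(unsafe { b.assume_init() }, 42);
}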
<core::num::niche_types::Nanoseconds as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_inner() == other.as_inner()
}
<core::num::niche_types::Nanoseconds as core::default::Default>::default fn default() -> Self {
Self::ZERO
}
<core::ops::control_flow::ControlFlow<B, C> as core::ops::try_trait::FromResidual<core::ops::control_flow::ControlFlow<B, core::convert::Infallible>>>::from_residual fn from_residual(residual: ControlFlow<B, convert::Infallible>) -> Self {
match residual {
ControlFlow::Break(b) => ControlFlow::Break(b),
}
}
<core::ops::control_flow::ControlFlow<B, C> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
match self {
ControlFlow::Continue(c) => ControlFlow::Continue(c),
ControlFlow::Break(b) => ControlFlow::Break(ControlFlow::Break(b)),
}
}
<core::ops::control_flow::ControlFlow<B, C> as core::ops::try_trait::Try>::from_output fn from_output(output: Self::Output) -> Self {
ControlFlow::Continue(output)
}
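Illustrative sketch (not part of the listed sources; `check` and `first_negative` are made-up names): `?` on ControlFlow desugars to branch, and a Break residual is rebuilt by from_residual in the caller.
use std::ops::ControlFlow;
fn check(x: i32) -> ControlFlow<i32> {
    if x < 0 { ControlFlow::Break(x) } else { ControlFlow::Continue(()) }
}
fn first_negative(xs: &[i32]) -> ControlFlow<i32> {
    for &x in xs {
        check(x)?; // forwards any Break(x) to the caller
    }
    ControlFlow::Continue(())
}
fn main() {
    assert_eq!(first_negative(&[1, 2, -3, 4]), ControlFlow::Break(-3));
}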
<core::ops::index_range::IndexRange as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<usize> {
if self.len() > 0 {
// SAFETY: We just checked that the range is non-empty
unsafe { Some(self.next_unchecked()) }
} else {
None
}
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked requires that the index is within the slice",
(end: usize = self.end(), len: usize = slice.len()) => end <= len
);
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe { get_offset_len_noubcheck(slice, self.start(), self.len()) }
}
<core::ops::index_range::IndexRange as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked_mut requires that the index is within the slice",
(end: usize = self.end(), len: usize = slice.len()) => end <= len
);
// SAFETY: see comments for `get_unchecked` above.
unsafe { get_offset_len_mut_noubcheck(slice, self.start(), self.len()) }
}
<core::ops::range::Range<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(self.end)
}
<core::ops::range::Range<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(self.start)
}
<core::ops::range::Range<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Included(self.start), Excluded(self.end))
}
<core::ops::range::Range<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(&self.end)
}
<core::ops::range::Range<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(&self.start)
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&*get_offset_len_noubcheck(slice, self.start, new_len)) }
} else {
None
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { Some(&mut *get_offset_len_mut_noubcheck(slice, self.start, new_len)) }
} else {
None
}
}
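Illustrative sketch (not part of the listed sources): get and get_mut return None instead of panicking when the range is reversed or reaches past the slice.
fn main() {
    let mut v = [10, 20, 30, 40];
    assert_eq!(v.get(1..3), Some(&[20, 30][..]));
    assert_eq!(v.get(2..1), None); // start > end
    assert_eq!(v.get(1..9), None); // end > len
    if let Some(s) = v.get_mut(0..2) {
        s[0] = 99;
    }
    assert_eq!(v[0], 99);
}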
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked requires that the range is within the slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len
);
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe and the length calculation cannot overflow.
unsafe {
// Using the intrinsic avoids a superfluous UB check,
// since the one on this method already checked `end >= start`.
let new_len = crate::intrinsics::unchecked_sub(self.end, self.start);
get_offset_len_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked_mut requires that the range is within the slice",
(
start: usize = self.start,
end: usize = self.end,
len: usize = slice.len()
) => end >= start && end <= len
);
// SAFETY: see comments for `get_unchecked` above.
unsafe {
let new_len = crate::intrinsics::unchecked_sub(self.end, self.start);
get_offset_len_mut_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &*get_offset_len_noubcheck(slice, self.start, new_len) }
} else {
slice_index_fail(self.start, self.end, slice.len())
}
}
<core::ops::range::Range<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
// Using checked_sub is a safe way to get `SubUnchecked` in MIR
if let Some(new_len) = usize::checked_sub(self.end, self.start)
&& self.end <= slice.len()
{
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { &mut *get_offset_len_mut_noubcheck(slice, self.start, new_len) }
} else {
slice_index_fail(self.start, self.end, slice.len())
}
}
<core::ops::range::RangeFrom<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFrom<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(self.start)
}
<core::ops::range::RangeFrom<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Included(self.start), Unbounded)
}
<core::ops::range::RangeFrom<T> as core::ops::range::OneSidedRange<T>>::bound fn bound(self) -> (OneSidedRangeBound, T) {
(OneSidedRangeBound::StartInclusive, self.start)
}
<core::ops::range::RangeFrom<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFrom<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(&self.start)
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
(self.start..slice.len()).get(slice)
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(self.start..slice.len()).get_mut(slice)
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (self.start..slice.len()).get_unchecked(slice) }
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
if self.start > slice.len() {
slice_index_fail(self.start, slice.len(), slice.len())
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe {
let new_len = crate::intrinsics::unchecked_sub(slice.len(), self.start);
&*get_offset_len_noubcheck(slice, self.start, new_len)
}
}
<core::ops::range::RangeFrom<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
if self.start > slice.len() {
slice_index_fail(self.start, slice.len(), slice.len())
}
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe {
let new_len = crate::intrinsics::unchecked_sub(slice.len(), self.start);
&mut *get_offset_len_mut_noubcheck(slice, self.start, new_len)
}
}
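Illustrative sketch (not part of the listed sources): when indexing with a RangeFrom, start == len yields an empty slice, while start > len panics, so the fallible path uses get instead.
fn main() {
    let s = [1, 2, 3, 4, 5];
    assert_eq!(&s[2..], &[3, 4, 5]);
    assert!(s[5..].is_empty()); // start == len is in bounds
    assert_eq!(s.get(6..), None); // start > len: `s[6..]` would panic
}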
<core::ops::range::RangeFull as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Unbounded, Unbounded)
}
<core::ops::range::RangeFull as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFull as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
Some(slice)
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
Some(slice)
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
slice
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
slice
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
slice
}
<core::ops::range::RangeFull as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
slice
}
<core::ops::range::RangeInclusive<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Included(self.end)
}
<core::ops::range::RangeInclusive<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(self.start)
}
<core::ops::range::RangeInclusive<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(
Included(self.start),
if self.exhausted {
// When the iterator is exhausted, we usually have start == end,
// but we want the range to appear empty, containing nothing.
Excluded(self.end)
} else {
Included(self.end)
},
)
}
<core::ops::range::RangeInclusive<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
if self.exhausted {
// When the iterator is exhausted, we usually have start == end,
// but we want the range to appear empty, containing nothing.
Excluded(&self.end)
} else {
Included(&self.end)
}
}
<core::ops::range::RangeInclusive<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Included(&self.start)
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
if *self.end() == usize::MAX { None } else { self.into_slice_range().get(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
if *self.end() == usize::MAX { None } else { self.into_slice_range().get_mut(slice) }
}
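Illustrative sketch (not part of the listed sources): an inclusive end of usize::MAX cannot be turned into an exclusive end without overflow, so the checked accessors report it as out of bounds.
fn main() {
    let v = [1, 2, 3];
    assert_eq!(v.get(0..=1), Some(&[1, 2][..]));
    assert_eq!(v.get(0..=usize::MAX), None);
}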
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { self.into_slice_range().get_unchecked(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { self.into_slice_range().get_unchecked_mut(slice) }
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
let Self { mut start, mut end, exhausted } = self;
let len = slice.len();
if end < len {
end = end + 1;
start = if exhausted { end } else { start };
if let Some(new_len) = usize::checked_sub(end, start) {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { return &*get_offset_len_noubcheck(slice, start, new_len) }
}
}
slice_index_fail(start, end, slice.len())
}
<core::ops::range::RangeInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
let Self { mut start, mut end, exhausted } = self;
let len = slice.len();
if end < len {
end = end + 1;
start = if exhausted { end } else { start };
if let Some(new_len) = usize::checked_sub(end, start) {
// SAFETY: `self` is checked to be valid and in bounds above.
unsafe { return &mut *get_offset_len_mut_noubcheck(slice, start, new_len) }
}
}
slice_index_fail(start, end, slice.len())
}
<core::ops::range::RangeTo<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(self.end)
}
<core::ops::range::RangeTo<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeTo<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Unbounded, Excluded(self.end))
}
<core::ops::range::RangeTo<T> as core::ops::range::OneSidedRange<T>>::bound fn bound(self) -> (OneSidedRangeBound, T) {
(OneSidedRangeBound::End, self.end)
}
<core::ops::range::RangeTo<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Excluded(&self.end)
}
<core::ops::range::RangeTo<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
(0..self.end).get(slice)
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..self.end).get_mut(slice)
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..self.end).get_unchecked(slice) }
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..self.end).get_unchecked_mut(slice) }
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
(0..self.end).index(slice)
}
<core::ops::range::RangeTo<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..self.end).index_mut(slice)
}
<core::ops::range::RangeToInclusive<&T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Included(self.end)
}
<core::ops::range::RangeToInclusive<&T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::IntoBounds<T>>::into_bounds fn into_bounds(self) -> (Bound<T>, Bound<T>) {
(Unbounded, Included(self.end))
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::OneSidedRange<T>>::bound fn bound(self) -> (OneSidedRangeBound, T) {
(OneSidedRangeBound::EndInclusive, self.end)
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::RangeBounds<T>>::end_bound fn end_bound(&self) -> Bound<&T> {
Included(&self.end)
}
<core::ops::range::RangeToInclusive<T> as core::ops::range::RangeBounds<T>>::start_bound fn start_bound(&self) -> Bound<&T> {
Unbounded
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&[T]> {
(0..=self.end).get(slice)
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_mut fn get_mut(self, slice: &mut [T]) -> Option<&mut [T]> {
(0..=self.end).get_mut(slice)
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
unsafe { (0..=self.end).get_unchecked(slice) }
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut [T] {
// SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
unsafe { (0..=self.end).get_unchecked_mut(slice) }
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &[T] {
(0..=self.end).index(slice)
}
<core::ops::range::RangeToInclusive<usize> as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut [T] {
(0..=self.end).index_mut(slice)
}
<core::ops::try_trait::NeverShortCircuit<T> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<NeverShortCircuitResidual, T> {
ControlFlow::Continue(self.0)
}
<core::ops::try_trait::NeverShortCircuit<T> as core::ops::try_trait::Try>::from_output fn from_output(x: T) -> Self {
NeverShortCircuit(x)
}
<core::option::IntoIter<A> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<A> {
self.inner.next()
}
<core::option::IntoIter<A> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
<core::option::Item<A> as core::iter::traits::exact_size::ExactSizeIterator>::len fn len(&self) -> usize {
self.opt.len()
}
<core::option::Item<A> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<A> {
self.opt.take()
}
<core::option::Item<A> as core::iter::traits::iterator::Iterator>::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
<core::option::Option<&'a T> as core::convert::From<&'a core::option::Option<T>>>::from fn from(o: &'a Option<T>) -> Option<&'a T> {
o.as_ref()
}
<core::option::Option<&'a mut T> as core::convert::From<&'a mut core::option::Option<T>>>::from fn from(o: &'a mut Option<T>) -> Option<&'a mut T> {
o.as_mut()
}
<core::option::Option<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
match self {
Some(x) => Some(x.clone()),
None => None,
}
}
<core::option::Option<T> as core::clone::Clone>::clone_from fn clone_from(&mut self, source: &Self) {
match (self, source) {
(Some(to), Some(from)) => to.clone_from(from),
(to, from) => *to = from.clone(),
}
}
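Illustrative sketch (not part of the listed sources): when both sides are Some, clone_from delegates to the payload's own clone_from, which can reuse an existing allocation such as a String buffer.
fn main() {
    let mut dst = Some(String::with_capacity(64));
    let src = Some(String::from("hi"));
    dst.clone_from(&src); // Some/Some: String::clone_from reuses `dst`'s buffer
    assert_eq!(dst.as_deref(), Some("hi"));
}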
<core::option::Option<T> as core::convert::From<T>>::from fn from(val: T) -> Option<T> {
Some(val)
}
<core::option::Option<T> as core::default::Default>::default fn default() -> Option<T> {
None
}
<core::option::Option<T> as core::iter::traits::collect::IntoIterator>::into_iter fn into_iter(self) -> IntoIter<T> {
IntoIter { inner: Item { opt: self } }
}
<core::option::Option<T> as core::ops::try_trait::FromResidual<core::option::Option<core::convert::Infallible>>>::from_residual fn from_residual(residual: Option<convert::Infallible>) -> Self {
match residual {
None => None,
}
}
<core::option::Option<T> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
match self {
Some(v) => ControlFlow::Continue(v),
None => ControlFlow::Break(None),
}
}
<core::option::Option<T> as core::ops::try_trait::Try>::from_output fn from_output(output: Self::Output) -> Self {
Some(output)
}
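Illustrative sketch (not part of the listed sources; `first_char_upper` is a made-up name): `?` on Option calls branch, and a None residual is rebuilt by from_residual in the caller's return type.
fn first_char_upper(s: &str) -> Option<char> {
    let c = s.chars().next()?; // None short-circuits here
    Some(c.to_ascii_uppercase())
}
fn main() {
    assert_eq!(first_char_upper("rust"), Some('R'));
    assert_eq!(first_char_upper(""), None);
}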
<core::ptr::non_null::NonNull<T> as core::clone::Clone>::clone fn clone(&self) -> Self {
*self
}
<core::ptr::non_null::NonNull<T> as core::cmp::PartialEq>::eq fn eq(&self, other: &Self) -> bool {
self.as_ptr() == other.as_ptr()
}
<core::result::Result<T, E> as core::ops::try_trait::Try>::branch fn branch(self) -> ControlFlow<Self::Residual, Self::Output> {
match self {
Ok(v) => ControlFlow::Continue(v),
Err(e) => ControlFlow::Break(Err(e)),
}
}
<core::result::Result<T, E> as core::ops::try_trait::Try>::from_output fn from_output(output: Self::Output) -> Self {
Ok(output)
}
<core::result::Result<T, F> as core::ops::try_trait::FromResidual<core::result::Result<core::convert::Infallible, E>>>::from_residual fn from_residual(residual: Result<convert::Infallible, E>) -> Self {
match residual {
Err(e) => Err(From::from(e)),
}
}
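Illustrative sketch (not part of the listed sources; `AppError` and `parse_doubled` are made-up names): the Err arm of from_residual passes the error through From::from, which is what lets `?` convert error types.
use std::num::ParseIntError;
#[derive(Debug)]
enum AppError { Parse(ParseIntError) }
impl From<ParseIntError> for AppError {
    fn from(e: ParseIntError) -> Self { AppError::Parse(e) }
}
fn parse_doubled(s: &str) -> Result<i32, AppError> {
    let n: i32 = s.parse()?; // ParseIntError is converted into AppError here
    Ok(n * 2)
}
fn main() {
    assert_eq!(parse_doubled("21").unwrap(), 42);
    assert!(parse_doubled("nope").is_err());
}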
<core::slice::iter::Iter<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<$elem> {
// intentionally not using the helpers because this is
// one of the most mono'd things in the library.
let ptr = self.ptr;
let end_or_len = self.end_or_len;
// SAFETY: See inner comments. (For some reason having multiple
// blocks breaks inlining this -- if you can fix that please do!)
unsafe {
if T::IS_ZST {
let len = end_or_len.addr();
if len == 0 {
return None;
}
// SAFETY: just checked that it's not zero, so subtracting one
// cannot wrap. (Ideally this would be `checked_sub`, which
// does the same thing internally, but as of 2025-02 that
// doesn't optimize quite as small in MIR.)
self.end_or_len = without_provenance_mut(len.unchecked_sub(1));
} else {
// SAFETY: by type invariant, the `end_or_len` field is always
// non-null for a non-ZST pointee. (This transmute ensures we
// get `!nonnull` metadata on the load of the field.)
if ptr == crate::intrinsics::transmute::<$ptr, NonNull<T>>(end_or_len) {
return None;
}
// SAFETY: since it's not empty, per the check above, moving
// forward one keeps us inside the slice, and this is valid.
self.ptr = ptr.add(1);
}
// SAFETY: Now that we know it wasn't empty and we've moved past
// the first one (to avoid giving a duplicate `&mut` next time),
// we can give out a reference to it.
Some({ptr}.$into_ref())
}
}
<core::slice::iter::Iter<'a, T> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> $elem {
// SAFETY: The caller promised there's at least one more item.
unsafe {
self.post_inc_start(1).$into_ref()
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::iterator::Iterator>::next fn next(&mut self) -> Option<$elem> {
// intentionally not using the helpers because this is
// one of the most mono'd things in the library.
let ptr = self.ptr;
let end_or_len = self.end_or_len;
// SAFETY: See inner comments. (For some reason having multiple
// blocks breaks inlining this -- if you can fix that please do!)
unsafe {
if T::IS_ZST {
let len = end_or_len.addr();
if len == 0 {
return None;
}
// SAFETY: just checked that it's not zero, so subtracting one
// cannot wrap. (Ideally this would be `checked_sub`, which
// does the same thing internally, but as of 2025-02 that
// doesn't optimize quite as small in MIR.)
self.end_or_len = without_provenance_mut(len.unchecked_sub(1));
} else {
// SAFETY: by type invariant, the `end_or_len` field is always
// non-null for a non-ZST pointee. (This transmute ensures we
// get `!nonnull` metadata on the load of the field.)
if ptr == crate::intrinsics::transmute::<$ptr, NonNull<T>>(end_or_len) {
return None;
}
// SAFETY: since it's not empty, per the check above, moving
// forward one keeps us inside the slice, and this is valid.
self.ptr = ptr.add(1);
}
// SAFETY: Now that we know it wasn't empty and we've moved past
// the first one (to avoid giving a duplicate `&mut` next time),
// we can give out a reference to it.
Some({ptr}.$into_ref())
}
}
<core::slice::iter::IterMut<'a, T> as core::iter::traits::unchecked_iterator::UncheckedIterator>::next_unchecked unsafe fn next_unchecked(&mut self) -> $elem {
// SAFETY: The caller promised there's at least one more item.
unsafe {
self.post_inc_start(1).$into_ref()
}
}
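Illustrative sketch (not part of the listed sources): for zero-sized element types the iterator has no address range to walk, so end_or_len holds a remaining count instead of an end pointer; from the outside both cases behave identically.
fn main() {
    let xs = [1u32, 2, 3];
    assert_eq!(xs.iter().copied().sum::<u32>(), 6); // non-ZST: pointer walks toward the end pointer
    let units = [(); 5];
    assert_eq!(units.iter().count(), 5); // ZST: a length is counted down instead
}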
<core::sync::atomic::AtomicU32 as core::convert::From<u32>>::from fn from(v: $int_type) -> Self { Self::new(v) }
<core::sync::atomic::AtomicU32 as core::default::Default>::default fn default() -> Self {
Self::new(Default::default())
}
<f128 as core::default::Default>::default fn default() -> $t {
$v
}
<f128 as core::ops::arith::Add<&f128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f128 as core::ops::arith::AddAssign<&f128>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f128 as core::ops::arith::Div<&f128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f128 as core::ops::arith::DivAssign<&f128>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f128 as core::ops::arith::Mul<&f128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f128 as core::ops::arith::MulAssign<&f128>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f128 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f128 as core::ops::arith::Rem<&f128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f128 as core::ops::arith::RemAssign<&f128>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f128 as core::ops::arith::Sub<&f128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f128 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f128 as core::ops::arith::SubAssign<&f128>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f128 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<f16 as core::default::Default>::default fn default() -> $t {
$v
}
<f16 as core::ops::arith::Add<&f16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f16 as core::ops::arith::AddAssign<&f16>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f16 as core::ops::arith::Div<&f16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f16 as core::ops::arith::DivAssign<&f16>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f16 as core::ops::arith::Mul<&f16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f16 as core::ops::arith::MulAssign<&f16>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f16 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f16 as core::ops::arith::Rem<&f16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f16 as core::ops::arith::RemAssign<&f16>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f16 as core::ops::arith::Sub<&f16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f16 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f16 as core::ops::arith::SubAssign<&f16>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f16 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<f32 as core::default::Default>::default fn default() -> $t {
$v
}
<f32 as core::ops::arith::Add<&f32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f32 as core::ops::arith::AddAssign<&f32>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f32 as core::ops::arith::Div<&f32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f32 as core::ops::arith::DivAssign<&f32>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f32 as core::ops::arith::Mul<&f32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f32 as core::ops::arith::MulAssign<&f32>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f32 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f32 as core::ops::arith::Rem<&f32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f32 as core::ops::arith::RemAssign<&f32>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f32 as core::ops::arith::Sub<&f32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f32 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f32 as core::ops::arith::SubAssign<&f32>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f32 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<f64 as core::default::Default>::default fn default() -> $t {
$v
}
<f64 as core::ops::arith::Add<&f64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<f64 as core::ops::arith::AddAssign<&f64>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<f64 as core::ops::arith::Div<&f64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<f64 as core::ops::arith::DivAssign<&f64>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<f64 as core::ops::arith::Mul<&f64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<f64 as core::ops::arith::MulAssign<&f64>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<f64 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<f64 as core::ops::arith::Rem<&f64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<f64 as core::ops::arith::RemAssign<&f64>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<f64 as core::ops::arith::Sub<&f64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<f64 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<f64 as core::ops::arith::SubAssign<&f64>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<f64 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
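Illustrative sketch (not part of the listed sources; shown with f64, though every numeric type listed here gets the same forwarding impls): the &-operand impls simply dereference and call the by-value operator.
fn main() {
    let a = 1.5f64;
    let b = 2.5f64;
    assert_eq!(a + &b, 4.0); // Add<&f64> forwards to Add<f64>
    let mut c = a;
    c += &b; // AddAssign<&f64> forwards to AddAssign<f64>
    assert_eq!(c, 4.0);
}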
<i128 as core::default::Default>::default fn default() -> $t {
$v
}
<i128 as core::ops::arith::Add<&i128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i128 as core::ops::arith::AddAssign<&i128>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i128 as core::ops::arith::Div<&i128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i128 as core::ops::arith::DivAssign<&i128>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i128 as core::ops::arith::Mul<&i128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i128 as core::ops::arith::MulAssign<&i128>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i128 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i128 as core::ops::arith::Rem<&i128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i128 as core::ops::arith::RemAssign<&i128>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i128 as core::ops::arith::Sub<&i128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i128 as core::ops::arith::SubAssign<&i128>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i128 as core::ops::bit::BitAnd<&i128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i128 as core::ops::bit::BitAndAssign<&i128>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i128 as core::ops::bit::BitOr<&i128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i128 as core::ops::bit::BitOrAssign<&i128>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i128 as core::ops::bit::BitXor<&i128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i128 as core::ops::bit::BitXorAssign<&i128>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i128 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i128 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
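Illustrative sketch (not part of the listed sources): the shift operators accept every integer type on the right-hand side, by value or by reference, so mixed-width shifts need no casts.
fn main() {
    let x: i128 = 1;
    assert_eq!(x << 3u8, 8);
    assert_eq!(x << 3usize, 8);
    assert_eq!(x << &3i32, 8);
}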
<i128 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i128 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i128 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i128 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i128 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::default::Default>::default fn default() -> $t {
$v
}
<i16 as core::ops::arith::Add<&i16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i16 as core::ops::arith::AddAssign<&i16>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i16 as core::ops::arith::Div<&i16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i16 as core::ops::arith::DivAssign<&i16>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i16 as core::ops::arith::Mul<&i16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i16 as core::ops::arith::MulAssign<&i16>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i16 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i16 as core::ops::arith::Rem<&i16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i16 as core::ops::arith::RemAssign<&i16>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i16 as core::ops::arith::Sub<&i16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i16 as core::ops::arith::SubAssign<&i16>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i16 as core::ops::bit::BitAnd<&i16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i16 as core::ops::bit::BitAndAssign<&i16>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i16 as core::ops::bit::BitOr<&i16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i16 as core::ops::bit::BitOrAssign<&i16>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i16 as core::ops::bit::BitXor<&i16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i16 as core::ops::bit::BitXorAssign<&i16>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i16 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i16 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i16 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i16 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i16 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i16 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
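The i16 entries above are instances of three macro templates: a by-value operator body (self << other), a by-reference forwarding body ($imp::$method(self, *other), which dereferences the borrowed right-hand side and calls the by-value implementation), and a compound-assignment body (*self <<= other). The following is a minimal usage sketch, illustrative only and not part of the listing, exercising all three forms on i16:

    fn main() {
        let x: i16 = 0b0000_0101;
        let by_value = x << 2u8;  // <i16 as Shl<u8>>::shl
        let by_ref = x << &2u8;   // <i16 as Shl<&u8>>::shl, forwards to the by-value impl
        let mut acc = x;
        acc <<= 2u8;              // <i16 as ShlAssign<u8>>::shl_assign
        assert_eq!(by_value, 20);
        assert_eq!(by_ref, 20);
        assert_eq!(acc, 20);
    }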
<i32 as core::default::Default>::default fn default() -> $t {
$v
}
<i32 as core::ops::arith::Add<&i32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i32 as core::ops::arith::AddAssign<&i32>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i32 as core::ops::arith::Div<&i32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i32 as core::ops::arith::DivAssign<&i32>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i32 as core::ops::arith::Mul<&i32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i32 as core::ops::arith::MulAssign<&i32>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i32 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i32 as core::ops::arith::Rem<&i32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i32 as core::ops::arith::RemAssign<&i32>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i32 as core::ops::arith::Sub<&i32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i32 as core::ops::arith::SubAssign<&i32>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i32 as core::ops::bit::BitAnd<&i32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i32 as core::ops::bit::BitAndAssign<&i32>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i32 as core::ops::bit::BitOr<&i32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i32 as core::ops::bit::BitOrAssign<&i32>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i32 as core::ops::bit::BitXor<&i32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i32 as core::ops::bit::BitXorAssign<&i32>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i32 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i32 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i32 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i32 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i32 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i32 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
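The i32 block begins with one Default body ($v, the per-type default literal, which is 0 for the integer types) and then lists the by-value, by-reference and assigning forms of each arithmetic and bitwise operator. A short sketch, illustrative only and not part of the listing, exercising a few of them:

    fn main() {
        let d: i32 = Default::default();  // <i32 as Default>::default, yields 0
        let sum = 40i32 + &2i32;          // <i32 as Add<&i32>>::add, forwards to Add::add
        let mut total = d;
        total += sum;                     // <i32 as AddAssign>::add_assign
        total -= 2;                       // <i32 as SubAssign>::sub_assign
        assert_eq!(total, 40);
    }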
<i64 as core::default::Default>::default fn default() -> $t {
$v
}
<i64 as core::ops::arith::Add<&i64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i64 as core::ops::arith::AddAssign<&i64>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i64 as core::ops::arith::Div<&i64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i64 as core::ops::arith::DivAssign<&i64>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i64 as core::ops::arith::Mul<&i64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i64 as core::ops::arith::MulAssign<&i64>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i64 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i64 as core::ops::arith::Rem<&i64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i64 as core::ops::arith::RemAssign<&i64>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i64 as core::ops::arith::Sub<&i64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i64 as core::ops::arith::SubAssign<&i64>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i64 as core::ops::bit::BitAnd<&i64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i64 as core::ops::bit::BitAndAssign<&i64>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i64 as core::ops::bit::BitOr<&i64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i64 as core::ops::bit::BitOrAssign<&i64>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i64 as core::ops::bit::BitXor<&i64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i64 as core::ops::bit::BitXorAssign<&i64>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i64 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i64 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i64 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i64 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i64 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i64 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
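Unlike the arithmetic and plain bitwise operators, which are only generated with the type itself as the right-hand side, the shift operators are generated for every integer right-hand type, which is why Shl, Shr, ShlAssign and ShrAssign each appear above for twelve different right-hand types, in both borrowed and by-value form. A brief sketch, illustrative only and not part of the listing, of an i64 value shifted by several right-hand types:

    fn main() {
        let x: i64 = -16;
        assert_eq!(x >> 2u8, -4);      // <i64 as Shr<u8>>::shr
        assert_eq!(x >> 2i32, -4);     // <i64 as Shr<i32>>::shr
        assert_eq!(x >> &2usize, -4);  // <i64 as Shr<&usize>>::shr, forwards to the by-value impl
        let mut y = x;
        y >>= 2u16;                    // <i64 as ShrAssign<u16>>::shr_assign
        assert_eq!(y, -4);             // arithmetic shift: the sign is preserved for signed types
    }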
<i8 as core::default::Default>::default fn default() -> $t {
$v
}
<i8 as core::ops::arith::Add<&i8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<i8 as core::ops::arith::AddAssign<&i8>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<i8 as core::ops::arith::Div<&i8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<i8 as core::ops::arith::DivAssign<&i8>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<i8 as core::ops::arith::Mul<&i8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<i8 as core::ops::arith::MulAssign<&i8>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<i8 as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<i8 as core::ops::arith::Rem<&i8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<i8 as core::ops::arith::RemAssign<&i8>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<i8 as core::ops::arith::Sub<&i8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<i8 as core::ops::arith::SubAssign<&i8>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<i8 as core::ops::bit::BitAnd<&i8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<i8 as core::ops::bit::BitAndAssign<&i8>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<i8 as core::ops::bit::BitOr<&i8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<i8 as core::ops::bit::BitOrAssign<&i8>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<i8 as core::ops::bit::BitXor<&i8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<i8 as core::ops::bit::BitXorAssign<&i8>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<i8 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<i8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<i8 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<i8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<i8 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<i8 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<i8 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<i8 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
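A final sketch, illustrative only and not part of the listing, covering the remaining operator families on a signed type, here i8: the binary bitwise operators, one reference-forwarding form, and the unary Not and Neg bodies:

    fn main() {
        let a: i8 = 0b0101;
        let b: i8 = 0b0011;
        assert_eq!(a & b, 0b0001);  // <i8 as BitAnd>::bitand
        assert_eq!(a | &b, 0b0111); // <i8 as BitOr<&i8>>::bitor, forwards to BitOr::bitor
        let mut m = a;
        m ^= b;                     // <i8 as BitXorAssign>::bitxor_assign
        assert_eq!(m, 0b0110);
        assert_eq!(!0i8, -1);       // <i8 as Not>::not
        assert_eq!(-a, -5);         // <i8 as Neg>::neg
    }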
<isize as core::default::Default>::default fn default() -> $t {
$v
}
<isize as core::ops::arith::Add<&isize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<isize as core::ops::arith::AddAssign<&isize>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<isize as core::ops::arith::Div<&isize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<isize as core::ops::arith::DivAssign<&isize>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<isize as core::ops::arith::Mul<&isize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<isize as core::ops::arith::MulAssign<&isize>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<isize as core::ops::arith::Neg>::neg fn neg(self) -> $t { -self }
<isize as core::ops::arith::Rem<&isize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<isize as core::ops::arith::RemAssign<&isize>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<isize as core::ops::arith::Sub<&isize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<isize as core::ops::arith::SubAssign<&isize>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<isize as core::ops::bit::BitAnd<&isize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<isize as core::ops::bit::BitAndAssign<&isize>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<isize as core::ops::bit::BitOr<&isize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<isize as core::ops::bit::BitOrAssign<&isize>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<isize as core::ops::bit::BitXor<&isize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<isize as core::ops::bit::BitXorAssign<&isize>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<isize as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<isize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<isize as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
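Illustrative sketch (not part of the listing): the Shl entries above differ only in the type of the shift amount; an isize can be shifted by any integer type, owned or borrowed, provided the amount is less than the bit width of isize.

fn main() {
    let x: isize = 1;
    let by_u8: u8 = 3;
    let by_i64: i64 = 3;
    assert_eq!(x << by_u8, 8);   // <isize as Shl<u8>>::shl
    assert_eq!(x << &by_i64, 8); // <isize as Shl<&i64>>::shl
    assert_eq!(x << 3isize, 8);  // <isize as Shl>::shl (the RHS defaults to Self)
}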
<isize as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<isize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<isize as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<isize as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<isize as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<isize as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
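Illustrative sketch (not part of the listing): because isize is signed, the Shr entries above perform an arithmetic (sign-extending) shift, whereas the unsigned types later in this listing shift logically (zero-filling).

fn main() {
    let signed: isize = -8;
    assert_eq!(signed >> 1, -4);                   // arithmetic shift keeps the sign bit
    let unsigned = (-8isize) as usize;
    assert_eq!(unsigned >> 1, usize::MAX / 2 - 3); // logical shift fills with zeros
}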
<str as core::convert::AsMut<str>>::as_mut fn as_mut(&mut self) -> &mut str {
self
}
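Illustrative usage sketch (not part of the listing) for the identity AsMut<str> impl above; shout is a hypothetical helper.

fn shout(s: &mut str) {
    // Fully qualified call to the identity impl listed above.
    let view: &mut str = <str as AsMut<str>>::as_mut(s);
    view.make_ascii_uppercase();
}

fn main() {
    let mut owned = String::from("hello");
    shout(owned.as_mut_str());
    assert_eq!(owned, "HELLO");
}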
<u128 as core::default::Default>::default fn default() -> $t {
$v
}
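Illustrative sketch (not part of the listing): the integer Default entries in this listing return the type's zero value (the $v metavariable).

fn main() {
    assert_eq!(u128::default(), 0);
    assert_eq!(<u128 as Default>::default(), 0u128);
}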
<u128 as core::ops::arith::Add<&u128>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u128 as core::ops::arith::AddAssign<&u128>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u128 as core::ops::arith::Div<&u128>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u128 as core::ops::arith::DivAssign<&u128>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u128 as core::ops::arith::Mul<&u128>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u128 as core::ops::arith::MulAssign<&u128>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u128 as core::ops::arith::Rem<&u128>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u128 as core::ops::arith::RemAssign<&u128>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u128 as core::ops::arith::Sub<&u128>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u128 as core::ops::arith::SubAssign<&u128>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
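Illustrative usage sketch (not part of the listing) covering the u128 arithmetic entries above. Division or remainder by zero always panics; overflow panics when overflow checks are enabled (the default in debug builds).

fn main() {
    let mut acc: u128 = 10;
    acc += &5;               // AddAssign<&u128>
    acc *= 4;                // MulAssign
    assert_eq!(acc, 60);
    assert_eq!(acc / 7, 8);  // Div
    assert_eq!(acc % 7, 4);  // Rem
    assert_eq!(acc - 60, 0); // Sub
}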
<u128 as core::ops::bit::BitAnd<&u128>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u128 as core::ops::bit::BitAndAssign<&u128>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u128 as core::ops::bit::BitOr<&u128>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u128 as core::ops::bit::BitOrAssign<&u128>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u128 as core::ops::bit::BitXor<&u128>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u128 as core::ops::bit::BitXorAssign<&u128>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u128 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
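Illustrative sketch (not part of the listing): for an unsigned type the Not entry above is the full bitwise complement, so !x equals u128::MAX ^ x.

fn main() {
    let x: u128 = 0b1010;
    assert_eq!(!x, u128::MAX ^ x);
    assert_eq!(!0u128, u128::MAX);
}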
<u128 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u128 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u128 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u128 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u128 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u128 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u128 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
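Illustrative sketch (not part of the listing) closing the u128 block: the ShlAssign and ShrAssign entries above accept shift amounts of any integer type, owned or borrowed.

fn main() {
    let mut x: u128 = 1;
    x <<= 100u8;   // ShlAssign<u8>
    x >>= &40u64;  // ShrAssign<&u64>
    assert_eq!(x, 1u128 << 60);
}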
<u16 as core::default::Default>::default fn default() -> $t {
$v
}
<u16 as core::ops::arith::Add<&u16>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u16 as core::ops::arith::AddAssign<&u16>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u16 as core::ops::arith::Div<&u16>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u16 as core::ops::arith::DivAssign<&u16>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u16 as core::ops::arith::Mul<&u16>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u16 as core::ops::arith::MulAssign<&u16>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u16 as core::ops::arith::Rem<&u16>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u16 as core::ops::arith::RemAssign<&u16>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u16 as core::ops::arith::Sub<&u16>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u16 as core::ops::arith::SubAssign<&u16>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u16 as core::ops::bit::BitAnd<&u16>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u16 as core::ops::bit::BitAndAssign<&u16>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u16 as core::ops::bit::BitOr<&u16>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u16 as core::ops::bit::BitOrAssign<&u16>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u16 as core::ops::bit::BitXor<&u16>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u16 as core::ops::bit::BitXorAssign<&u16>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u16 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u16 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u16 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u16 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u16 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u16 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u16 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u16 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
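Illustrative sketch (not part of the listing): as the u16 entries above show, the shift-amount type never changes the result type; $t (here u16) is returned regardless of $f.

fn main() {
    let x: u16 = 0x00F0;
    let left: u16 = x << 4u32;  // Shl<u32> still yields u16
    let right: u16 = x >> 4i8;  // Shr<i8> still yields u16
    assert_eq!(left, 0x0F00);
    assert_eq!(right, 0x000F);
}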
<u32 as core::default::Default>::default fn default() -> $t {
$v
}
<u32 as core::ops::arith::Add<&u32>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u32 as core::ops::arith::AddAssign<&u32>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u32 as core::ops::arith::Div<&u32>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u32 as core::ops::arith::DivAssign<&u32>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u32 as core::ops::arith::Mul<&u32>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u32 as core::ops::arith::MulAssign<&u32>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u32 as core::ops::arith::Rem<&u32>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u32 as core::ops::arith::RemAssign<&u32>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u32 as core::ops::arith::Sub<&u32>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u32 as core::ops::arith::SubAssign<&u32>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u32 as core::ops::bit::BitAnd<&u32>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u32 as core::ops::bit::BitAndAssign<&u32>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u32 as core::ops::bit::BitOr<&u32>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u32 as core::ops::bit::BitOrAssign<&u32>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u32 as core::ops::bit::BitXor<&u32>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u32 as core::ops::bit::BitXorAssign<&u32>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u32 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u32 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u32 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u32 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u32 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u32 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u32 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u32 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
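Illustrative usage sketch (not part of the listing) for the u32 bitwise entries above: folding a slice of flag words mixes an owned accumulator with borrowed elements through BitOr<&u32>.

fn main() {
    let flags = [0b0001u32, 0b0100, 0b1000];
    let combined = flags.iter().fold(0u32, |acc, f| acc | f); // BitOr<&u32>
    assert_eq!(combined, 0b1101);
}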
<u64 as core::default::Default>::default fn default() -> $t {
$v
}
<u64 as core::ops::arith::Add<&u64>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u64 as core::ops::arith::AddAssign<&u64>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u64 as core::ops::arith::Div<&u64>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u64 as core::ops::arith::DivAssign<&u64>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u64 as core::ops::arith::Mul<&u64>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u64 as core::ops::arith::MulAssign<&u64>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u64 as core::ops::arith::Rem<&u64>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u64 as core::ops::arith::RemAssign<&u64>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u64 as core::ops::arith::Sub<&u64>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u64 as core::ops::arith::SubAssign<&u64>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u64 as core::ops::bit::BitAnd<&u64>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u64 as core::ops::bit::BitAndAssign<&u64>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u64 as core::ops::bit::BitOr<&u64>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u64 as core::ops::bit::BitOrAssign<&u64>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u64 as core::ops::bit::BitXor<&u64>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u64 as core::ops::bit::BitXorAssign<&u64>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u64 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u64 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u64 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u64 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u64 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u64 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u64 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u64 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::default::Default>::default fn default() -> $t {
$v
}
<u8 as core::ops::arith::Add<&u8>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<u8 as core::ops::arith::AddAssign<&u8>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<u8 as core::ops::arith::Div<&u8>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<u8 as core::ops::arith::DivAssign<&u8>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<u8 as core::ops::arith::Mul<&u8>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<u8 as core::ops::arith::MulAssign<&u8>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<u8 as core::ops::arith::Rem<&u8>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<u8 as core::ops::arith::RemAssign<&u8>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<u8 as core::ops::arith::Sub<&u8>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<u8 as core::ops::arith::SubAssign<&u8>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<u8 as core::ops::bit::BitAnd<&u8>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<u8 as core::ops::bit::BitAndAssign<&u8>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<u8 as core::ops::bit::BitOr<&u8>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<u8 as core::ops::bit::BitOrAssign<&u8>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<u8 as core::ops::bit::BitXor<&u8>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<u8 as core::ops::bit::BitXorAssign<&u8>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<u8 as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<u8 as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl<usize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<u8 as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign<usize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<u8 as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<u8 as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr<usize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<u8 as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<u8 as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign<usize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<u8 as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::default::Default>::default fn default() -> $t {
$v
}
<usize as core::ops::arith::Add<&usize>>::add fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Add>::add fn add(self, other: $t) -> $t { self + other }
<usize as core::ops::arith::AddAssign<&usize>>::add_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::AddAssign>::add_assign fn add_assign(&mut self, other: $t) { *self += other }
<usize as core::ops::arith::Div<&usize>>::div fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Div>::div fn div(self, other: $t) -> $t { self / other }
<usize as core::ops::arith::DivAssign<&usize>>::div_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::DivAssign>::div_assign fn div_assign(&mut self, other: $t) { *self /= other }
<usize as core::ops::arith::Mul<&usize>>::mul fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Mul>::mul fn mul(self, other: $t) -> $t { self * other }
<usize as core::ops::arith::MulAssign<&usize>>::mul_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::MulAssign>::mul_assign fn mul_assign(&mut self, other: $t) { *self *= other }
<usize as core::ops::arith::Rem<&usize>>::rem fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Rem>::rem fn rem(self, other: $t) -> $t { self % other }
<usize as core::ops::arith::RemAssign<&usize>>::rem_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::RemAssign>::rem_assign fn rem_assign(&mut self, other: $t) { *self %= other }
<usize as core::ops::arith::Sub<&usize>>::sub fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::arith::Sub>::sub fn sub(self, other: $t) -> $t { self - other }
<usize as core::ops::arith::SubAssign<&usize>>::sub_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::arith::SubAssign>::sub_assign fn sub_assign(&mut self, other: $t) { *self -= other }
<usize as core::ops::bit::BitAnd<&usize>>::bitand fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::BitAnd>::bitand fn bitand(self, rhs: $t) -> $t { self & rhs }
<usize as core::ops::bit::BitAndAssign<&usize>>::bitand_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::BitAndAssign>::bitand_assign fn bitand_assign(&mut self, other: $t) { *self &= other }
<usize as core::ops::bit::BitOr<&usize>>::bitor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::BitOr>::bitor fn bitor(self, rhs: $t) -> $t { self | rhs }
<usize as core::ops::bit::BitOrAssign<&usize>>::bitor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::BitOrAssign>::bitor_assign fn bitor_assign(&mut self, other: $t) { *self |= other }
<usize as core::ops::bit::BitXor<&usize>>::bitxor fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::BitXor>::bitxor fn bitxor(self, other: $t) -> $t { self ^ other }
<usize as core::ops::bit::BitXorAssign<&usize>>::bitxor_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::BitXorAssign>::bitxor_assign fn bitxor_assign(&mut self, other: $t) { *self ^= other }
<usize as core::ops::bit::Not>::not fn not(self) -> $t { !self }
<usize as core::ops::bit::Shl<&i128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&i8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&isize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u128>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u16>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u32>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u64>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&u8>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<&usize>>::shl fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shl<i128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<i8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<isize>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u128>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u16>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u32>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u64>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl<u8>>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::Shl>::shl fn shl(self, other: $f) -> $t {
self << other
}
<usize as core::ops::bit::ShlAssign<&i128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&i8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&isize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u128>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u16>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u32>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u64>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&u8>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<&usize>>::shl_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShlAssign<i128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<i8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<isize>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u128>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u16>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u32>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u64>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign<u8>>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::ShlAssign>::shl_assign fn shl_assign(&mut self, other: $f) {
*self <<= other
}
<usize as core::ops::bit::Shr<&i128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&i8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&isize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u128>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u16>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u32>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u64>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&u8>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<&usize>>::shr fn $method(self, other: &$u) -> <$t as $imp<$u>>::Output {
$imp::$method(self, *other)
}
<usize as core::ops::bit::Shr<i128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<i8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<isize>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u128>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u16>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u32>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u64>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr<u8>>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::Shr>::shr fn shr(self, other: $f) -> $t {
self >> other
}
<usize as core::ops::bit::ShrAssign<&i128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&i8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&isize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u128>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u16>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u32>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u64>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&u8>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<&usize>>::shr_assign fn $method(&mut self, other: &$u) {
$imp::$method(self, *other);
}
<usize as core::ops::bit::ShrAssign<i128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<i8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<isize>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u128>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u16>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u32>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u64>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign<u8>>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
<usize as core::ops::bit::ShrAssign>::shr_assign fn shr_assign(&mut self, other: $f) {
*self >>= other
}
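// Illustrative usage sketch: the by-reference Shl/Shr and *Assign impls
// expanded above simply dereference the right-hand side and forward to the
// by-value operator, so shifts accept `&rhs` of any integer type.
fn forwarding_demo() {
    let mut x: usize = 0b1000;
    let shifted = x >> &2u8;  // <usize as Shr<&u8>>::shr forwards to Shr<u8>
    x <<= &1u32;              // <usize as ShlAssign<&u32>>::shl_assign forwards too
    assert_eq!(shifted, 0b10);
    assert_eq!(x, 0b1_0000);
}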
<usize as core::slice::index::SliceIndex<[T]>>::get fn get(self, slice: &[T]) -> Option<&T> {
if self < slice.len() {
// SAFETY: `self` is checked to be in bounds.
unsafe { Some(slice_get_unchecked(slice, self)) }
} else {
None
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked unsafe fn get_unchecked(self, slice: *const [T]) -> *const T {
assert_unsafe_precondition!(
check_language_ub, // okay because of the `assume` below
"slice::get_unchecked requires that the index is within the slice",
(this: usize = self, len: usize = slice.len()) => this < len
);
// SAFETY: the caller guarantees that `slice` is not dangling, so it
// cannot be longer than `isize::MAX`. They also guarantee that
// `self` is in bounds of `slice` so `self` cannot overflow an `isize`,
// so the call to `add` is safe.
unsafe {
// Use intrinsics::assume instead of hint::assert_unchecked so that we don't check the
// precondition of this function twice.
crate::intrinsics::assume(self < slice.len());
slice_get_unchecked(slice, self)
}
}
<usize as core::slice::index::SliceIndex<[T]>>::get_unchecked_mut unsafe fn get_unchecked_mut(self, slice: *mut [T]) -> *mut T {
assert_unsafe_precondition!(
check_library_ub,
"slice::get_unchecked_mut requires that the index is within the slice",
(this: usize = self, len: usize = slice.len()) => this < len
);
// SAFETY: see comments for `get_unchecked` above.
unsafe { slice_get_unchecked(slice, self) }
}
<usize as core::slice::index::SliceIndex<[T]>>::index fn index(self, slice: &[T]) -> &T {
// N.B., use intrinsic indexing
&(*slice)[self]
}
<usize as core::slice::index::SliceIndex<[T]>>::index_mut fn index_mut(self, slice: &mut [T]) -> &mut T {
// N.B., use intrinsic indexing
&mut (*slice)[self]
}
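// Illustrative usage sketch: with the `usize` SliceIndex impls above, `get`
// returns None for an out-of-bounds index, while `index` (the `[]` operator) panics.
fn slice_index_demo() {
    let v = [10, 20, 30];
    assert_eq!(v.get(1), Some(&20));
    assert_eq!(v.get(9), None);
    assert_eq!(v[2], 30); // routed through SliceIndex::index
}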
core::alloc::layout::Layout::align pub const fn align(&self) -> usize {
self.align.as_usize()
}
core::alloc::layout::Layout::from_size_align_unchecked pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
assert_unsafe_precondition!(
check_library_ub,
"Layout::from_size_align_unchecked requires that align is a power of 2 \
and the rounded-up allocation size does not exceed isize::MAX",
(
size: usize = size,
align: usize = align,
) => Layout::is_size_align_valid(size, align)
);
// SAFETY: the caller is required to uphold the preconditions.
unsafe { Layout { size, align: mem::transmute(align) } }
}
core::alloc::layout::Layout::is_size_align_valid const fn is_size_align_valid(size: usize, align: usize) -> bool {
let Some(align) = Alignment::new(align) else { return false };
if size > Self::max_size_for_align(align) {
return false;
}
true
}
core::alloc::layout::Layout::max_size_for_align const fn max_size_for_align(align: Alignment) -> usize {
// (power-of-two implies align != 0.)
// Rounded up size is:
// size_rounded_up = (size + align - 1) & !(align - 1);
//
// We know from above that align != 0. If adding (align - 1)
// does not overflow, then rounding up will be fine.
//
// Conversely, &-masking with !(align - 1) will subtract off
// only low-order-bits. Thus if overflow occurs with the sum,
// the &-mask cannot subtract enough to undo that overflow.
//
// Above implies that checking for summation overflow is both
// necessary and sufficient.
// SAFETY: the maximum possible alignment is `isize::MAX + 1`,
// so the subtraction cannot overflow.
unsafe { unchecked_sub(isize::MAX as usize + 1, align.as_usize()) }
}
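// Worked example (assumed values) of the round-up formula referenced in the
// comment above, size_rounded_up = (size + align - 1) & !(align - 1), with
// align = 8: (13 + 7) & !7 == 20 & !7 == 16. If the sum overflowed, the mask
// could not undo it, which is why checking the sum alone suffices.
const _: () = {
    let size = 13usize;
    let align = 8usize;
    let rounded = (size + align - 1) & !(align - 1);
    assert!(rounded == 16);
};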
core::alloc::layout::Layout::new pub const fn new<T>() -> Self {
let (size, align) = size_align::<T>();
// SAFETY: if the type is instantiated, rustc already ensures that its
// layout is valid. Use the unchecked constructor to avoid inserting a
// panicking codepath that needs to be optimized out.
unsafe { Layout::from_size_align_unchecked(size, align) }
}
core::alloc::layout::Layout::size pub const fn size(&self) -> usize {
self.size
}
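// Illustrative sketch for the Layout accessors/constructors above (sizes and
// alignments assume a typical 64-bit target):
fn layout_demo() {
    use core::alloc::Layout;
    let l = Layout::new::<u64>();
    assert_eq!((l.size(), l.align()), (8, 8));
    // The checked counterpart of from_size_align_unchecked rejects a
    // non-power-of-two alignment.
    assert!(Layout::from_size_align(8, 3).is_err());
}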
core::alloc::layout::size_align const fn size_align<T>() -> (usize, usize) {
(size_of::<T>(), align_of::<T>())
}
core::any::TypeId::of pub const fn of<T: ?Sized + 'static>() -> TypeId {
const { intrinsics::type_id::<T>() }
}
core::any::type_name pub const fn type_name<T: ?Sized>() -> &'static str {
const { intrinsics::type_name::<T>() }
}
core::any::type_name_of_val pub const fn type_name_of_val<T: ?Sized>(_val: &T) -> &'static str {
type_name::<T>()
}
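// Illustrative sketch for TypeId::of and type_name above:
fn any_demo() {
    use core::any::TypeId;
    assert_eq!(TypeId::of::<u32>(), TypeId::of::<u32>());
    assert_ne!(TypeId::of::<u32>(), TypeId::of::<i32>());
    // The exact string is unspecified, but it normally contains the type path.
    assert!(core::any::type_name::<Option<u8>>().contains("Option"));
}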
core::array::<impl [T; N]>::as_mut_slice pub const fn as_mut_slice(&mut self) -> &mut [T] {
self
}
core::array::<impl [T; N]>::as_slice pub const fn as_slice(&self) -> &[T] {
self
}
core::array::<impl core::clone::Clone for [T; N]>::clone fn clone(&self) -> Self {
SpecArrayClone::clone(self)
}
core::array::<impl core::convert::TryFrom<&'a [T]> for &'a [T; N]>::try_from fn try_from(slice: &'a [T]) -> Result<&'a [T; N], TryFromSliceError> {
slice.as_array().ok_or(TryFromSliceError(()))
}
core::array::<impl core::convert::TryFrom<&'a mut [T]> for &'a mut [T; N]>::try_from fn try_from(slice: &'a mut [T]) -> Result<&'a mut [T; N], TryFromSliceError> {
slice.as_mut_array().ok_or(TryFromSliceError(()))
}
core::array::<impl core::convert::TryFrom<&[T]> for [T; N]>::try_from fn try_from(slice: &[T]) -> Result<[T; N], TryFromSliceError> {
<&Self>::try_from(slice).copied()
}
core::array::<impl core::convert::TryFrom<&mut [T]> for [T; N]>::try_from fn try_from(slice: &mut [T]) -> Result<[T; N], TryFromSliceError> {
<Self>::try_from(&*slice)
}
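// Illustrative sketch for the slice-to-array TryFrom impls above:
fn try_from_demo() {
    let bytes = [1u8, 2, 3, 4, 5];
    let head: [u8; 4] = <[u8; 4]>::try_from(&bytes[..4]).unwrap();
    assert_eq!(head, [1, 2, 3, 4]);
    // A length mismatch yields Err(TryFromSliceError).
    assert!(<&[u8; 4]>::try_from(&bytes[..3]).is_err());
}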
core::array::<impl core::iter::traits::collect::IntoIterator for &'a [T; N]>::into_iter fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
core::array::<impl core::iter::traits::collect::IntoIterator for &'a mut [T; N]>::into_iter fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
core::array::<impl core::ops::index::Index<I> for [T; N]>::index fn index(&self, index: I) -> &Self::Output {
Index::index(self as &[T], index)
}
core::array::<impl core::ops::index::IndexMut<I> for [T; N]>::index_mut fn index_mut(&mut self, index: I) -> &mut Self::Output {
IndexMut::index_mut(self as &mut [T], index)
}
core::array::Guard::<'_, T>::push_unchecked pub(crate) unsafe fn push_unchecked(&mut self, item: T) {
// SAFETY: If `initialized` was correct before and the caller does not
// invoke this method more than N times then writes will be in-bounds
// and slots will not be initialized more than once.
unsafe {
self.array_mut.get_unchecked_mut(self.initialized).write(item);
self.initialized = self.initialized.unchecked_add(1);
}
}
core::array::equality::<impl core::cmp::PartialEq<&[U]> for [T; N]>::eq fn eq(&self, other: &&[U]) -> bool {
*self == **other
}
core::array::equality::<impl core::cmp::PartialEq<&[U]> for [T; N]>::ne fn ne(&self, other: &&[U]) -> bool {
*self != **other
}
core::array::equality::<impl core::cmp::PartialEq<&mut [U]> for [T; N]>::eq fn eq(&self, other: &&mut [U]) -> bool {
*self == **other
}
core::array::equality::<impl core::cmp::PartialEq<&mut [U]> for [T; N]>::ne fn ne(&self, other: &&mut [U]) -> bool {
*self != **other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &[T]>::eq fn eq(&self, other: &[U; N]) -> bool {
**self == *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &[T]>::ne fn ne(&self, other: &[U; N]) -> bool {
**self != *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &mut [T]>::eq fn eq(&self, other: &[U; N]) -> bool {
**self == *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for &mut [T]>::ne fn ne(&self, other: &[U; N]) -> bool {
**self != *other
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T; N]>::eq fn eq(&self, other: &[U; N]) -> bool {
SpecArrayEq::spec_eq(self, other)
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T; N]>::ne fn ne(&self, other: &[U; N]) -> bool {
SpecArrayEq::spec_ne(self, other)
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T]>::eq fn eq(&self, other: &[U; N]) -> bool {
match self.as_array::<N>() {
Some(b) => *b == *other,
None => false,
}
}
core::array::equality::<impl core::cmp::PartialEq<[U; N]> for [T]>::ne fn ne(&self, other: &[U; N]) -> bool {
match self.as_array::<N>() {
Some(b) => *b != *other,
None => true,
}
}
core::array::equality::<impl core::cmp::PartialEq<[U]> for [T; N]>::eq fn eq(&self, other: &[U]) -> bool {
match other.as_array::<N>() {
Some(b) => *self == *b,
None => false,
}
}
core::array::equality::<impl core::cmp::PartialEq<[U]> for [T; N]>::ne fn ne(&self, other: &[U]) -> bool {
match other.as_array::<N>() {
Some(b) => *self != *b,
None => true,
}
}
core::array::from_mut pub const fn from_mut<T>(s: &mut T) -> &mut [T; 1] {
// SAFETY: Converting `&mut T` to `&mut [T; 1]` is sound.
unsafe { &mut *(s as *mut T).cast::<[T; 1]>() }
}
core::array::from_ref pub const fn from_ref<T>(s: &T) -> &[T; 1] {
// SAFETY: Converting `&T` to `&[T; 1]` is sound.
unsafe { &*(s as *const T).cast::<[T; 1]>() }
}
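// Illustrative sketch for array::from_ref / array::from_mut above: view a
// single value as a one-element array without copying it.
fn from_ref_demo() {
    let mut x = 7;
    assert_eq!(core::array::from_ref(&x), &[7]);
    core::array::from_mut(&mut x)[0] += 1;
    assert_eq!(x, 8);
}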
core::array::from_trusted_iterator fn from_trusted_iterator<T, const N: usize>(iter: impl UncheckedIterator<Item = T>) -> [T; N] {
try_from_trusted_iterator(iter.map(NeverShortCircuit)).0
}
core::array::iter::<impl core::iter::traits::collect::IntoIterator for [T; N]>::into_iter fn into_iter(self) -> Self::IntoIter {
// SAFETY: The transmute here is actually safe. The docs of `MaybeUninit`
// promise:
//
// > `MaybeUninit<T>` is guaranteed to have the same size and alignment
// > as `T`.
//
// The docs even show a transmute from an array of `MaybeUninit<T>` to
// an array of `T`.
//
// With that, this initialization satisfies the invariants.
//
// FIXME: If normal `transmute` ever gets smart enough to allow this
// directly, use it instead of `transmute_unchecked`.
let data: [MaybeUninit<T>; N] = unsafe { transmute_unchecked(self) };
// SAFETY: The original array was entirely initialized and the alive
// range we're passing here represents that fact.
let inner = unsafe { InnerSized::new_unchecked(IndexRange::zero_to(N), data) };
IntoIter { inner: ManuallyDrop::new(inner) }
}
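// Illustrative sketch: the by-value IntoIterator impl above moves elements out
// of the array, while the &/&mut impls iterate by reference.
fn into_iter_demo() {
    let arr = [String::from("a"), String::from("b")];
    let mut owned = arr.into_iter(); // yields String, not &String
    assert_eq!(owned.next().as_deref(), Some("a"));

    let nums = [1, 2, 3];
    let sum: i32 = (&nums).into_iter().copied().sum();
    assert_eq!(sum, 6);
}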
core::array::iter::IntoIter::<T, N>::unsize fn unsize(&self) -> &InnerUnsized<T> {
self.inner.deref()
}
core::array::iter::IntoIter::<T, N>::unsize_mut fn unsize_mut(&mut self) -> &mut InnerUnsized<T> {
self.inner.deref_mut()
}
core::array::iter::iter_inner::PolymorphicIter::<DATA>::len pub(super) const fn len(&self) -> usize {
self.alive.len()
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>; N]>::empty pub(super) const fn empty() -> Self {
Self { alive: IndexRange::zero_to(0), data: [const { MaybeUninit::uninit() }; N] }
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>; N]>::new_unchecked pub(super) const unsafe fn new_unchecked(alive: IndexRange, data: [MaybeUninit<T>; N]) -> Self {
Self { alive, data }
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::as_slice pub(super) fn as_slice(&self) -> &[T] {
// SAFETY: We know that all elements within `alive` are properly initialized.
unsafe {
let slice = self.data.get_unchecked(self.alive.clone());
slice.assume_init_ref()
}
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::next pub(super) fn next(&mut self) -> Option<T> {
// Get the next index from the front.
//
// Increasing `alive.start` by 1 maintains the invariant regarding
// `alive`. However, due to this change, for a short time, the alive
// zone is not `data[alive]` anymore, but `data[idx..alive.end]`.
self.alive.next().map(|idx| {
// Read the element from the array.
// SAFETY: `idx` is an index into the former "alive" region of the
// array. Reading this element means that `data[idx]` is regarded as
// dead now (i.e. do not touch). As `idx` was the start of the
// alive-zone, the alive zone is now `data[alive]` again, restoring
// all invariants.
unsafe { self.data.get_unchecked(idx).assume_init_read() }
})
}
core::array::iter::iter_inner::PolymorphicIter::<[core::mem::maybe_uninit::MaybeUninit<T>]>::size_hint pub(super) fn size_hint(&self) -> (usize, Option<usize>) {
let len = self.len();
(len, Some(len))
}
core::array::try_from_fn pub fn try_from_fn<R, const N: usize, F>(cb: F) -> ChangeOutputType<R, [R::Output; N]>
where
F: FnMut(usize) -> R,
R: Try,
R::Residual: Residual<[R::Output; N]>,
{
let mut array = [const { MaybeUninit::uninit() }; N];
match try_from_fn_erased(&mut array, cb) {
ControlFlow::Break(r) => FromResidual::from_residual(r),
ControlFlow::Continue(()) => {
// SAFETY: All elements of the array were populated.
try { unsafe { MaybeUninit::array_assume_init(array) } }
}
}
}
core::array::try_from_fn_erased fn try_from_fn_erased<T, R>(
buffer: &mut [MaybeUninit<T>],
mut generator: impl FnMut(usize) -> R,
) -> ControlFlow<R::Residual>
where
R: Try<Output = T>,
{
let mut guard = Guard { array_mut: buffer, initialized: 0 };
while guard.initialized < guard.array_mut.len() {
let item = generator(guard.initialized).branch()?;
// SAFETY: The loop condition ensures we have space to push the item
unsafe { guard.push_unchecked(item) };
}
mem::forget(guard);
ControlFlow::Continue(())
}
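// Standalone sketch of the drop-guard idea used by try_from_fn_erased above.
// `PartialGuard` here is hypothetical, not the actual core::array::Guard: if
// filling stops part way (panic or short-circuit), only the prefix that was
// actually written gets dropped; on success the caller forgets the guard.
use core::mem::MaybeUninit;

struct PartialGuard<'a, T> {
    slots: &'a mut [MaybeUninit<T>],
    initialized: usize,
}

impl<T> Drop for PartialGuard<'_, T> {
    fn drop(&mut self) {
        for slot in &mut self.slots[..self.initialized] {
            // SAFETY: exactly `initialized` leading slots were written by `push`.
            unsafe { slot.assume_init_drop() };
        }
    }
}

impl<T> PartialGuard<'_, T> {
    fn push(&mut self, value: T) {
        self.slots[self.initialized].write(value);
        self.initialized += 1;
    }
}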
core::array::try_from_trusted_iterator fn try_from_trusted_iterator<T, R, const N: usize>(
iter: impl UncheckedIterator<Item = R>,
) -> ChangeOutputType<R, [T; N]>
where
R: Try<Output = T>,
R::Residual: Residual<[T; N]>,
{
assert!(iter.size_hint().0 >= N);
fn next<T>(mut iter: impl UncheckedIterator<Item = T>) -> impl FnMut(usize) -> T {
move |_| {
// SAFETY: We know that `from_fn` will call this at most N times,
// and we checked to ensure that we have at least that many items.
unsafe { iter.next_unchecked() }
}
}
try_from_fn(next(iter))
}
core::array::try_from_trusted_iterator::next fn next<T>(mut iter: impl UncheckedIterator<Item = T>) -> impl FnMut(usize) -> T {
move |_| {
// SAFETY: We know that `from_fn` will call this at most N times,
// and we checked to ensure that we have at least that many items.
unsafe { iter.next_unchecked() }
}
}
core::bool::<impl bool>::ok_or pub fn ok_or<E>(self, err: E) -> Result<(), E> {
if self { Ok(()) } else { Err(err) }
}
core::bool::<impl bool>::ok_or_else pub fn ok_or_else<E, F: FnOnce() -> E>(self, f: F) -> Result<(), E> {
if self { Ok(()) } else { Err(f()) }
}
core::bool::<impl bool>::then pub fn then<T, F: FnOnce() -> T>(self, f: F) -> Option<T> {
if self { Some(f()) } else { None }
}
core::bool::<impl bool>::then_some pub fn then_some<T>(self, t: T) -> Option<T> {
if self { Some(t) } else { None }
}
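// Illustrative sketch for the bool helpers above: `then_some` evaluates its
// argument eagerly, `then` runs the closure only when the bool is true; the
// ok_or variants follow the same shape but produce Result<(), E>.
fn bool_demo(n: i32) -> Option<i32> {
    let _eager = (n > 0).then_some(n * 2); // n * 2 is computed regardless
    (n > 0).then(|| n * 2)                 // closure runs only if n > 0
}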
core::cell::UnsafeCell::<T>::get pub const fn get(&self) -> *mut T {
// We can just cast the pointer from `UnsafeCell<T>` to `T` because of
// #[repr(transparent)]. This exploits std's special status, there is
// no guarantee for user code that this will work in future versions of the compiler!
self as *const UnsafeCell<T> as *const T as *mut T
}
core::cell::UnsafeCell::<T>::get_mut pub const fn get_mut(&mut self) -> &mut T {
&mut self.value
}
core::cell::UnsafeCell::<T>::into_inner pub const fn into_inner(self) -> T {
self.value
}
core::cell::UnsafeCell::<T>::new pub const fn new(value: T) -> UnsafeCell<T> {
UnsafeCell { value }
}
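// Illustrative sketch for UnsafeCell above: it is the primitive that makes
// writing through a shared reference permissible; callers must still uphold
// the aliasing rules themselves.
fn unsafe_cell_demo() {
    use core::cell::UnsafeCell;
    let cell = UnsafeCell::new(5);
    // SAFETY: no other reference to the contents is live during this write.
    unsafe { *cell.get() += 1 };
    assert_eq!(cell.into_inner(), 6);
}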
core::clone::Clone::clone_from fn clone_from(&mut self, source: &Self)
where
Self: [const] Destruct,
{
*self = source.clone()
}
core::clone::impls::<impl core::clone::Clone for &T>::clone fn clone(&self) -> Self {
self
}
core::clone::impls::<impl core::clone::Clone for bool>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for char>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f128>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f16>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f32>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for f64>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i128>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i16>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i32>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i64>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for i8>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for isize>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u128>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u16>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u32>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u64>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for u8>::clone fn clone(&self) -> Self {
*self
}
core::clone::impls::<impl core::clone::Clone for usize>::clone fn clone(&self) -> Self {
*self
}
core::cmp::Ord::clamp fn clamp(self, min: Self, max: Self) -> Self
where
Self: Sized + [const] Destruct,
{
assert!(min <= max);
if self < min {
min
} else if self > max {
max
} else {
self
}
}
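// Illustrative sketch for Ord::clamp/max/min above. Per the bodies: on equal
// values `max` returns `other`, `min` returns `self`, and `clamp` asserts
// that `min <= max` before comparing.
fn ord_demo() {
    assert_eq!(7.clamp(0, 5), 5);
    assert_eq!((-3).clamp(0, 5), 0);
    assert_eq!(2.max(5), 5);
    assert_eq!(2.min(5), 2);
}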
core::cmp::Ord::max fn max(self, other: Self) -> Self
where
Self: Sized + [const] Destruct,
{
if other < self { self } else { other }
}
core::cmp::Ord::min fn min(self, other: Self) -> Self
where
Self: Sized + [const] Destruct,
{
if other < self { other } else { self }
}
core::cmp::Ordering::as_raw const fn as_raw(self) -> i8 {
// FIXME(const-hack): just use `PartialOrd` against `Equal` once that's const
crate::intrinsics::discriminant_value(&self)
}
core::cmp::Ordering::is_eq pub const fn is_eq(self) -> bool {
// All the `is_*` methods are implemented as comparisons against zero
// to follow how clang's libcxx implements their equivalents in
// <https://github.com/llvm/llvm-project/blob/60486292b79885b7800b082754153202bef5b1f0/libcxx/include/__compare/is_eq.h#L23-L28>
self.as_raw() == 0
}
core::cmp::Ordering::is_ge pub const fn is_ge(self) -> bool {
self.as_raw() >= 0
}
core::cmp::Ordering::is_gt pub const fn is_gt(self) -> bool {
self.as_raw() > 0
}
core::cmp::Ordering::is_le pub const fn is_le(self) -> bool {
self.as_raw() <= 0
}
core::cmp::Ordering::is_lt pub const fn is_lt(self) -> bool {
self.as_raw() < 0
}
core::cmp::Ordering::is_ne pub const fn is_ne(self) -> bool {
self.as_raw() != 0
}
core::cmp::Ordering::reverse pub const fn reverse(self) -> Ordering {
match self {
Less => Greater,
Equal => Equal,
Greater => Less,
}
}
core::cmp::Ordering::then pub const fn then(self, other: Ordering) -> Ordering {
match self {
Equal => other,
_ => self,
}
}
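Illustrative example (editorial addition): `reverse`, `then`, and the `is_*` accessors compose naturally when comparing multiple fields; the tuple values here are arbitrary.

use std::cmp::Ordering;

fn main() {
    let a = (1, 5);
    let b = (1, 3);
    // Compare the first field, falling back to the second only on `Equal`.
    let ord = a.0.cmp(&b.0).then(a.1.cmp(&b.1));
    assert_eq!(ord, Ordering::Greater);
    assert!(ord.is_gt());
    assert_eq!(ord.reverse(), Ordering::Less);
}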
core::cmp::PartialEq::ne fn ne(&self, other: &Rhs) -> bool {
!self.eq(other)
}
core::cmp::PartialOrd::__chaining_ge fn __chaining_ge(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_ge)
}
core::cmp::PartialOrd::__chaining_gt fn __chaining_gt(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_gt)
}
core::cmp::PartialOrd::__chaining_le fn __chaining_le(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_le)
}
core::cmp::PartialOrd::__chaining_lt fn __chaining_lt(&self, other: &Rhs) -> ControlFlow<bool> {
default_chaining_impl(self, other, Ordering::is_lt)
}
core::cmp::PartialOrd::ge fn ge(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_ge)
}
core::cmp::PartialOrd::gt fn gt(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_gt)
}
core::cmp::PartialOrd::le fn le(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_le)
}
core::cmp::PartialOrd::lt fn lt(&self, other: &Rhs) -> bool {
self.partial_cmp(other).is_some_and(Ordering::is_lt)
}
core::cmp::default_chaining_impl const fn default_chaining_impl<T, U>(
lhs: &T,
rhs: &U,
p: impl [const] FnOnce(Ordering) -> bool + [const] Destruct,
) -> ControlFlow<bool>
where
T: [const] PartialOrd<U> + PointeeSized,
U: PointeeSized,
{
// It's important that this only call `partial_cmp` once, not call `eq` then
// one of the relational operators. We don't want to `bcmp`-then-`memcmp` a
// `String`, for example, or similarly for other data structures (#108157).
match <T as PartialOrd<U>>::partial_cmp(lhs, rhs) {
Some(Equal) => ControlFlow::Continue(()),
Some(c) => ControlFlow::Break(p(c)),
None => ControlFlow::Break(false),
}
}
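Sketch (editorial addition): `chain_lt` below mirrors the decision the private chaining helpers make, using only the public `ControlFlow` type; it is not the library implementation itself. `Continue` means "this pair was equal, inspect the next field", while `Break(b)` is the final answer.

use std::cmp::Ordering;
use std::ops::ControlFlow;

fn chain_lt<T: PartialOrd>(lhs: &T, rhs: &T) -> ControlFlow<bool> {
    match lhs.partial_cmp(rhs) {
        Some(Ordering::Equal) => ControlFlow::Continue(()),
        Some(c) => ControlFlow::Break(c.is_lt()),
        None => ControlFlow::Break(false),
    }
}

fn main() {
    assert_eq!(chain_lt(&1, &1), ControlFlow::Continue(()));
    assert_eq!(chain_lt(&1, &2), ControlFlow::Break(true));
    assert_eq!(chain_lt(&f64::NAN, &1.0), ControlFlow::Break(false));
}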
core::cmp::impls::<impl core::cmp::Ord for ()>::cmp fn cmp(&self, _other: &()) -> Ordering {
Equal
}
core::cmp::impls::<impl core::cmp::Ord for bool>::clamp fn clamp(self, min: bool, max: bool) -> bool {
assert!(min <= max);
self.max(min).min(max)
}
core::cmp::impls::<impl core::cmp::Ord for bool>::max fn max(self, other: bool) -> bool {
self | other
}
core::cmp::impls::<impl core::cmp::Ord for bool>::min fn min(self, other: bool) -> bool {
self & other
}
core::cmp::impls::<impl core::cmp::Ord for char>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i128>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i16>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i32>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i64>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for i8>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for isize>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u128>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u16>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u32>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u64>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for u8>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::Ord for usize>::cmp fn cmp(&self, other: &Self) -> Ordering {
crate::intrinsics::three_way_compare(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq for ()>::eq fn eq(&self, _other: &()) -> bool {
true
}
core::cmp::impls::<impl core::cmp::PartialEq for ()>::ne fn ne(&self, _other: &()) -> bool {
false
}
core::cmp::impls::<impl core::cmp::PartialEq for bool>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for bool>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for char>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for char>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f128>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f128>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f16>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f16>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f32>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f32>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for f64>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for f64>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i128>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i128>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i16>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i16>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i32>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i32>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i64>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i64>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for i8>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for i8>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for isize>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for isize>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u128>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u128>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u16>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u16>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u32>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u32>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u64>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u64>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for u8>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for u8>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq for usize>::eq fn eq(&self, other: &Self) -> bool { *self == *other }
core::cmp::impls::<impl core::cmp::PartialEq for usize>::ne fn ne(&self, other: &Self) -> bool { *self != *other }
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &A>::eq fn eq(&self, other: &&B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &A>::ne fn ne(&self, other: &&B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &mut A>::eq fn eq(&self, other: &&B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&B> for &mut A>::ne fn ne(&self, other: &&B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &A>::eq fn eq(&self, other: &&mut B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &A>::ne fn ne(&self, other: &&mut B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &mut A>::eq fn eq(&self, other: &&mut B) -> bool {
PartialEq::eq(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialEq<&mut B> for &mut A>::ne fn ne(&self, other: &&mut B) -> bool {
PartialEq::ne(*self, *other)
}
core::cmp::impls::<impl core::cmp::PartialOrd for ()>::partial_cmp fn partial_cmp(&self, _: &()) -> Option<Ordering> {
Some(Equal)
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for bool>::partial_cmp fn partial_cmp(&self, other: &bool) -> Option<Ordering> {
Some(self.cmp(other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for char>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for char>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f128>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f16>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f32>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for f64>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (*self <= *other, *self >= *other) {
(false, false) => None,
(false, true) => Some(Greater),
(true, false) => Some(Less),
(true, true) => Some(Equal),
}
}
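Illustrative example (editorial addition): the `(<=, >=)` match used by the float `partial_cmp` impls maps a NaN operand to `None`, since both comparisons are then false.

use std::cmp::Ordering;

fn main() {
    assert_eq!(1.0_f64.partial_cmp(&2.0), Some(Ordering::Less));
    assert_eq!(2.0_f64.partial_cmp(&2.0), Some(Ordering::Equal));
    assert_eq!(f64::NAN.partial_cmp(&2.0), None);
}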
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i128>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i16>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i32>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i64>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for i8>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for isize>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u128>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u16>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u32>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u64>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for u8>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_ge fn __chaining_ge(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs >= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_gt fn __chaining_gt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs > rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_le fn __chaining_le(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs <= rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::__chaining_lt fn __chaining_lt(&self, other: &Self) -> ControlFlow<bool> {
let (lhs, rhs) = (*self, *other);
if lhs == rhs { Continue(()) } else { Break(lhs < rhs) }
}
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::ge fn ge(&self, other: &Self) -> bool { *self >= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::gt fn gt(&self, other: &Self) -> bool { *self > *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::le fn le(&self, other: &Self) -> bool { *self <= *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::lt fn lt(&self, other: &Self) -> bool { *self < *other }
core::cmp::impls::<impl core::cmp::PartialOrd for usize>::partial_cmp fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
Some(crate::intrinsics::three_way_compare(*self, *other))
}
core::cmp::max pub const fn max<T: [const] Ord + [const] Destruct>(v1: T, v2: T) -> T {
v1.max(v2)
}
core::cmp::min pub const fn min<T: [const] Ord + [const] Destruct>(v1: T, v2: T) -> T {
v1.min(v2)
}
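Illustrative example (editorial addition): the free functions defer to `Ord::max` / `Ord::min`, so on ties `max` returns the second argument and `min` the first.

fn main() {
    assert_eq!(core::cmp::max(3, 7), 7);
    assert_eq!(core::cmp::min(3, 7), 3);
    assert_eq!(core::cmp::max("a", "b"), "b");
}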
core::convert::identity pub const fn identity<T>(x: T) -> T {
x
}
core::convert::num::<impl core::convert::TryFrom<u128> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u64>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u128> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u16> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u32> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u32> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::<impl core::convert::TryFrom<u64> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<u128> for usize>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<u32> for usize>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<u64> for usize>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u128>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u16>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u32>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u64>::try_from fn try_from(value: $source) -> Result<Self, Self::Error> {
Ok(value as Self)
}
core::convert::num::ptr_try_from_impls::<impl core::convert::TryFrom<usize> for u8>::try_from fn try_from(u: $source) -> Result<Self, Self::Error> {
if u > (Self::MAX as $source) {
Err(TryFromIntError(()))
} else {
Ok(u as Self)
}
}
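Illustrative example (editorial addition): the `TryFrom` impls above succeed exactly when the value fits the narrower target type; the values here are arbitrary.

use core::convert::TryFrom;

fn main() {
    assert_eq!(u8::try_from(200_u32), Ok(200_u8));
    assert!(u8::try_from(300_u32).is_err());
    assert_eq!(usize::try_from(1_u64 << 20), Ok(1_usize << 20));
}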
core::f32::<impl f32>::from_bits pub const fn from_bits(v: u32) -> Self {
// It turns out the safety issues with sNaN were overblown! Hooray!
// SAFETY: `u32` is a plain old datatype so we can always transmute from it.
unsafe { mem::transmute(v) }
}
core::f32::<impl f32>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; 4]) -> Self {
Self::from_bits(u32::from_le_bytes(bytes))
}
core::f32::<impl f32>::to_bits pub const fn to_bits(self) -> u32 {
// SAFETY: `u32` is a plain old datatype so we can always transmute to it.
unsafe { mem::transmute(self) }
}
core::f32::<impl f32>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; 4] {
self.to_bits().to_le_bytes()
}
core::f64::<impl f64>::from_bits pub const fn from_bits(v: u64) -> Self {
// It turns out the safety issues with sNaN were overblown! Hooray!
// SAFETY: `u64` is a plain old datatype so we can always transmute from it.
unsafe { mem::transmute(v) }
}
core::f64::<impl f64>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; 8]) -> Self {
Self::from_bits(u64::from_le_bytes(bytes))
}
core::f64::<impl f64>::to_bits pub const fn to_bits(self) -> u64 {
// SAFETY: `u64` is a plain old datatype so we can always transmute to it.
unsafe { mem::transmute(self) }
}
core::f64::<impl f64>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; 8] {
self.to_bits().to_le_bytes()
}
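Illustrative example (editorial addition): round-tripping a float through its raw bit pattern or its little-endian byte encoding is lossless.

fn main() {
    let x = 1.5_f32;
    assert_eq!(f32::from_bits(x.to_bits()), x);
    assert_eq!(f32::from_le_bytes(x.to_le_bytes()), x);
    assert_eq!(1.0_f64.to_bits(), 0x3FF0_0000_0000_0000);
}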
core::intrinsics::ptr_guaranteed_cmp pub const fn ptr_guaranteed_cmp<T>(ptr: *const T, other: *const T) -> u8 {
(ptr == other) as u8
}
core::intrinsics::unlikely pub const fn unlikely(b: bool) -> bool {
if b {
cold_path();
true
} else {
false
}
}
core::iter::adapters::cloned::Cloned::<I>::new pub(in crate::iter) fn new(it: I) -> Cloned<I> {
Cloned { it }
}
core::iter::adapters::map::Map::<I, F>::new pub(in crate::iter) fn new(iter: I, f: F) -> Map<I, F> {
Map { iter, f }
}
core::iter::adapters::zip::zip pub fn zip<A, B>(a: A, b: B) -> Zip<A::IntoIter, B::IntoIter>
where
A: IntoIterator,
B: IntoIterator,
{
ZipImpl::new(a.into_iter(), b.into_iter())
}
core::iter::traits::collect::Extend::extend_one fn extend_one(&mut self, item: A) {
self.extend(Some(item));
}
core::iter::traits::collect::Extend::extend_one_unchecked unsafe fn extend_one_unchecked(&mut self, item: A)
where
Self: Sized,
{
self.extend_one(item);
}
core::iter::traits::collect::Extend::extend_reserve fn extend_reserve(&mut self, additional: usize) {
let _ = additional;
}
core::iter::traits::exact_size::ExactSizeIterator::is_empty fn is_empty(&self) -> bool {
self.len() == 0
}
core::iter::traits::iterator::Iterator::cloned fn cloned<'a, T>(self) -> Cloned<Self>
where
T: Clone + 'a,
Self: Sized + Iterator<Item = &'a T>,
{
Cloned::new(self)
}
core::iter::traits::iterator::Iterator::map fn map<B, F>(self, f: F) -> Map<Self, F>
where
Self: Sized,
F: FnMut(Self::Item) -> B,
{
Map::new(self, f)
}
core::iter::traits::iterator::Iterator::size_hint fn size_hint(&self) -> (usize, Option<usize>) {
(0, None)
}
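Illustrative example (editorial addition): `zip`, `map`, and `cloned` compose lazily, and `zip` stops at the shorter input. Values are arbitrary.

fn main() {
    let xs = [1, 2, 3];
    let ys = [10, 20];
    let sums: Vec<i32> = xs.iter().zip(ys.iter()).map(|(a, b)| a + b).collect();
    assert_eq!(sums, [11, 22]);
    let copies: Vec<i32> = xs.iter().cloned().collect();
    assert_eq!(copies, [1, 2, 3]);
}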
core::mem::align_of pub const fn align_of<T>() -> usize {
<T as SizedTypeProperties>::ALIGN
}
core::mem::align_of_val pub const fn align_of_val<T: ?Sized>(val: &T) -> usize {
// SAFETY: val is a reference, so it's a valid raw pointer
unsafe { intrinsics::align_of_val(val) }
}
core::mem::drop pub const fn drop<T>(_x: T)
where
T: [const] Destruct,
{
}
core::mem::forget pub const fn forget<T>(t: T) {
let _ = ManuallyDrop::new(t);
}
core::mem::manually_drop::ManuallyDrop::<T>::drop pub const unsafe fn drop(slot: &mut ManuallyDrop<T>)
where
T: [const] Destruct,
{
// SAFETY: we are dropping the value pointed to by a mutable reference
// which is guaranteed to be valid for writes.
// It is up to the caller to make sure that `slot` isn't dropped again.
unsafe { ptr::drop_in_place(&mut slot.value) }
}
core::mem::manually_drop::ManuallyDrop::<T>::into_inner pub const fn into_inner(slot: ManuallyDrop<T>) -> T {
slot.value
}
core::mem::manually_drop::ManuallyDrop::<T>::new pub const fn new(value: T) -> ManuallyDrop<T> {
ManuallyDrop { value }
}
core::mem::manually_drop::ManuallyDrop::<T>::take pub unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
// SAFETY: we are reading from a reference, which is guaranteed
// to be valid for reads.
unsafe { ptr::read(&slot.value) }
}
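Illustrative example (editorial addition): `ManuallyDrop` suppresses the automatic destructor, so the value must be taken out (or dropped) explicitly, at most once.

use std::mem::ManuallyDrop;

fn main() {
    let mut slot = ManuallyDrop::new(String::from("hello"));
    // SAFETY: `slot` holds an initialized value and is never used or dropped again.
    let s = unsafe { ManuallyDrop::take(&mut slot) };
    assert_eq!(s, "hello");
}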
core::mem::maybe_uninit::MaybeUninit::<T>::array_assume_init pub const unsafe fn array_assume_init<const N: usize>(array: [Self; N]) -> [T; N] {
// SAFETY:
// * The caller guarantees that all elements of the array are initialized
// * `MaybeUninit<T>` and T are guaranteed to have the same layout
// * `MaybeUninit` does not drop, so there are no double-frees
// And thus the conversion is safe
unsafe {
intrinsics::assert_inhabited::<[T; N]>();
intrinsics::transmute_unchecked(array)
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_bytes pub const fn as_bytes(&self) -> &[MaybeUninit<u8>] {
// SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
unsafe {
slice::from_raw_parts(self.as_ptr().cast::<MaybeUninit<u8>>(), super::size_of::<T>())
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_bytes_mut pub const fn as_bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
// SAFETY: MaybeUninit<u8> is always valid, even for padding bytes
unsafe {
slice::from_raw_parts_mut(
self.as_mut_ptr().cast::<MaybeUninit<u8>>(),
super::size_of::<T>(),
)
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_mut_ptr pub const fn as_mut_ptr(&mut self) -> *mut T {
// `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
self as *mut _ as *mut T
}
core::mem::maybe_uninit::MaybeUninit::<T>::as_ptr pub const fn as_ptr(&self) -> *const T {
// `MaybeUninit` and `ManuallyDrop` are both `repr(transparent)` so we can cast the pointer.
self as *const _ as *const T
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init pub const unsafe fn assume_init(self) -> T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
// We do this via a raw ptr read instead of `ManuallyDrop::into_inner` so that there's
// no trace of `ManuallyDrop` in Miri's error messages here.
(&raw const self.value).cast::<T>().read()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_drop pub const unsafe fn assume_init_drop(&mut self)
where
T: [const] Destruct,
{
// SAFETY: the caller must guarantee that `self` is initialized and
// satisfies all invariants of `T`.
// Dropping the value in place is safe if that is the case.
unsafe { ptr::drop_in_place(self.as_mut_ptr()) }
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_mut pub const unsafe fn assume_init_mut(&mut self) -> &mut T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
&mut *self.as_mut_ptr()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_read pub const unsafe fn assume_init_read(&self) -> T {
// SAFETY: the caller must guarantee that `self` is initialized.
// Reading from `self.as_ptr()` is safe since `self` should be initialized.
unsafe {
intrinsics::assert_inhabited::<T>();
self.as_ptr().read()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::assume_init_ref pub const unsafe fn assume_init_ref(&self) -> &T {
// SAFETY: the caller must guarantee that `self` is initialized.
// This also means that `self` must be a `value` variant.
unsafe {
intrinsics::assert_inhabited::<T>();
&*self.as_ptr()
}
}
core::mem::maybe_uninit::MaybeUninit::<T>::new pub const fn new(val: T) -> MaybeUninit<T> {
MaybeUninit { value: ManuallyDrop::new(val) }
}
core::mem::maybe_uninit::MaybeUninit::<T>::slice_as_mut_ptr pub const fn slice_as_mut_ptr(this: &mut [MaybeUninit<T>]) -> *mut T {
this.as_mut_ptr() as *mut T
}
core::mem::maybe_uninit::MaybeUninit::<T>::slice_as_ptr pub const fn slice_as_ptr(this: &[MaybeUninit<T>]) -> *const T {
this.as_ptr() as *const T
}
core::mem::maybe_uninit::MaybeUninit::<T>::uninit pub const fn uninit() -> MaybeUninit<T> {
MaybeUninit { uninit: () }
}
core::mem::maybe_uninit::MaybeUninit::<T>::write pub const fn write(&mut self, val: T) -> &mut T {
*self = MaybeUninit::new(val);
// SAFETY: We just initialized this value.
unsafe { self.assume_init_mut() }
}
core::mem::maybe_uninit::MaybeUninit::<T>::zeroed pub const fn zeroed() -> MaybeUninit<T> {
let mut u = MaybeUninit::<T>::uninit();
// SAFETY: `u.as_mut_ptr()` points to allocated memory.
unsafe { u.as_mut_ptr().write_bytes(0u8, 1) };
u
}
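Illustrative example (editorial addition): a typical `MaybeUninit` flow is `uninit` -> `write` -> `assume_init`; `zeroed` is only sound for types where the all-zero bit pattern is valid.

use core::mem::MaybeUninit;

fn main() {
    let mut slot = MaybeUninit::<u32>::uninit();
    // `write` initializes the slot and returns a mutable reference into it.
    *slot.write(5) += 1;
    // SAFETY: the slot was initialized by `write` just above.
    let value = unsafe { slot.assume_init() };
    assert_eq!(value, 6);
    // SAFETY: all-zero bytes are a valid `u64`.
    let zero: u64 = unsafe { MaybeUninit::<u64>::zeroed().assume_init() };
    assert_eq!(zero, 0);
}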
core::mem::maybe_uninit::MaybeUninit::<[T; N]>::transpose pub const fn transpose(self) -> [MaybeUninit<T>; N] {
// SAFETY: T and MaybeUninit<T> have the same layout
unsafe { intrinsics::transmute_unchecked(self) }
}
core::mem::needs_drop pub const fn needs_drop<T: ?Sized>() -> bool {
const { intrinsics::needs_drop::<T>() }
}
core::mem::replace pub const fn replace<T>(dest: &mut T, src: T) -> T {
// It may be tempting to use `swap` to avoid `unsafe` here. Don't!
// The compiler optimizes the implementation below to two `memcpy`s
// while `swap` would require at least three. See PR#83022 for details.
// SAFETY: We read from `dest` but directly write `src` into it afterwards,
// such that the old value is not duplicated. Nothing is dropped and
// nothing here can panic.
unsafe {
// Ideally we wouldn't use the intrinsics here, but going through the
// `ptr` methods introduces two unnecessary UbChecks, so until we can
// remove those for pointers that come from references, this uses the
// intrinsics instead so this stays very cheap in MIR (and debug).
let result = crate::intrinsics::read_via_copy(dest);
crate::intrinsics::write_via_move(dest, src);
result
}
}
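Illustrative example (editorial addition): `replace` moves `src` in and hands the old value back, without ever leaving `dest` uninitialized in between.

use std::mem;

fn main() {
    let mut v = vec![1, 2, 3];
    let old = mem::replace(&mut v, Vec::new());
    assert_eq!(old, [1, 2, 3]);
    assert!(v.is_empty());
}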
core::mem::size_of pub const fn size_of<T>() -> usize {
<T as SizedTypeProperties>::SIZE
}
core::mem::size_of_val pub const fn size_of_val<T: ?Sized>(val: &T) -> usize {
// SAFETY: `val` is a reference, so it's a valid raw pointer
unsafe { intrinsics::size_of_val(val) }
}
core::mem::transmute_copy pub const unsafe fn transmute_copy<Src, Dst>(src: &Src) -> Dst {
assert!(
size_of::<Src>() >= size_of::<Dst>(),
"cannot transmute_copy if Dst is larger than Src"
);
// If Dst has a higher alignment requirement, src might not be suitably aligned.
if align_of::<Dst>() > align_of::<Src>() {
// SAFETY: `src` is a reference which is guaranteed to be valid for reads.
// The caller must guarantee that the actual transmutation is safe.
unsafe { ptr::read_unaligned(src as *const Src as *const Dst) }
} else {
// SAFETY: `src` is a reference which is guaranteed to be valid for reads.
// We just checked that `src as *const Dst` was properly aligned.
// The caller must guarantee that the actual transmutation is safe.
unsafe { ptr::read(src as *const Src as *const Dst) }
}
}
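Illustrative example (editorial addition): the assert in `transmute_copy` only checks sizes, so value validity is entirely the caller's obligation; here four initialized bytes are read as a native-endian `u32`.

use std::mem;

fn main() {
    let bytes = [0x78_u8, 0x56, 0x34, 0x12];
    // SAFETY: every initialized 4-byte pattern is a valid `u32`.
    let n: u32 = unsafe { mem::transmute_copy(&bytes) };
    assert_eq!(n, u32::from_ne_bytes(bytes));
}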
core::num::<impl i128>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i128>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i128>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i128>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i128>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i128>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i128>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i128>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i128>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i128>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i128>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
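Illustrative example (editorial addition): the integer methods above are macro instantiations with identical behavior at every width; shown with `i32` for readability.

fn main() {
    assert_eq!((-7_i32).wrapping_abs(), 7);
    assert_eq!(i32::MIN.wrapping_abs(), i32::MIN); // wraps instead of overflowing
    assert_eq!(i32::MIN.unsigned_abs(), 2_147_483_648_u32);
    assert_eq!(i32::from_le_bytes(0x0102_0304_i32.to_le_bytes()), 0x0102_0304);
}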
core::num::<impl i16>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i16>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i16>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i16>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i16>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i16>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i16>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i16>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i16>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i16>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i16>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i32>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i32>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i32>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i32>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i32>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i32>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i32>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i32>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i32>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i32>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i32>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i64>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i64>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i64>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i64>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i64>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i64>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i64>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i64>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i64>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i64>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i64>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl i8>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl i8>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl i8>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl i8>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl i8>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl i8>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl i8>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl i8>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl i8>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl i8>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl i8>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl isize>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl isize>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl isize>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl isize>::is_negative pub const fn is_negative(self) -> bool { self < 0 }
core::num::<impl isize>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl isize>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl isize>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl isize>::unsigned_abs pub const fn unsigned_abs(self) -> $UnsignedT {
self.wrapping_abs() as $UnsignedT
}
core::num::<impl isize>::wrapping_abs pub const fn wrapping_abs(self) -> Self {
if self.is_negative() {
self.wrapping_neg()
} else {
self
}
}
core::num::<impl isize>::wrapping_neg pub const fn wrapping_neg(self) -> Self {
(0 as $SelfT).wrapping_sub(self)
}
core::num::<impl isize>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u128>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u128>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
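As the comment explains, `checked_add` is phrased as an overflow test plus an unchecked add purely for codegen; observably it just returns `None` on overflow. A usage sketch:
fn checked_add_sketch() {
    assert_eq!(u128::MAX.checked_add(1), None); // would overflow
    assert_eq!((u128::MAX - 1).checked_add(1), Some(u128::MAX));
}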
core::num::<impl u128>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u128>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
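For unsigned subtraction the explicit `self < rhs` comparison above is the entire overflow check. A usage sketch:
fn checked_sub_sketch() {
    assert_eq!(5u128.checked_sub(7), None); // self < rhs, would underflow
    assert_eq!(7u128.checked_sub(5), Some(2));
}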
core::num::<impl u128>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u128>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
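`div_ceil` rounds up exactly when the remainder is non-zero. A worked sketch:
fn div_ceil_sketch() {
    assert_eq!(7u128.div_ceil(3), 3); // d = 2, r = 1 > 0, so round up
    assert_eq!(6u128.div_ceil(3), 2); // r = 0, no rounding
}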
core::num::<impl u128>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u128>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u128>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u128>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
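The match above special-cases zero: only zero is a multiple of zero, and any other divisor falls back to a plain remainder test. A usage sketch:
fn is_multiple_of_sketch() {
    assert!(6u128.is_multiple_of(3));
    assert!(!7u128.is_multiple_of(3));
    assert!(0u128.is_multiple_of(0)); // zero is the only multiple of zero
    assert!(!6u128.is_multiple_of(0));
}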
core::num::<impl u128>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
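A power of two has exactly one bit set, which is what `count_ones() == 1` tests; in particular zero is not a power of two. For example:
fn is_power_of_two_sketch() {
    assert!(64u128.is_power_of_two());
    assert!(!0u128.is_power_of_two()); // zero has no bits set
    assert!(!6u128.is_power_of_two()); // two bits set
}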
core::num::<impl u128>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u128>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u128>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u128>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u128>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u128>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u128>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u128>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
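Callers of the `unchecked_*` methods must establish the no-overflow precondition themselves; the `assert_unsafe_precondition!` above only catches violations when UB checks are compiled in. A minimal sketch of discharging that obligation (the helper function is hypothetical):
fn saturating_diff_sketch(a: u128, b: u128) -> u128 {
    if a >= b {
        // SAFETY: `a >= b`, so the subtraction cannot overflow.
        unsafe { a.unchecked_sub(b) }
    } else {
        0
    }
}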
core::num::<impl u128>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u16>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u16>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u16>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u16>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u16>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u16>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u16>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u16>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u16>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u16>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u16>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u16>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u16>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u16>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u16>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u16>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u16>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u16>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u16>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u16>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u32>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u32>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u32>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u32>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u32>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u32>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u32>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u32>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u32>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u32>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u32>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u32>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u32>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u32>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u32>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u32>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u32>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u32>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u32>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u32>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u64>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl u64>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u64>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u64>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u64>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u64>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u64>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u64>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u64>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u64>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u64>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u64>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u64>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u64>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u64>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u64>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u64>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u64>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u64>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u64>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl u8>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
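For one-byte types the widen-to-`i32` path above yields the same value as the branching path, it merely vectorizes better; either way `abs_diff` is symmetric and never overflows. For example:
fn abs_diff_sketch() {
    assert_eq!(5u8.abs_diff(7), 2);
    assert_eq!(7u8.abs_diff(5), 2); // symmetric
    assert_eq!(0u8.abs_diff(u8::MAX), u8::MAX); // no overflow at the extremes
}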
core::num::<impl u8>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl u8>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl u8>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl u8>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl u8>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl u8>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl u8>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl u8>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl u8>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl u8>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl u8>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u8>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u8>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl u8>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl u8>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl u8>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl u8>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl u8>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl u8>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::<impl usize>::abs_diff pub const fn abs_diff(self, other: Self) -> Self {
if size_of::<Self>() == 1 {
// Trick LLVM into generating the psadbw instruction when SSE2
// is available and this function is autovectorized for u8's.
(self as i32).wrapping_sub(other as i32).unsigned_abs() as Self
} else {
if self < other {
other - self
} else {
self - other
}
}
}
core::num::<impl usize>::checked_add pub const fn checked_add(self, rhs: Self) -> Option<Self> {
// This used to use `overflowing_add`, but that means it ends up being
// a `wrapping_add`, losing some optimization opportunities. Notably,
// phrasing it this way helps `.checked_add(1)` optimize to a check
// against `MAX` and an `add nuw`.
// Per <https://github.com/rust-lang/rust/pull/124114#issuecomment-2066173305>,
// LLVM is happy to re-form the intrinsic later if useful.
if intrinsics::unlikely(intrinsics::add_with_overflow(self, rhs).1) {
None
} else {
// SAFETY: Just checked it doesn't overflow
Some(unsafe { intrinsics::unchecked_add(self, rhs) })
}
}
core::num::<impl usize>::checked_mul pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
let (a, b) = self.overflowing_mul(rhs);
if intrinsics::unlikely(b) { None } else { Some(a) }
}
core::num::<impl usize>::checked_sub pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
// for *unsigned* subtraction and we just emit the manual check anyway.
// Thus, rather than using `overflowing_sub` that produces a wrapping
// subtraction, check it ourselves so we can use an unchecked one.
if self < rhs {
None
} else {
// SAFETY: just checked this can't overflow
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
}
}
core::num::<impl usize>::count_ones pub const fn count_ones(self) -> u32 {
return intrinsics::ctpop(self);
}
core::num::<impl usize>::div_ceil pub const fn div_ceil(self, rhs: Self) -> Self {
let d = self / rhs;
let r = self % rhs;
if r > 0 {
d + 1
} else {
d
}
}
core::num::<impl usize>::from_le pub const fn from_le(x: Self) -> Self {
#[cfg(target_endian = "little")]
{
x
}
#[cfg(not(target_endian = "little"))]
{
x.swap_bytes()
}
}
core::num::<impl usize>::from_le_bytes pub const fn from_le_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
Self::from_le(Self::from_ne_bytes(bytes))
}
core::num::<impl usize>::from_ne_bytes pub const fn from_ne_bytes(bytes: [u8; size_of::<Self>()]) -> Self {
// SAFETY: integers are plain old datatypes so we can always transmute to them
unsafe { mem::transmute(bytes) }
}
core::num::<impl usize>::is_multiple_of pub const fn is_multiple_of(self, rhs: Self) -> bool {
match rhs {
0 => self == 0,
_ => self % rhs == 0,
}
}
core::num::<impl usize>::is_power_of_two pub const fn is_power_of_two(self) -> bool {
self.count_ones() == 1
}
core::num::<impl usize>::overflowing_add pub const fn overflowing_add(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::add_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl usize>::overflowing_mul pub const fn overflowing_mul(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::mul_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl usize>::overflowing_sub pub const fn overflowing_sub(self, rhs: Self) -> (Self, bool) {
let (a, b) = intrinsics::sub_with_overflow(self as $ActualT, rhs as $ActualT);
(a as Self, b)
}
core::num::<impl usize>::repeat_u8 pub(crate) const fn repeat_u8(x: u8) -> usize {
usize::from_ne_bytes([x; size_of::<usize>()])
}
core::num::<impl usize>::to_le pub const fn to_le(self) -> Self {
#[cfg(target_endian = "little")]
{
self
}
#[cfg(not(target_endian = "little"))]
{
self.swap_bytes()
}
}
core::num::<impl usize>::to_le_bytes pub const fn to_le_bytes(self) -> [u8; size_of::<Self>()] {
self.to_le().to_ne_bytes()
}
core::num::<impl usize>::to_ne_bytes pub const fn to_ne_bytes(self) -> [u8; size_of::<Self>()] {
// SAFETY: integers are plain old datatypes so we can always transmute them to
// arrays of bytes
unsafe { mem::transmute(self) }
}
core::num::<impl usize>::unchecked_add pub const unsafe fn unchecked_add(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_add cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_add(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_add(self, rhs)
}
}
core::num::<impl usize>::unchecked_sub pub const unsafe fn unchecked_sub(self, rhs: Self) -> Self {
assert_unsafe_precondition!(
check_language_ub,
concat!(stringify!($SelfT), "::unchecked_sub cannot overflow"),
(
lhs: $SelfT = self,
rhs: $SelfT = rhs,
) => !lhs.overflowing_sub(rhs).1,
);
// SAFETY: this is guaranteed to be safe by the caller.
unsafe {
intrinsics::unchecked_sub(self, rhs)
}
}
core::num::<impl usize>::wrapping_sub pub const fn wrapping_sub(self, rhs: Self) -> Self {
intrinsics::wrapping_sub(self, rhs)
}
core::num::niche_types::Nanoseconds::as_inner pub const fn as_inner(self) -> $int {
// SAFETY: This is a transparent wrapper, so unwrapping it is sound
// (Not using `.0` due to MCP#807.)
unsafe { crate::mem::transmute(self) }
}
core::num::niche_types::Nanoseconds::new_unchecked pub const unsafe fn new_unchecked(val: $int) -> Self {
// SAFETY: Caller promised that `val` is within the valid range.
unsafe { $name(val) }
}
core::ops::control_flow::ControlFlow::<B, C>::break_ok pub fn break_ok(self) -> Result<B, C> {
match self {
ControlFlow::Continue(c) => Err(c),
ControlFlow::Break(b) => Ok(b),
}
}
core::ops::control_flow::ControlFlow::<B, C>::break_value pub fn break_value(self) -> Option<B> {
match self {
ControlFlow::Continue(..) => None,
ControlFlow::Break(x) => Some(x),
}
}
core::ops::control_flow::ControlFlow::<B, C>::continue_ok pub fn continue_ok(self) -> Result<C, B> {
match self {
ControlFlow::Continue(c) => Ok(c),
ControlFlow::Break(b) => Err(b),
}
}
core::ops::control_flow::ControlFlow::<B, C>::continue_value pub fn continue_value(self) -> Option<C> {
match self {
ControlFlow::Continue(x) => Some(x),
ControlFlow::Break(..) => None,
}
}
core::ops::control_flow::ControlFlow::<B, C>::is_break pub fn is_break(&self) -> bool {
matches!(*self, ControlFlow::Break(_))
}
core::ops::control_flow::ControlFlow::<B, C>::is_continue pub fn is_continue(&self) -> bool {
matches!(*self, ControlFlow::Continue(_))
}
core::ops::control_flow::ControlFlow::<B, C>::map_break pub fn map_break<T>(self, f: impl FnOnce(B) -> T) -> ControlFlow<T, C> {
match self {
ControlFlow::Continue(x) => ControlFlow::Continue(x),
ControlFlow::Break(x) => ControlFlow::Break(f(x)),
}
}
core::ops::control_flow::ControlFlow::<B, C>::map_continue pub fn map_continue<T>(self, f: impl FnOnce(C) -> T) -> ControlFlow<B, T> {
match self {
ControlFlow::Continue(x) => ControlFlow::Continue(f(x)),
ControlFlow::Break(x) => ControlFlow::Break(x),
}
}
core::ops::control_flow::ControlFlow::<T, T>::into_value pub const fn into_value(self) -> T {
match self {
ControlFlow::Continue(x) | ControlFlow::Break(x) => x,
}
}
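`ControlFlow` makes the break/continue decision explicit, and the accessors above either test or extract the corresponding payload. A usage sketch:
fn control_flow_sketch() {
    use std::ops::ControlFlow;
    let step: ControlFlow<&str, i32> = ControlFlow::Continue(3);
    assert!(step.is_continue());
    assert_eq!(step.continue_value(), Some(3));
    let done: ControlFlow<&str, i32> = ControlFlow::Break("stop");
    assert_eq!(done.break_value(), Some("stop"));
}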
core::ops::function::impls::<impl core::ops::function::Fn<A> for &F>::call extern "rust-call" fn call(&self, args: A) -> F::Output {
(**self).call(args)
}
core::ops::function::impls::<impl core::ops::function::FnMut<A> for &F>::call_mut extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
(**self).call(args)
}
core::ops::function::impls::<impl core::ops::function::FnMut<A> for &mut F>::call_mut extern "rust-call" fn call_mut(&mut self, args: A) -> F::Output {
(*self).call_mut(args)
}
core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once extern "rust-call" fn call_once(self, args: A) -> F::Output {
(*self).call(args)
}
core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &mut F>::call_once extern "rust-call" fn call_once(self, args: A) -> F::Output {
(*self).call_mut(args)
}
core::ops::index_range::IndexRange::end pub(crate) const fn end(&self) -> usize {
self.end
}
core::ops::index_range::IndexRange::len pub(crate) const fn len(&self) -> usize {
// SAFETY: By invariant, this cannot wrap
// Using the intrinsic because a UB check here impedes LLVM optimization. (#131563)
unsafe { crate::intrinsics::unchecked_sub(self.end, self.start) }
}
core::ops::index_range::IndexRange::new_unchecked pub(crate) const unsafe fn new_unchecked(start: usize, end: usize) -> Self {
ub_checks::assert_unsafe_precondition!(
check_library_ub,
"IndexRange::new_unchecked requires `start <= end`",
(start: usize = start, end: usize = end) => start <= end,
);
IndexRange { start, end }
}
core::ops::index_range::IndexRange::next_unchecked const unsafe fn next_unchecked(&mut self) -> usize {
debug_assert!(self.start < self.end);
let value = self.start;
// SAFETY: The range isn't empty, so this cannot overflow
self.start = unsafe { value.unchecked_add(1) };
value
}
core::ops::index_range::IndexRange::start pub(crate) const fn start(&self) -> usize {
self.start
}
core::ops::index_range::IndexRange::zero_to pub(crate) const fn zero_to(end: usize) -> Self {
IndexRange { start: 0, end }
}
core::ops::range::Bound::<&T>::cloned pub const fn cloned(self) -> Bound<T>
where
T: [const] Clone,
{
match self {
Bound::Unbounded => Bound::Unbounded,
Bound::Included(x) => Bound::Included(x.clone()),
Bound::Excluded(x) => Bound::Excluded(x.clone()),
}
}
core::ops::range::Bound::<&T>::copied pub fn copied(self) -> Bound<T> {
match self {
Bound::Unbounded => Bound::Unbounded,
Bound::Included(x) => Bound::Included(*x),
Bound::Excluded(x) => Bound::Excluded(*x),
}
}
core::ops::range::Bound::<T>::as_mut pub const fn as_mut(&mut self) -> Bound<&mut T> {
match *self {
Included(ref mut x) => Included(x),
Excluded(ref mut x) => Excluded(x),
Unbounded => Unbounded,
}
}
core::ops::range::Bound::<T>::as_ref pub const fn as_ref(&self) -> Bound<&T> {
match *self {
Included(ref x) => Included(x),
Excluded(ref x) => Excluded(x),
Unbounded => Unbounded,
}
}
core::ops::range::Bound::<T>::map pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> Bound<U> {
match self {
Unbounded => Unbounded,
Included(x) => Included(f(x)),
Excluded(x) => Excluded(f(x)),
}
}
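`Bound::map` transforms the payload of `Included`/`Excluded` and leaves `Unbounded` alone, while `cloned` turns a `Bound<&T>` back into an owned bound. A short sketch:
fn bound_sketch() {
    use std::ops::Bound;
    assert_eq!(Bound::Included(3).map(|x| x * 2), Bound::Included(6));
    assert_eq!(Bound::<i32>::Unbounded.map(|x| x * 2), Bound::Unbounded);
    let end = 5;
    assert_eq!(Bound::Excluded(&end).cloned(), Bound::Excluded(5));
}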
core::ops::range::RangeInclusive::<Idx>::end pub const fn end(&self) -> &Idx {
&self.end
}
core::ops::range::RangeInclusive::<Idx>::into_inner pub const fn into_inner(self) -> (Idx, Idx) {
(self.start, self.end)
}
core::ops::range::RangeInclusive::<Idx>::new pub const fn new(start: Idx, end: Idx) -> Self {
Self { start, end, exhausted: false }
}
core::ops::range::RangeInclusive::<Idx>::start pub const fn start(&self) -> &Idx {
&self.start
}
core::ops::range::RangeInclusive::<usize>::into_slice_range pub(crate) const fn into_slice_range(self) -> Range<usize> {
// If we're not exhausted, we want to simply slice `start..end + 1`.
// If we are exhausted, then slicing with `end + 1..end + 1` gives us an
// empty range that is still subject to bounds-checks for that endpoint.
let exclusive_end = self.end + 1;
let start = if self.exhausted { exclusive_end } else { self.start };
start..exclusive_end
}
core::option::Option::<&T>::cloned pub fn cloned(self) -> Option<T>
where
T: Clone,
{
match self {
Some(t) => Some(t.clone()),
None => None,
}
}
core::option::Option::<&T>::copied pub const fn copied(self) -> Option<T>
where
T: Copy,
{
// FIXME(const-hack): this implementation, which sidesteps using `Option::map` since it's not const
// ready yet, should be reverted when possible to avoid code repetition
match self {
Some(&v) => Some(v),
None => None,
}
}
core::option::Option::<&mut T>::cloned pub fn cloned(self) -> Option<T>
where
T: Clone,
{
match self {
Some(t) => Some(t.clone()),
None => None,
}
}
core::option::Option::<&mut T>::copied pub const fn copied(self) -> Option<T>
where
T: Copy,
{
match self {
Some(&mut t) => Some(t),
None => None,
}
}
core::option::Option::<(T, U)>::unzip pub fn unzip(self) -> (Option<T>, Option<U>) {
match self {
Some((a, b)) => (Some(a), Some(b)),
None => (None, None),
}
}
core::option::Option::<T>::and pub const fn and<U>(self, optb: Option<U>) -> Option<U>
where
T: [const] Destruct,
U: [const] Destruct,
{
match self {
Some(_) => optb,
None => None,
}
}
core::option::Option::<T>::and_then pub const fn and_then<U, F>(self, f: F) -> Option<U>
where
F: [const] FnOnce(T) -> Option<U> + [const] Destruct,
{
match self {
Some(x) => f(x),
None => None,
}
}
core::option::Option::<T>::as_deref pub const fn as_deref(&self) -> Option<&T::Target>
where
T: [const] Deref,
{
self.as_ref().map(Deref::deref)
}
core::option::Option::<T>::as_deref_mut pub const fn as_deref_mut(&mut self) -> Option<&mut T::Target>
where
T: [const] DerefMut,
{
self.as_mut().map(DerefMut::deref_mut)
}
core::option::Option::<T>::as_mut pub const fn as_mut(&mut self) -> Option<&mut T> {
match *self {
Some(ref mut x) => Some(x),
None => None,
}
}
core::option::Option::<T>::as_ref pub const fn as_ref(&self) -> Option<&T> {
match *self {
Some(ref x) => Some(x),
None => None,
}
}
core::option::Option::<T>::expect pub const fn expect(
self,
#[cfg(not(feature = "ferrocene_certified"))] msg: &str,
#[cfg(feature = "ferrocene_certified")] msg: &'static str,
) -> T {
match self {
Some(val) => val,
#[cfg(not(feature = "ferrocene_certified"))]
None => expect_failed(msg),
#[cfg(feature = "ferrocene_certified")]
None => panic(msg),
}
}
core::option::Option::<T>::filter pub const fn filter<P>(self, predicate: P) -> Self
where
P: [const] FnOnce(&T) -> bool + [const] Destruct,
T: [const] Destruct,
{
if let Some(x) = self {
if predicate(&x) {
return Some(x);
}
}
None
}
core::option::Option::<T>::inspect pub const fn inspect<F>(self, f: F) -> Self
where
F: [const] FnOnce(&T) + [const] Destruct,
{
if let Some(ref x) = self {
f(x);
}
self
}
core::option::Option::<T>::is_none_or pub const fn is_none_or(self, f: impl [const] FnOnce(T) -> bool + [const] Destruct) -> bool {
match self {
None => true,
Some(x) => f(x),
}
}
core::option::Option::<T>::is_some pub const fn is_some(&self) -> bool {
matches!(*self, Some(_))
}
core::option::Option::<T>::is_some_and pub const fn is_some_and(self, f: impl [const] FnOnce(T) -> bool + [const] Destruct) -> bool {
match self {
None => false,
Some(x) => f(x),
}
}
core::option::Option::<T>::iter pub fn iter(&self) -> Iter<'_, T> {
Iter { inner: Item { opt: self.as_ref() } }
}
core::option::Option::<T>::iter_mut pub fn iter_mut(&mut self) -> IterMut<'_, T> {
IterMut { inner: Item { opt: self.as_mut() } }
}
core::option::Option::<T>::len const fn len(&self) -> usize {
// Using the intrinsic avoids emitting a branch to get the 0 or 1.
let discriminant: isize = crate::intrinsics::discriminant_value(self);
discriminant as usize
}
core::option::Option::<T>::map pub const fn map<U, F>(self, f: F) -> Option<U>
where
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Some(x) => Some(f(x)),
None => None,
}
}
core::option::Option::<T>::map_or pub const fn map_or<U, F>(self, default: U, f: F) -> U
where
F: [const] FnOnce(T) -> U + [const] Destruct,
U: [const] Destruct,
{
match self {
Some(t) => f(t),
None => default,
}
}
core::option::Option::<T>::map_or_default pub const fn map_or_default<U, F>(self, f: F) -> U
where
U: [const] Default,
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Some(t) => f(t),
None => U::default(),
}
}
core::option::Option::<T>::map_or_else pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
where
D: [const] FnOnce() -> U + [const] Destruct,
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Some(t) => f(t),
None => default(),
}
}
core::option::Option::<T>::ok_or pub const fn ok_or<E: [const] Destruct>(self, err: E) -> Result<T, E> {
match self {
Some(v) => Ok(v),
None => Err(err),
}
}
core::option::Option::<T>::ok_or_else pub const fn ok_or_else<E, F>(self, err: F) -> Result<T, E>
where
F: [const] FnOnce() -> E + [const] Destruct,
{
match self {
Some(v) => Ok(v),
None => Err(err()),
}
}
core::option::Option::<T>::or pub const fn or(self, optb: Option<T>) -> Option<T>
where
T: [const] Destruct,
{
match self {
x @ Some(_) => x,
None => optb,
}
}
core::option::Option::<T>::or_else pub const fn or_else<F>(self, f: F) -> Option<T>
where
F: [const] FnOnce() -> Option<T> + [const] Destruct,
// FIXME(const_hack): this `T: [const] Destruct` is unnecessary, but even precise live drops can't tell
// no value of type `T` gets dropped here
T: [const] Destruct,
{
match self {
x @ Some(_) => x,
None => f(),
}
}
core::option::Option::<T>::reduce pub fn reduce<U, R, F>(self, other: Option<U>, f: F) -> Option<R>
where
T: Into<R>,
U: Into<R>,
F: FnOnce(T, U) -> R,
{
match (self, other) {
(Some(a), Some(b)) => Some(f(a, b)),
(Some(a), _) => Some(a.into()),
(_, Some(b)) => Some(b.into()),
_ => None,
}
}
core::option::Option::<T>::take pub const fn take(&mut self) -> Option<T> {
// FIXME(const-hack) replace `mem::replace` by `mem::take` when the latter is const ready
mem::replace(self, None)
}
core::option::Option::<T>::unwrap pub const fn unwrap(self) -> T {
match self {
Some(val) => val,
None => unwrap_failed(),
}
}
core::option::Option::<T>::unwrap_or pub const fn unwrap_or(self, default: T) -> T
where
T: [const] Destruct,
{
match self {
Some(x) => x,
None => default,
}
}
core::option::Option::<T>::unwrap_or_default pub const fn unwrap_or_default(self) -> T
where
T: [const] Default,
{
match self {
Some(x) => x,
None => T::default(),
}
}
core::option::Option::<T>::unwrap_or_else pub const fn unwrap_or_else<F>(self, f: F) -> T
where
F: [const] FnOnce() -> T + [const] Destruct,
{
match self {
Some(x) => x,
None => f(),
}
}
core::option::Option::<T>::xor pub const fn xor(self, optb: Option<T>) -> Option<T>
where
T: [const] Destruct,
{
match (self, optb) {
(a @ Some(_), None) => a,
(None, b @ Some(_)) => b,
_ => None,
}
}
core::option::Option::<T>::zip pub const fn zip<U>(self, other: Option<U>) -> Option<(T, U)>
where
T: [const] Destruct,
U: [const] Destruct,
{
match (self, other) {
(Some(a), Some(b)) => Some((a, b)),
_ => None,
}
}
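`xor` yields a value only when exactly one side is `Some`, whereas `zip` needs both. For example:
fn option_zip_xor_sketch() {
    assert_eq!(Some(1).zip(Some("a")), Some((1, "a")));
    assert_eq!(Some(1).zip(None::<&str>), None);
    assert_eq!(Some(1).xor(None), Some(1));
    assert_eq!(Some(1).xor(Some(2)), None); // both present, so neither wins
}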
core::option::Option::<core::option::Option<T>>::flatten pub const fn flatten(self) -> Option<T> {
// FIXME(const-hack): could be written with `and_then`
match self {
Some(inner) => inner,
None => None,
}
}
core::option::Option::<core::result::Result<T, E>>::transpose pub const fn transpose(self) -> Result<Option<T>, E> {
match self {
Some(Ok(x)) => Ok(Some(x)),
Some(Err(e)) => Err(e),
None => Ok(None),
}
}
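`transpose` swaps the nesting of `Option` and `Result`, and `flatten` removes one level of `Option` nesting. For example:
fn option_transpose_flatten_sketch() {
    let x: Option<Result<i32, &str>> = Some(Ok(5));
    assert_eq!(x.transpose(), Ok(Some(5)));
    let y: Option<Option<i32>> = Some(None);
    assert_eq!(y.flatten(), None);
}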
core::option::unwrap_failed const fn unwrap_failed() -> ! {
panic("called `Option::unwrap()` on a `None` value")
}
core::panic::location::Location::<'a>::column pub const fn column(&self) -> u32 {
self.col
}
core::panic::location::Location::<'a>::line pub const fn line(&self) -> u32 {
self.line
}
core::panicking::panic pub const fn panic(expr: &'static str) -> ! {
// Use Arguments::new_const instead of format_args!("{expr}") to potentially
// reduce size overhead. The format_args! macro uses str's Display trait to
// write expr, which calls Formatter::pad, which must accommodate string
// truncation and padding (even though none is used here). Using
// Arguments::new_const may allow the compiler to omit Formatter::pad from the
// output binary, saving up to a few kilobytes.
// However, this optimization only works for `'static` strings: `new_const` also makes this
// message return `Some` from `Arguments::as_str`, which means it can become part of the panic
// payload without any allocation or copying. Shorter-lived strings would become invalid as
// stack frames get popped during unwinding, and couldn't be directly referenced from the
// payload.
#[cfg(not(feature = "ferrocene_certified"))]
panic_fmt(fmt::Arguments::new_const(&[expr]));
#[cfg(feature = "ferrocene_certified")]
panic_fmt(&expr)
}
core::panicking::panic_nounwind_fmt::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::align_offset pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
// FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
// 1, where the method versions of these operations are not inlined.
use intrinsics::{
assume, cttz_nonzero, exact_div, mul_with_overflow, unchecked_rem, unchecked_shl,
unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
};
/// Calculate multiplicative modular inverse of `x` modulo `m`.
///
/// This implementation is tailored for `align_offset` and has the following preconditions:
///
/// * `m` is a power-of-two;
/// * `x < m`; (if `x ≥ m`, pass in `x % m` instead)
///
/// Implementation of this function shall not panic. Ever.
#[inline]
const unsafe fn mod_inv(x: usize, m: usize) -> usize {
/// Multiplicative modular inverse table modulo 2⁴ = 16.
///
/// Note that this table does not contain values for which the inverse does not exist (i.e., for
/// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
/// Modulo for which the `INV_TABLE_MOD_16` is intended.
const INV_TABLE_MOD: usize = 16;
// SAFETY: `m` is required to be a power-of-two, hence non-zero.
let m_minus_one = unsafe { unchecked_sub(m, 1) };
let mut inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
let mut mod_gate = INV_TABLE_MOD;
// We iterate "up" using the following formula:
//
// $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
//
// This step needs to be applied at least until `2²ⁿ ≥ m`, at which point we can
// finally reduce the computation to our desired `m` by taking `inverse mod m`.
//
// This computation is `O(log log m)`, which is to say, that on 64-bit machines this loop
// will always finish in at most 4 iterations.
loop {
// y = y * (2 - xy) mod n
//
// Note that we use wrapping operations here intentionally – the original formula
// uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
// usize::MAX` instead, because we take the result `mod n` at the end
// anyway.
if mod_gate >= m {
break;
}
inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
let (new_gate, overflow) = mul_with_overflow(mod_gate, mod_gate);
if overflow {
break;
}
mod_gate = new_gate;
}
inverse & m_minus_one
}
let stride = size_of::<T>();
let addr: usize = p.addr();
// SAFETY: `a` is a power-of-two, therefore non-zero.
let a_minus_one = unsafe { unchecked_sub(a, 1) };
if stride == 0 {
// SPECIAL_CASE: handle 0-sized types. No matter how many times we step, the address will
// stay the same, so no offset will be able to align the pointer unless it is already
// aligned. This branch _will_ be optimized out as `stride` is known at compile-time.
let p_mod_a = addr & a_minus_one;
return if p_mod_a == 0 { 0 } else { usize::MAX };
}
// SAFETY: `stride == 0` case has been handled by the special case above.
let a_mod_stride = unsafe { unchecked_rem(a, stride) };
if a_mod_stride == 0 {
// SPECIAL_CASE: In cases where the `a` is divisible by `stride`, byte offset to align a
// pointer can be computed more simply through `-p (mod a)`. In the off-chance the byte
// offset is not a multiple of `stride`, the input pointer was misaligned and no pointer
// offset will be able to produce a `p` aligned to the specified `a`.
//
// The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
// like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
// redistributes operations around the load-bearing, but pessimizing `and` instruction
// sufficiently for LLVM to be able to utilize the various optimizations it knows about.
//
// LLVM handles the branch here particularly nicely. If this branch needs to be evaluated
// at runtime, it will produce a mask `if addr_mod_stride == 0 { 0 } else { usize::MAX }`
// in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
// computation produces.
let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
let byte_offset = wrapping_sub(aligned_address, addr);
// FIXME: Remove the assume after <https://github.com/llvm/llvm-project/issues/62502>
// SAFETY: Masking by `-a` can only affect the low bits, and thus cannot have reduced
// the value by more than `a-1`, so even though the intermediate values might have
// wrapped, the byte_offset is always in `[0, a)`.
unsafe { assume(byte_offset < a) };
// SAFETY: `stride == 0` case has been handled by the special case above.
let addr_mod_stride = unsafe { unchecked_rem(addr, stride) };
return if addr_mod_stride == 0 {
// SAFETY: `stride` is non-zero. This is guaranteed to divide exactly as well, because
// addr has been verified to be aligned to the original type’s alignment requirements.
unsafe { exact_div(byte_offset, stride) }
} else {
usize::MAX
};
}
// GENERAL_CASE: From here on we’re handling the very general case where `addr` may be
// misaligned, there isn’t an obvious relationship between `stride` and `a` that we can take an
// advantage of, etc. This case produces machine code that isn’t particularly high quality,
// compared to the special cases above. The code produced here is still within the realm of
// miracles, given the situations this case has to deal with.
// SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
// FIXME(const-hack) replace with min
let gcdpow = unsafe {
let x = cttz_nonzero(stride);
let y = cttz_nonzero(a);
if x < y { x } else { y }
};
// SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a `usize`.
let gcd = unsafe { unchecked_shl(1usize, gcdpow) };
// SAFETY: gcd is always greater or equal to 1.
if addr & unsafe { unchecked_sub(gcd, 1) } == 0 {
// This branch solves for the following linear congruence equation:
//
// ` p + so = 0 mod a `
//
// `p` here is the pointer value, `s` - stride of `T`, `o` offset in `T`s, and `a` - the
// requested alignment.
//
// With `g = gcd(a, s)`, and the above condition asserting that `p` is also divisible by
// `g`, we can denote `a' = a/g`, `s' = s/g`, `p' = p/g`, then this becomes equivalent to:
//
// ` p' + s'o = 0 mod a' `
// ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
//
// The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the
// second term is "how does incrementing `p` by `s` bytes change the relative alignment of
// `p`" (again divided by `g`). Division by `g` is necessary to make the inverse well
// formed if `a` and `s` are not co-prime.
//
// Furthermore, the result produced by this solution is not "minimal", so it is necessary
// to take the result `o mod lcm(s, a)`. This `lcm(s, a)` is the same as `a'`.
// SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
// `a`.
let a2 = unsafe { unchecked_shr(a, gcdpow) };
// SAFETY: `a2` is non-zero. Shifting `a` by `gcdpow` cannot shift out any of the set bits
// in `a` (of which it has exactly one).
let a2minus1 = unsafe { unchecked_sub(a2, 1) };
// SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
// `a`.
let s2 = unsafe { unchecked_shr(stride & a_minus_one, gcdpow) };
// SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
// `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will
// always be strictly greater than `(p % a) >> gcdpow`.
let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(addr & a_minus_one, gcdpow)) };
// SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2`
// because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`.
return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1;
}
// Cannot be aligned at all.
usize::MAX
}
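For illustration, the congruence `p + s·o ≡ 0 (mod a)` solved above can be cross-checked against a brute-force reference. The sketch below is not part of `core`; the helper names `gcd` and `align_offset_reference` are hypothetical, and it only covers the cases this branch handles (power-of-two `a`, non-zero `stride`).

fn gcd(mut x: usize, mut y: usize) -> usize {
    // Euclid's algorithm, used only by the reference check below.
    while y != 0 {
        let r = x % y;
        x = y;
        y = r;
    }
    x
}

fn align_offset_reference(p: usize, s: usize, a: usize) -> usize {
    assert!(a.is_power_of_two() && s != 0);
    // The residues of `p + s * o` modulo `a` repeat with period `a / gcd(a, s)`,
    // so searching that many offsets is enough to decide solvability.
    let period = a / gcd(a, s);
    (0..period)
        .find(|&o| p.wrapping_add(s.wrapping_mul(o)) % a == 0)
        .unwrap_or(usize::MAX)
}

// E.g. align_offset_reference(6, 2, 8) == 1 (6 + 2 is a multiple of 8), while
// align_offset_reference(2, 4, 8) == usize::MAX (2 + 4*o is never a multiple of 8).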
core::ptr::align_offset::mod_inv const unsafe fn mod_inv(x: usize, m: usize) -> usize {
/// Multiplicative modular inverse table modulo 2⁴ = 16.
///
/// Note, that this table does not contain values where inverse does not exist (i.e., for
/// `0⁻¹ mod 16`, `2⁻¹ mod 16`, etc.)
const INV_TABLE_MOD_16: [u8; 8] = [1, 11, 13, 7, 9, 3, 5, 15];
/// Modulo for which the `INV_TABLE_MOD_16` is intended.
const INV_TABLE_MOD: usize = 16;
// SAFETY: `m` is required to be a power-of-two, hence non-zero.
let m_minus_one = unsafe { unchecked_sub(m, 1) };
let mut inverse = INV_TABLE_MOD_16[(x & (INV_TABLE_MOD - 1)) >> 1] as usize;
let mut mod_gate = INV_TABLE_MOD;
// We iterate "up" using the following formula:
//
// $$ xy ≡ 1 (mod 2ⁿ) → xy (2 - xy) ≡ 1 (mod 2²ⁿ) $$
//
// This application needs to be applied at least until `2²ⁿ ≥ m`, at which point we can
// finally reduce the computation to our desired `m` by taking `inverse mod m`.
//
// This computation is `O(log log m)`, which is to say, that on 64-bit machines this loop
// will always finish in at most 4 iterations.
loop {
// y = y * (2 - xy) mod n
//
// Note, that we use wrapping operations here intentionally – the original formula
// uses e.g., subtraction `mod n`. It is entirely fine to do them `mod
// usize::MAX` instead, because we take the result `mod n` at the end
// anyway.
if mod_gate >= m {
break;
}
inverse = wrapping_mul(inverse, wrapping_sub(2usize, wrapping_mul(x, inverse)));
let (new_gate, overflow) = mul_with_overflow(mod_gate, mod_gate);
if overflow {
break;
}
mod_gate = new_gate;
}
inverse & m_minus_one
}
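A minimal standalone sketch of the same Newton-style lifting, assuming an odd `x` and a power-of-two modulus; the helper name `mod_inv_pow2` is hypothetical and `u64` stands in for `usize`.

fn mod_inv_pow2(x: u64, m: u64) -> u64 {
    assert!(x % 2 == 1 && m.is_power_of_two());
    // Any odd x satisfies x * x ≡ 1 (mod 8), so x is its own inverse mod 8.
    let mut inv = x;
    // Each step squares the modulus for which `inv` is correct (2^n -> 2^(2n)),
    // so five doublings take the initial 3 correct bits past 64 bits.
    for _ in 0..5 {
        inv = inv.wrapping_mul(2u64.wrapping_sub(x.wrapping_mul(inv)));
    }
    inv & (m - 1)
}

// E.g. mod_inv_pow2(7, 16) == 7, since 7 * 7 == 49 ≡ 1 (mod 16), matching
// INV_TABLE_MOD_16[(7 & 15) >> 1].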
core::ptr::alignment::Alignment::as_usize pub const fn as_usize(self) -> usize {
self.0 as usize
}
core::ptr::alignment::Alignment::new pub const fn new(align: usize) -> Option<Self> {
if align.is_power_of_two() {
// SAFETY: Just checked it only has one bit set
Some(unsafe { Self::new_unchecked(align) })
} else {
None
}
}
core::ptr::alignment::Alignment::new_unchecked pub const unsafe fn new_unchecked(align: usize) -> Self {
assert_unsafe_precondition!(
check_language_ub,
"Alignment::new_unchecked requires a power of two",
(align: usize = align) => align.is_power_of_two()
);
// SAFETY: By precondition, this must be a power of two, and
// our variants encompass all possible powers of two.
unsafe { mem::transmute::<usize, Alignment>(align) }
}
core::ptr::const_ptr::<impl *const T>::add pub const unsafe fn add(self, count: usize) -> Self
where
T: Sized,
{
#[cfg(debug_assertions)]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
#[cfg(debug_assertions)] // Expensive, and doesn't catch much in the wild.
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::add requires that the address calculation does not overflow",
(
this: *const () = self as *const (),
count: usize = count,
size: usize = size_of::<T>(),
) => runtime_add_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::const_ptr::<impl *const T>::add::runtime_add_nowrap const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
core::ptr::const_ptr::<impl *const T>::add::runtime_add_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::addr pub fn addr(self) -> usize {
// A pointer-to-integer transmute currently has exactly the right semantics: it returns the
// address without exposing the provenance. Note that this is *not* a stable guarantee about
// transmute semantics, it relies on sysroot crates having special status.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
unsafe { mem::transmute(self.cast::<()>()) }
}
core::ptr::const_ptr::<impl *const T>::cast pub const fn cast<U>(self) -> *const U {
self as _
}
core::ptr::const_ptr::<impl *const T>::cast_array pub const fn cast_array<const N: usize>(self) -> *const [T; N] {
self.cast()
}
core::ptr::const_ptr::<impl *const T>::is_aligned_to pub fn is_aligned_to(self, align: usize) -> bool {
if !align.is_power_of_two() {
panic!("is_aligned_to: align is not a power-of-two");
}
self.addr() & (align - 1) == 0
}
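A usage sketch of the mask test (assuming the unstable `pointer_is_aligned_to` feature is available): for a power-of-two `align`, `align - 1` has exactly the low bits set, so `addr & (align - 1)` equals `addr % align`.

fn demo_is_aligned_to() {
    let x: u32 = 0;
    let p: *const u32 = &x;
    assert!(p.is_aligned_to(1)); // every address is 1-aligned
    assert!(p.is_aligned_to(std::mem::align_of::<u32>()));
    // `p.is_aligned_to(3)` would panic: 3 is not a power of two.
}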
core::ptr::const_ptr::<impl *const T>::is_null pub const fn is_null(self) -> bool {
// Compare via a cast to a thin pointer, so fat pointers are only
// considering their "data" part for null-ness.
let ptr = self as *const u8;
const_eval_select!(
@capture { ptr: *const u8 } -> bool:
// This use of `const_raw_ptr_comparison` has been explicitly blessed by t-lang.
if const #[rustc_allow_const_fn_unstable(const_raw_ptr_comparison)] {
match (ptr).guaranteed_eq(null_mut()) {
Some(res) => res,
// To remain maximally conservative, we stop execution when we don't
// know whether the pointer is null or not.
// We can *not* return `false` here, that would be unsound in `NonNull::new`!
None => panic!("null-ness of this pointer cannot be determined in const context"),
}
} else {
ptr.addr() == 0
}
)
}
core::ptr::const_ptr::<impl *const T>::is_null::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::const_ptr::<impl *const T>::read pub const unsafe fn read(self) -> T
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `read`.
unsafe { read(self) }
}
core::ptr::const_ptr::<impl *const T>::try_cast_aligned pub fn try_cast_aligned<U>(self) -> Option<*const U> {
if self.is_aligned_to(align_of::<U>()) { Some(self.cast()) } else { None }
}
core::ptr::const_ptr::<impl *const [T]>::as_array pub const fn as_array<const N: usize>(self) -> Option<*const [T; N]> {
if self.len() == N {
let me = self.as_ptr() as *const [T; N];
Some(me)
} else {
None
}
}
core::ptr::const_ptr::<impl *const [T]>::as_ptr pub const fn as_ptr(self) -> *const T {
self as *const T
}
core::ptr::const_ptr::<impl *const [T]>::is_empty pub const fn is_empty(self) -> bool {
self.len() == 0
}
core::ptr::const_ptr::<impl *const [T]>::len pub const fn len(self) -> usize {
metadata(self)
}
core::ptr::const_ptr::<impl core::cmp::PartialEq for *const T>::eq fn eq(&self, other: &*const T) -> bool {
*self == *other
}
core::ptr::copy pub const unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
// SAFETY: the safety contract for `copy` must be upheld by the caller.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::copy requires that both pointer arguments are aligned and non-null",
(
src: *const () = src as *const (),
dst: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
zero_size: bool = T::IS_ZST || count == 0,
) =>
ub_checks::maybe_is_aligned_and_not_null(src, align, zero_size)
&& ub_checks::maybe_is_aligned_and_not_null(dst, align, zero_size)
);
crate::intrinsics::copy(src, dst, count)
}
}
core::ptr::copy_nonoverlapping pub const unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::copy_nonoverlapping requires that both pointer arguments are aligned and non-null \
and the specified memory ranges do not overlap",
(
src: *const () = src as *const (),
dst: *mut () = dst as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
count: usize = count,
) => {
let zero_size = count == 0 || size == 0;
ub_checks::maybe_is_aligned_and_not_null(src, align, zero_size)
&& ub_checks::maybe_is_aligned_and_not_null(dst, align, zero_size)
&& ub_checks::maybe_is_nonoverlapping(src, dst, size, count)
}
);
// SAFETY: the safety contract for `copy_nonoverlapping` must be
// upheld by the caller.
unsafe { crate::intrinsics::copy_nonoverlapping(src, dst, count) }
}
core::ptr::from_ref pub const fn from_ref<T: PointeeSized>(r: &T) -> *const T {
r
}
core::ptr::metadata::from_raw_parts pub const fn from_raw_parts<T: PointeeSized>(
data_pointer: *const impl Thin,
metadata: <T as Pointee>::Metadata,
) -> *const T {
aggregate_raw_ptr(data_pointer, metadata)
}
core::ptr::metadata::from_raw_parts_mut pub const fn from_raw_parts_mut<T: PointeeSized>(
data_pointer: *mut impl Thin,
metadata: <T as Pointee>::Metadata,
) -> *mut T {
aggregate_raw_ptr(data_pointer, metadata)
}
core::ptr::metadata::metadata pub const fn metadata<T: PointeeSized>(ptr: *const T) -> <T as Pointee>::Metadata {
ptr_metadata(ptr)
}
core::ptr::mut_ptr::<impl *mut T>::add pub const unsafe fn add(self, count: usize) -> Self
where
T: Sized,
{
#[cfg(debug_assertions)]
#[inline]
#[rustc_allow_const_fn_unstable(const_eval_select)]
const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
#[cfg(debug_assertions)] // Expensive, and doesn't catch much in the wild.
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::add requires that the address calculation does not overflow",
(
this: *const () = self as *const (),
count: usize = count,
size: usize = size_of::<T>(),
) => runtime_add_nowrap(this, count, size)
);
// SAFETY: the caller must uphold the safety contract for `offset`.
unsafe { intrinsics::offset(self, count) }
}
core::ptr::mut_ptr::<impl *mut T>::add::runtime_add_nowrap const fn runtime_add_nowrap(this: *const (), count: usize, size: usize) -> bool {
const_eval_select!(
@capture { this: *const (), count: usize, size: usize } -> bool:
if const {
true
} else {
let Some(byte_offset) = count.checked_mul(size) else {
return false;
};
let (_, overflow) = this.addr().overflowing_add(byte_offset);
byte_offset <= (isize::MAX as usize) && !overflow
}
)
}
core::ptr::mut_ptr::<impl *mut T>::add::runtime_add_nowrap::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ptr::mut_ptr::<impl *mut T>::addr pub fn addr(self) -> usize {
// A pointer-to-integer transmute currently has exactly the right semantics: it returns the
// address without exposing the provenance. Note that this is *not* a stable guarantee about
// transmute semantics, it relies on sysroot crates having special status.
// SAFETY: Pointer-to-integer transmutes are valid (if you are okay with losing the
// provenance).
unsafe { mem::transmute(self.cast::<()>()) }
}
core::ptr::mut_ptr::<impl *mut T>::cast pub const fn cast<U>(self) -> *mut U {
self as _
}
core::ptr::mut_ptr::<impl *mut T>::cast_array pub const fn cast_array<const N: usize>(self) -> *mut [T; N] {
self.cast()
}
core::ptr::mut_ptr::<impl *mut T>::cast_const pub const fn cast_const(self) -> *const T {
self as _
}
core::ptr::mut_ptr::<impl *mut T>::read pub const unsafe fn read(self) -> T
where
T: Sized,
{
        // SAFETY: the caller must uphold the safety contract for `read`.
unsafe { read(self) }
}
core::ptr::mut_ptr::<impl *mut T>::write_bytes pub const unsafe fn write_bytes(self, val: u8, count: usize)
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `write_bytes`.
unsafe { write_bytes(self, val, count) }
}
core::ptr::mut_ptr::<impl *mut [T]>::as_mut_array pub const fn as_mut_array<const N: usize>(self) -> Option<*mut [T; N]> {
if self.len() == N {
let me = self.as_mut_ptr() as *mut [T; N];
Some(me)
} else {
None
}
}
core::ptr::mut_ptr::<impl *mut [T]>::as_mut_ptr pub const fn as_mut_ptr(self) -> *mut T {
self as *mut T
}
core::ptr::mut_ptr::<impl *mut [T]>::is_empty pub const fn is_empty(self) -> bool {
self.len() == 0
}
core::ptr::mut_ptr::<impl *mut [T]>::len pub const fn len(self) -> usize {
metadata(self)
}
core::ptr::mut_ptr::<impl core::cmp::PartialEq for *mut T>::eq fn eq(&self, other: &*mut T) -> bool {
*self == *other
}
core::ptr::non_null::NonNull::<T>::add pub const unsafe fn add(self, count: usize) -> Self
where
T: Sized,
{
// SAFETY: the caller must uphold the safety contract for `offset`.
// Additionally safety contract of `offset` guarantees that the resulting pointer is
// pointing to an allocation, there can't be an allocation at null, thus it's safe to
// construct `NonNull`.
unsafe { NonNull { pointer: intrinsics::offset(self.as_ptr(), count) } }
}
core::ptr::non_null::NonNull::<T>::as_mut pub const unsafe fn as_mut<'a>(&mut self) -> &'a mut T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a mutable reference.
unsafe { &mut *self.as_ptr() }
}
core::ptr::non_null::NonNull::<T>::as_ptr pub const fn as_ptr(self) -> *mut T {
// This is a transmute for the same reasons as `NonZero::get`.
// SAFETY: `NonNull` is `transparent` over a `*const T`, and `*const T`
// and `*mut T` have the same layout, so transitively we can transmute
// our `NonNull` to a `*mut T` directly.
unsafe { mem::transmute::<Self, *mut T>(self) }
}
core::ptr::non_null::NonNull::<T>::as_ref pub const unsafe fn as_ref<'a>(&self) -> &'a T {
// SAFETY: the caller must guarantee that `self` meets all the
// requirements for a reference.
// `cast_const` avoids a mutable raw pointer deref.
unsafe { &*self.as_ptr().cast_const() }
}
core::ptr::non_null::NonNull::<T>::cast pub const fn cast<U>(self) -> NonNull<U> {
// SAFETY: `self` is a `NonNull` pointer which is necessarily non-null
unsafe { NonNull { pointer: self.as_ptr() as *mut U } }
}
core::ptr::non_null::NonNull::<T>::from_mut pub const fn from_mut(r: &mut T) -> Self {
// SAFETY: A mutable reference cannot be null.
unsafe { NonNull { pointer: r as *mut T } }
}
core::ptr::non_null::NonNull::<T>::from_ref pub const fn from_ref(r: &T) -> Self {
// SAFETY: A reference cannot be null.
unsafe { NonNull { pointer: r as *const T } }
}
core::ptr::null pub const fn null<T: PointeeSized + Thin>() -> *const T {
from_raw_parts(without_provenance::<()>(0), ())
}
core::ptr::null_mut pub const fn null_mut<T: PointeeSized + Thin>() -> *mut T {
from_raw_parts_mut(without_provenance_mut::<()>(0), ())
}
core::ptr::read pub const unsafe fn read<T>(src: *const T) -> T {
// It would be semantically correct to implement this via `copy_nonoverlapping`
// and `MaybeUninit`, as was done before PR #109035. Calling `assume_init`
// provides enough information to know that this is a typed operation.
// However, as of March 2023 the compiler was not capable of taking advantage
// of that information. Thus, the implementation here switched to an intrinsic,
// which lowers to `_0 = *src` in MIR, to address a few issues:
//
// - Using `MaybeUninit::assume_init` after a `copy_nonoverlapping` was not
// turning the untyped copy into a typed load. As such, the generated
// `load` in LLVM didn't get various metadata, such as `!range` (#73258),
// `!nonnull`, and `!noundef`, resulting in poorer optimization.
// - Going through the extra local resulted in multiple extra copies, even
// in optimized MIR. (Ignoring StorageLive/Dead, the intrinsic is one
// MIR statement, while the previous implementation was eight.) LLVM
// could sometimes optimize them away, but because `read` is at the core
// of so many things, not having them in the first place improves what we
// hand off to the backend. For example, `mem::replace::<Big>` previously
// emitted 4 `alloca` and 6 `memcpy`s, but is now 1 `alloc` and 3 `memcpy`s.
// - In general, this approach keeps us from getting any more bugs (like
// #106369) that boil down to "`read(p)` is worse than `*p`", as this
// makes them look identical to the backend (or other MIR consumers).
//
// Future enhancements to MIR optimizations might well allow this to return
// to the previous implementation, rather than using an intrinsic.
// SAFETY: the caller must guarantee that `src` is valid for reads.
unsafe {
#[cfg(debug_assertions)] // Too expensive to always enable (for now?)
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::read requires that the pointer argument is aligned and non-null",
(
addr: *const () = src as *const (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
crate::intrinsics::read_via_copy(src)
}
}
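For illustration, a sketch of the older shape the comment above refers to, built from `MaybeUninit` and `copy_nonoverlapping`; it is not the current implementation and the name is hypothetical.

unsafe fn read_via_maybe_uninit<T>(src: *const T) -> T {
    let mut tmp = std::mem::MaybeUninit::<T>::uninit();
    // SAFETY: the caller must guarantee that `src` is valid for reads and
    // properly aligned; `tmp` is a fresh local, so the two regions cannot overlap.
    unsafe {
        std::ptr::copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
        tmp.assume_init()
    }
}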
core::ptr::read_unaligned pub const unsafe fn read_unaligned<T>(src: *const T) -> T {
let mut tmp = MaybeUninit::<T>::uninit();
// SAFETY: the caller must guarantee that `src` is valid for reads.
// `src` cannot overlap `tmp` because `tmp` was just allocated on
// the stack as a separate allocation.
//
// Also, since we just wrote a valid value into `tmp`, it is guaranteed
// to be properly initialized.
unsafe {
copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, size_of::<T>());
tmp.assume_init()
}
}
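A usage sketch: `read_unaligned` places no alignment requirement on the pointer, so it can read a `u32` from an arbitrary byte offset.

fn read_le_u32(bytes: &[u8], offset: usize) -> u32 {
    let end = offset.checked_add(4).unwrap();
    assert!(end <= bytes.len());
    // SAFETY: the bounds check above guarantees 4 readable bytes at `offset`.
    let v = unsafe { std::ptr::read_unaligned(bytes.as_ptr().add(offset) as *const u32) };
    u32::from_le(v)
}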
core::ptr::read_volatile pub unsafe fn read_volatile<T>(src: *const T) -> T {
// SAFETY: the caller must uphold the safety contract for `volatile_load`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::read_volatile requires that the pointer argument is aligned",
(
addr: *const () = src as *const (),
align: usize = align_of::<T>(),
) => ub_checks::maybe_is_aligned(addr, align)
);
intrinsics::volatile_load(src)
}
}
core::ptr::slice_from_raw_parts pub const fn slice_from_raw_parts<T>(data: *const T, len: usize) -> *const [T] {
from_raw_parts(data, len)
}
core::ptr::slice_from_raw_parts_mut pub const fn slice_from_raw_parts_mut<T>(data: *mut T, len: usize) -> *mut [T] {
from_raw_parts_mut(data, len)
}
core::ptr::without_provenance pub const fn without_provenance<T>(addr: usize) -> *const T {
without_provenance_mut(addr)
}
core::ptr::without_provenance_mut pub const fn without_provenance_mut<T>(addr: usize) -> *mut T {
// An int-to-pointer transmute currently has exactly the intended semantics: it creates a
// pointer without provenance. Note that this is *not* a stable guarantee about transmute
// semantics, it relies on sysroot crates having special status.
// SAFETY: every valid integer is also a valid pointer (as long as you don't dereference that
// pointer).
unsafe { mem::transmute(addr) }
}
core::ptr::write pub const unsafe fn write<T>(dst: *mut T, src: T) {
// Semantically, it would be fine for this to be implemented as a
// `copy_nonoverlapping` and appropriate drop suppression of `src`.
// However, implementing via that currently produces more MIR than is ideal.
// Using an intrinsic keeps it down to just the simple `*dst = move src` in
// MIR (11 statements shorter, at the time of writing), and also allows
// `src` to stay an SSA value in codegen_ssa, rather than a memory one.
// SAFETY: the caller must guarantee that `dst` is valid for writes.
// `dst` cannot overlap `src` because the caller has mutable access
// to `dst` while `src` is owned by this function.
unsafe {
#[cfg(debug_assertions)] // Too expensive to always enable (for now?)
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write requires that the pointer argument is aligned and non-null",
(
addr: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
is_zst: bool = T::IS_ZST,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, is_zst)
);
intrinsics::write_via_move(dst, src)
}
}
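For illustration, the semantically equivalent formulation the comment above mentions, expressed with `copy_nonoverlapping` plus explicit drop suppression; it is not the current implementation and the name is hypothetical.

unsafe fn write_via_copy<T>(dst: *mut T, src: T) {
    // SAFETY: the caller must guarantee that `dst` is valid for writes and
    // properly aligned; `src` is a local, so it cannot overlap `*dst`.
    unsafe {
        std::ptr::copy_nonoverlapping(&src, dst, 1);
    }
    // Suppress `src`'s destructor: ownership has been moved into `*dst`.
    std::mem::forget(src);
}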
core::ptr::write_bytes pub const unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
// SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write_bytes requires that the destination pointer is aligned and non-null",
(
addr: *const () = dst as *const (),
align: usize = align_of::<T>(),
zero_size: bool = T::IS_ZST || count == 0,
) => ub_checks::maybe_is_aligned_and_not_null(addr, align, zero_size)
);
crate::intrinsics::write_bytes(dst, val, count)
}
}
core::ptr::write_volatile pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
// SAFETY: the caller must uphold the safety contract for `volatile_store`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"ptr::write_volatile requires that the pointer argument is aligned",
(
addr: *mut () = dst as *mut (),
align: usize = align_of::<T>(),
) => ub_checks::maybe_is_aligned(addr, align)
);
intrinsics::volatile_store(dst, src);
}
}
core::result::Result::<&T, E>::cloned pub fn cloned(self) -> Result<T, E>
where
T: Clone,
{
self.map(|t| t.clone())
}
core::result::Result::<&T, E>::copied pub const fn copied(self) -> Result<T, E>
where
T: Copy,
{
// FIXME(const-hack): this implementation, which sidesteps using `Result::map` since it's not const
// ready yet, should be reverted when possible to avoid code repetition
match self {
Ok(&v) => Ok(v),
Err(e) => Err(e),
}
}
core::result::Result::<&mut T, E>::cloned pub fn cloned(self) -> Result<T, E>
where
T: Clone,
{
self.map(|t| t.clone())
}
core::result::Result::<&mut T, E>::copied pub const fn copied(self) -> Result<T, E>
where
T: Copy,
{
// FIXME(const-hack): this implementation, which sidesteps using `Result::map` since it's not const
// ready yet, should be reverted when possible to avoid code repetition
match self {
Ok(&mut v) => Ok(v),
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::and pub const fn and<U>(self, res: Result<U, E>) -> Result<U, E>
where
T: [const] Destruct,
E: [const] Destruct,
U: [const] Destruct,
{
match self {
Ok(_) => res,
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::and_then pub const fn and_then<U, F>(self, op: F) -> Result<U, E>
where
F: [const] FnOnce(T) -> Result<U, E> + [const] Destruct,
{
match self {
Ok(t) => op(t),
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::as_deref pub const fn as_deref(&self) -> Result<&T::Target, &E>
where
T: [const] Deref,
{
self.as_ref().map(Deref::deref)
}
core::result::Result::<T, E>::as_deref_mut pub const fn as_deref_mut(&mut self) -> Result<&mut T::Target, &mut E>
where
T: [const] DerefMut,
{
self.as_mut().map(DerefMut::deref_mut)
}
core::result::Result::<T, E>::as_mut pub const fn as_mut(&mut self) -> Result<&mut T, &mut E> {
match *self {
Ok(ref mut x) => Ok(x),
Err(ref mut x) => Err(x),
}
}
core::result::Result::<T, E>::as_ref pub const fn as_ref(&self) -> Result<&T, &E> {
match *self {
Ok(ref x) => Ok(x),
Err(ref x) => Err(x),
}
}
core::result::Result::<T, E>::err pub const fn err(self) -> Option<E>
where
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(_) => None,
Err(x) => Some(x),
}
}
core::result::Result::<T, E>::inspect pub const fn inspect<F>(self, f: F) -> Self
where
F: [const] FnOnce(&T) + [const] Destruct,
{
if let Ok(ref t) = self {
f(t);
}
self
}
core::result::Result::<T, E>::inspect_err pub const fn inspect_err<F>(self, f: F) -> Self
where
F: [const] FnOnce(&E) + [const] Destruct,
{
if let Err(ref e) = self {
f(e);
}
self
}
core::result::Result::<T, E>::is_err pub const fn is_err(&self) -> bool {
!self.is_ok()
}
core::result::Result::<T, E>::is_err_and pub const fn is_err_and<F>(self, f: F) -> bool
where
F: [const] FnOnce(E) -> bool + [const] Destruct,
E: [const] Destruct,
T: [const] Destruct,
{
match self {
Ok(_) => false,
Err(e) => f(e),
}
}
core::result::Result::<T, E>::is_ok pub const fn is_ok(&self) -> bool {
matches!(*self, Ok(_))
}
core::result::Result::<T, E>::is_ok_and pub const fn is_ok_and<F>(self, f: F) -> bool
where
F: [const] FnOnce(T) -> bool + [const] Destruct,
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Err(_) => false,
Ok(x) => f(x),
}
}
core::result::Result::<T, E>::map pub const fn map<U, F>(self, op: F) -> Result<U, E>
where
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Ok(t) => Ok(op(t)),
Err(e) => Err(e),
}
}
core::result::Result::<T, E>::map_err pub const fn map_err<F, O>(self, op: O) -> Result<T, F>
where
O: [const] FnOnce(E) -> F + [const] Destruct,
{
match self {
Ok(t) => Ok(t),
Err(e) => Err(op(e)),
}
}
core::result::Result::<T, E>::map_or pub const fn map_or<U, F>(self, default: U, f: F) -> U
where
F: [const] FnOnce(T) -> U + [const] Destruct,
T: [const] Destruct,
E: [const] Destruct,
U: [const] Destruct,
{
match self {
Ok(t) => f(t),
Err(_) => default,
}
}
core::result::Result::<T, E>::map_or_default pub const fn map_or_default<U, F>(self, f: F) -> U
where
F: [const] FnOnce(T) -> U + [const] Destruct,
U: [const] Default,
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(t) => f(t),
Err(_) => U::default(),
}
}
core::result::Result::<T, E>::map_or_else pub const fn map_or_else<U, D, F>(self, default: D, f: F) -> U
where
D: [const] FnOnce(E) -> U + [const] Destruct,
F: [const] FnOnce(T) -> U + [const] Destruct,
{
match self {
Ok(t) => f(t),
Err(e) => default(e),
}
}
core::result::Result::<T, E>::ok pub const fn ok(self) -> Option<T>
where
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(x) => Some(x),
Err(_) => None,
}
}
core::result::Result::<T, E>::or pub const fn or<F>(self, res: Result<T, F>) -> Result<T, F>
where
T: [const] Destruct,
E: [const] Destruct,
F: [const] Destruct,
{
match self {
Ok(v) => Ok(v),
Err(_) => res,
}
}
core::result::Result::<T, E>::or_else pub const fn or_else<F, O>(self, op: O) -> Result<T, F>
where
O: [const] FnOnce(E) -> Result<T, F> + [const] Destruct,
{
match self {
Ok(t) => Ok(t),
Err(e) => op(e),
}
}
core::result::Result::<T, E>::unwrap_or pub const fn unwrap_or(self, default: T) -> T
where
T: [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(t) => t,
Err(_) => default,
}
}
core::result::Result::<T, E>::unwrap_or_default pub const fn unwrap_or_default(self) -> T
where
T: [const] Default + [const] Destruct,
E: [const] Destruct,
{
match self {
Ok(x) => x,
Err(_) => Default::default(),
}
}
core::result::Result::<T, E>::unwrap_or_else pub const fn unwrap_or_else<F>(self, op: F) -> T
where
F: [const] FnOnce(E) -> T + [const] Destruct,
{
match self {
Ok(t) => t,
Err(e) => op(e),
}
}
core::result::Result::<core::option::Option<T>, E>::transpose pub const fn transpose(self) -> Option<Result<T, E>> {
match self {
Ok(Some(x)) => Some(Ok(x)),
Ok(None) => None,
Err(e) => Some(Err(e)),
}
}
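A usage sketch: `Ok(None)` flattens to `None`, while every other case swaps the wrappers.

fn demo_transpose() {
    let x: Result<Option<i32>, &str> = Ok(Some(5));
    assert_eq!(x.transpose(), Some(Ok(5)));
    let y: Result<Option<i32>, &str> = Ok(None);
    assert_eq!(y.transpose(), None);
    let z: Result<Option<i32>, &str> = Err("nope");
    assert_eq!(z.transpose(), Some(Err("nope")));
}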
core::slice::<impl [T]>::as_array pub const fn as_array<const N: usize>(&self) -> Option<&[T; N]> {
if self.len() == N {
let ptr = self.as_ptr().cast_array();
// SAFETY: The underlying array of a slice can be reinterpreted as an actual array `[T; N]` if `N` is not greater than the slice's length.
let me = unsafe { &*ptr };
Some(me)
} else {
None
}
}
core::slice::<impl [T]>::as_mut_array pub const fn as_mut_array<const N: usize>(&mut self) -> Option<&mut [T; N]> {
if self.len() == N {
let ptr = self.as_mut_ptr().cast_array();
// SAFETY: The underlying array of a slice can be reinterpreted as an actual array `[T; N]` if `N` is not greater than the slice's length.
let me = unsafe { &mut *ptr };
Some(me)
} else {
None
}
}
core::slice::<impl [T]>::as_mut_ptr pub const fn as_mut_ptr(&mut self) -> *mut T {
self as *mut [T] as *mut T
}
core::slice::<impl [T]>::as_ptr pub const fn as_ptr(&self) -> *const T {
self as *const [T] as *const T
}
core::slice::<impl [T]>::copy_from_slice pub const fn copy_from_slice(&mut self, src: &[T])
where
T: Copy,
{
// The panic code path was put into a cold function to not bloat the
// call site.
#[cfg_attr(not(panic = "immediate-abort"), inline(never), cold)]
#[cfg_attr(panic = "immediate-abort", inline)]
#[track_caller]
const fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
const_panic!(
"copy_from_slice: source slice length does not match destination slice length",
"copy_from_slice: source slice length ({src_len}) does not match destination slice length ({dst_len})",
src_len: usize,
dst_len: usize,
)
}
if self.len() != src.len() {
len_mismatch_fail(self.len(), src.len());
}
// SAFETY: `self` is valid for `self.len()` elements by definition, and `src` was
// checked to have the same length. The slices cannot overlap because
// mutable references are exclusive.
unsafe {
ptr::copy_nonoverlapping(src.as_ptr(), self.as_mut_ptr(), self.len());
}
}
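A usage sketch: the lengths must match exactly, otherwise the cold `len_mismatch_fail` path panics.

fn demo_copy_from_slice() {
    let src = [1u8, 2, 3, 4];
    let mut dst = [0u8; 4];
    dst.copy_from_slice(&src);
    assert_eq!(dst, src);
    // `dst[..2].copy_from_slice(&src)` would panic: lengths 2 and 4 differ.
}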
core::slice::<impl [T]>::copy_from_slice::len_mismatch_fail const fn len_mismatch_fail(dst_len: usize, src_len: usize) -> ! {
const_panic!(
"copy_from_slice: source slice length does not match destination slice length",
"copy_from_slice: source slice length ({src_len}) does not match destination slice length ({dst_len})",
src_len: usize,
dst_len: usize,
)
}
core::slice::<impl [T]>::copy_from_slice::len_mismatch_fail::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
#[noinline]
if const #[track_caller] #[inline] { // Inline this, to prevent codegen
$crate::panic!($const_msg)
} else #[track_caller] { // Do not inline this, it makes perf worse
$crate::panic!($runtime_msg)
}
)
}
core::slice::<impl [T]>::copy_from_slice::len_mismatch_fail::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::<impl [T]>::first pub const fn first(&self) -> Option<&T> {
if let [first, ..] = self { Some(first) } else { None }
}
core::slice::<impl [T]>::first_chunk pub const fn first_chunk<const N: usize>(&self) -> Option<&[T; N]> {
if self.len() < N {
None
} else {
// SAFETY: We explicitly check for the correct number of elements,
// and do not let the reference outlive the slice.
Some(unsafe { &*(self.as_ptr().cast_array()) })
}
}
core::slice::<impl [T]>::first_chunk_mut pub const fn first_chunk_mut<const N: usize>(&mut self) -> Option<&mut [T; N]> {
if self.len() < N {
None
} else {
// SAFETY: We explicitly check for the correct number of elements,
// do not let the reference outlive the slice,
// and require exclusive access to the entire slice to mutate the chunk.
Some(unsafe { &mut *(self.as_mut_ptr().cast_array()) })
}
}
core::slice::<impl [T]>::first_mut pub const fn first_mut(&mut self) -> Option<&mut T> {
if let [first, ..] = self { Some(first) } else { None }
}
core::slice::<impl [T]>::get pub const fn get<I>(&self, index: I) -> Option<&I::Output>
where
I: [const] SliceIndex<Self>,
{
index.get(self)
}
core::slice::<impl [T]>::get_mut pub const fn get_mut<I>(&mut self, index: I) -> Option<&mut I::Output>
where
I: [const] SliceIndex<Self>,
{
index.get_mut(self)
}
core::slice::<impl [T]>::get_unchecked pub const unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where
I: [const] SliceIndex<Self>,
{
// SAFETY: the caller must uphold most of the safety requirements for `get_unchecked`;
// the slice is dereferenceable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &*index.get_unchecked(self) }
}
core::slice::<impl [T]>::get_unchecked_mut pub const unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where
I: [const] SliceIndex<Self>,
{
// SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`;
// the slice is dereferenceable because `self` is a safe reference.
// The returned pointer is safe because impls of `SliceIndex` have to guarantee that it is.
unsafe { &mut *index.get_unchecked_mut(self) }
}
core::slice::<impl [T]>::is_empty pub const fn is_empty(&self) -> bool {
self.len() == 0
}
core::slice::<impl [T]>::iter pub const fn iter(&self) -> Iter<'_, T> {
Iter::new(self)
}
core::slice::<impl [T]>::iter_mut pub const fn iter_mut(&mut self) -> IterMut<'_, T> {
IterMut::new(self)
}
core::slice::<impl [T]>::last pub const fn last(&self) -> Option<&T> {
if let [.., last] = self { Some(last) } else { None }
}
core::slice::<impl [T]>::last_mut pub const fn last_mut(&mut self) -> Option<&mut T> {
if let [.., last] = self { Some(last) } else { None }
}
core::slice::<impl [T]>::split_first pub const fn split_first(&self) -> Option<(&T, &[T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
core::slice::<impl [T]>::split_first_mut pub const fn split_first_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [first, tail @ ..] = self { Some((first, tail)) } else { None }
}
core::slice::<impl [T]>::split_last pub const fn split_last(&self) -> Option<(&T, &[T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
core::slice::<impl [T]>::split_last_mut pub const fn split_last_mut(&mut self) -> Option<(&mut T, &mut [T])> {
if let [init @ .., last] = self { Some((last, init)) } else { None }
}
core::slice::cmp::<impl core::cmp::PartialEq<[U]> for [T]>::eq fn eq(&self, other: &[U]) -> bool {
SlicePartialEq::equal(self, other)
}
core::slice::cmp::<impl core::cmp::PartialEq<[U]> for [T]>::ne fn ne(&self, other: &[U]) -> bool {
SlicePartialEq::not_equal(self, other)
}
core::slice::cmp::SlicePartialEq::not_equal fn not_equal(&self, other: &[B]) -> bool {
!self.equal(other)
}
core::slice::index::<impl core::ops::index::Index<I> for [T]>::index fn index(&self, index: I) -> &I::Output {
index.index(self)
}
core::slice::index::<impl core::ops::index::IndexMut<I> for [T]>::index_mut fn index_mut(&mut self, index: I) -> &mut I::Output {
index.index_mut(self)
}
core::slice::index::get_offset_len_mut_noubcheck const unsafe fn get_offset_len_mut_noubcheck<T>(
ptr: *mut [T],
offset: usize,
len: usize,
) -> *mut [T] {
let ptr = ptr as *mut T;
// SAFETY: The caller already checked these preconditions
let ptr = unsafe { crate::intrinsics::offset(ptr, offset) };
crate::intrinsics::aggregate_raw_ptr(ptr, len)
}
core::slice::index::get_offset_len_noubcheck const unsafe fn get_offset_len_noubcheck<T>(
ptr: *const [T],
offset: usize,
len: usize,
) -> *const [T] {
let ptr = ptr as *const T;
// SAFETY: The caller already checked these preconditions
let ptr = unsafe { crate::intrinsics::offset(ptr, offset) };
crate::intrinsics::aggregate_raw_ptr(ptr, len)
}
core::slice::index::into_range pub(crate) const fn into_range(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> Option<ops::Range<usize>> {
use ops::Bound;
let start = match start {
Bound::Included(start) => start,
Bound::Excluded(start) => start.checked_add(1)?,
Bound::Unbounded => 0,
};
let end = match end {
Bound::Included(end) => end.checked_add(1)?,
Bound::Excluded(end) => end,
Bound::Unbounded => len,
};
// Don't bother with checking `start < end` and `end <= len`
// since these checks are handled by `Range` impls
Some(start..end)
}
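A usage sketch of the same normalization through the stable `(Bound<usize>, Bound<usize>)` `SliceIndex` impl: an excluded start becomes `start + 1` and an included end becomes `end + 1`.

fn demo_bound_pair_indexing() {
    use std::ops::Bound;
    let v = [10, 20, 30, 40, 50];
    // (Excluded(1), Included(3)) normalizes to 2..4.
    assert_eq!(&v[(Bound::Excluded(1), Bound::Included(3))], &v[2..4]);
    // Fully unbounded normalizes to 0..len.
    assert_eq!(&v[(Bound::<usize>::Unbounded, Bound::Unbounded)], &v[..]);
}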
core::slice::index::into_range_unchecked pub(crate) const fn into_range_unchecked(
len: usize,
(start, end): (ops::Bound<usize>, ops::Bound<usize>),
) -> ops::Range<usize> {
use ops::Bound;
let start = match start {
Bound::Included(i) => i,
Bound::Excluded(i) => i + 1,
Bound::Unbounded => 0,
};
let end = match end {
Bound::Included(i) => i + 1,
Bound::Excluded(i) => i,
Bound::Unbounded => len,
};
start..end
}
core::slice::index::slice_index_fail const fn slice_index_fail(start: usize, end: usize, len: usize) -> ! {
if start > len {
const_panic!(
"slice start index is out of range for slice",
"range start index {start} out of range for slice of length {len}",
start: usize,
len: usize,
)
}
if end > len {
const_panic!(
"slice end index is out of range for slice",
"range end index {end} out of range for slice of length {len}",
end: usize,
len: usize,
)
}
if start > end {
const_panic!(
"slice index start is larger than end",
"slice index starts at {start} but ends at {end}",
start: usize,
end: usize,
)
}
// Only reachable if the range was a `RangeInclusive` or a
// `RangeToInclusive`, with `end == len`.
const_panic!(
"slice end index is out of range for slice",
"range end index {end} out of range for slice of length {len}",
end: usize,
len: usize,
)
}
core::slice::index::slice_index_fail::do_panic const fn do_panic($($arg: $ty),*) -> ! {
$crate::intrinsics::const_eval_select!(
@capture { $($arg: $ty = $arg),* } -> !:
#[noinline]
if const #[track_caller] #[inline] { // Inline this, to prevent codegen
$crate::panic!($const_msg)
} else #[track_caller] { // Do not inline this, it makes perf worse
$crate::panic!($runtime_msg)
}
)
}
core::slice::index::slice_index_fail::do_panic::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::slice::iter::<impl core::iter::traits::collect::IntoIterator for &'a [T]>::into_iter fn into_iter(self) -> Iter<'a, T> {
self.iter()
}
core::slice::iter::<impl core::iter::traits::collect::IntoIterator for &'a mut [T]>::into_iter fn into_iter(self) -> IterMut<'a, T> {
self.iter_mut()
}
core::slice::iter::Iter::<'a, T>::new pub(super) const fn new(slice: &'a [T]) -> Self {
let len = slice.len();
let ptr: NonNull<T> = NonNull::from_ref(slice).cast();
// SAFETY: Similar to `IterMut::new`.
unsafe {
let end_or_len =
if T::IS_ZST { without_provenance(len) } else { ptr.as_ptr().add(len) };
Self { ptr, end_or_len, _marker: PhantomData }
}
}
core::slice::iter::Iter::<'a, T>::post_inc_start unsafe fn post_inc_start(&mut self, offset: usize) -> NonNull<T> {
let old = self.ptr;
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// so this new pointer is inside `self` and thus guaranteed to be non-null.
unsafe {
if_zst!(mut self,
// Using the intrinsic directly avoids emitting a UbCheck
len => *len = crate::intrinsics::unchecked_sub(*len, offset),
_end => self.ptr = self.ptr.add(offset),
);
}
old
}
core::slice::iter::IterMut::<'a, T>::new pub(super) const fn new(slice: &'a mut [T]) -> Self {
let len = slice.len();
let ptr: NonNull<T> = NonNull::from_mut(slice).cast();
// SAFETY: There are several things here:
//
// `ptr` has been obtained by `slice.as_ptr()` where `slice` is a valid
        // reference, thus it is non-null and safe to use and pass to
        // `NonNull::new_unchecked`.
//
// Adding `slice.len()` to the starting pointer gives a pointer
// at the end of `slice`. `end` will never be dereferenced, only checked
// for direct pointer equality with `ptr` to check if the iterator is
// done.
//
// In the case of a ZST, the end pointer is just the length. It's never
// used as a pointer at all, and thus it's fine to have no provenance.
//
// See the `next_unchecked!` and `is_empty!` macros as well as the
// `post_inc_start` method for more information.
unsafe {
let end_or_len =
if T::IS_ZST { without_provenance_mut(len) } else { ptr.as_ptr().add(len) };
Self { ptr, end_or_len, _marker: PhantomData }
}
}
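A usage sketch of the consequence described above: for zero-sized element types the iterator tracks the remaining length rather than a real end pointer, yet iteration behaves the same.

fn demo_zst_iteration() {
    let zst_slice: &[()] = &[(), (), ()];
    // Every `()` has the same dangling-but-aligned address; the count still works.
    assert_eq!(zst_slice.iter().count(), 3);
    let bytes: &mut [u8] = &mut [1, 2, 3];
    assert_eq!(bytes.iter_mut().map(|b| *b as usize).sum::<usize>(), 6);
}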
core::slice::iter::IterMut::<'a, T>::post_inc_start unsafe fn post_inc_start(&mut self, offset: usize) -> NonNull<T> {
let old = self.ptr;
// SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
// so this new pointer is inside `self` and thus guaranteed to be non-null.
unsafe {
if_zst!(mut self,
// Using the intrinsic directly avoids emitting a UbCheck
len => *len = crate::intrinsics::unchecked_sub(*len, offset),
_end => self.ptr = self.ptr.add(offset),
);
}
old
}
core::slice::raw::from_mut pub const fn from_mut<T>(s: &mut T) -> &mut [T] {
array::from_mut(s)
}
core::slice::raw::from_raw_parts pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"slice::from_raw_parts requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`",
(
data: *mut () = data as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
len: usize = len,
) =>
ub_checks::maybe_is_aligned_and_not_null(data, align, false)
&& ub_checks::is_valid_allocation_size(size, len)
);
&*ptr::slice_from_raw_parts(data, len)
}
}
core::slice::raw::from_raw_parts_mut pub const unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T] {
// SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
unsafe {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"slice::from_raw_parts_mut requires the pointer to be aligned and non-null, and the total size of the slice not to exceed `isize::MAX`",
(
data: *mut () = data as *mut (),
size: usize = size_of::<T>(),
align: usize = align_of::<T>(),
len: usize = len,
) =>
ub_checks::maybe_is_aligned_and_not_null(data, align, false)
&& ub_checks::is_valid_allocation_size(size, len)
);
&mut *ptr::slice_from_raw_parts_mut(data, len)
}
}
core::slice::raw::from_ref pub const fn from_ref<T>(s: &T) -> &[T] {
array::from_ref(s)
}
core::str::<impl core::convert::AsRef<[u8]> for str>::as_ref fn as_ref(&self) -> &[u8] {
self.as_bytes()
}
core::str::<impl core::default::Default for &str>::default fn default() -> Self {
""
}
core::str::<impl str>::as_bytes pub const fn as_bytes(&self) -> &[u8] {
// SAFETY: const sound because we transmute two types with the same layout
unsafe { mem::transmute(self) }
}
core::str::<impl str>::as_bytes_mut pub const unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
// SAFETY: the cast from `&str` to `&[u8]` is safe since `str`
// has the same layout as `&[u8]` (only std can make this guarantee).
// The pointer dereference is safe since it comes from a mutable reference which
// is guaranteed to be valid for writes.
unsafe { &mut *(self as *mut str as *mut [u8]) }
}
core::str::<impl str>::as_mut_ptr pub const fn as_mut_ptr(&mut self) -> *mut u8 {
self as *mut str as *mut u8
}
core::str::<impl str>::as_ptr pub const fn as_ptr(&self) -> *const u8 {
self as *const str as *const u8
}
core::str::<impl str>::as_str pub const fn as_str(&self) -> &str {
self
}
core::str::<impl str>::is_empty pub const fn is_empty(&self) -> bool {
self.len() == 0
}
core::str::<impl str>::len pub const fn len(&self) -> usize {
self.as_bytes().len()
}
core::str::converts::from_utf8 pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> {
// FIXME(const-hack): This should use `?` again, once it's `const`
match run_utf8_validation(v) {
Ok(_) => {
// SAFETY: validation succeeded.
Ok(unsafe { from_utf8_unchecked(v) })
}
Err(err) => Err(err),
}
}
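A usage sketch: validation either returns the borrowed `&str` or reports how many leading bytes were valid UTF-8.

fn demo_from_utf8() {
    assert_eq!(std::str::from_utf8(b"hello"), Ok("hello"));
    let bad = [0x68, 0x69, 0xFF]; // "hi" followed by an invalid byte
    let err = std::str::from_utf8(&bad).unwrap_err();
    assert_eq!(err.valid_up_to(), 2);
}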
core::str::converts::from_utf8_unchecked pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
// SAFETY: the caller must guarantee that the bytes `v` are valid UTF-8.
// Also relies on `&str` and `&[u8]` having the same layout.
unsafe { mem::transmute(v) }
}
core::str::error::Utf8Error::valid_up_to pub const fn valid_up_to(&self) -> usize {
self.valid_up_to
}
core::str::validations::contains_nonascii const fn contains_nonascii(x: usize) -> bool {
(x & NONASCII_MASK) != 0
}
core::str::validations::run_utf8_validation::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::str::validations::utf8_char_width pub const fn utf8_char_width(b: u8) -> usize {
UTF8_CHAR_WIDTH[b as usize] as usize
}
core::sync::atomic::AtomicU32::as_ptr pub const fn as_ptr(&self) -> *mut $int_type {
self.v.get()
}
core::sync::atomic::AtomicU32::compare_exchange pub fn compare_exchange(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
}
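A usage sketch: on success the previous value comes back in `Ok`, on a mismatch it comes back in `Err` and no store happens (the failure ordering must not be `Release` or `AcqRel`).

fn demo_compare_exchange() {
    use std::sync::atomic::{AtomicU32, Ordering};
    let a = AtomicU32::new(5);
    assert_eq!(a.compare_exchange(5, 10, Ordering::AcqRel, Ordering::Acquire), Ok(5));
    assert_eq!(a.compare_exchange(6, 12, Ordering::SeqCst, Ordering::Relaxed), Err(10));
    assert_eq!(a.load(Ordering::Relaxed), 10);
}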
core::sync::atomic::AtomicU32::compare_exchange_weak pub fn compare_exchange_weak(&self,
current: $int_type,
new: $int_type,
success: Ordering,
failure: Ordering) -> Result<$int_type, $int_type> {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe {
atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
}
}
core::sync::atomic::AtomicU32::fetch_add pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_add(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_and pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_and(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_max pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $max_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_min pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { $min_fn(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_nand pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_nand(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_or pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_or(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_sub pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_sub(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::fetch_update pub fn fetch_update<F>(&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: F) -> Result<$int_type, $int_type>
where F: FnMut($int_type) -> Option<$int_type> {
let mut prev = self.load(fetch_order);
while let Some(next) = f(prev) {
match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
x @ Ok(_) => return x,
Err(next_prev) => prev = next_prev
}
}
Err(prev)
}
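A usage sketch: `fetch_update` keeps retrying with `compare_exchange_weak` until the closure's proposed value is stored, and returns `Err` with the current value once the closure returns `None`.

fn demo_fetch_update() {
    use std::sync::atomic::{AtomicU32, Ordering};
    let a = AtomicU32::new(7);
    // Saturating increment: refuse to go past 8.
    let bump = |x: u32| if x < 8 { Some(x + 1) } else { None };
    assert_eq!(a.fetch_update(Ordering::SeqCst, Ordering::SeqCst, bump), Ok(7));
    assert_eq!(a.fetch_update(Ordering::SeqCst, Ordering::SeqCst, bump), Err(8));
    assert_eq!(a.load(Ordering::SeqCst), 8);
}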
core::sync::atomic::AtomicU32::fetch_xor pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_xor(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::from_mut pub fn from_mut(v: &mut $int_type) -> &mut Self {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut $int_type as *mut Self) }
}
core::sync::atomic::AtomicU32::from_mut_slice pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
// SAFETY:
// - the mutable reference guarantees unique ownership.
// - the alignment of `$int_type` and `Self` is the
// same, as promised by $cfg_align and verified above.
unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
}
core::sync::atomic::AtomicU32::from_ptr pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
// SAFETY: guaranteed by the caller
unsafe { &*ptr.cast() }
}
core::sync::atomic::AtomicU32::get_mut pub fn get_mut(&mut self) -> &mut $int_type {
self.v.get_mut()
}
core::sync::atomic::AtomicU32::get_mut_slice pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
// SAFETY: the mutable reference guarantees unique ownership.
unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
}
core::sync::atomic::AtomicU32::into_inner pub const fn into_inner(self) -> $int_type {
self.v.into_inner()
}
core::sync::atomic::AtomicU32::load pub fn load(&self, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_load(self.v.get(), order) }
}
core::sync::atomic::AtomicU32::new pub const fn new(v: $int_type) -> Self {
Self {v: UnsafeCell::new(v)}
}
core::sync::atomic::AtomicU32::store pub fn store(&self, val: $int_type, order: Ordering) {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_store(self.v.get(), val, order); }
}
core::sync::atomic::AtomicU32::swap pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
// SAFETY: data races are prevented by atomic intrinsics.
unsafe { atomic_swap(self.v.get(), val, order) }
}
core::sync::atomic::AtomicU32::try_update pub fn try_update(
&self,
set_order: Ordering,
fetch_order: Ordering,
f: impl FnMut($int_type) -> Option<$int_type>,
) -> Result<$int_type, $int_type> {
// FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
// when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
self.fetch_update(set_order, fetch_order, f)
}
core::sync::atomic::AtomicU32::update pub fn update(
&self,
set_order: Ordering,
fetch_order: Ordering,
mut f: impl FnMut($int_type) -> $int_type,
) -> $int_type {
let mut prev = self.load(fetch_order);
loop {
match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
Ok(x) => break x,
Err(next_prev) => prev = next_prev,
}
}
}
core::sync::atomic::atomic_add unsafe fn atomic_add<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_add`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xadd::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xadd::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xadd::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xadd::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xadd::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_and unsafe fn atomic_and<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_and`
unsafe {
match order {
Relaxed => intrinsics::atomic_and::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_and::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_and::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_and::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_and::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_compare_exchange pub unsafe fn atomic_compare_exchange<T: Copy>(
dst: *mut T,
old: T,
new: T,
success: Ordering,
failure: Ordering,
) -> Result<T, T> {
// SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
}
(Relaxed, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
}
(Relaxed, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
}
(Acquire, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
}
(Acquire, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
}
(Acquire, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
}
(Release, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
}
(Release, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
}
(Release, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
}
(AcqRel, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
}
(AcqRel, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
}
(AcqRel, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
}
(SeqCst, Relaxed) => {
intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
}
(SeqCst, Acquire) => {
intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
}
(SeqCst, SeqCst) => {
intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
}
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
}
core::sync::atomic::atomic_compare_exchange_weak unsafe fn atomic_compare_exchange_weak<T: Copy>(
dst: *mut T,
old: T,
new: T,
success: Ordering,
failure: Ordering,
) -> Result<T, T> {
// SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
let (val, ok) = unsafe {
match (success, failure) {
(Relaxed, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
}
(Relaxed, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
}
(Relaxed, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
}
(Acquire, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
}
(Acquire, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
}
(Acquire, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
}
(Release, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
}
(Release, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
}
(Release, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
}
(AcqRel, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
}
(AcqRel, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
}
(AcqRel, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
}
(SeqCst, Relaxed) => {
intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
}
(SeqCst, Acquire) => {
intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
}
(SeqCst, SeqCst) => {
intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
}
(_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
(_, Release) => panic!("there is no such thing as a release failure ordering"),
}
};
if ok { Ok(val) } else { Err(val) }
}
core::sync::atomic::atomic_load unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_load`.
unsafe {
match order {
Relaxed => intrinsics::atomic_load::<T, { AO::Relaxed }>(dst),
Acquire => intrinsics::atomic_load::<T, { AO::Acquire }>(dst),
SeqCst => intrinsics::atomic_load::<T, { AO::SeqCst }>(dst),
Release => panic!("there is no such thing as a release load"),
AcqRel => panic!("there is no such thing as an acquire-release load"),
}
}
}
core::sync::atomic::atomic_nand unsafe fn atomic_nand<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_nand`
unsafe {
match order {
Relaxed => intrinsics::atomic_nand::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_nand::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_nand::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_nand::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_nand::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_or unsafe fn atomic_or<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_or`
unsafe {
match order {
SeqCst => intrinsics::atomic_or::<T, U, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_or::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_or::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_or::<T, U, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_or::<T, U, { AO::Relaxed }>(dst, val),
}
}
}
core::sync::atomic::atomic_store unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
// SAFETY: the caller must uphold the safety contract for `atomic_store`.
unsafe {
match order {
Relaxed => intrinsics::atomic_store::<T, { AO::Relaxed }>(dst, val),
Release => intrinsics::atomic_store::<T, { AO::Release }>(dst, val),
SeqCst => intrinsics::atomic_store::<T, { AO::SeqCst }>(dst, val),
Acquire => panic!("there is no such thing as an acquire store"),
AcqRel => panic!("there is no such thing as an acquire-release store"),
}
}
}
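As the panicking arms above show, loads accept only Relaxed, Acquire, and SeqCst, while stores accept only Relaxed, Release, and SeqCst. A minimal sketch of how this surfaces through the public API (example code, not part of the listing):

use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let x = AtomicU32::new(0);
    // Valid load orderings: Relaxed, Acquire, SeqCst.
    let _ = x.load(Ordering::Acquire);
    // Valid store orderings: Relaxed, Release, SeqCst.
    x.store(1, Ordering::Release);
    // x.load(Ordering::Release) or x.store(1, Ordering::Acquire) would panic
    // at runtime with the messages shown in the listings above.
}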
core::sync::atomic::atomic_sub unsafe fn atomic_sub<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_sub`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xsub::<T, U, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xsub::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xsub::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xsub::<T, U, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xsub::<T, U, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_swap unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_swap`.
unsafe {
match order {
Relaxed => intrinsics::atomic_xchg::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_xchg::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xchg::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xchg::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_xchg::<T, { AO::SeqCst }>(dst, val),
}
}
}
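The read-modify-write helpers (nand, or, sub, swap, ...) all return the value stored before the operation. A small usage sketch through the public fetch_* methods, for illustration only:

use std::sync::atomic::{AtomicU8, Ordering};

fn main() {
    let flags = AtomicU8::new(0b0011);
    // Each operation returns the previous value.
    assert_eq!(flags.fetch_or(0b0100, Ordering::Relaxed), 0b0011);
    // NAND: !(0b0111 & 0b0111) == 0b1111_1000
    assert_eq!(flags.fetch_nand(0b0111, Ordering::Relaxed), 0b0111);
    assert_eq!(flags.fetch_sub(8, Ordering::Relaxed), 0b1111_1000);
    assert_eq!(flags.swap(42, Ordering::Relaxed), 0b1111_0000);
    assert_eq!(flags.load(Ordering::Relaxed), 42);
}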
core::sync::atomic::atomic_umax unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_umax`
unsafe {
match order {
Relaxed => intrinsics::atomic_umax::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_umax::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_umax::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_umax::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_umax::<T, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_umin unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_umin`
unsafe {
match order {
Relaxed => intrinsics::atomic_umin::<T, { AO::Relaxed }>(dst, val),
Acquire => intrinsics::atomic_umin::<T, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_umin::<T, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_umin::<T, { AO::AcqRel }>(dst, val),
SeqCst => intrinsics::atomic_umin::<T, { AO::SeqCst }>(dst, val),
}
}
}
core::sync::atomic::atomic_xor unsafe fn atomic_xor<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
// SAFETY: the caller must uphold the safety contract for `atomic_xor`
unsafe {
match order {
SeqCst => intrinsics::atomic_xor::<T, U, { AO::SeqCst }>(dst, val),
Acquire => intrinsics::atomic_xor::<T, U, { AO::Acquire }>(dst, val),
Release => intrinsics::atomic_xor::<T, U, { AO::Release }>(dst, val),
AcqRel => intrinsics::atomic_xor::<T, U, { AO::AcqRel }>(dst, val),
Relaxed => intrinsics::atomic_xor::<T, U, { AO::Relaxed }>(dst, val),
}
}
}
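For unsigned atomic integers, the public fetch_max and fetch_min methods correspond to the umax and umin helpers above, and fetch_xor to atomic_xor; all return the previous value. A brief illustrative example:

use std::sync::atomic::{AtomicU32, Ordering};

fn main() {
    let hwm = AtomicU32::new(10);
    assert_eq!(hwm.fetch_max(25, Ordering::Relaxed), 10); // previous value
    assert_eq!(hwm.fetch_min(7, Ordering::Relaxed), 25);
    assert_eq!(hwm.load(Ordering::Relaxed), 7);

    let bits = AtomicU32::new(0b1010);
    assert_eq!(bits.fetch_xor(0b0110, Ordering::Relaxed), 0b1010);
    assert_eq!(bits.load(Ordering::Relaxed), 0b1100);
}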
core::time::Duration::as_micros pub const fn as_micros(&self) -> u128 {
self.secs as u128 * MICROS_PER_SEC as u128
+ (self.nanos.as_inner() / NANOS_PER_MICRO) as u128
}
core::time::Duration::as_millis pub const fn as_millis(&self) -> u128 {
self.secs as u128 * MILLIS_PER_SEC as u128
+ (self.nanos.as_inner() / NANOS_PER_MILLI) as u128
}
core::time::Duration::as_millis_f32 pub const fn as_millis_f32(&self) -> f32 {
(self.secs as f32) * (MILLIS_PER_SEC as f32)
+ (self.nanos.as_inner() as f32) / (NANOS_PER_MILLI as f32)
}
core::time::Duration::as_millis_f64 pub const fn as_millis_f64(&self) -> f64 {
(self.secs as f64) * (MILLIS_PER_SEC as f64)
+ (self.nanos.as_inner() as f64) / (NANOS_PER_MILLI as f64)
}
core::time::Duration::as_nanos pub const fn as_nanos(&self) -> u128 {
self.secs as u128 * NANOS_PER_SEC as u128 + self.nanos.as_inner() as u128
}
core::time::Duration::as_secs pub const fn as_secs(&self) -> u64 {
self.secs
}
core::time::Duration::as_secs_f32 pub const fn as_secs_f32(&self) -> f32 {
(self.secs as f32) + (self.nanos.as_inner() as f32) / (NANOS_PER_SEC as f32)
}
core::time::Duration::as_secs_f64 pub const fn as_secs_f64(&self) -> f64 {
(self.secs as f64) + (self.nanos.as_inner() as f64) / (NANOS_PER_SEC as f64)
}
core::time::Duration::div_duration_f32 pub const fn div_duration_f32(self, rhs: Duration) -> f32 {
let self_nanos =
(self.secs as f32) * (NANOS_PER_SEC as f32) + (self.nanos.as_inner() as f32);
let rhs_nanos = (rhs.secs as f32) * (NANOS_PER_SEC as f32) + (rhs.nanos.as_inner() as f32);
self_nanos / rhs_nanos
}
core::time::Duration::div_duration_f64 pub const fn div_duration_f64(self, rhs: Duration) -> f64 {
let self_nanos =
(self.secs as f64) * (NANOS_PER_SEC as f64) + (self.nanos.as_inner() as f64);
let rhs_nanos = (rhs.secs as f64) * (NANOS_PER_SEC as f64) + (rhs.nanos.as_inner() as f64);
self_nanos / rhs_nanos
}
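The accessors above either truncate to whole units (as_secs, as_millis, as_micros, as_nanos) or convert to floating point. A short worked example using the stable public API:

use std::time::Duration;

fn main() {
    let d = Duration::new(2, 250_000_000); // 2.25 s
    assert_eq!(d.as_secs(), 2);            // whole seconds only
    assert_eq!(d.as_millis(), 2_250);
    assert_eq!(d.as_micros(), 2_250_000);
    assert_eq!(d.as_nanos(), 2_250_000_000);
    assert_eq!(d.as_secs_f64(), 2.25);
    // div_duration_f64 divides the two durations as nanosecond counts.
    assert_eq!(d.div_duration_f64(Duration::from_millis(750)), 3.0);
}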
core::time::Duration::from_days pub const fn from_days(days: u64) -> Duration {
if days > u64::MAX / (SECS_PER_MINUTE * MINS_PER_HOUR * HOURS_PER_DAY) {
panic!("overflow in Duration::from_days");
}
Duration::from_secs(days * MINS_PER_HOUR * SECS_PER_MINUTE * HOURS_PER_DAY)
}
core::time::Duration::from_hours pub const fn from_hours(hours: u64) -> Duration {
if hours > u64::MAX / (SECS_PER_MINUTE * MINS_PER_HOUR) {
panic!("overflow in Duration::from_hours");
}
Duration::from_secs(hours * MINS_PER_HOUR * SECS_PER_MINUTE)
}
core::time::Duration::from_micros pub const fn from_micros(micros: u64) -> Duration {
let secs = micros / MICROS_PER_SEC;
let subsec_micros = (micros % MICROS_PER_SEC) as u32;
// SAFETY: (x % 1_000_000) * 1_000 < 1_000_000_000
// => x % 1_000_000 < 1_000_000
let subsec_nanos = unsafe { Nanoseconds::new_unchecked(subsec_micros * NANOS_PER_MICRO) };
Duration { secs, nanos: subsec_nanos }
}
core::time::Duration::from_millis pub const fn from_millis(millis: u64) -> Duration {
let secs = millis / MILLIS_PER_SEC;
let subsec_millis = (millis % MILLIS_PER_SEC) as u32;
// SAFETY: (x % 1_000) * 1_000_000 < 1_000_000_000
// => x % 1_000 < 1_000
let subsec_nanos = unsafe { Nanoseconds::new_unchecked(subsec_millis * NANOS_PER_MILLI) };
Duration { secs, nanos: subsec_nanos }
}
core::time::Duration::from_mins pub const fn from_mins(mins: u64) -> Duration {
if mins > u64::MAX / SECS_PER_MINUTE {
panic!("overflow in Duration::from_mins");
}
Duration::from_secs(mins * SECS_PER_MINUTE)
}
core::time::Duration::from_nanos pub const fn from_nanos(nanos: u64) -> Duration {
const NANOS_PER_SEC: u64 = self::NANOS_PER_SEC as u64;
let secs = nanos / NANOS_PER_SEC;
let subsec_nanos = (nanos % NANOS_PER_SEC) as u32;
// SAFETY: x % 1_000_000_000 < 1_000_000_000
let subsec_nanos = unsafe { Nanoseconds::new_unchecked(subsec_nanos) };
Duration { secs, nanos: subsec_nanos }
}
core::time::Duration::from_secs pub const fn from_secs(secs: u64) -> Duration {
Duration { secs, nanos: Nanoseconds::ZERO }
}
core::time::Duration::from_weeks pub const fn from_weeks(weeks: u64) -> Duration {
if weeks > u64::MAX / (SECS_PER_MINUTE * MINS_PER_HOUR * HOURS_PER_DAY * DAYS_PER_WEEK) {
panic!("overflow in Duration::from_weeks");
}
Duration::from_secs(weeks * MINS_PER_HOUR * SECS_PER_MINUTE * HOURS_PER_DAY * DAYS_PER_WEEK)
}
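The sub-second constructors split their input into whole seconds plus a nanosecond remainder, while the minute/hour/day/week constructors guard against overflowing u64 seconds and panic otherwise. A small sketch using the long-stable constructors (from_days and friends may be feature-gated depending on toolchain, so they are not exercised here):

use std::time::Duration;

fn main() {
    assert_eq!(Duration::from_millis(1_500), Duration::new(1, 500_000_000));
    assert_eq!(Duration::from_micros(1_000_002), Duration::new(1, 2_000));
    assert_eq!(Duration::from_nanos(1_000_000_003), Duration::new(1, 3));
    // from_secs cannot overflow; the larger-unit constructors panic if the
    // multiplication would exceed u64::MAX seconds.
    assert_eq!(Duration::from_secs(u64::MAX).as_secs(), u64::MAX);
}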
core::time::Duration::is_zero pub const fn is_zero(&self) -> bool {
self.secs == 0 && self.nanos.as_inner() == 0
}
core::time::Duration::new pub const fn new(secs: u64, nanos: u32) -> Duration {
if nanos < NANOS_PER_SEC {
// SAFETY: nanos < NANOS_PER_SEC, therefore nanos is within the valid range
Duration { secs, nanos: unsafe { Nanoseconds::new_unchecked(nanos) } }
} else {
let secs = secs
.checked_add((nanos / NANOS_PER_SEC) as u64)
.expect("overflow in Duration::new");
let nanos = nanos % NANOS_PER_SEC;
// SAFETY: nanos % NANOS_PER_SEC < NANOS_PER_SEC, therefore nanos is within the valid range
Duration { secs, nanos: unsafe { Nanoseconds::new_unchecked(nanos) } }
}
}
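Duration::new carries any excess nanoseconds into the seconds field, panicking only if that carry overflows u64. For example (illustrative):

use std::time::Duration;

fn main() {
    // 1 s + 1.5e9 ns normalizes to 2 s + 0.5e9 ns.
    assert_eq!(Duration::new(1, 1_500_000_000), Duration::new(2, 500_000_000));
}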
core::time::Duration::subsec_micros pub const fn subsec_micros(&self) -> u32 {
self.nanos.as_inner() / NANOS_PER_MICRO
}
core::time::Duration::subsec_millis pub const fn subsec_millis(&self) -> u32 {
self.nanos.as_inner() / NANOS_PER_MILLI
}
core::time::Duration::subsec_nanos pub const fn subsec_nanos(&self) -> u32 {
self.nanos.as_inner()
}
core::ub_checks::check_language_ub pub(crate) const fn check_language_ub() -> bool {
// Only used for UB checks so we may const_eval_select.
intrinsics::ub_checks()
&& const_eval_select!(
@capture { } -> bool:
if const {
// Always disable UB checks.
false
} else {
// Disable UB checks in Miri.
!cfg!(miri)
}
)
}
core::ub_checks::check_language_ub::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ub_checks::is_valid_allocation_size pub(crate) const fn is_valid_allocation_size(size: usize, len: usize) -> bool {
let max_len = if size == 0 { usize::MAX } else { isize::MAX as usize / size };
len <= max_len
}
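is_valid_allocation_size encodes the rule that a slice's total size in bytes must not exceed isize::MAX, with zero-sized elements allowing any length. A hypothetical standalone restatement of the same rule, for illustration only:

// Hypothetical helper mirroring the check above; not part of core.
const fn fits_in_isize(elem_size: usize, len: usize) -> bool {
    if elem_size == 0 {
        true // any length is fine for zero-sized elements
    } else {
        len <= (isize::MAX as usize) / elem_size
    }
}

fn main() {
    assert!(fits_in_isize(8, 1 << 40));     // 8 TiB of u64s fits on a 64-bit target
    assert!(!fits_in_isize(8, usize::MAX)); // total byte size would exceed isize::MAX
    assert!(fits_in_isize(0, usize::MAX));  // zero-sized elements
}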
core::ub_checks::maybe_is_aligned pub(crate) const fn maybe_is_aligned(ptr: *const (), align: usize) -> bool {
// This is just for safety checks so we can const_eval_select.
const_eval_select!(
@capture { ptr: *const (), align: usize } -> bool:
if const {
true
} else {
ptr.is_aligned_to(align)
}
)
}
core::ub_checks::maybe_is_aligned::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
core::ub_checks::maybe_is_aligned_and_not_null pub(crate) const fn maybe_is_aligned_and_not_null(
ptr: *const (),
align: usize,
is_zst: bool,
) -> bool {
// This is just for safety checks so we can const_eval_select.
maybe_is_aligned(ptr, align) && (is_zst || !ptr.is_null())
}
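maybe_is_aligned_and_not_null combines the alignment check with a null check that is skipped for zero-sized types. For reference, alignment of an address can be checked with the usual power-of-two mask; this is a hypothetical standalone check, not the helper above:

// Hypothetical check; assumes `align` is a power of two, as all Rust alignments are.
fn is_address_aligned(ptr: *const (), align: usize) -> bool {
    debug_assert!(align.is_power_of_two());
    (ptr as usize) & (align - 1) == 0
}

fn main() {
    let x = 0u64;
    let p = &x as *const u64 as *const ();
    assert!(is_address_aligned(p, std::mem::align_of::<u64>()));
    assert!(is_address_aligned(p, 1)); // every address is 1-aligned
}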
core::ub_checks::maybe_is_nonoverlapping pub(crate) const fn maybe_is_nonoverlapping(
src: *const (),
dst: *const (),
size: usize,
count: usize,
) -> bool {
// This is just for safety checks so we can const_eval_select.
const_eval_select!(
@capture { src: *const (), dst: *const (), size: usize, count: usize } -> bool:
if const {
true
} else {
let src_usize = src.addr();
let dst_usize = dst.addr();
let Some(size) = size.checked_mul(count) else {
crate::panicking::panic_nounwind(
"is_nonoverlapping: `size_of::<T>() * count` overflows a usize",
)
};
let diff = src_usize.abs_diff(dst_usize);
// If the absolute distance between the ptrs is at least as big as the size of the buffer,
// they do not overlap.
diff >= size
}
)
}
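The overlap check reduces to address arithmetic: two buffers of count elements of size bytes each cannot overlap if their start addresses are at least size * count bytes apart. A hypothetical standalone version of the same rule:

// Hypothetical restatement of the rule above; not part of core.
fn ranges_nonoverlapping(src: usize, dst: usize, size: usize, count: usize) -> bool {
    let bytes = size.checked_mul(count).expect("size * count overflows usize");
    src.abs_diff(dst) >= bytes
}

fn main() {
    // Two 16-byte buffers (4 x u32) starting 16 bytes apart do not overlap...
    assert!(ranges_nonoverlapping(0x1000, 0x1010, 4, 4));
    // ...but starting only 8 bytes apart they do.
    assert!(!ranges_nonoverlapping(0x1000, 0x1008, 4, 4));
}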
core::ub_checks::maybe_is_nonoverlapping::runtime fn runtime$(<$($binders)*>)?($($arg: $ty),*) $( -> $ret )? {
$runtime
}
<core::any::TypeId as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive_const(Clone))]
<core::array::iter::IntoIter<T, N> as core::clone::Clone>::clone #[derive(Clone)]
<core::convert::Infallible as core::clone::Clone>::clone fn clone(&self) -> Infallible {
match *self {}
}
<core::convert::Infallible as core::cmp::PartialEq>::eq fn eq(&self, _: &Infallible) -> bool {
match *self {}
}
<core::intrinsics::AtomicOrdering as core::cmp::Eq>::assert_receiver_is_total_eq #[cfg_attr(feature = "ferrocene_certified", derive(ConstParamTy, PartialEq, Eq))]
<core::intrinsics::AtomicOrdering as core::cmp::PartialEq>::eq #[cfg_attr(feature = "ferrocene_certified", derive(ConstParamTy, PartialEq, Eq))]
<core::iter::adapters::cloned::Cloned<I> as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Clone))]
<core::iter::adapters::map::Map<I, F> as core::clone::Clone>::clone #[derive(Clone)]
<core::iter::adapters::zip::Zip<A, B> as core::clone::Clone>::clone #[derive(Clone)]
<core::mem::manually_drop::ManuallyDrop<T> as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone, PartialEq))]
<core::mem::manually_drop::ManuallyDrop<T> as core::cmp::PartialEq>::eq #[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone, PartialEq))]
<core::num::niche_types::Nanoseconds as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Clone, Copy))]
<core::ops::index_range::IndexRange as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive_const(Clone, PartialEq))]
<core::ops::index_range::IndexRange as core::cmp::PartialEq>::eq #[cfg_attr(feature = "ferrocene_certified", derive_const(Clone, PartialEq))]
<core::ops::try_trait::NeverShortCircuit<T> as core::ops::try_trait::FromResidual>::from_residual fn from_residual(never: NeverShortCircuitResidual) -> Self {
match never {}
}
<core::ptr::alignment::Alignment as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone))]
<core::ptr::alignment::AlignmentEnum as core::clone::Clone>::clone #[derive(Copy, Clone)]
<core::sync::atomic::Ordering as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone))]
<core::time::Duration as core::clone::Clone>::clone #[cfg_attr(feature = "ferrocene_certified", derive(Clone, Copy, PartialEq))]
<core::time::Duration as core::cmp::PartialEq>::eq #[cfg_attr(feature = "ferrocene_certified", derive(Clone, Copy, PartialEq))]
core::cmp::Eq::assert_receiver_is_total_eq fn assert_receiver_is_total_eq(&self) {}
core::cmp::impls::<impl core::cmp::Ord for !>::cmp fn cmp(&self, _: &!) -> Ordering {
*self
}
core::cmp::impls::<impl core::cmp::PartialEq for !>::eq fn eq(&self, _: &!) -> bool {
*self
}
core::cmp::impls::<impl core::cmp::PartialOrd for !>::partial_cmp fn partial_cmp(&self, _: &!) -> Option<Ordering> {
*self
}
core::hint::unreachable_unchecked pub const unsafe fn unreachable_unchecked() -> ! {
ub_checks::assert_unsafe_precondition!(
check_language_ub,
"hint::unreachable_unchecked must never be reached",
() => false
);
// SAFETY: the safety contract for `intrinsics::unreachable` must
// be upheld by the caller.
unsafe { intrinsics::unreachable() }
}
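unreachable_unchecked asserts, in unsafe code, that a code path can never be taken, letting the optimizer discard it; the precondition above turns a violated promise into a nounwind panic when UB checks are enabled. A hedged usage sketch with a hypothetical helper:

use std::hint::unreachable_unchecked;

// Hypothetical example: the caller promises `x != 0`, so the zero arm is
// marked unreachable and the optimizer may drop the branch entirely.
unsafe fn log2_nonzero(x: u32) -> u32 {
    match x {
        // SAFETY: the caller guarantees x is non-zero.
        0 => unsafe { unreachable_unchecked() },
        n => 31 - n.leading_zeros(),
    }
}

fn main() {
    // SAFETY: 8 is non-zero.
    assert_eq!(unsafe { log2_nonzero(8) }, 3);
}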
core::panic::panic_info::PanicInfo::<'a>::new pub(crate) fn new(message: &'a PanicFmt<'a>) -> Self {
PanicInfo { message }
}
core::panicking::panic_fmt::panic_impl fn panic_impl(pi: &PanicInfo<'_>) -> !;
core::panicking::panic_nounwind_fmt::runtime::panic_impl fn panic_impl(pi: &PanicInfo<'_>) -> !;
core::ptr::drop_in_place pub const unsafe fn drop_in_place<T: PointeeSized>(to_drop: *mut T)
where
T: [const] Destruct,
{
// Code here does not matter - this is replaced by the
// real drop glue by the compiler.
// SAFETY: see comment above
unsafe { drop_in_place(to_drop) }
}
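drop_in_place runs a value's destructor through a raw pointer without moving it; as the comment notes, the body above is replaced by real drop glue by the compiler. A minimal usage sketch through the stable public API:

use std::mem::ManuallyDrop;
use std::ptr;

fn main() {
    let mut slot = ManuallyDrop::new(vec![1, 2, 3]);
    // SAFETY: the Vec inside `slot` is valid, and ManuallyDrop guarantees it
    // will not be dropped a second time afterwards.
    unsafe { ptr::drop_in_place::<Vec<i32>>(&mut *slot) };
}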