Skip to main content

core/num/
f16.rs

1//! Constants for the `f16` half-precision floating point type.
2//!
3//! *[See also the `f16` primitive type][f16].*
4//!
5//! Mathematically significant numbers are provided in the `consts` sub-module.
6//!
7//! For the constants defined directly in this module
8//! (as distinct from those defined in the `consts` sub-module),
9//! new code should instead use the associated constants
10//! defined directly on the `f16` type.
11
12#![unstable(feature = "f16", issue = "116909")]
13
14use crate::convert::FloatToInt;
15use crate::num::FpCategory;
16#[cfg(not(test))]
17use crate::num::imp::libm;
18use crate::panic::const_assert;
19use crate::{intrinsics, mem};
20
/// Basic mathematical constants.
#[unstable(feature = "f16", issue = "116909")]
#[rustc_diagnostic_item = "f16_consts_mod"]
pub mod consts {
    // FIXME: replace with mathematical constants from cmath.

    /// Archimedes' constant (π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const PI: f16 = 3.14159265358979323846264338327950288_f16;

    /// The full circle constant (τ)
    ///
    /// Equal to 2π.
    #[unstable(feature = "f16", issue = "116909")]
    pub const TAU: f16 = 6.28318530717958647692528676655900577_f16;

    /// The golden ratio (φ)
    ///
    /// Equal to (1&nbsp;+&nbsp;√5)/2.
    #[unstable(feature = "f16", issue = "116909")]
    pub const GOLDEN_RATIO: f16 = 1.618033988749894848204586834365638118_f16;

    /// The Euler-Mascheroni constant (γ)
    ///
    /// Equal to the limit of (1 + 1/2 + … + 1/n) − ln(n) as n → ∞.
    #[unstable(feature = "f16", issue = "116909")]
    pub const EULER_GAMMA: f16 = 0.577215664901532860606512090082402431_f16;

    /// π/2
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_2: f16 = 1.57079632679489661923132169163975144_f16;

    /// π/3
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_3: f16 = 1.04719755119659774615421446109316763_f16;

    /// π/4
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_4: f16 = 0.785398163397448309615660845819875721_f16;

    /// π/6
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_6: f16 = 0.52359877559829887307710723054658381_f16;

    /// π/8
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_8: f16 = 0.39269908169872415480783042290993786_f16;

    /// 1/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_PI: f16 = 0.318309886183790671537767526745028724_f16;

    /// 1/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_PI: f16 = 0.564189583547756286948079451560772586_f16;

    /// 1/sqrt(2π)
    #[doc(alias = "FRAC_1_SQRT_TAU")]
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_2PI: f16 = 0.398942280401432677939946059934381868_f16;

    /// 2/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_PI: f16 = 0.636619772367581343075535053490057448_f16;

    /// 2/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_SQRT_PI: f16 = 1.12837916709551257389615890312154517_f16;

    /// sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const SQRT_2: f16 = 1.41421356237309504880168872420969808_f16;

    /// 1/sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_SQRT_2: f16 = 0.707106781186547524400844362104849039_f16;

    /// sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const SQRT_3: f16 = 1.732050807568877293527446341505872367_f16;

    /// 1/sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_3: f16 = 0.577350269189625764509148780501957456_f16;

    /// sqrt(5)
    #[unstable(feature = "more_float_constants", issue = "146939")]
    // Also, #[unstable(feature = "f16", issue = "116909")]
    pub const SQRT_5: f16 = 2.23606797749978969640917366873127623_f16;

    /// 1/sqrt(5)
    #[unstable(feature = "more_float_constants", issue = "146939")]
    // Also, #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_SQRT_5: f16 = 0.44721359549995793928183473374625524_f16;

    /// Euler's number (e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const E: f16 = 2.71828182845904523536028747135266250_f16;

    /// log<sub>2</sub>(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_10: f16 = 3.32192809488736234787031942948939018_f16;

    /// log<sub>2</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_E: f16 = 1.44269504088896340735992468100189214_f16;

    /// log<sub>10</sub>(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_2: f16 = 0.301029995663981195213738894724493027_f16;

    /// log<sub>10</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_E: f16 = 0.434294481903251827651128918916605082_f16;

    /// ln(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_2: f16 = 0.693147180559945309417232121458176568_f16;

    /// ln(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_10: f16 = 2.30258509299404568401799145468436421_f16;
}
144
145#[doc(test(attr(
146    feature(cfg_target_has_reliable_f16_f128),
147    allow(internal_features, unused_features)
148)))]
149impl f16 {
    /// The radix or base of the internal representation of `f16`.
    ///
    /// `f16` is a binary (base-2) floating point format.
    #[unstable(feature = "f16", issue = "116909")]
    pub const RADIX: u32 = 2;

    /// The size of this float type in bits.
    // Gated under `float_bits_const` rather than `f16`:
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_bits_const", issue = "151073")]
    pub const BITS: u32 = 16;

    /// Number of significant digits in base 2.
    ///
    /// Note that the size of the mantissa in the bitwise representation is one
    /// smaller than this since the leading 1 is not stored explicitly.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MANTISSA_DIGITS: u32 = 11;

    /// Approximate number of significant digits in base 10.
    ///
    /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
    /// significant digits can be converted to `f16` and back without loss.
    ///
    /// Equal to floor(log<sub>10</sub>&nbsp;2<sup>[`MANTISSA_DIGITS`]&nbsp;&minus;&nbsp;1</sup>).
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable(feature = "f16", issue = "116909")]
    pub const DIGITS: u32 = 3;

    /// [Machine epsilon] value for `f16`.
    ///
    /// This is the difference between `1.0` and the next larger representable number.
    ///
    /// Equal to 2<sup>1&nbsp;&minus;&nbsp;[`MANTISSA_DIGITS`]</sup>.
    ///
    /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_diagnostic_item = "f16_epsilon"]
    pub const EPSILON: f16 = 9.7656e-4_f16;

    /// Smallest finite `f16` value.
    ///
    /// Equal to &minus;[`MAX`].
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN: f16 = -6.5504e+4_f16;
    /// Smallest positive normal `f16` value.
    ///
    /// Equal to 2<sup>[`MIN_EXP`]&nbsp;&minus;&nbsp;1</sup>.
    ///
    /// [`MIN_EXP`]: f16::MIN_EXP
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_POSITIVE: f16 = 6.1035e-5_f16;
    /// Largest finite `f16` value.
    ///
    /// Equal to
    /// (1&nbsp;&minus;&nbsp;2<sup>&minus;[`MANTISSA_DIGITS`]</sup>)&nbsp;2<sup>[`MAX_EXP`]</sup>.
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    /// [`MAX_EXP`]: f16::MAX_EXP
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX: f16 = 6.5504e+4_f16;

    /// One greater than the minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all normal numbers representable by this type are
    /// greater than or equal to 0.5&nbsp;×&nbsp;2<sup><i>MIN_EXP</i></sup>.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_EXP: i32 = -13;
    /// One greater than the maximum possible power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact maximum possible power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all numbers representable by this type are
    /// strictly less than 2<sup><i>MAX_EXP</i></sup>.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_EXP: i32 = 16;

    /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to ceil(log<sub>10</sub>&nbsp;[`MIN_POSITIVE`]).
    ///
    /// [`MIN_POSITIVE`]: f16::MIN_POSITIVE
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_10_EXP: i32 = -4;
    /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to floor(log<sub>10</sub>&nbsp;[`MAX`]).
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_10_EXP: i32 = 4;
246
    /// Not a Number (NaN).
    ///
    /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are
    /// considered to be NaN. Furthermore, the standard makes a difference between a "signaling" and
    /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern)
    /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more
    /// info.
    ///
    /// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumptions
    /// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
    /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary.
    /// The concrete bit pattern may change across Rust versions and target platforms.
    #[allow(clippy::eq_op)]
    #[rustc_diagnostic_item = "f16_nan"]
    #[unstable(feature = "f16", issue = "116909")]
    // `0.0 / 0.0` is evaluated at compile time and produces a quiet NaN.
    pub const NAN: f16 = 0.0_f16 / 0.0_f16;

    /// Infinity (∞).
    #[unstable(feature = "f16", issue = "116909")]
    // A finite nonzero value divided by zero is the IEEE way to spell infinity in a const.
    pub const INFINITY: f16 = 1.0_f16 / 0.0_f16;

    /// Negative infinity (−∞).
    #[unstable(feature = "f16", issue = "116909")]
    pub const NEG_INFINITY: f16 = -1.0_f16 / 0.0_f16;
271
    /// Maximum integer that can be represented exactly in an [`f16`] value,
    /// with no other integer converting to the same floating point value.
    ///
    /// For an integer `x` which satisfies `MIN_EXACT_INTEGER <= x <= MAX_EXACT_INTEGER`,
    /// there is a "one-to-one" mapping between [`i16`] and [`f16`] values.
    /// `MAX_EXACT_INTEGER + 1` also converts losslessly to [`f16`] and back to
    /// [`i16`], but `MAX_EXACT_INTEGER + 2` converts to the same [`f16`] value
    /// (and back to `MAX_EXACT_INTEGER + 1` as an integer) so there is not a
    /// "one-to-one" mapping.
    ///
    /// [`MAX_EXACT_INTEGER`]: f16::MAX_EXACT_INTEGER
    /// [`MIN_EXACT_INTEGER`]: f16::MIN_EXACT_INTEGER
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_exact_integer_constants)]
    /// # // FIXME(#152635): Float rounding on `i586` does not adhere to IEEE 754
    /// # #[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))] {
    /// # #[cfg(target_has_reliable_f16)] {
    /// let max_exact_int = f16::MAX_EXACT_INTEGER;
    /// assert_eq!(max_exact_int, max_exact_int as f16 as i16);
    /// assert_eq!(max_exact_int + 1, (max_exact_int + 1) as f16 as i16);
    /// assert_ne!(max_exact_int + 2, (max_exact_int + 2) as f16 as i16);
    ///
    /// // Beyond `f16::MAX_EXACT_INTEGER`, multiple integers can map to one float value
    /// assert_eq!((max_exact_int + 1) as f16, (max_exact_int + 2) as f16);
    /// # }}
    /// ```
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_exact_integer_constants", issue = "152466")]
    pub const MAX_EXACT_INTEGER: i16 = (1 << Self::MANTISSA_DIGITS) - 1;

    /// Minimum integer that can be represented exactly in an [`f16`] value,
    /// with no other integer converting to the same floating point value.
    ///
    /// For an integer `x` which satisfies `MIN_EXACT_INTEGER <= x <= MAX_EXACT_INTEGER`,
    /// there is a "one-to-one" mapping between [`i16`] and [`f16`] values.
    /// `MIN_EXACT_INTEGER - 1` also converts losslessly to [`f16`] and back to
    /// [`i16`], but `MIN_EXACT_INTEGER - 2` converts to the same [`f16`] value
    /// (and back to `MIN_EXACT_INTEGER - 1` as an integer) so there is not a
    /// "one-to-one" mapping.
    ///
    /// This constant is equivalent to `-MAX_EXACT_INTEGER`.
    ///
    /// [`MAX_EXACT_INTEGER`]: f16::MAX_EXACT_INTEGER
    /// [`MIN_EXACT_INTEGER`]: f16::MIN_EXACT_INTEGER
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_exact_integer_constants)]
    /// # // FIXME(#152635): Float rounding on `i586` does not adhere to IEEE 754
    /// # #[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))] {
    /// # #[cfg(target_has_reliable_f16)] {
    /// let min_exact_int = f16::MIN_EXACT_INTEGER;
    /// assert_eq!(min_exact_int, min_exact_int as f16 as i16);
    /// assert_eq!(min_exact_int - 1, (min_exact_int - 1) as f16 as i16);
    /// assert_ne!(min_exact_int - 2, (min_exact_int - 2) as f16 as i16);
    ///
    /// // Below `f16::MIN_EXACT_INTEGER`, multiple integers can map to one float value
    /// assert_eq!((min_exact_int - 1) as f16, (min_exact_int - 2) as f16);
    /// # }}
    /// ```
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_exact_integer_constants", issue = "152466")]
    pub const MIN_EXACT_INTEGER: i16 = -Self::MAX_EXACT_INTEGER;
335
    /// The mask of the bit used to encode the sign of an [`f16`].
    ///
    /// This bit is set when the sign is negative and unset when the sign is
    /// positive.
    /// If you only need to check whether a value is positive or negative,
    /// [`is_sign_positive`] or [`is_sign_negative`] can be used.
    ///
    /// [`is_sign_positive`]: f16::is_sign_positive
    /// [`is_sign_negative`]: f16::is_sign_negative
    /// ```rust
    /// #![feature(float_masks)]
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    /// let sign_mask = f16::SIGN_MASK;
    /// let a = 1.6552f16;
    /// let a_bits = a.to_bits();
    ///
    /// assert_eq!(a_bits & sign_mask, 0x0);
    /// assert_eq!(f16::from_bits(a_bits ^ sign_mask), -a);
    /// assert_eq!(sign_mask, (-0.0f16).to_bits());
    /// # }
    /// ```
    #[unstable(feature = "float_masks", issue = "154064")]
    pub const SIGN_MASK: u16 = 0x8000;

    /// The mask of the bits used to encode the exponent of an [`f16`].
    ///
    /// Note that the exponent is stored as a biased value, with a bias of 15 for `f16`.
    ///
    /// ```rust
    /// #![feature(float_masks)]
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    /// let exponent_mask = f16::EXPONENT_MASK;
    ///
    /// fn get_exp(a: f16) -> i16 {
    ///     let bias = 15;
    ///     let biased = a.to_bits() & f16::EXPONENT_MASK;
    ///     (biased >> (f16::MANTISSA_DIGITS - 1)).cast_signed() - bias
    /// }
    ///
    /// assert_eq!(get_exp(0.5), -1);
    /// assert_eq!(get_exp(1.0), 0);
    /// assert_eq!(get_exp(2.0), 1);
    /// assert_eq!(get_exp(4.0), 2);
    /// # }
    /// ```
    #[unstable(feature = "float_masks", issue = "154064")]
    pub const EXPONENT_MASK: u16 = 0x7c00;

    /// The mask of the bits used to encode the mantissa of an [`f16`].
    ///
    /// ```rust
    /// #![feature(float_masks)]
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    /// let mantissa_mask = f16::MANTISSA_MASK;
    ///
    /// assert_eq!(0f16.to_bits() & mantissa_mask, 0x0);
    /// assert_eq!(1f16.to_bits() & mantissa_mask, 0x0);
    ///
    /// // multiplying a finite value by a power of 2 doesn't change its mantissa
    /// // unless the result or initial value is not normal.
    /// let a = 1.6552f16;
    /// let b = 4.0 * a;
    /// assert_eq!(a.to_bits() & mantissa_mask, b.to_bits() & mantissa_mask);
    ///
    /// // The maximum and minimum values have a saturated significand
    /// assert_eq!(f16::MAX.to_bits() & f16::MANTISSA_MASK, f16::MANTISSA_MASK);
    /// assert_eq!(f16::MIN.to_bits() & f16::MANTISSA_MASK, f16::MANTISSA_MASK);
    /// # }
    /// ```
    #[unstable(feature = "float_masks", issue = "154064")]
    pub const MANTISSA_MASK: u16 = 0x03ff;

    /// Minimum representable positive value (min subnormal): the bit pattern
    /// with only the lowest mantissa bit set.
    const TINY_BITS: u16 = 0x1;

    /// Minimum representable negative value (min negative subnormal):
    /// [`TINY_BITS`](Self::TINY_BITS) with the sign bit set.
    const NEG_TINY_BITS: u16 = Self::TINY_BITS | Self::SIGN_MASK;
416
417    /// Returns `true` if this value is NaN.
418    ///
419    /// ```
420    /// #![feature(f16)]
421    /// # #[cfg(target_has_reliable_f16)] {
422    ///
423    /// let nan = f16::NAN;
424    /// let f = 7.0_f16;
425    ///
426    /// assert!(nan.is_nan());
427    /// assert!(!f.is_nan());
428    /// # }
429    /// ```
430    #[inline]
431    #[must_use]
432    #[unstable(feature = "f16", issue = "116909")]
433    #[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
434    pub const fn is_nan(self) -> bool {
435        self != self
436    }
437
438    /// Returns `true` if this value is positive infinity or negative infinity, and
439    /// `false` otherwise.
440    ///
441    /// ```
442    /// #![feature(f16)]
443    /// # #[cfg(target_has_reliable_f16)] {
444    ///
445    /// let f = 7.0f16;
446    /// let inf = f16::INFINITY;
447    /// let neg_inf = f16::NEG_INFINITY;
448    /// let nan = f16::NAN;
449    ///
450    /// assert!(!f.is_infinite());
451    /// assert!(!nan.is_infinite());
452    ///
453    /// assert!(inf.is_infinite());
454    /// assert!(neg_inf.is_infinite());
455    /// # }
456    /// ```
457    #[inline]
458    #[must_use]
459    #[unstable(feature = "f16", issue = "116909")]
460    pub const fn is_infinite(self) -> bool {
461        (self == f16::INFINITY) | (self == f16::NEG_INFINITY)
462    }
463
464    /// Returns `true` if this number is neither infinite nor NaN.
465    ///
466    /// ```
467    /// #![feature(f16)]
468    /// # #[cfg(target_has_reliable_f16)] {
469    ///
470    /// let f = 7.0f16;
471    /// let inf: f16 = f16::INFINITY;
472    /// let neg_inf: f16 = f16::NEG_INFINITY;
473    /// let nan: f16 = f16::NAN;
474    ///
475    /// assert!(f.is_finite());
476    ///
477    /// assert!(!nan.is_finite());
478    /// assert!(!inf.is_finite());
479    /// assert!(!neg_inf.is_finite());
480    /// # }
481    /// ```
482    #[inline]
483    #[must_use]
484    #[unstable(feature = "f16", issue = "116909")]
485    #[rustc_const_unstable(feature = "f16", issue = "116909")]
486    pub const fn is_finite(self) -> bool {
487        // There's no need to handle NaN separately: if self is NaN,
488        // the comparison is not true, exactly as desired.
489        self.abs() < Self::INFINITY
490    }
491
492    /// Returns `true` if the number is [subnormal].
493    ///
494    /// ```
495    /// #![feature(f16)]
496    /// # #[cfg(target_has_reliable_f16)] {
497    ///
498    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
499    /// let max = f16::MAX;
500    /// let lower_than_min = 1.0e-7_f16;
501    /// let zero = 0.0_f16;
502    ///
503    /// assert!(!min.is_subnormal());
504    /// assert!(!max.is_subnormal());
505    ///
506    /// assert!(!zero.is_subnormal());
507    /// assert!(!f16::NAN.is_subnormal());
508    /// assert!(!f16::INFINITY.is_subnormal());
509    /// // Values between `0` and `min` are Subnormal.
510    /// assert!(lower_than_min.is_subnormal());
511    /// # }
512    /// ```
513    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
514    #[inline]
515    #[must_use]
516    #[unstable(feature = "f16", issue = "116909")]
517    pub const fn is_subnormal(self) -> bool {
518        matches!(self.classify(), FpCategory::Subnormal)
519    }
520
521    /// Returns `true` if the number is neither zero, infinite, [subnormal], or NaN.
522    ///
523    /// ```
524    /// #![feature(f16)]
525    /// # #[cfg(target_has_reliable_f16)] {
526    ///
527    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
528    /// let max = f16::MAX;
529    /// let lower_than_min = 1.0e-7_f16;
530    /// let zero = 0.0_f16;
531    ///
532    /// assert!(min.is_normal());
533    /// assert!(max.is_normal());
534    ///
535    /// assert!(!zero.is_normal());
536    /// assert!(!f16::NAN.is_normal());
537    /// assert!(!f16::INFINITY.is_normal());
538    /// // Values between `0` and `min` are Subnormal.
539    /// assert!(!lower_than_min.is_normal());
540    /// # }
541    /// ```
542    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
543    #[inline]
544    #[must_use]
545    #[unstable(feature = "f16", issue = "116909")]
546    pub const fn is_normal(self) -> bool {
547        matches!(self.classify(), FpCategory::Normal)
548    }
549
    /// Returns the floating point category of the number. If only one property
    /// is going to be tested, it is generally faster to use the specific
    /// predicate instead.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// use std::num::FpCategory;
    ///
    /// let num = 12.4_f16;
    /// let inf = f16::INFINITY;
    ///
    /// assert_eq!(num.classify(), FpCategory::Normal);
    /// assert_eq!(inf.classify(), FpCategory::Infinite);
    /// # }
    /// ```
    #[ferrocene::prevalidated]
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use]
    pub const fn classify(self) -> FpCategory {
        // The category is fully determined by the (mantissa, exponent) bit fields.
        let b = self.to_bits();
        match (b & Self::MANTISSA_MASK, b & Self::EXPONENT_MASK) {
            // All-ones exponent with zero mantissa: ±infinity.
            (0, Self::EXPONENT_MASK) => FpCategory::Infinite,
            // All-ones exponent with nonzero mantissa: NaN (quiet or signaling).
            (_, Self::EXPONENT_MASK) => FpCategory::Nan,
            // Zero exponent with zero mantissa: ±0.0.
            (0, 0) => FpCategory::Zero,
            // Zero exponent with nonzero mantissa: subnormal.
            (_, 0) => FpCategory::Subnormal,
            _ => FpCategory::Normal,
        }
    }
581
582    /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
583    /// positive sign bit and positive infinity.
584    ///
585    /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
586    /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are
587    /// conserved over arithmetic operations, the result of `is_sign_positive` on
588    /// a NaN might produce an unexpected or non-portable result. See the [specification
589    /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0`
590    /// if you need fully portable behavior (will return `false` for all NaNs).
591    ///
592    /// ```
593    /// #![feature(f16)]
594    /// # #[cfg(target_has_reliable_f16)] {
595    ///
596    /// let f = 7.0_f16;
597    /// let g = -7.0_f16;
598    ///
599    /// assert!(f.is_sign_positive());
600    /// assert!(!g.is_sign_positive());
601    /// # }
602    /// ```
603    #[inline]
604    #[must_use]
605    #[unstable(feature = "f16", issue = "116909")]
606    pub const fn is_sign_positive(self) -> bool {
607        !self.is_sign_negative()
608    }
609
610    /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
611    /// negative sign bit and negative infinity.
612    ///
613    /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
614    /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are
615    /// conserved over arithmetic operations, the result of `is_sign_negative` on
616    /// a NaN might produce an unexpected or non-portable result. See the [specification
617    /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0`
618    /// if you need fully portable behavior (will return `false` for all NaNs).
619    ///
620    /// ```
621    /// #![feature(f16)]
622    /// # #[cfg(target_has_reliable_f16)] {
623    ///
624    /// let f = 7.0_f16;
625    /// let g = -7.0_f16;
626    ///
627    /// assert!(!f.is_sign_negative());
628    /// assert!(g.is_sign_negative());
629    /// # }
630    /// ```
631    #[inline]
632    #[must_use]
633    #[unstable(feature = "f16", issue = "116909")]
634    pub const fn is_sign_negative(self) -> bool {
635        // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
636        // applies to zeros and NaNs as well.
637        // SAFETY: This is just transmuting to get the sign bit, it's fine.
638        (self.to_bits() & (1 << 15)) != 0
639    }
640
641    /// Returns the least number greater than `self`.
642    ///
643    /// Let `TINY` be the smallest representable positive `f16`. Then,
644    ///  - if `self.is_nan()`, this returns `self`;
645    ///  - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
646    ///  - if `self` is `-TINY`, this returns -0.0;
647    ///  - if `self` is -0.0 or +0.0, this returns `TINY`;
648    ///  - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
649    ///  - otherwise the unique least value greater than `self` is returned.
650    ///
651    /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
652    /// is finite `x == x.next_up().next_down()` also holds.
653    ///
654    /// ```rust
655    /// #![feature(f16)]
656    /// # #[cfg(target_has_reliable_f16)] {
657    ///
658    /// // f16::EPSILON is the difference between 1.0 and the next number up.
659    /// assert_eq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
660    /// // But not for most numbers.
661    /// assert!(0.1f16.next_up() < 0.1 + f16::EPSILON);
662    /// assert_eq!(4356f16.next_up(), 4360.0);
663    /// # }
664    /// ```
665    ///
666    /// This operation corresponds to IEEE-754 `nextUp`.
667    ///
668    /// [`NEG_INFINITY`]: Self::NEG_INFINITY
669    /// [`INFINITY`]: Self::INFINITY
670    /// [`MIN`]: Self::MIN
671    /// [`MAX`]: Self::MAX
672    #[inline]
673    #[doc(alias = "nextUp")]
674    #[unstable(feature = "f16", issue = "116909")]
675    #[must_use = "method returns a new number and does not mutate the original value"]
676    pub const fn next_up(self) -> Self {
677        // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
678        // denormals to zero. This is in general unsound and unsupported, but here
679        // we do our best to still produce the correct result on such targets.
680        let bits = self.to_bits();
681        if self.is_nan() || bits == Self::INFINITY.to_bits() {
682            return self;
683        }
684
685        let abs = bits & !Self::SIGN_MASK;
686        let next_bits = if abs == 0 {
687            Self::TINY_BITS
688        } else if bits == abs {
689            bits + 1
690        } else {
691            bits - 1
692        };
693        Self::from_bits(next_bits)
694    }
695
696    /// Returns the greatest number less than `self`.
697    ///
698    /// Let `TINY` be the smallest representable positive `f16`. Then,
699    ///  - if `self.is_nan()`, this returns `self`;
700    ///  - if `self` is [`INFINITY`], this returns [`MAX`];
701    ///  - if `self` is `TINY`, this returns 0.0;
702    ///  - if `self` is -0.0 or +0.0, this returns `-TINY`;
703    ///  - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
704    ///  - otherwise the unique greatest value less than `self` is returned.
705    ///
706    /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
707    /// is finite `x == x.next_down().next_up()` also holds.
708    ///
709    /// ```rust
710    /// #![feature(f16)]
711    /// # #[cfg(target_has_reliable_f16)] {
712    ///
713    /// let x = 1.0f16;
714    /// // Clamp value into range [0, 1).
715    /// let clamped = x.clamp(0.0, 1.0f16.next_down());
716    /// assert!(clamped < 1.0);
717    /// assert_eq!(clamped.next_up(), 1.0);
718    /// # }
719    /// ```
720    ///
721    /// This operation corresponds to IEEE-754 `nextDown`.
722    ///
723    /// [`NEG_INFINITY`]: Self::NEG_INFINITY
724    /// [`INFINITY`]: Self::INFINITY
725    /// [`MIN`]: Self::MIN
726    /// [`MAX`]: Self::MAX
727    #[inline]
728    #[doc(alias = "nextDown")]
729    #[unstable(feature = "f16", issue = "116909")]
730    #[must_use = "method returns a new number and does not mutate the original value"]
731    pub const fn next_down(self) -> Self {
732        // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
733        // denormals to zero. This is in general unsound and unsupported, but here
734        // we do our best to still produce the correct result on such targets.
735        let bits = self.to_bits();
736        if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
737            return self;
738        }
739
740        let abs = bits & !Self::SIGN_MASK;
741        let next_bits = if abs == 0 {
742            Self::NEG_TINY_BITS
743        } else if bits == abs {
744            bits - 1
745        } else {
746            bits + 1
747        };
748        Self::from_bits(next_bits)
749    }
750
751    /// Takes the reciprocal (inverse) of a number, `1/x`.
752    ///
753    /// ```
754    /// #![feature(f16)]
755    /// # #[cfg(target_has_reliable_f16)] {
756    ///
757    /// let x = 2.0_f16;
758    /// let abs_difference = (x.recip() - (1.0 / x)).abs();
759    ///
760    /// assert!(abs_difference <= f16::EPSILON);
761    /// # }
762    /// ```
763    #[inline]
764    #[unstable(feature = "f16", issue = "116909")]
765    #[must_use = "this returns the result of the operation, without modifying the original"]
766    pub const fn recip(self) -> Self {
767        1.0 / self
768    }
769
    /// Converts radians to degrees.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let angle = std::f16::consts::PI;
    ///
    /// let abs_difference = (angle.to_degrees() - 180.0).abs();
    /// assert!(abs_difference <= 0.5);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_degrees(self) -> Self {
        // Use a literal to avoid double rounding, consts::PI is already rounded,
        // and dividing would round again. The literal spells out 180/π to more
        // digits than `f16` can represent, so it rounds exactly once.
        const PIS_IN_180: f16 = 57.2957795130823208767981548141051703_f16;
        self * PIS_IN_180
    }
798
    /// Converts degrees to radians.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let angle = 180.0f16;
    ///
    /// let abs_difference = (angle.to_radians() - std::f16::consts::PI).abs();
    ///
    /// assert!(abs_difference <= 0.01);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_radians(self) -> f16 {
        // Use a literal to avoid double rounding, consts::PI is already rounded,
        // and dividing would round again. The literal spells out π/180 to more
        // digits than `f16` can represent, so it rounds exactly once.
        const RADS_PER_DEG: f16 = 0.017453292519943295769236907684886_f16;
        self * RADS_PER_DEG
    }
828
    /// Returns the maximum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `maximumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `maxNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.max(y), y);
    /// assert_eq!(x.max(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn max(self, other: f16) -> f16 {
        // "nsz" = "no signed zeros": signed-zero ordering is left unspecified,
        // matching the non-deterministic behavior documented above.
        intrinsics::maximum_number_nsz_f16(self, other)
    }
859
    /// Returns the minimum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `minimumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `minNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.min(y), x);
    /// assert_eq!(x.min(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn min(self, other: f16) -> f16 {
        // "nsz" = "no signed zeros": signed-zero ordering is left unspecified,
        // matching the non-deterministic behavior documented above.
        intrinsics::minimum_number_nsz_f16(self, other)
    }
890
    /// Returns the maximum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::max`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `maximum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.maximum(y), y);
    /// assert!(x.maximum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn maximum(self, other: f16) -> f16 {
        // Unlike `max`, this intrinsic propagates NaN and orders -0.0 < +0.0.
        intrinsics::maximumf16(self, other)
    }
922
    /// Returns the minimum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::min`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `minimum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.minimum(y), x);
    /// assert!(x.minimum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn minimum(self, other: f16) -> f16 {
        // Unlike `min`, this intrinsic propagates NaN and orders -0.0 < +0.0.
        intrinsics::minimumf16(self, other)
    }
954
955    /// Calculates the midpoint (average) between `self` and `rhs`.
956    ///
957    /// This returns NaN when *either* argument is NaN or if a combination of
958    /// +inf and -inf is provided as arguments.
959    ///
960    /// # Examples
961    ///
962    /// ```
963    /// #![feature(f16)]
964    /// # #[cfg(target_has_reliable_f16)] {
965    ///
966    /// assert_eq!(1f16.midpoint(4.0), 2.5);
967    /// assert_eq!((-5.5f16).midpoint(8.0), 1.25);
968    /// # }
969    /// ```
970    #[inline]
971    #[doc(alias = "average")]
972    #[unstable(feature = "f16", issue = "116909")]
973    #[rustc_const_unstable(feature = "f16", issue = "116909")]
974    #[must_use = "this returns the result of the operation, \
975                  without modifying the original"]
976    pub const fn midpoint(self, other: f16) -> f16 {
977        const HI: f16 = f16::MAX / 2.;
978
979        let (a, b) = (self, other);
980        let abs_a = a.abs();
981        let abs_b = b.abs();
982
983        if abs_a <= HI && abs_b <= HI {
984            // Overflow is impossible
985            (a + b) / 2.
986        } else {
987            (a / 2.) + (b / 2.)
988        }
989    }
990
    /// Rounds toward zero and converts to any primitive integer type,
    /// assuming that the value is finite and fits in that type.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let value = 4.6_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
    /// assert_eq!(rounded, 4);
    ///
    /// let value = -128.9_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
    /// assert_eq!(rounded, i8::MIN);
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// The value must:
    ///
    /// * Not be `NaN`
    /// * Not be infinite
    /// * Be representable in the return type `Int`, after truncating off its fractional part
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub unsafe fn to_int_unchecked<Int>(self) -> Int
    where
        Self: FloatToInt<Int>,
    {
        // The `FloatToInt<Int>` bound selects the conversion for the concrete
        // target integer type; this method is just a convenient entry point.
        // SAFETY: the caller must uphold the safety contract for
        // `FloatToInt::to_int_unchecked`.
        unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
    }
1026
    /// Raw transmutation to `u16`.
    ///
    /// This is currently identical to `transmute::<f16, u16>(self)` on all platforms.
    ///
    /// See [`from_bits`](#method.from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// assert_ne!((1f16).to_bits(), 1f16 as u16); // to_bits() is not casting!
    /// assert_eq!((12.5f16).to_bits(), 0x4a40);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    #[allow(unnecessary_transmutes)]
    #[ferrocene::prevalidated]
    pub const fn to_bits(self) -> u16 {
        // Transmuting (rather than casting) preserves the exact bit pattern,
        // including any NaN payload bits.
        // SAFETY: `u16` is a plain old datatype so we can always transmute to it.
        unsafe { mem::transmute(self) }
    }
1054
    /// Raw transmutation from `u16`.
    ///
    /// This is currently identical to `transmute::<u16, f16>(v)` on all platforms.
    /// It turns out this is incredibly portable, for two reasons:
    ///
    /// * Floats and Ints have the same endianness on all supported platforms.
    /// * IEEE 754 very precisely specifies the bit layout of floats.
    ///
    /// However there is one caveat: prior to the 2008 version of IEEE 754, how
    /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
    /// (notably x86 and ARM) picked the interpretation that was ultimately
    /// standardized in 2008, but some didn't (notably MIPS). As a result, all
    /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
    ///
    /// Rather than trying to preserve signaling-ness cross-platform, this
    /// implementation favors preserving the exact bits. This means that
    /// any payloads encoded in NaNs will be preserved even if the result of
    /// this method is sent over the network from an x86 machine to a MIPS one.
    ///
    /// If the results of this method are only manipulated by the same
    /// architecture that produced them, then there is no portability concern.
    ///
    /// If the input isn't NaN, then there is no portability concern.
    ///
    /// If you don't care about signalingness (very likely), then there is no
    /// portability concern.
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let v = f16::from_bits(0x4a40);
    /// assert_eq!(v, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[allow(unnecessary_transmutes)]
    #[ferrocene::prevalidated]
    pub const fn from_bits(v: u16) -> Self {
        // Transmuting (rather than casting) reinterprets the exact bit pattern,
        // including any NaN payload bits.
        // It turns out the safety issues with sNaN were overblown! Hooray!
        // SAFETY: `u16` is a plain old datatype so we can always transmute from it.
        unsafe { mem::transmute(v) }
    }
1103
    /// Returns the memory representation of this floating point number as a byte array in
    /// big-endian (network) byte order.
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let bytes = 12.5f16.to_be_bytes();
    /// assert_eq!(bytes, [0x4a, 0x40]);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_be_bytes(self) -> [u8; 2] {
        // Same bits as `to_bits`, serialized most-significant byte first.
        self.to_bits().to_be_bytes()
    }
1126
    /// Returns the memory representation of this floating point number as a byte array in
    /// little-endian byte order.
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let bytes = 12.5f16.to_le_bytes();
    /// assert_eq!(bytes, [0x40, 0x4a]);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_le_bytes(self) -> [u8; 2] {
        // Same bits as `to_bits`, serialized least-significant byte first.
        self.to_bits().to_le_bytes()
    }
1149
    /// Returns the memory representation of this floating point number as a byte array in
    /// native byte order.
    ///
    /// As the target platform's native endianness is used, portable code
    /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
    ///
    /// [`to_be_bytes`]: f16::to_be_bytes
    /// [`to_le_bytes`]: f16::to_le_bytes
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let bytes = 12.5f16.to_ne_bytes();
    /// assert_eq!(
    ///     bytes,
    ///     if cfg!(target_endian = "big") {
    ///         [0x4a, 0x40]
    ///     } else {
    ///         [0x40, 0x4a]
    ///     }
    /// );
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_ne_bytes(self) -> [u8; 2] {
        // Same bits as `to_bits`, serialized in the platform's native byte order.
        self.to_bits().to_ne_bytes()
    }
1185
    /// Creates a floating point value from its representation as a byte array in big endian.
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let value = f16::from_be_bytes([0x4a, 0x40]);
    /// assert_eq!(value, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn from_be_bytes(bytes: [u8; 2]) -> Self {
        // Reassemble the `u16` bit pattern, then reinterpret it as `f16`.
        Self::from_bits(u16::from_be_bytes(bytes))
    }
1207
    /// Creates a floating point value from its representation as a byte array in little endian.
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let value = f16::from_le_bytes([0x40, 0x4a]);
    /// assert_eq!(value, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn from_le_bytes(bytes: [u8; 2]) -> Self {
        // Reassemble the `u16` bit pattern, then reinterpret it as `f16`.
        Self::from_bits(u16::from_le_bytes(bytes))
    }
1229
    /// Creates a floating point value from its representation as a byte array in native endian.
    ///
    /// As the target platform's native endianness is used, portable code
    /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
    /// appropriate instead.
    ///
    /// [`from_be_bytes`]: f16::from_be_bytes
    /// [`from_le_bytes`]: f16::from_le_bytes
    ///
    /// See [`from_bits`](Self::from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let value = f16::from_ne_bytes(if cfg!(target_endian = "big") {
    ///     [0x4a, 0x40]
    /// } else {
    ///     [0x40, 0x4a]
    /// });
    /// assert_eq!(value, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn from_ne_bytes(bytes: [u8; 2]) -> Self {
        // Reassemble the `u16` bit pattern, then reinterpret it as `f16`.
        Self::from_bits(u16::from_ne_bytes(bytes))
    }
1262
1263    /// Returns the ordering between `self` and `other`.
1264    ///
1265    /// Unlike the standard partial comparison between floating point numbers,
1266    /// this comparison always produces an ordering in accordance to
1267    /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
1268    /// floating point standard. The values are ordered in the following sequence:
1269    ///
1270    /// - negative quiet NaN
1271    /// - negative signaling NaN
1272    /// - negative infinity
1273    /// - negative numbers
1274    /// - negative subnormal numbers
1275    /// - negative zero
1276    /// - positive zero
1277    /// - positive subnormal numbers
1278    /// - positive numbers
1279    /// - positive infinity
1280    /// - positive signaling NaN
1281    /// - positive quiet NaN.
1282    ///
1283    /// The ordering established by this function does not always agree with the
1284    /// [`PartialOrd`] and [`PartialEq`] implementations of `f16`. For example,
1285    /// they consider negative and positive zero equal, while `total_cmp`
1286    /// doesn't.
1287    ///
1288    /// The interpretation of the signaling NaN bit follows the definition in
1289    /// the IEEE 754 standard, which may not match the interpretation by some of
1290    /// the older, non-conformant (e.g. MIPS) hardware implementations.
1291    ///
1292    /// # Example
1293    ///
1294    /// ```
1295    /// #![feature(f16)]
1296    /// # #[cfg(target_has_reliable_f16)] {
1297    ///
1298    /// struct GoodBoy {
1299    ///     name: &'static str,
1300    ///     weight: f16,
1301    /// }
1302    ///
1303    /// let mut bois = vec![
1304    ///     GoodBoy { name: "Pucci", weight: 0.1 },
1305    ///     GoodBoy { name: "Woofer", weight: 99.0 },
1306    ///     GoodBoy { name: "Yapper", weight: 10.0 },
1307    ///     GoodBoy { name: "Chonk", weight: f16::INFINITY },
1308    ///     GoodBoy { name: "Abs. Unit", weight: f16::NAN },
1309    ///     GoodBoy { name: "Floaty", weight: -5.0 },
1310    /// ];
1311    ///
1312    /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
1313    ///
1314    /// // `f16::NAN` could be positive or negative, which will affect the sort order.
1315    /// if f16::NAN.is_sign_negative() {
1316    ///     bois.into_iter().map(|b| b.weight)
1317    ///         .zip([f16::NAN, -5.0, 0.1, 10.0, 99.0, f16::INFINITY].iter())
1318    ///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1319    /// } else {
1320    ///     bois.into_iter().map(|b| b.weight)
1321    ///         .zip([-5.0, 0.1, 10.0, 99.0, f16::INFINITY, f16::NAN].iter())
1322    ///         .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1323    /// }
1324    /// # }
1325    /// ```
1326    #[inline]
1327    #[must_use]
1328    #[unstable(feature = "f16", issue = "116909")]
1329    #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
1330    pub const fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
1331        let mut left = self.to_bits() as i16;
1332        let mut right = other.to_bits() as i16;
1333
1334        // In case of negatives, flip all the bits except the sign
1335        // to achieve a similar layout as two's complement integers
1336        //
1337        // Why does this work? IEEE 754 floats consist of three fields:
1338        // Sign bit, exponent and mantissa. The set of exponent and mantissa
1339        // fields as a whole have the property that their bitwise order is
1340        // equal to the numeric magnitude where the magnitude is defined.
1341        // The magnitude is not normally defined on NaN values, but
1342        // IEEE 754 totalOrder defines the NaN values also to follow the
1343        // bitwise order. This leads to order explained in the doc comment.
1344        // However, the representation of magnitude is the same for negative
1345        // and positive numbers – only the sign bit is different.
1346        // To easily compare the floats as signed integers, we need to
1347        // flip the exponent and mantissa bits in case of negative numbers.
1348        // We effectively convert the numbers to "two's complement" form.
1349        //
1350        // To do the flipping, we construct a mask and XOR against it.
1351        // We branchlessly calculate an "all-ones except for the sign bit"
1352        // mask from negative-signed values: right shifting sign-extends
1353        // the integer, so we "fill" the mask with sign bits, and then
1354        // convert to unsigned to push one more zero bit.
1355        // On positive values, the mask is all zeros, so it's a no-op.
1356        left ^= (((left >> 15) as u16) >> 1) as i16;
1357        right ^= (((right >> 15) as u16) >> 1) as i16;
1358
1359        left.cmp(&right)
1360    }
1361
1362    /// Restrict a value to a certain interval unless it is NaN.
1363    ///
1364    /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
1365    /// less than `min`. Otherwise this returns `self`.
1366    ///
1367    /// Note that this function returns NaN if the initial value was NaN as
1368    /// well. If the result is zero and among the three inputs `self`, `min`, and `max` there are
1369    /// zeros with different sign, either `0.0` or `-0.0` is returned non-deterministically.
1370    ///
1371    /// # Panics
1372    ///
1373    /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
1374    ///
1375    /// # Examples
1376    ///
1377    /// ```
1378    /// #![feature(f16)]
1379    /// # #[cfg(target_has_reliable_f16)] {
1380    ///
1381    /// assert!((-3.0f16).clamp(-2.0, 1.0) == -2.0);
1382    /// assert!((0.0f16).clamp(-2.0, 1.0) == 0.0);
1383    /// assert!((2.0f16).clamp(-2.0, 1.0) == 1.0);
1384    /// assert!((f16::NAN).clamp(-2.0, 1.0).is_nan());
1385    ///
1386    /// // These always returns zero, but the sign (which is ignored by `==`) is non-deterministic.
1387    /// assert!((0.0f16).clamp(-0.0, -0.0) == 0.0);
1388    /// assert!((1.0f16).clamp(-0.0, 0.0) == 0.0);
1389    /// // This is definitely a negative zero.
1390    /// assert!((-1.0f16).clamp(-0.0, 1.0).is_sign_negative());
1391    /// # }
1392    /// ```
1393    #[inline]
1394    #[unstable(feature = "f16", issue = "116909")]
1395    #[must_use = "method returns a new number and does not mutate the original value"]
1396    pub const fn clamp(mut self, min: f16, max: f16) -> f16 {
1397        const_assert!(
1398            min <= max,
1399            "min > max, or either was NaN",
1400            "min > max, or either was NaN. min = {min:?}, max = {max:?}",
1401            min: f16,
1402            max: f16,
1403        );
1404
1405        if self < min {
1406            self = min;
1407        }
1408        if self > max {
1409            self = max;
1410        }
1411        self
1412    }
1413
1414    /// Clamps this number to a symmetric range centered around zero.
1415    ///
1416    /// The method clamps the number's magnitude (absolute value) to be at most `limit`.
1417    ///
1418    /// This is functionally equivalent to `self.clamp(-limit, limit)`, but is more
1419    /// explicit about the intent.
1420    ///
1421    /// # Panics
1422    ///
1423    /// Panics if `limit` is negative or NaN, as this indicates a logic error.
1424    ///
1425    /// # Examples
1426    ///
1427    /// ```
1428    /// #![feature(f16)]
1429    /// #![feature(clamp_magnitude)]
1430    /// # #[cfg(target_has_reliable_f16)] {
1431    /// assert_eq!(5.0f16.clamp_magnitude(3.0), 3.0);
1432    /// assert_eq!((-5.0f16).clamp_magnitude(3.0), -3.0);
1433    /// assert_eq!(2.0f16.clamp_magnitude(3.0), 2.0);
1434    /// assert_eq!((-2.0f16).clamp_magnitude(3.0), -2.0);
1435    /// # }
1436    /// ```
1437    #[inline]
1438    #[unstable(feature = "clamp_magnitude", issue = "148519")]
1439    #[must_use = "this returns the clamped value and does not modify the original"]
1440    pub fn clamp_magnitude(self, limit: f16) -> f16 {
1441        assert!(limit >= 0.0, "limit must be non-negative");
1442        let limit = limit.abs(); // Canonicalises -0.0 to 0.0
1443        self.clamp(-limit, limit)
1444    }
1445
    /// Computes the absolute value of `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16_math)] {
    ///
    /// let x = 3.5_f16;
    /// let y = -3.5_f16;
    ///
    /// assert_eq!(x.abs(), x);
    /// assert_eq!(y.abs(), -y);
    ///
    /// assert!(f16::NAN.abs().is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[ferrocene::prevalidated]
    pub const fn abs(self) -> Self {
        // IEEE 754 `abs`: clears the sign bit, so NaN stays NaN (see doctest).
        intrinsics::fabs(self)
    }
1473
1474    /// Returns a number that represents the sign of `self`.
1475    ///
1476    /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
1477    /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
1478    /// - NaN if the number is NaN
1479    ///
1480    /// # Examples
1481    ///
1482    /// ```
1483    /// #![feature(f16)]
1484    /// # #[cfg(target_has_reliable_f16)] {
1485    ///
1486    /// let f = 3.5_f16;
1487    ///
1488    /// assert_eq!(f.signum(), 1.0);
1489    /// assert_eq!(f16::NEG_INFINITY.signum(), -1.0);
1490    ///
1491    /// assert!(f16::NAN.signum().is_nan());
1492    /// # }
1493    /// ```
1494    #[inline]
1495    #[unstable(feature = "f16", issue = "116909")]
1496    #[rustc_const_unstable(feature = "f16", issue = "116909")]
1497    #[must_use = "method returns a new number and does not mutate the original value"]
1498    pub const fn signum(self) -> f16 {
1499        if self.is_nan() { Self::NAN } else { 1.0_f16.copysign(self) }
1500    }
1501
    /// Returns a number composed of the magnitude of `self` and the sign of
    /// `sign`.
    ///
    /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`.
    /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is
    /// returned.
    ///
    /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note
    /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust
    /// doesn't guarantee that the bit pattern of NaNs are conserved over arithmetic operations, the
    /// result of `copysign` with `sign` being a NaN might produce an unexpected or non-portable
    /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more
    /// info.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16_math)] {
    ///
    /// let f = 3.5_f16;
    ///
    /// assert_eq!(f.copysign(0.42), 3.5_f16);
    /// assert_eq!(f.copysign(-0.42), -3.5_f16);
    /// assert_eq!((-f).copysign(0.42), 3.5_f16);
    /// assert_eq!((-f).copysign(-0.42), -3.5_f16);
    ///
    /// assert!(f16::NAN.copysign(1.0).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn copysign(self, sign: f16) -> f16 {
        // Combines `self`'s magnitude bits with `sign`'s sign bit (see doc above).
        intrinsics::copysignf16(self, sign)
    }
1539
    /// Float addition that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_add(self, rhs: f16) -> f16 {
        // The intrinsic licenses algebraic rewrites, so results may differ
        // between optimization levels; see the linked docs.
        intrinsics::fadd_algebraic(self, rhs)
    }
1550
    /// Float subtraction that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_sub(self, rhs: f16) -> f16 {
        // The intrinsic licenses algebraic rewrites, so results may differ
        // between optimization levels; see the linked docs.
        intrinsics::fsub_algebraic(self, rhs)
    }
1561
    /// Float multiplication that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_mul(self, rhs: f16) -> f16 {
        // The intrinsic licenses algebraic rewrites, so results may differ
        // between optimization levels; see the linked docs.
        intrinsics::fmul_algebraic(self, rhs)
    }
1572
    /// Float division that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_div(self, rhs: f16) -> f16 {
        // The "algebraic" intrinsic permits the optimizer to rewrite this
        // division under algebraic rules, so the exact result is unspecified.
        intrinsics::fdiv_algebraic(self, rhs)
    }
1583
    /// Float remainder that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_rem(self, rhs: f16) -> f16 {
        // The "algebraic" intrinsic permits the optimizer to rewrite this
        // remainder under algebraic rules, so the exact result is unspecified.
        intrinsics::frem_algebraic(self, rhs)
    }
1594}
1595
1596// Functions in this module fall into `core_float_math`
1597// #[unstable(feature = "core_float_math", issue = "137578")]
1598#[cfg(not(test))]
1599#[doc(test(attr(
1600    feature(cfg_target_has_reliable_f16_f128),
1601    expect(internal_features),
1602    allow(unused_features)
1603)))]
1604impl f16 {
    /// Returns the largest integer less than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.7_f16;
    /// let g = 3.0_f16;
    /// let h = -3.7_f16;
    ///
    /// assert_eq!(f.floor(), 3.0);
    /// assert_eq!(g.floor(), 3.0);
    /// assert_eq!(h.floor(), -4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn floor(self) -> f16 {
        // Rounds toward negative infinity; exact for every input.
        intrinsics::floorf16(self)
    }
1633
    /// Returns the smallest integer greater than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.01_f16;
    /// let g = 4.0_f16;
    ///
    /// assert_eq!(f.ceil(), 4.0);
    /// assert_eq!(g.ceil(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "ceiling")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn ceil(self) -> f16 {
        // Rounds toward positive infinity; exact for every input.
        intrinsics::ceilf16(self)
    }
1661
    /// Returns the nearest integer to `self`. If a value is half-way between two
    /// integers, round away from `0.0`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.3_f16;
    /// let g = -3.3_f16;
    /// let h = -3.7_f16;
    /// let i = 3.5_f16;
    /// let j = 4.5_f16;
    ///
    /// assert_eq!(f.round(), 3.0);
    /// assert_eq!(g.round(), -3.0);
    /// assert_eq!(h.round(), -4.0);
    /// assert_eq!(i.round(), 4.0);
    /// assert_eq!(j.round(), 5.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn round(self) -> f16 {
        // Rounds to nearest, with half-way cases rounded away from zero.
        intrinsics::roundf16(self)
    }
1695
    /// Returns the nearest integer to a number. Rounds half-way cases to the number
    /// with an even least significant digit.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.3_f16;
    /// let g = -3.3_f16;
    /// let h = 3.5_f16;
    /// let i = 4.5_f16;
    ///
    /// assert_eq!(f.round_ties_even(), 3.0);
    /// assert_eq!(g.round_ties_even(), -3.0);
    /// assert_eq!(h.round_ties_even(), 4.0);
    /// assert_eq!(i.round_ties_even(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn round_ties_even(self) -> f16 {
        // Rounds to nearest, with half-way cases rounded to the even neighbor.
        intrinsics::round_ties_even_f16(self)
    }
1727
    /// Returns the integer part of `self`.
    /// This means that non-integer numbers are always truncated towards zero.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.7_f16;
    /// let g = 3.0_f16;
    /// let h = -3.7_f16;
    ///
    /// assert_eq!(f.trunc(), 3.0);
    /// assert_eq!(g.trunc(), 3.0);
    /// assert_eq!(h.trunc(), -3.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "truncate")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn trunc(self) -> f16 {
        // Rounds toward zero, i.e. simply drops the fractional part.
        intrinsics::truncf16(self)
    }
1758
1759    /// Returns the fractional part of `self`.
1760    ///
1761    /// This function always returns the precise result.
1762    ///
1763    /// # Examples
1764    ///
1765    /// ```
1766    /// #![feature(f16)]
1767    /// # #[cfg(not(miri))]
1768    /// # #[cfg(target_has_reliable_f16)] {
1769    ///
1770    /// let x = 3.6_f16;
1771    /// let y = -3.6_f16;
1772    /// let abs_difference_x = (x.fract() - 0.6).abs();
1773    /// let abs_difference_y = (y.fract() - (-0.6)).abs();
1774    ///
1775    /// assert!(abs_difference_x <= f16::EPSILON);
1776    /// assert!(abs_difference_y <= f16::EPSILON);
1777    /// # }
1778    /// ```
1779    #[inline]
1780    #[rustc_allow_incoherent_impl]
1781    #[unstable(feature = "f16", issue = "116909")]
1782    #[rustc_const_unstable(feature = "f16", issue = "116909")]
1783    #[must_use = "method returns a new number and does not mutate the original value"]
1784    pub const fn fract(self) -> f16 {
1785        self - self.trunc()
1786    }
1787
1788    /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
1789    /// error, yielding a more accurate result than an unfused multiply-add.
1790    ///
1791    /// Using `mul_add` *may* be more performant than an unfused multiply-add if
1792    /// the target architecture has a dedicated `fma` CPU instruction. However,
1793    /// this is not always true, and will be heavily dependant on designing
1794    /// algorithms with specific target hardware in mind.
1795    ///
1796    /// # Precision
1797    ///
1798    /// The result of this operation is guaranteed to be the rounded
1799    /// infinite-precision result. It is specified by IEEE 754 as
1800    /// `fusedMultiplyAdd` and guaranteed not to change.
1801    ///
1802    /// # Examples
1803    ///
1804    /// ```
1805    /// #![feature(f16)]
1806    /// # #[cfg(not(miri))]
1807    /// # #[cfg(target_has_reliable_f16)] {
1808    ///
1809    /// let m = 10.0_f16;
1810    /// let x = 4.0_f16;
1811    /// let b = 60.0_f16;
1812    ///
1813    /// assert_eq!(m.mul_add(x, b), 100.0);
1814    /// assert_eq!(m * x + b, 100.0);
1815    ///
1816    /// let one_plus_eps = 1.0_f16 + f16::EPSILON;
1817    /// let one_minus_eps = 1.0_f16 - f16::EPSILON;
1818    /// let minus_one = -1.0_f16;
1819    ///
1820    /// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps.
1821    /// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f16::EPSILON * f16::EPSILON);
1822    /// // Different rounding with the non-fused multiply and add.
1823    /// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0);
1824    /// # }
1825    /// ```
1826    #[inline]
1827    #[rustc_allow_incoherent_impl]
1828    #[unstable(feature = "f16", issue = "116909")]
1829    #[doc(alias = "fmaf16", alias = "fusedMultiplyAdd")]
1830    #[must_use = "method returns a new number and does not mutate the original value"]
1831    pub const fn mul_add(self, a: f16, b: f16) -> f16 {
1832        intrinsics::fmaf16(self, a, b)
1833    }
1834
1835    /// Calculates Euclidean division, the matching method for `rem_euclid`.
1836    ///
1837    /// This computes the integer `n` such that
1838    /// `self = n * rhs + self.rem_euclid(rhs)`.
1839    /// In other words, the result is `self / rhs` rounded to the integer `n`
1840    /// such that `self >= n * rhs`.
1841    ///
1842    /// # Precision
1843    ///
1844    /// The result of this operation is guaranteed to be the rounded
1845    /// infinite-precision result.
1846    ///
1847    /// # Examples
1848    ///
1849    /// ```
1850    /// #![feature(f16)]
1851    /// # #[cfg(not(miri))]
1852    /// # #[cfg(target_has_reliable_f16)] {
1853    ///
1854    /// let a: f16 = 7.0;
1855    /// let b = 4.0;
1856    /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0
1857    /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
1858    /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
1859    /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
1860    /// # }
1861    /// ```
1862    #[inline]
1863    #[rustc_allow_incoherent_impl]
1864    #[unstable(feature = "f16", issue = "116909")]
1865    #[must_use = "method returns a new number and does not mutate the original value"]
1866    pub fn div_euclid(self, rhs: f16) -> f16 {
1867        let q = (self / rhs).trunc();
1868        if self % rhs < 0.0 {
1869            return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
1870        }
1871        q
1872    }
1873
1874    /// Calculates the least nonnegative remainder of `self` when
1875    /// divided by `rhs`.
1876    ///
1877    /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
1878    /// most cases. However, due to a floating point round-off error it can
1879    /// result in `r == rhs.abs()`, violating the mathematical definition, if
1880    /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
1881    /// This result is not an element of the function's codomain, but it is the
1882    /// closest floating point number in the real numbers and thus fulfills the
1883    /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
1884    /// approximately.
1885    ///
1886    /// # Precision
1887    ///
1888    /// The result of this operation is guaranteed to be the rounded
1889    /// infinite-precision result.
1890    ///
1891    /// # Examples
1892    ///
1893    /// ```
1894    /// #![feature(f16)]
1895    /// # #[cfg(not(miri))]
1896    /// # #[cfg(target_has_reliable_f16)] {
1897    ///
1898    /// let a: f16 = 7.0;
1899    /// let b = 4.0;
1900    /// assert_eq!(a.rem_euclid(b), 3.0);
1901    /// assert_eq!((-a).rem_euclid(b), 1.0);
1902    /// assert_eq!(a.rem_euclid(-b), 3.0);
1903    /// assert_eq!((-a).rem_euclid(-b), 1.0);
1904    /// // limitation due to round-off error
1905    /// assert!((-f16::EPSILON).rem_euclid(3.0) != 0.0);
1906    /// # }
1907    /// ```
1908    #[inline]
1909    #[rustc_allow_incoherent_impl]
1910    #[doc(alias = "modulo", alias = "mod")]
1911    #[unstable(feature = "f16", issue = "116909")]
1912    #[must_use = "method returns a new number and does not mutate the original value"]
1913    pub fn rem_euclid(self, rhs: f16) -> f16 {
1914        let r = self % rhs;
1915        if r < 0.0 { r + rhs.abs() } else { r }
1916    }
1917
    /// Raises a number to an integer power.
    ///
    /// Using this function is generally faster than using `powf`.
    /// It might have a different sequence of rounding operations than `powf`,
    /// so the results are not guaranteed to agree.
    ///
    /// Note that this function is special in that it can return non-NaN results for NaN inputs. For
    /// example, `f16::powi(f16::NAN, 0)` returns `1.0`. However, if an input is a *signaling*
    /// NaN, then the result is non-deterministically either a NaN or the result that the
    /// corresponding quiet NaN would produce.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 2.0_f16;
    /// let abs_difference = (x.powi(2) - (x * x)).abs();
    /// assert!(abs_difference <= f16::EPSILON);
    ///
    /// assert_eq!(f16::powi(f16::NAN, 0), 1.0);
    /// assert_eq!(f16::powi(0.0, 0), 1.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn powi(self, n: i32) -> f16 {
        // Precision is unspecified and may differ from `powf` (see above).
        intrinsics::powif16(self, n)
    }
1956
    /// Returns the square root of a number.
    ///
    /// Returns NaN if `self` is a negative number other than `-0.0`.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result. It is specified by IEEE 754 as `squareRoot`
    /// and guaranteed not to change.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let positive = 4.0_f16;
    /// let negative = -4.0_f16;
    /// let negative_zero = -0.0_f16;
    ///
    /// assert_eq!(positive.sqrt(), 2.0);
    /// assert!(negative.sqrt().is_nan());
    /// assert!(negative_zero.sqrt() == negative_zero);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "squareRoot")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn sqrt(self) -> f16 {
        // IEEE 754 `squareRoot`: the correctly rounded infinite-precision result.
        intrinsics::sqrtf16(self)
    }
1991
    /// Returns the cube root of a number.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// This function currently corresponds to the `cbrtf` from libc on Unix
    /// and Windows. Note that this might change in the future.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 8.0f16;
    ///
    /// // x^(1/3) - 2 == 0
    /// let abs_difference = (x.cbrt() - 2.0).abs();
    ///
    /// assert!(abs_difference <= f16::EPSILON);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn cbrt(self) -> f16 {
        // Computed by widening to `f32`, calling libm's `cbrtf`, and rounding
        // the result back down to `f16`.
        libm::cbrtf(self as f32) as f16
    }
2024}