core/num/f16.rs
1//! Constants for the `f16` half-precision floating point type.
2//!
3//! *[See also the `f16` primitive type][f16].*
4//!
5//! Mathematically significant numbers are provided in the `consts` sub-module.
6//!
7//! For the constants defined directly in this module
8//! (as distinct from those defined in the `consts` sub-module),
9//! new code should instead use the associated constants
10//! defined directly on the `f16` type.
11
12#![unstable(feature = "f16", issue = "116909")]
13
14use crate::convert::FloatToInt;
15use crate::num::FpCategory;
16#[cfg(not(test))]
17use crate::num::libm;
18use crate::panic::const_assert;
19use crate::{intrinsics, mem};
20
21/// Basic mathematical constants.
22#[unstable(feature = "f16", issue = "116909")]
23pub mod consts {
24 // FIXME: replace with mathematical constants from cmath.
25
26 /// Archimedes' constant (π)
27 #[unstable(feature = "f16", issue = "116909")]
28 pub const PI: f16 = 3.14159265358979323846264338327950288_f16;
29
30 /// The full circle constant (τ)
31 ///
32 /// Equal to 2π.
33 #[unstable(feature = "f16", issue = "116909")]
34 pub const TAU: f16 = 6.28318530717958647692528676655900577_f16;
35
36 /// The golden ratio (φ)
37 #[unstable(feature = "f16", issue = "116909")]
38 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
39 pub const PHI: f16 = 1.618033988749894848204586834365638118_f16;
40
41 /// The Euler-Mascheroni constant (γ)
42 #[unstable(feature = "f16", issue = "116909")]
43 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
44 pub const EGAMMA: f16 = 0.577215664901532860606512090082402431_f16;
45
46 /// π/2
47 #[unstable(feature = "f16", issue = "116909")]
48 pub const FRAC_PI_2: f16 = 1.57079632679489661923132169163975144_f16;
49
50 /// π/3
51 #[unstable(feature = "f16", issue = "116909")]
52 pub const FRAC_PI_3: f16 = 1.04719755119659774615421446109316763_f16;
53
54 /// π/4
55 #[unstable(feature = "f16", issue = "116909")]
56 pub const FRAC_PI_4: f16 = 0.785398163397448309615660845819875721_f16;
57
58 /// π/6
59 #[unstable(feature = "f16", issue = "116909")]
60 pub const FRAC_PI_6: f16 = 0.52359877559829887307710723054658381_f16;
61
62 /// π/8
63 #[unstable(feature = "f16", issue = "116909")]
64 pub const FRAC_PI_8: f16 = 0.39269908169872415480783042290993786_f16;
65
66 /// 1/π
67 #[unstable(feature = "f16", issue = "116909")]
68 pub const FRAC_1_PI: f16 = 0.318309886183790671537767526745028724_f16;
69
70 /// 1/sqrt(π)
71 #[unstable(feature = "f16", issue = "116909")]
72 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
73 pub const FRAC_1_SQRT_PI: f16 = 0.564189583547756286948079451560772586_f16;
74
75 /// 1/sqrt(2π)
76 #[doc(alias = "FRAC_1_SQRT_TAU")]
77 #[unstable(feature = "f16", issue = "116909")]
78 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
79 pub const FRAC_1_SQRT_2PI: f16 = 0.398942280401432677939946059934381868_f16;
80
81 /// 2/π
82 #[unstable(feature = "f16", issue = "116909")]
83 pub const FRAC_2_PI: f16 = 0.636619772367581343075535053490057448_f16;
84
85 /// 2/sqrt(π)
86 #[unstable(feature = "f16", issue = "116909")]
87 pub const FRAC_2_SQRT_PI: f16 = 1.12837916709551257389615890312154517_f16;
88
89 /// sqrt(2)
90 #[unstable(feature = "f16", issue = "116909")]
91 pub const SQRT_2: f16 = 1.41421356237309504880168872420969808_f16;
92
93 /// 1/sqrt(2)
94 #[unstable(feature = "f16", issue = "116909")]
95 pub const FRAC_1_SQRT_2: f16 = 0.707106781186547524400844362104849039_f16;
96
97 /// sqrt(3)
98 #[unstable(feature = "f16", issue = "116909")]
99 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
100 pub const SQRT_3: f16 = 1.732050807568877293527446341505872367_f16;
101
102 /// 1/sqrt(3)
103 #[unstable(feature = "f16", issue = "116909")]
104 // Also, #[unstable(feature = "more_float_constants", issue = "103883")]
105 pub const FRAC_1_SQRT_3: f16 = 0.577350269189625764509148780501957456_f16;
106
107 /// Euler's number (e)
108 #[unstable(feature = "f16", issue = "116909")]
109 pub const E: f16 = 2.71828182845904523536028747135266250_f16;
110
111 /// log<sub>2</sub>(10)
112 #[unstable(feature = "f16", issue = "116909")]
113 pub const LOG2_10: f16 = 3.32192809488736234787031942948939018_f16;
114
115 /// log<sub>2</sub>(e)
116 #[unstable(feature = "f16", issue = "116909")]
117 pub const LOG2_E: f16 = 1.44269504088896340735992468100189214_f16;
118
119 /// log<sub>10</sub>(2)
120 #[unstable(feature = "f16", issue = "116909")]
121 pub const LOG10_2: f16 = 0.301029995663981195213738894724493027_f16;
122
123 /// log<sub>10</sub>(e)
124 #[unstable(feature = "f16", issue = "116909")]
125 pub const LOG10_E: f16 = 0.434294481903251827651128918916605082_f16;
126
127 /// ln(2)
128 #[unstable(feature = "f16", issue = "116909")]
129 pub const LN_2: f16 = 0.693147180559945309417232121458176568_f16;
130
131 /// ln(10)
132 #[unstable(feature = "f16", issue = "116909")]
133 pub const LN_10: f16 = 2.30258509299404568401799145468436421_f16;
134}
135
136impl f16 {
137 // FIXME(f16_f128): almost all methods in this `impl` are missing examples and a const
138 // implementation. Add these once we can run code on all platforms and have f16/f128 in CTFE.
139
140 /// The radix or base of the internal representation of `f16`.
141 #[unstable(feature = "f16", issue = "116909")]
142 pub const RADIX: u32 = 2;
143
144 /// Number of significant digits in base 2.
145 ///
146 /// Note that the size of the mantissa in the bitwise representation is one
147 /// smaller than this since the leading 1 is not stored explicitly.
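///
/// An illustrative sketch (the target gating is assumed to match the other doctests in this file):
///
/// ```
/// #![feature(f16)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // With 11 significant bits, every integer with magnitude up to 2^11 = 2048 is exact,
/// assert_eq!(2047.0_f16 + 1.0, 2048.0);
/// // but 2049 is not representable, so this addition rounds back down to 2048.
/// assert_eq!(2048.0_f16 + 1.0, 2048.0);
/// # }
/// ```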
148 #[unstable(feature = "f16", issue = "116909")]
149 pub const MANTISSA_DIGITS: u32 = 11;
150
151 /// Approximate number of significant digits in base 10.
152 ///
153 /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
154 /// significant digits can be converted to `f16` and back without loss.
155 ///
156 /// Equal to floor(log<sub>10</sub> 2<sup>[`MANTISSA_DIGITS`] − 1</sup>).
157 ///
158 /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
159 #[unstable(feature = "f16", issue = "116909")]
160 pub const DIGITS: u32 = 3;
161
162 /// [Machine epsilon] value for `f16`.
163 ///
164 /// This is the difference between `1.0` and the next larger representable number.
165 ///
166 /// Equal to 2<sup>1 − [`MANTISSA_DIGITS`]</sup>.
167 ///
168 /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
169 /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
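///
/// An illustrative check (a sketch; the target gating mirrors the other doctests in this file):
///
/// ```
/// #![feature(f16)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // The gap between 1.0 and the next representable `f16` is exactly `EPSILON`.
/// assert_eq!(1.0_f16.next_up() - 1.0, f16::EPSILON);
/// # }
/// ```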
170 #[unstable(feature = "f16", issue = "116909")]
171 #[rustc_diagnostic_item = "f16_epsilon"]
172 pub const EPSILON: f16 = 9.7656e-4_f16;
173
174 /// Smallest finite `f16` value.
175 ///
176 /// Equal to −[`MAX`].
177 ///
178 /// [`MAX`]: f16::MAX
179 #[unstable(feature = "f16", issue = "116909")]
180 pub const MIN: f16 = -6.5504e+4_f16;
181 /// Smallest positive normal `f16` value.
182 ///
183 /// Equal to 2<sup>[`MIN_EXP`] − 1</sup>.
184 ///
185 /// [`MIN_EXP`]: f16::MIN_EXP
186 #[unstable(feature = "f16", issue = "116909")]
187 pub const MIN_POSITIVE: f16 = 6.1035e-5_f16;
188 /// Largest finite `f16` value.
189 ///
190 /// Equal to
191 /// (1 − 2<sup>−[`MANTISSA_DIGITS`]</sup>) 2<sup>[`MAX_EXP`]</sup>.
192 ///
193 /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
194 /// [`MAX_EXP`]: f16::MAX_EXP
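///
/// An illustrative check of how `MIN`, `MIN_POSITIVE`, and `MAX` relate (a sketch; the target
/// gating mirrors the other doctests in this file):
///
/// ```
/// #![feature(f16)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // The largest finite value is the negation of the smallest, and both are finite.
/// assert_eq!(f16::MIN, -f16::MAX);
/// assert!(f16::MAX.is_finite());
/// // `MIN_POSITIVE` is the smallest *normal* value, not the smallest representable one.
/// assert!(f16::MIN_POSITIVE > 0.0);
/// # }
/// ```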
195 #[unstable(feature = "f16", issue = "116909")]
196 pub const MAX: f16 = 6.5504e+4_f16;
197
198 /// One greater than the minimum possible *normal* power of 2 exponent
199 /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
200 ///
201 /// This corresponds to the exact minimum possible *normal* power of 2 exponent
202 /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
203 /// In other words, all normal numbers representable by this type are
204 /// greater than or equal to 0.5 × 2<sup><i>MIN_EXP</i></sup>.
205 #[unstable(feature = "f16", issue = "116909")]
206 pub const MIN_EXP: i32 = -13;
207 /// One greater than the maximum possible power of 2 exponent
208 /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
209 ///
210 /// This corresponds to the exact maximum possible power of 2 exponent
211 /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
212 /// In other words, all numbers representable by this type are
213 /// strictly less than 2<sup><i>MAX_EXP</i></sup>.
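///
/// An illustrative check (a sketch; the target gating mirrors the other doctests in this file):
///
/// ```
/// #![feature(f16)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // 2^(MAX_EXP - 1) = 32768 is still finite, but doubling it overflows to infinity.
/// assert!(32768.0_f16.is_finite());
/// assert_eq!(32768.0_f16 * 2.0, f16::INFINITY);
/// // Halving `MIN_POSITIVE` (= 2^(MIN_EXP - 1)) leaves the normal range: the result is subnormal.
/// assert!((f16::MIN_POSITIVE / 2.0).is_subnormal());
/// # }
/// ```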
214 #[unstable(feature = "f16", issue = "116909")]
215 pub const MAX_EXP: i32 = 16;
216
217 /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
218 ///
219 /// Equal to ceil(log<sub>10</sub> [`MIN_POSITIVE`]).
220 ///
221 /// [`MIN_POSITIVE`]: f16::MIN_POSITIVE
222 #[unstable(feature = "f16", issue = "116909")]
223 pub const MIN_10_EXP: i32 = -4;
224 /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
225 ///
226 /// Equal to floor(log<sub>10</sub> [`MAX`]).
227 ///
228 /// [`MAX`]: f16::MAX
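///
/// An illustrative check (a sketch; the target gating mirrors the other doctests in this file):
///
/// ```
/// #![feature(f16)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // 10^MAX_10_EXP and 10^MIN_10_EXP are both normal...
/// assert!(1e4_f16.is_normal());
/// assert!(1e-4_f16.is_normal());
/// // ...but one more decimal step down leaves the normal range.
/// assert!(1e-5_f16.is_subnormal());
/// # }
/// ```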
229 #[unstable(feature = "f16", issue = "116909")]
230 pub const MAX_10_EXP: i32 = 4;
231
232 /// Not a Number (NaN).
233 ///
234 /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are
235 /// considered to be NaN. Furthermore, the standard distinguishes between a "signaling" and
236 /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern)
237 /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more
238 /// info.
239 ///
240 /// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumptions
241 /// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
242 /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary.
243 /// The concrete bit pattern may change across Rust versions and target platforms.
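///
/// An illustrative check (a sketch; the target gating mirrors the other doctests in this file):
///
/// ```
/// #![feature(f16)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// assert!(f16::NAN.is_nan());
/// // NaN compares unequal to everything, including itself.
/// assert_ne!(f16::NAN, f16::NAN);
/// # }
/// ```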
244 #[allow(clippy::eq_op)]
245 #[rustc_diagnostic_item = "f16_nan"]
246 #[unstable(feature = "f16", issue = "116909")]
247 pub const NAN: f16 = 0.0_f16 / 0.0_f16;
248
249 /// Infinity (∞).
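///
/// An illustrative check (a sketch; the target gating mirrors the other doctests in this file):
///
/// ```
/// #![feature(f16)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// assert!(f16::INFINITY.is_infinite());
/// assert!(f16::INFINITY > f16::MAX);
/// assert_eq!(f16::NEG_INFINITY, -f16::INFINITY);
/// # }
/// ```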
250 #[unstable(feature = "f16", issue = "116909")]
251 pub const INFINITY: f16 = 1.0_f16 / 0.0_f16;
252
253 /// Negative infinity (−∞).
254 #[unstable(feature = "f16", issue = "116909")]
255 pub const NEG_INFINITY: f16 = -1.0_f16 / 0.0_f16;
256
257 /// Sign bit
258 pub(crate) const SIGN_MASK: u16 = 0x8000;
259
260 /// Exponent mask
261 pub(crate) const EXP_MASK: u16 = 0x7c00;
262
263 /// Mantissa mask
264 pub(crate) const MAN_MASK: u16 = 0x03ff;
265
266 /// Minimum representable positive value (min subnormal)
267 const TINY_BITS: u16 = 0x1;
268
269 /// Minimum representable negative value (min negative subnormal)
270 const NEG_TINY_BITS: u16 = Self::TINY_BITS | Self::SIGN_MASK;
271
272 /// Returns `true` if this value is NaN.
273 ///
274 /// ```
275 /// #![feature(f16)]
276 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
277 ///
278 /// let nan = f16::NAN;
279 /// let f = 7.0_f16;
280 ///
281 /// assert!(nan.is_nan());
282 /// assert!(!f.is_nan());
283 /// # }
284 /// ```
285 #[inline]
286 #[must_use]
287 #[unstable(feature = "f16", issue = "116909")]
288 #[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
289 pub const fn is_nan(self) -> bool {
290 self != self
291 }
292
293 /// Returns `true` if this value is positive infinity or negative infinity, and
294 /// `false` otherwise.
295 ///
296 /// ```
297 /// #![feature(f16)]
298 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
299 ///
300 /// let f = 7.0f16;
301 /// let inf = f16::INFINITY;
302 /// let neg_inf = f16::NEG_INFINITY;
303 /// let nan = f16::NAN;
304 ///
305 /// assert!(!f.is_infinite());
306 /// assert!(!nan.is_infinite());
307 ///
308 /// assert!(inf.is_infinite());
309 /// assert!(neg_inf.is_infinite());
310 /// # }
311 /// ```
312 #[inline]
313 #[must_use]
314 #[unstable(feature = "f16", issue = "116909")]
315 pub const fn is_infinite(self) -> bool {
316 (self == f16::INFINITY) | (self == f16::NEG_INFINITY)
317 }
318
319 /// Returns `true` if this number is neither infinite nor NaN.
320 ///
321 /// ```
322 /// #![feature(f16)]
323 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
324 ///
325 /// let f = 7.0f16;
326 /// let inf: f16 = f16::INFINITY;
327 /// let neg_inf: f16 = f16::NEG_INFINITY;
328 /// let nan: f16 = f16::NAN;
329 ///
330 /// assert!(f.is_finite());
331 ///
332 /// assert!(!nan.is_finite());
333 /// assert!(!inf.is_finite());
334 /// assert!(!neg_inf.is_finite());
335 /// # }
336 /// ```
337 #[inline]
338 #[must_use]
339 #[unstable(feature = "f16", issue = "116909")]
340 #[rustc_const_unstable(feature = "f16", issue = "116909")]
341 pub const fn is_finite(self) -> bool {
342 // There's no need to handle NaN separately: if self is NaN,
343 // the comparison is not true, exactly as desired.
344 self.abs() < Self::INFINITY
345 }
346
347 /// Returns `true` if the number is [subnormal].
348 ///
349 /// ```
350 /// #![feature(f16)]
351 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
352 ///
353 /// let min = f16::MIN_POSITIVE; // 6.1035e-5
354 /// let max = f16::MAX;
355 /// let lower_than_min = 1.0e-7_f16;
356 /// let zero = 0.0_f16;
357 ///
358 /// assert!(!min.is_subnormal());
359 /// assert!(!max.is_subnormal());
360 ///
361 /// assert!(!zero.is_subnormal());
362 /// assert!(!f16::NAN.is_subnormal());
363 /// assert!(!f16::INFINITY.is_subnormal());
364 /// // Values between `0` and `min` are Subnormal.
365 /// assert!(lower_than_min.is_subnormal());
366 /// # }
367 /// ```
368 /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
369 #[inline]
370 #[must_use]
371 #[unstable(feature = "f16", issue = "116909")]
372 pub const fn is_subnormal(self) -> bool {
373 matches!(self.classify(), FpCategory::Subnormal)
374 }
375
376 /// Returns `true` if the number is neither zero, infinite, [subnormal], nor NaN.
377 ///
378 /// ```
379 /// #![feature(f16)]
380 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
381 ///
382 /// let min = f16::MIN_POSITIVE; // 6.1035e-5
383 /// let max = f16::MAX;
384 /// let lower_than_min = 1.0e-7_f16;
385 /// let zero = 0.0_f16;
386 ///
387 /// assert!(min.is_normal());
388 /// assert!(max.is_normal());
389 ///
390 /// assert!(!zero.is_normal());
391 /// assert!(!f16::NAN.is_normal());
392 /// assert!(!f16::INFINITY.is_normal());
393 /// // Values between `0` and `min` are Subnormal.
394 /// assert!(!lower_than_min.is_normal());
395 /// # }
396 /// ```
397 /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
398 #[inline]
399 #[must_use]
400 #[unstable(feature = "f16", issue = "116909")]
401 pub const fn is_normal(self) -> bool {
402 matches!(self.classify(), FpCategory::Normal)
403 }
404
405 /// Returns the floating point category of the number. If only one property
406 /// is going to be tested, it is generally faster to use the specific
407 /// predicate instead.
408 ///
409 /// ```
410 /// #![feature(f16)]
411 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
412 ///
413 /// use std::num::FpCategory;
414 ///
415 /// let num = 12.4_f16;
416 /// let inf = f16::INFINITY;
417 ///
418 /// assert_eq!(num.classify(), FpCategory::Normal);
419 /// assert_eq!(inf.classify(), FpCategory::Infinite);
420 /// # }
421 /// ```
422 #[inline]
423 #[unstable(feature = "f16", issue = "116909")]
424 pub const fn classify(self) -> FpCategory {
425 let b = self.to_bits();
426 match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
427 (0, Self::EXP_MASK) => FpCategory::Infinite,
428 (_, Self::EXP_MASK) => FpCategory::Nan,
429 (0, 0) => FpCategory::Zero,
430 (_, 0) => FpCategory::Subnormal,
431 _ => FpCategory::Normal,
432 }
433 }
434
435 /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
436 /// positive sign bit and positive infinity.
437 ///
438 /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
439 /// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
440 /// preserved over arithmetic operations, calling `is_sign_positive` on
441 /// a NaN might produce an unexpected or non-portable result. See the [specification
442 /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0`
443 /// if you need fully portable behavior (will return `false` for all NaNs).
444 ///
445 /// ```
446 /// #![feature(f16)]
447 /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
448 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
449 ///
450 /// let f = 7.0_f16;
451 /// let g = -7.0_f16;
452 ///
453 /// assert!(f.is_sign_positive());
454 /// assert!(!g.is_sign_positive());
455 /// # }
456 /// ```
457 #[inline]
458 #[must_use]
459 #[unstable(feature = "f16", issue = "116909")]
460 pub const fn is_sign_positive(self) -> bool {
461 !self.is_sign_negative()
462 }
463
464 /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
465 /// negative sign bit and negative infinity.
466 ///
467 /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
468 /// a NaN, and as Rust doesn't guarantee that the bit patterns of NaNs are
469 /// preserved over arithmetic operations, calling `is_sign_negative` on
470 /// a NaN might produce an unexpected or non-portable result. See the [specification
471 /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0`
472 /// if you need fully portable behavior (will return `false` for all NaNs).
473 ///
474 /// ```
475 /// #![feature(f16)]
476 /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
477 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
478 ///
479 /// let f = 7.0_f16;
480 /// let g = -7.0_f16;
481 ///
482 /// assert!(!f.is_sign_negative());
483 /// assert!(g.is_sign_negative());
484 /// # }
485 /// ```
486 #[inline]
487 #[must_use]
488 #[unstable(feature = "f16", issue = "116909")]
489 pub const fn is_sign_negative(self) -> bool {
490 // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
491 // applies to zeros and NaNs as well.
492 // SAFETY: This is just transmuting to get the sign bit, it's fine.
493 (self.to_bits() & (1 << 15)) != 0
494 }
495
496 /// Returns the least number greater than `self`.
497 ///
498 /// Let `TINY` be the smallest representable positive `f16`. Then,
499 /// - if `self.is_nan()`, this returns `self`;
500 /// - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
501 /// - if `self` is `-TINY`, this returns -0.0;
502 /// - if `self` is -0.0 or +0.0, this returns `TINY`;
503 /// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
504 /// - otherwise the unique least value greater than `self` is returned.
505 ///
506 /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
507 /// is finite `x == x.next_up().next_down()` also holds.
508 ///
509 /// ```rust
510 /// #![feature(f16)]
511 /// # // FIXME(f16_f128): ABI issues on MSVC
512 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
513 ///
514 /// // f16::EPSILON is the difference between 1.0 and the next number up.
515 /// assert_eq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
516 /// // But not for most numbers.
517 /// assert!(0.1f16.next_up() < 0.1 + f16::EPSILON);
518 /// assert_eq!(4356f16.next_up(), 4360.0);
519 /// # }
520 /// ```
521 ///
522 /// This operation corresponds to IEEE-754 `nextUp`.
523 ///
524 /// [`NEG_INFINITY`]: Self::NEG_INFINITY
525 /// [`INFINITY`]: Self::INFINITY
526 /// [`MIN`]: Self::MIN
527 /// [`MAX`]: Self::MAX
528 #[inline]
529 #[doc(alias = "nextUp")]
530 #[unstable(feature = "f16", issue = "116909")]
531 pub const fn next_up(self) -> Self {
532 // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
533 // denormals to zero. This is in general unsound and unsupported, but here
534 // we do our best to still produce the correct result on such targets.
535 let bits = self.to_bits();
536 if self.is_nan() || bits == Self::INFINITY.to_bits() {
537 return self;
538 }
539
540 let abs = bits & !Self::SIGN_MASK;
541 let next_bits = if abs == 0 {
542 Self::TINY_BITS
543 } else if bits == abs {
544 bits + 1
545 } else {
546 bits - 1
547 };
548 Self::from_bits(next_bits)
549 }
550
551 /// Returns the greatest number less than `self`.
552 ///
553 /// Let `TINY` be the smallest representable positive `f16`. Then,
554 /// - if `self.is_nan()`, this returns `self`;
555 /// - if `self` is [`INFINITY`], this returns [`MAX`];
556 /// - if `self` is `TINY`, this returns 0.0;
557 /// - if `self` is -0.0 or +0.0, this returns `-TINY`;
558 /// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
559 /// - otherwise the unique greatest value less than `self` is returned.
560 ///
561 /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
562 /// is finite `x == x.next_down().next_up()` also holds.
563 ///
564 /// ```rust
565 /// #![feature(f16)]
566 /// # // FIXME(f16_f128): ABI issues on MSVC
567 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
568 ///
569 /// let x = 1.0f16;
570 /// // Clamp value into range [0, 1).
571 /// let clamped = x.clamp(0.0, 1.0f16.next_down());
572 /// assert!(clamped < 1.0);
573 /// assert_eq!(clamped.next_up(), 1.0);
574 /// # }
575 /// ```
576 ///
577 /// This operation corresponds to IEEE-754 `nextDown`.
578 ///
579 /// [`NEG_INFINITY`]: Self::NEG_INFINITY
580 /// [`INFINITY`]: Self::INFINITY
581 /// [`MIN`]: Self::MIN
582 /// [`MAX`]: Self::MAX
583 #[inline]
584 #[doc(alias = "nextDown")]
585 #[unstable(feature = "f16", issue = "116909")]
586 pub const fn next_down(self) -> Self {
587 // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
588 // denormals to zero. This is in general unsound and unsupported, but here
589 // we do our best to still produce the correct result on such targets.
590 let bits = self.to_bits();
591 if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
592 return self;
593 }
594
595 let abs = bits & !Self::SIGN_MASK;
596 let next_bits = if abs == 0 {
597 Self::NEG_TINY_BITS
598 } else if bits == abs {
599 bits - 1
600 } else {
601 bits + 1
602 };
603 Self::from_bits(next_bits)
604 }
605
606 /// Takes the reciprocal (inverse) of a number, `1/x`.
607 ///
608 /// ```
609 /// #![feature(f16)]
610 /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms
611 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
612 ///
613 /// let x = 2.0_f16;
614 /// let abs_difference = (x.recip() - (1.0 / x)).abs();
615 ///
616 /// assert!(abs_difference <= f16::EPSILON);
617 /// # }
618 /// ```
619 #[inline]
620 #[unstable(feature = "f16", issue = "116909")]
621 #[must_use = "this returns the result of the operation, without modifying the original"]
622 pub const fn recip(self) -> Self {
623 1.0 / self
624 }
625
626 /// Converts radians to degrees.
627 ///
628 /// # Unspecified precision
629 ///
630 /// The precision of this function is non-deterministic. This means it varies by platform,
631 /// Rust version, and can even differ within the same execution from one invocation to the next.
632 ///
633 /// # Examples
634 ///
635 /// ```
636 /// #![feature(f16)]
637 /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms
638 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
639 ///
640 /// let angle = std::f16::consts::PI;
641 ///
642 /// let abs_difference = (angle.to_degrees() - 180.0).abs();
643 /// assert!(abs_difference <= 0.5);
644 /// # }
645 /// ```
646 #[inline]
647 #[unstable(feature = "f16", issue = "116909")]
648 #[must_use = "this returns the result of the operation, without modifying the original"]
649 pub const fn to_degrees(self) -> Self {
650 // Use a literal to avoid double rounding, consts::PI is already rounded,
651 // and dividing would round again.
652 const PIS_IN_180: f16 = 57.2957795130823208767981548141051703_f16;
653 self * PIS_IN_180
654 }
655
656 /// Converts degrees to radians.
657 ///
658 /// # Unspecified precision
659 ///
660 /// The precision of this function is non-deterministic. This means it varies by platform,
661 /// Rust version, and can even differ within the same execution from one invocation to the next.
662 ///
663 /// # Examples
664 ///
665 /// ```
666 /// #![feature(f16)]
667 /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms
668 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
669 ///
670 /// let angle = 180.0f16;
671 ///
672 /// let abs_difference = (angle.to_radians() - std::f16::consts::PI).abs();
673 ///
674 /// assert!(abs_difference <= 0.01);
675 /// # }
676 /// ```
677 #[inline]
678 #[unstable(feature = "f16", issue = "116909")]
679 #[must_use = "this returns the result of the operation, without modifying the original"]
680 pub const fn to_radians(self) -> f16 {
681 // Use a literal to avoid double rounding, consts::PI is already rounded,
682 // and dividing would round again.
683 const RADS_PER_DEG: f16 = 0.017453292519943295769236907684886_f16;
684 self * RADS_PER_DEG
685 }
686
687 /// Returns the maximum of the two numbers, ignoring NaN.
688 ///
689 /// If one of the arguments is NaN, then the other argument is returned.
690 /// This follows the IEEE 754-2008 semantics for maxNum, except for handling of signaling NaNs;
691 /// this function handles all NaNs the same way and avoids maxNum's problems with associativity.
692 /// This also matches the behavior of libm’s fmax. In particular, if the inputs compare equal
693 /// (such as for the case of `+0.0` and `-0.0`), either input may be returned non-deterministically.
694 ///
695 /// ```
696 /// #![feature(f16)]
697 /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
698 ///
699 /// let x = 1.0f16;
700 /// let y = 2.0f16;
701 ///
702 /// assert_eq!(x.max(y), y);
703 /// # }
704 /// ```
705 #[inline]
706 #[unstable(feature = "f16", issue = "116909")]
707 #[rustc_const_unstable(feature = "f16", issue = "116909")]
708 #[must_use = "this returns the result of the comparison, without modifying either input"]
709 pub const fn max(self, other: f16) -> f16 {
710 intrinsics::maxnumf16(self, other)
711 }
712
713 /// Returns the minimum of the two numbers, ignoring NaN.
714 ///
715 /// If one of the arguments is NaN, then the other argument is returned.
716 /// This follows the IEEE 754-2008 semantics for minNum, except for handling of signaling NaNs;
717 /// this function handles all NaNs the same way and avoids minNum's problems with associativity.
718 /// This also matches the behavior of libm’s fmin. In particular, if the inputs compare equal
719 /// (such as for the case of `+0.0` and `-0.0`), either input may be returned non-deterministically.
720 ///
721 /// ```
722 /// #![feature(f16)]
723 /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
724 ///
725 /// let x = 1.0f16;
726 /// let y = 2.0f16;
727 ///
728 /// assert_eq!(x.min(y), x);
729 /// # }
730 /// ```
731 #[inline]
732 #[unstable(feature = "f16", issue = "116909")]
733 #[rustc_const_unstable(feature = "f16", issue = "116909")]
734 #[must_use = "this returns the result of the comparison, without modifying either input"]
735 pub const fn min(self, other: f16) -> f16 {
736 intrinsics::minnumf16(self, other)
737 }
738
739 /// Returns the maximum of the two numbers, propagating NaN.
740 ///
741 /// This returns NaN when *either* argument is NaN, as opposed to
742 /// [`f16::max`] which only returns NaN when *both* arguments are NaN.
743 ///
744 /// ```
745 /// #![feature(f16)]
746 /// #![feature(float_minimum_maximum)]
747 /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
748 ///
749 /// let x = 1.0f16;
750 /// let y = 2.0f16;
751 ///
752 /// assert_eq!(x.maximum(y), y);
753 /// assert!(x.maximum(f16::NAN).is_nan());
754 /// # }
755 /// ```
756 ///
757 /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the greater
758 /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0.
759 /// Note that this follows the semantics specified in IEEE 754-2019.
760 ///
761 /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN
762 /// operand is conserved; see the [specification of NaN bit patterns](f32#nan-bit-patterns) for more info.
763 #[inline]
764 #[unstable(feature = "f16", issue = "116909")]
765 // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
766 #[must_use = "this returns the result of the comparison, without modifying either input"]
767 pub const fn maximum(self, other: f16) -> f16 {
768 intrinsics::maximumf16(self, other)
769 }
770
771 /// Returns the minimum of the two numbers, propagating NaN.
772 ///
773 /// This returns NaN when *either* argument is NaN, as opposed to
774 /// [`f16::min`] which only returns NaN when *both* arguments are NaN.
775 ///
776 /// ```
777 /// #![feature(f16)]
778 /// #![feature(float_minimum_maximum)]
779 /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
780 ///
781 /// let x = 1.0f16;
782 /// let y = 2.0f16;
783 ///
784 /// assert_eq!(x.minimum(y), x);
785 /// assert!(x.minimum(f16::NAN).is_nan());
786 /// # }
787 /// ```
788 ///
789 /// If one of the arguments is NaN, then NaN is returned. Otherwise this returns the lesser
790 /// of the two numbers. For this operation, -0.0 is considered to be less than +0.0.
791 /// Note that this follows the semantics specified in IEEE 754-2019.
792 ///
793 /// Also note that "propagation" of NaNs here doesn't necessarily mean that the bitpattern of a NaN
794 /// operand is conserved; see the [specification of NaN bit patterns](f32#nan-bit-patterns) for more info.
795 #[inline]
796 #[unstable(feature = "f16", issue = "116909")]
797 // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
798 #[must_use = "this returns the result of the comparison, without modifying either input"]
799 pub const fn minimum(self, other: f16) -> f16 {
800 intrinsics::minimumf16(self, other)
801 }
802
803 /// Calculates the midpoint (average) between `self` and `rhs`.
804 ///
805 /// This returns NaN when *either* argument is NaN or if a combination of
806 /// +inf and -inf is provided as arguments.
807 ///
808 /// # Examples
809 ///
810 /// ```
811 /// #![feature(f16)]
812 /// # #[cfg(target_arch = "aarch64")] { // FIXME(f16_f128): rust-lang/rust#123885
813 ///
814 /// assert_eq!(1f16.midpoint(4.0), 2.5);
815 /// assert_eq!((-5.5f16).midpoint(8.0), 1.25);
816 /// # }
817 /// ```
818 #[inline]
819 #[doc(alias = "average")]
820 #[unstable(feature = "f16", issue = "116909")]
821 #[rustc_const_unstable(feature = "f16", issue = "116909")]
822 pub const fn midpoint(self, other: f16) -> f16 {
823 const HI: f16 = f16::MAX / 2.;
824
825 let (a, b) = (self, other);
826 let abs_a = a.abs();
827 let abs_b = b.abs();
828
829 if abs_a <= HI && abs_b <= HI {
830 // Overflow is impossible
831 (a + b) / 2.
832 } else {
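// At least one operand is large: halve each operand first so the addition cannot overflow.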
833 (a / 2.) + (b / 2.)
834 }
835 }
836
837 /// Rounds toward zero and converts to any primitive integer type,
838 /// assuming that the value is finite and fits in that type.
839 ///
840 /// ```
841 /// #![feature(f16)]
842 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
843 ///
844 /// let value = 4.6_f16;
845 /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
846 /// assert_eq!(rounded, 4);
847 ///
848 /// let value = -128.9_f16;
849 /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
850 /// assert_eq!(rounded, i8::MIN);
851 /// # }
852 /// ```
853 ///
854 /// # Safety
855 ///
856 /// The value must:
857 ///
858 /// * Not be `NaN`
859 /// * Not be infinite
860 /// * Be representable in the return type `Int`, after truncating off its fractional part
861 #[inline]
862 #[unstable(feature = "f16", issue = "116909")]
863 #[must_use = "this returns the result of the operation, without modifying the original"]
864 pub unsafe fn to_int_unchecked<Int>(self) -> Int
865 where
866 Self: FloatToInt<Int>,
867 {
868 // SAFETY: the caller must uphold the safety contract for
869 // `FloatToInt::to_int_unchecked`.
870 unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
871 }
872
873 /// Raw transmutation to `u16`.
874 ///
875 /// This is currently identical to `transmute::<f16, u16>(self)` on all platforms.
876 ///
877 /// See [`from_bits`](#method.from_bits) for some discussion of the
878 /// portability of this operation (there are almost no issues).
879 ///
880 /// Note that this function is distinct from `as` casting, which attempts to
881 /// preserve the *numeric* value, and not the bitwise value.
882 ///
883 /// ```
884 /// #![feature(f16)]
885 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
886 ///
887 /// # // FIXME(f16_f128): enable this once const casting works
888 /// # // assert_ne!((1f16).to_bits(), 1f16 as u16); // to_bits() is not casting!
889 /// assert_eq!((12.5f16).to_bits(), 0x4a40);
890 /// # }
891 /// ```
892 #[inline]
893 #[unstable(feature = "f16", issue = "116909")]
894 #[must_use = "this returns the result of the operation, without modifying the original"]
895 #[allow(unnecessary_transmutes)]
896 pub const fn to_bits(self) -> u16 {
897 // SAFETY: `u16` is a plain old datatype so we can always transmute to it.
898 unsafe { mem::transmute(self) }
899 }
900
901 /// Raw transmutation from `u16`.
902 ///
903 /// This is currently identical to `transmute::<u16, f16>(v)` on all platforms.
904 /// It turns out this is incredibly portable, for two reasons:
905 ///
906 /// * Floats and Ints have the same endianness on all supported platforms.
907 /// * IEEE 754 very precisely specifies the bit layout of floats.
908 ///
909 /// However there is one caveat: prior to the 2008 version of IEEE 754, how
910 /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
911 /// (notably x86 and ARM) picked the interpretation that was ultimately
912 /// standardized in 2008, but some didn't (notably MIPS). As a result, all
913 /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
914 ///
915 /// Rather than trying to preserve signaling-ness cross-platform, this
916 /// implementation favors preserving the exact bits. This means that
917 /// any payloads encoded in NaNs will be preserved even if the result of
918 /// this method is sent over the network from an x86 machine to a MIPS one.
919 ///
920 /// If the results of this method are only manipulated by the same
921 /// architecture that produced them, then there is no portability concern.
922 ///
923 /// If the input isn't NaN, then there is no portability concern.
924 ///
925 /// If you don't care about signalingness (very likely), then there is no
926 /// portability concern.
927 ///
928 /// Note that this function is distinct from `as` casting, which attempts to
929 /// preserve the *numeric* value, and not the bitwise value.
930 ///
931 /// ```
932 /// #![feature(f16)]
933 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
934 ///
935 /// let v = f16::from_bits(0x4a40);
936 /// assert_eq!(v, 12.5);
937 /// # }
938 /// ```
939 #[inline]
940 #[must_use]
941 #[unstable(feature = "f16", issue = "116909")]
942 #[allow(unnecessary_transmutes)]
943 pub const fn from_bits(v: u16) -> Self {
944 // It turns out the safety issues with sNaN were overblown! Hooray!
945 // SAFETY: `u16` is a plain old datatype so we can always transmute from it.
946 unsafe { mem::transmute(v) }
947 }
948
949 /// Returns the memory representation of this floating point number as a byte array in
950 /// big-endian (network) byte order.
951 ///
952 /// See [`from_bits`](Self::from_bits) for some discussion of the
953 /// portability of this operation (there are almost no issues).
954 ///
955 /// # Examples
956 ///
957 /// ```
958 /// #![feature(f16)]
959 /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
960 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
961 ///
962 /// let bytes = 12.5f16.to_be_bytes();
963 /// assert_eq!(bytes, [0x4a, 0x40]);
964 /// # }
965 /// ```
966 #[inline]
967 #[unstable(feature = "f16", issue = "116909")]
968 #[must_use = "this returns the result of the operation, without modifying the original"]
969 pub const fn to_be_bytes(self) -> [u8; 2] {
970 self.to_bits().to_be_bytes()
971 }
972
973 /// Returns the memory representation of this floating point number as a byte array in
974 /// little-endian byte order.
975 ///
976 /// See [`from_bits`](Self::from_bits) for some discussion of the
977 /// portability of this operation (there are almost no issues).
978 ///
979 /// # Examples
980 ///
981 /// ```
982 /// #![feature(f16)]
983 /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
984 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
985 ///
986 /// let bytes = 12.5f16.to_le_bytes();
987 /// assert_eq!(bytes, [0x40, 0x4a]);
988 /// # }
989 /// ```
990 #[inline]
991 #[unstable(feature = "f16", issue = "116909")]
992 #[must_use = "this returns the result of the operation, without modifying the original"]
993 pub const fn to_le_bytes(self) -> [u8; 2] {
994 self.to_bits().to_le_bytes()
995 }
996
997 /// Returns the memory representation of this floating point number as a byte array in
998 /// native byte order.
999 ///
1000 /// As the target platform's native endianness is used, portable code
1001 /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
1002 ///
1003 /// [`to_be_bytes`]: f16::to_be_bytes
1004 /// [`to_le_bytes`]: f16::to_le_bytes
1005 ///
1006 /// See [`from_bits`](Self::from_bits) for some discussion of the
1007 /// portability of this operation (there are almost no issues).
1008 ///
1009 /// # Examples
1010 ///
1011 /// ```
1012 /// #![feature(f16)]
1013 /// # // FIXME(f16_f128): LLVM crashes on s390x, llvm/llvm-project#50374
1014 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1015 ///
1016 /// let bytes = 12.5f16.to_ne_bytes();
1017 /// assert_eq!(
1018 /// bytes,
1019 /// if cfg!(target_endian = "big") {
1020 /// [0x4a, 0x40]
1021 /// } else {
1022 /// [0x40, 0x4a]
1023 /// }
1024 /// );
1025 /// # }
1026 /// ```
1027 #[inline]
1028 #[unstable(feature = "f16", issue = "116909")]
1029 #[must_use = "this returns the result of the operation, without modifying the original"]
1030 pub const fn to_ne_bytes(self) -> [u8; 2] {
1031 self.to_bits().to_ne_bytes()
1032 }
1033
1034 /// Creates a floating point value from its representation as a byte array in big endian.
1035 ///
1036 /// See [`from_bits`](Self::from_bits) for some discussion of the
1037 /// portability of this operation (there are almost no issues).
1038 ///
1039 /// # Examples
1040 ///
1041 /// ```
1042 /// #![feature(f16)]
1043 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1044 ///
1045 /// let value = f16::from_be_bytes([0x4a, 0x40]);
1046 /// assert_eq!(value, 12.5);
1047 /// # }
1048 /// ```
1049 #[inline]
1050 #[must_use]
1051 #[unstable(feature = "f16", issue = "116909")]
1052 pub const fn from_be_bytes(bytes: [u8; 2]) -> Self {
1053 Self::from_bits(u16::from_be_bytes(bytes))
1054 }
1055
1056 /// Creates a floating point value from its representation as a byte array in little endian.
1057 ///
1058 /// See [`from_bits`](Self::from_bits) for some discussion of the
1059 /// portability of this operation (there are almost no issues).
1060 ///
1061 /// # Examples
1062 ///
1063 /// ```
1064 /// #![feature(f16)]
1065 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1066 ///
1067 /// let value = f16::from_le_bytes([0x40, 0x4a]);
1068 /// assert_eq!(value, 12.5);
1069 /// # }
1070 /// ```
1071 #[inline]
1072 #[must_use]
1073 #[unstable(feature = "f16", issue = "116909")]
1074 pub const fn from_le_bytes(bytes: [u8; 2]) -> Self {
1075 Self::from_bits(u16::from_le_bytes(bytes))
1076 }
1077
1078 /// Creates a floating point value from its representation as a byte array in native endian.
1079 ///
1080 /// As the target platform's native endianness is used, portable code
1081 /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
1082 /// appropriate instead.
1083 ///
1084 /// [`from_be_bytes`]: f16::from_be_bytes
1085 /// [`from_le_bytes`]: f16::from_le_bytes
1086 ///
1087 /// See [`from_bits`](Self::from_bits) for some discussion of the
1088 /// portability of this operation (there are almost no issues).
1089 ///
1090 /// # Examples
1091 ///
1092 /// ```
1093 /// #![feature(f16)]
1094 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1095 ///
1096 /// let value = f16::from_ne_bytes(if cfg!(target_endian = "big") {
1097 /// [0x4a, 0x40]
1098 /// } else {
1099 /// [0x40, 0x4a]
1100 /// });
1101 /// assert_eq!(value, 12.5);
1102 /// # }
1103 /// ```
1104 #[inline]
1105 #[must_use]
1106 #[unstable(feature = "f16", issue = "116909")]
1107 pub const fn from_ne_bytes(bytes: [u8; 2]) -> Self {
1108 Self::from_bits(u16::from_ne_bytes(bytes))
1109 }
1110
1111 /// Returns the ordering between `self` and `other`.
1112 ///
1113 /// Unlike the standard partial comparison between floating point numbers,
1114 /// this comparison always produces an ordering in accordance to
1115 /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
1116 /// floating point standard. The values are ordered in the following sequence:
1117 ///
1118 /// - negative quiet NaN
1119 /// - negative signaling NaN
1120 /// - negative infinity
1121 /// - negative numbers
1122 /// - negative subnormal numbers
1123 /// - negative zero
1124 /// - positive zero
1125 /// - positive subnormal numbers
1126 /// - positive numbers
1127 /// - positive infinity
1128 /// - positive signaling NaN
1129 /// - positive quiet NaN.
1130 ///
1131 /// The ordering established by this function does not always agree with the
1132 /// [`PartialOrd`] and [`PartialEq`] implementations of `f16`. For example,
1133 /// they consider negative and positive zero equal, while `total_cmp`
1134 /// doesn't.
1135 ///
1136 /// The interpretation of the signaling NaN bit follows the definition in
1137 /// the IEEE 754 standard, which may not match the interpretation by some of
1138 /// the older, non-conformant (e.g. MIPS) hardware implementations.
1139 ///
1140 /// # Example
1141 ///
1142 /// ```
1143 /// #![feature(f16)]
1144 /// # // FIXME(f16_f128): extendhfsf2, truncsfhf2, __gnu_h2f_ieee, __gnu_f2h_ieee missing for many platforms
1145 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1146 ///
1147 /// struct GoodBoy {
1148 /// name: &'static str,
1149 /// weight: f16,
1150 /// }
1151 ///
1152 /// let mut bois = vec![
1153 /// GoodBoy { name: "Pucci", weight: 0.1 },
1154 /// GoodBoy { name: "Woofer", weight: 99.0 },
1155 /// GoodBoy { name: "Yapper", weight: 10.0 },
1156 /// GoodBoy { name: "Chonk", weight: f16::INFINITY },
1157 /// GoodBoy { name: "Abs. Unit", weight: f16::NAN },
1158 /// GoodBoy { name: "Floaty", weight: -5.0 },
1159 /// ];
1160 ///
1161 /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
1162 ///
1163 /// // `f16::NAN` could be positive or negative, which will affect the sort order.
1164 /// if f16::NAN.is_sign_negative() {
1165 /// bois.into_iter().map(|b| b.weight)
1166 /// .zip([f16::NAN, -5.0, 0.1, 10.0, 99.0, f16::INFINITY].iter())
1167 /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1168 /// } else {
1169 /// bois.into_iter().map(|b| b.weight)
1170 /// .zip([-5.0, 0.1, 10.0, 99.0, f16::INFINITY, f16::NAN].iter())
1171 /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1172 /// }
1173 /// # }
1174 /// ```
1175 #[inline]
1176 #[must_use]
1177 #[unstable(feature = "f16", issue = "116909")]
1178 pub fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
1179 let mut left = self.to_bits() as i16;
1180 let mut right = other.to_bits() as i16;
1181
1182 // In case of negatives, flip all the bits except the sign
1183 // to achieve a similar layout as two's complement integers
1184 //
1185 // Why does this work? IEEE 754 floats consist of three fields:
1186 // Sign bit, exponent and mantissa. The set of exponent and mantissa
1187 // fields as a whole have the property that their bitwise order is
1188 // equal to the numeric magnitude where the magnitude is defined.
1189 // The magnitude is not normally defined on NaN values, but
1190 // IEEE 754 totalOrder defines the NaN values also to follow the
1191 // bitwise order. This leads to the order explained in the doc comment.
1192 // However, the representation of magnitude is the same for negative
1193 // and positive numbers – only the sign bit is different.
1194 // To easily compare the floats as signed integers, we need to
1195 // flip the exponent and mantissa bits in case of negative numbers.
1196 // We effectively convert the numbers to "two's complement" form.
1197 //
1198 // To do the flipping, we construct a mask and XOR against it.
1199 // We branchlessly calculate an "all-ones except for the sign bit"
1200 // mask from negative-signed values: right shifting sign-extends
1201 // the integer, so we "fill" the mask with sign bits, and then
1202 // convert to unsigned to push one more zero bit.
1203 // On positive values, the mask is all zeros, so it's a no-op.
1204 left ^= (((left >> 15) as u16) >> 1) as i16;
1205 right ^= (((right >> 15) as u16) >> 1) as i16;
1206
1207 left.cmp(&right)
1208 }
1209
1210 /// Restrict a value to a certain interval unless it is NaN.
1211 ///
1212 /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
1213 /// less than `min`. Otherwise this returns `self`.
1214 ///
1215 /// Note that this function returns NaN if the initial value was NaN as
1216 /// well.
1217 ///
1218 /// # Panics
1219 ///
1220 /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
1221 ///
1222 /// # Examples
1223 ///
1224 /// ```
1225 /// #![feature(f16)]
1226 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1227 ///
1228 /// assert!((-3.0f16).clamp(-2.0, 1.0) == -2.0);
1229 /// assert!((0.0f16).clamp(-2.0, 1.0) == 0.0);
1230 /// assert!((2.0f16).clamp(-2.0, 1.0) == 1.0);
1231 /// assert!((f16::NAN).clamp(-2.0, 1.0).is_nan());
1232 /// # }
1233 /// ```
1234 #[inline]
1235 #[unstable(feature = "f16", issue = "116909")]
1236 #[must_use = "method returns a new number and does not mutate the original value"]
1237 pub const fn clamp(mut self, min: f16, max: f16) -> f16 {
1238 const_assert!(
1239 min <= max,
1240 "min > max, or either was NaN",
1241 "min > max, or either was NaN. min = {min:?}, max = {max:?}",
1242 min: f16,
1243 max: f16,
1244 );
1245
1246 if self < min {
1247 self = min;
1248 }
1249 if self > max {
1250 self = max;
1251 }
1252 self
1253 }
1254
1255 /// Computes the absolute value of `self`.
1256 ///
1257 /// This function always returns the precise result.
1258 ///
1259 /// # Examples
1260 ///
1261 /// ```
1262 /// #![feature(f16)]
1263 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1264 ///
1265 /// let x = 3.5_f16;
1266 /// let y = -3.5_f16;
1267 ///
1268 /// assert_eq!(x.abs(), x);
1269 /// assert_eq!(y.abs(), -y);
1270 ///
1271 /// assert!(f16::NAN.abs().is_nan());
1272 /// # }
1273 /// ```
1274 #[inline]
1275 #[unstable(feature = "f16", issue = "116909")]
1276 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1277 #[must_use = "method returns a new number and does not mutate the original value"]
1278 pub const fn abs(self) -> Self {
1279 // FIXME(f16_f128): replace with `intrinsics::fabsf16` when available
1280 Self::from_bits(self.to_bits() & !(1 << 15))
1281 }
1282
1283 /// Returns a number that represents the sign of `self`.
1284 ///
1285 /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
1286 /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
1287 /// - NaN if the number is NaN
1288 ///
1289 /// # Examples
1290 ///
1291 /// ```
1292 /// #![feature(f16)]
1293 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1294 ///
1295 /// let f = 3.5_f16;
1296 ///
1297 /// assert_eq!(f.signum(), 1.0);
1298 /// assert_eq!(f16::NEG_INFINITY.signum(), -1.0);
1299 ///
1300 /// assert!(f16::NAN.signum().is_nan());
1301 /// # }
1302 /// ```
1303 #[inline]
1304 #[unstable(feature = "f16", issue = "116909")]
1305 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1306 #[must_use = "method returns a new number and does not mutate the original value"]
1307 pub const fn signum(self) -> f16 {
1308 if self.is_nan() { Self::NAN } else { 1.0_f16.copysign(self) }
1309 }
1310
1311 /// Returns a number composed of the magnitude of `self` and the sign of
1312 /// `sign`.
1313 ///
1314 /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`.
1315 /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is
1316 /// returned.
1317 ///
1318 /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note
1319 /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust
1320 /// doesn't guarantee that the bit patterns of NaNs are preserved over arithmetic operations,
1321 /// calling `copysign` with `sign` being a NaN might produce an unexpected or non-portable
1322 /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more
1323 /// info.
1324 ///
1325 /// # Examples
1326 ///
1327 /// ```
1328 /// #![feature(f16)]
1329 /// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
1330 ///
1331 /// let f = 3.5_f16;
1332 ///
1333 /// assert_eq!(f.copysign(0.42), 3.5_f16);
1334 /// assert_eq!(f.copysign(-0.42), -3.5_f16);
1335 /// assert_eq!((-f).copysign(0.42), 3.5_f16);
1336 /// assert_eq!((-f).copysign(-0.42), -3.5_f16);
1337 ///
1338 /// assert!(f16::NAN.copysign(1.0).is_nan());
1339 /// # }
1340 /// ```
1341 #[inline]
1342 #[unstable(feature = "f16", issue = "116909")]
1343 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1344 #[must_use = "method returns a new number and does not mutate the original value"]
1345 pub const fn copysign(self, sign: f16) -> f16 {
1346 // SAFETY: this is actually a safe intrinsic
1347 unsafe { intrinsics::copysignf16(self, sign) }
1348 }
1349
1350 /// Float addition that allows optimizations based on algebraic rules.
1351 ///
1352 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
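///
/// A sketch of a typical use (the feature flags and target gating are assumed to match the
/// other doctests in this file):
///
/// ```
/// #![feature(f16)]
/// #![feature(float_algebraic)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // Reassociation is allowed, so the compiler may vectorize this reduction;
/// // in exchange, the exact rounding of the sum is unspecified.
/// let data = [1.0_f16, 2.0, 3.0, 4.0];
/// let sum = data.iter().fold(0.0_f16, |acc, &x| acc.algebraic_add(x));
/// assert!((sum - 10.0).abs() <= 0.5);
/// # }
/// ```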
1353 #[must_use = "method returns a new number and does not mutate the original value"]
1354 #[unstable(feature = "float_algebraic", issue = "136469")]
1355 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1356 #[inline]
1357 pub const fn algebraic_add(self, rhs: f16) -> f16 {
1358 intrinsics::fadd_algebraic(self, rhs)
1359 }
1360
1361 /// Float subtraction that allows optimizations based on algebraic rules.
1362 ///
1363 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1364 #[must_use = "method returns a new number and does not mutate the original value"]
1365 #[unstable(feature = "float_algebraic", issue = "136469")]
1366 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1367 #[inline]
1368 pub const fn algebraic_sub(self, rhs: f16) -> f16 {
1369 intrinsics::fsub_algebraic(self, rhs)
1370 }
1371
1372 /// Float multiplication that allows optimizations based on algebraic rules.
1373 ///
1374 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
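///
/// A sketch of a dot product built from the algebraic operations (the feature flags and target
/// gating are assumed to match the other doctests in this file):
///
/// ```
/// #![feature(f16)]
/// #![feature(float_algebraic)]
/// # #[cfg(all(target_arch = "x86_64", target_os = "linux"))] {
///
/// // The algebraic ops let the compiler fuse and reorder the multiply-adds;
/// // the result may differ slightly from the strictly-ordered evaluation.
/// let a = [1.0_f16, 2.0, 3.0];
/// let b = [4.0_f16, 5.0, 6.0];
/// let dot = a.iter().zip(&b)
///     .fold(0.0_f16, |acc, (&x, &y)| acc.algebraic_add(x.algebraic_mul(y)));
/// assert!((dot - 32.0).abs() <= 1.0);
/// # }
/// ```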
1375 #[must_use = "method returns a new number and does not mutate the original value"]
1376 #[unstable(feature = "float_algebraic", issue = "136469")]
1377 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1378 #[inline]
1379 pub const fn algebraic_mul(self, rhs: f16) -> f16 {
1380 intrinsics::fmul_algebraic(self, rhs)
1381 }
1382
1383 /// Float division that allows optimizations based on algebraic rules.
1384 ///
1385 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1386 #[must_use = "method returns a new number and does not mutate the original value"]
1387 #[unstable(feature = "float_algebraic", issue = "136469")]
1388 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1389 #[inline]
1390 pub const fn algebraic_div(self, rhs: f16) -> f16 {
1391 intrinsics::fdiv_algebraic(self, rhs)
1392 }
1393
1394 /// Float remainder that allows optimizations based on algebraic rules.
1395 ///
1396 /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
1397 #[must_use = "method returns a new number and does not mutate the original value"]
1398 #[unstable(feature = "float_algebraic", issue = "136469")]
1399 #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
1400 #[inline]
1401 pub const fn algebraic_rem(self, rhs: f16) -> f16 {
1402 intrinsics::frem_algebraic(self, rhs)
1403 }
1404}
1405
1406// Functions in this module fall into `core_float_math`
1407// #[unstable(feature = "core_float_math", issue = "137578")]
1408#[cfg(not(test))]
1409#[doc(test(attr(feature(cfg_target_has_reliable_f16_f128), expect(internal_features))))]
1410impl f16 {
1411 /// Returns the largest integer less than or equal to `self`.
1412 ///
1413 /// This function always returns the precise result.
1414 ///
1415 /// # Examples
1416 ///
1417 /// ```
1418 /// #![feature(f16)]
1419 /// # #[cfg(not(miri))]
1420 /// # #[cfg(target_has_reliable_f16_math)] {
1421 ///
1422 /// let f = 3.7_f16;
1423 /// let g = 3.0_f16;
1424 /// let h = -3.7_f16;
1425 ///
1426 /// assert_eq!(f.floor(), 3.0);
1427 /// assert_eq!(g.floor(), 3.0);
1428 /// assert_eq!(h.floor(), -4.0);
1429 /// # }
1430 /// ```
1431 #[inline]
1432 #[rustc_allow_incoherent_impl]
1433 #[unstable(feature = "f16", issue = "116909")]
1434 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1435 #[must_use = "method returns a new number and does not mutate the original value"]
1436 pub const fn floor(self) -> f16 {
1437 // SAFETY: intrinsic with no preconditions
1438 unsafe { intrinsics::floorf16(self) }
1439 }
1440
1441 /// Returns the smallest integer greater than or equal to `self`.
1442 ///
1443 /// This function always returns the precise result.
1444 ///
1445 /// # Examples
1446 ///
1447 /// ```
1448 /// #![feature(f16)]
1449 /// # #[cfg(not(miri))]
1450 /// # #[cfg(target_has_reliable_f16_math)] {
1451 ///
1452 /// let f = 3.01_f16;
1453 /// let g = 4.0_f16;
1454 ///
1455 /// assert_eq!(f.ceil(), 4.0);
1456 /// assert_eq!(g.ceil(), 4.0);
1457 /// # }
1458 /// ```
1459 #[inline]
1460 #[doc(alias = "ceiling")]
1461 #[rustc_allow_incoherent_impl]
1462 #[unstable(feature = "f16", issue = "116909")]
1463 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1464 #[must_use = "method returns a new number and does not mutate the original value"]
1465 pub const fn ceil(self) -> f16 {
1466 // SAFETY: intrinsic with no preconditions
1467 unsafe { intrinsics::ceilf16(self) }
1468 }
1469
1470 /// Returns the nearest integer to `self`. If a value is half-way between two
1471 /// integers, round away from `0.0`.
1472 ///
1473 /// This function always returns the precise result.
1474 ///
1475 /// # Examples
1476 ///
1477 /// ```
1478 /// #![feature(f16)]
1479 /// # #[cfg(not(miri))]
1480 /// # #[cfg(target_has_reliable_f16_math)] {
1481 ///
1482 /// let f = 3.3_f16;
1483 /// let g = -3.3_f16;
1484 /// let h = -3.7_f16;
1485 /// let i = 3.5_f16;
1486 /// let j = 4.5_f16;
1487 ///
1488 /// assert_eq!(f.round(), 3.0);
1489 /// assert_eq!(g.round(), -3.0);
1490 /// assert_eq!(h.round(), -4.0);
1491 /// assert_eq!(i.round(), 4.0);
1492 /// assert_eq!(j.round(), 5.0);
1493 /// # }
1494 /// ```
1495 #[inline]
1496 #[rustc_allow_incoherent_impl]
1497 #[unstable(feature = "f16", issue = "116909")]
1498 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1499 #[must_use = "method returns a new number and does not mutate the original value"]
1500 pub const fn round(self) -> f16 {
1501 // SAFETY: intrinsic with no preconditions
1502 unsafe { intrinsics::roundf16(self) }
1503 }
1504
1505 /// Returns the nearest integer to a number. Rounds half-way cases to the number
1506 /// with an even least significant digit.
1507 ///
1508 /// This function always returns the precise result.
1509 ///
1510 /// # Examples
1511 ///
1512 /// ```
1513 /// #![feature(f16)]
1514 /// # #[cfg(not(miri))]
1515 /// # #[cfg(target_has_reliable_f16_math)] {
1516 ///
1517 /// let f = 3.3_f16;
1518 /// let g = -3.3_f16;
1519 /// let h = 3.5_f16;
1520 /// let i = 4.5_f16;
1521 ///
1522 /// assert_eq!(f.round_ties_even(), 3.0);
1523 /// assert_eq!(g.round_ties_even(), -3.0);
1524 /// assert_eq!(h.round_ties_even(), 4.0);
1525 /// assert_eq!(i.round_ties_even(), 4.0);
1526 /// # }
1527 /// ```
1528 #[inline]
1529 #[rustc_allow_incoherent_impl]
1530 #[unstable(feature = "f16", issue = "116909")]
1531 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1532 #[must_use = "method returns a new number and does not mutate the original value"]
1533 pub const fn round_ties_even(self) -> f16 {
1534 intrinsics::round_ties_even_f16(self)
1535 }
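    // Illustrative sketch (not part of the upstream source): the difference from
    // `round` only shows up at exact half-way cases, where `round` goes away from
    // zero and `round_ties_even` goes to the even neighbour, e.g.
    //
    //     assert_eq!(2.5_f16.round(), 3.0);
    //     assert_eq!(2.5_f16.round_ties_even(), 2.0);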
1536
1537 /// Returns the integer part of `self`.
1538 /// This means that non-integer numbers are always truncated towards zero.
1539 ///
1540 /// This function always returns the precise result.
1541 ///
1542 /// # Examples
1543 ///
1544 /// ```
1545 /// #![feature(f16)]
1546 /// # #[cfg(not(miri))]
1547 /// # #[cfg(target_has_reliable_f16_math)] {
1548 ///
1549 /// let f = 3.7_f16;
1550 /// let g = 3.0_f16;
1551 /// let h = -3.7_f16;
1552 ///
1553 /// assert_eq!(f.trunc(), 3.0);
1554 /// assert_eq!(g.trunc(), 3.0);
1555 /// assert_eq!(h.trunc(), -3.0);
1556 /// # }
1557 /// ```
1558 #[inline]
1559 #[doc(alias = "truncate")]
1560 #[rustc_allow_incoherent_impl]
1561 #[unstable(feature = "f16", issue = "116909")]
1562 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1563 #[must_use = "method returns a new number and does not mutate the original value"]
1564 pub const fn trunc(self) -> f16 {
1565 // SAFETY: intrinsic with no preconditions
1566 unsafe { intrinsics::truncf16(self) }
1567 }
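    // Illustrative sketch (not part of the upstream source): `trunc` rounds toward
    // zero while `floor` rounds toward negative infinity, so the two differ exactly
    // for negative non-integers, e.g.
    //
    //     assert_eq!((-3.7_f16).trunc(), -3.0);
    //     assert_eq!((-3.7_f16).floor(), -4.0);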
1568
1569 /// Returns the fractional part of `self`.
1570 ///
1571 /// This function always returns the precise result.
1572 ///
1573 /// # Examples
1574 ///
1575 /// ```
1576 /// #![feature(f16)]
1577 /// # #[cfg(not(miri))]
1578 /// # #[cfg(target_has_reliable_f16_math)] {
1579 ///
1580 /// let x = 3.6_f16;
1581 /// let y = -3.6_f16;
1582 /// let abs_difference_x = (x.fract() - 0.6).abs();
1583 /// let abs_difference_y = (y.fract() - (-0.6)).abs();
1584 ///
1585 /// assert!(abs_difference_x <= f16::EPSILON);
1586 /// assert!(abs_difference_y <= f16::EPSILON);
1587 /// # }
1588 /// ```
1589 #[inline]
1590 #[rustc_allow_incoherent_impl]
1591 #[unstable(feature = "f16", issue = "116909")]
1592 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1593 #[must_use = "method returns a new number and does not mutate the original value"]
1594 pub const fn fract(self) -> f16 {
1595 self - self.trunc()
1596 }
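    // Illustrative sketch (not part of the upstream source): since `fract` is
    // defined as `self - self.trunc()`, the two methods recompose any finite value
    // exactly, e.g.
    //
    //     let x = 3.6_f16;
    //     assert_eq!(x.trunc() + x.fract(), x);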
1597
1598 /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
1599 /// error, yielding a more accurate result than an unfused multiply-add.
1600 ///
1601 /// Using `mul_add` *may* be more performant than an unfused multiply-add if
1602 /// the target architecture has a dedicated `fma` CPU instruction. However,
1603 /// this is not always true, and will be heavily dependent on designing
1604 /// algorithms with specific target hardware in mind.
1605 ///
1606 /// # Precision
1607 ///
1608 /// The result of this operation is guaranteed to be the rounded
1609 /// infinite-precision result. It is specified by IEEE 754 as
1610 /// `fusedMultiplyAdd` and guaranteed not to change.
1611 ///
1612 /// # Examples
1613 ///
1614 /// ```
1615 /// #![feature(f16)]
1616 /// # #[cfg(not(miri))]
1617 /// # #[cfg(target_has_reliable_f16_math)] {
1618 ///
1619 /// let m = 10.0_f16;
1620 /// let x = 4.0_f16;
1621 /// let b = 60.0_f16;
1622 ///
1623 /// assert_eq!(m.mul_add(x, b), 100.0);
1624 /// assert_eq!(m * x + b, 100.0);
1625 ///
1626 /// let one_plus_eps = 1.0_f16 + f16::EPSILON;
1627 /// let one_minus_eps = 1.0_f16 - f16::EPSILON;
1628 /// let minus_one = -1.0_f16;
1629 ///
1630 /// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps.
1631 /// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f16::EPSILON * f16::EPSILON);
1632 /// // Different rounding with the non-fused multiply and add.
1633 /// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0);
1634 /// # }
1635 /// ```
1636 #[inline]
1637 #[rustc_allow_incoherent_impl]
1638 #[unstable(feature = "f16", issue = "116909")]
1639 #[doc(alias = "fmaf16", alias = "fusedMultiplyAdd")]
1640 #[must_use = "method returns a new number and does not mutate the original value"]
1641 pub fn mul_add(self, a: f16, b: f16) -> f16 {
1642 // SAFETY: intrinsic with no preconditions
1643 unsafe { intrinsics::fmaf16(self, a, b) }
1644 }
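    // Illustrative sketch (not part of the upstream source): because the fused
    // operation rounds only once, it can recover the rounding error of a plain
    // product (the classic "two-product" error-free transformation), e.g.
    //
    //     let (a, b) = (1.0_f16 + f16::EPSILON, 1.0_f16 - f16::EPSILON);
    //     let p = a * b;              // rounded product, here exactly 1.0
    //     let err = a.mul_add(b, -p); // rounding error of `p`
    //     assert_eq!(err, -f16::EPSILON * f16::EPSILON);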
1645
1646 /// Calculates Euclidean division, the matching method for `rem_euclid`.
1647 ///
1648 /// This computes the integer `n` such that
1649 /// `self = n * rhs + self.rem_euclid(rhs)`.
1650 /// In other words, the result is `self / rhs` rounded to the integer `n`
1651 /// such that `self >= n * rhs`.
1652 ///
1653 /// # Precision
1654 ///
1655 /// The result of this operation is guaranteed to be the rounded
1656 /// infinite-precision result.
1657 ///
1658 /// # Examples
1659 ///
1660 /// ```
1661 /// #![feature(f16)]
1662 /// # #[cfg(not(miri))]
1663 /// # #[cfg(target_has_reliable_f16_math)] {
1664 ///
1665 /// let a: f16 = 7.0;
1666 /// let b = 4.0;
1667 /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 >= 4.0 * 1.0
1668 /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
1669 /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
1670 /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
1671 /// # }
1672 /// ```
1673 #[inline]
1674 #[rustc_allow_incoherent_impl]
1675 #[unstable(feature = "f16", issue = "116909")]
1676 #[must_use = "method returns a new number and does not mutate the original value"]
1677 pub fn div_euclid(self, rhs: f16) -> f16 {
1678 let q = (self / rhs).trunc();
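        // `q` is the quotient rounded toward zero. If the `%` remainder is
        // negative, adjust `q` by one in the direction that brings `n * rhs`
        // below `self`, so the Euclidean remainder ends up non-negative.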
1679 if self % rhs < 0.0 {
1680 return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
1681 }
1682 q
1683 }
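    // Illustrative sketch (not part of the upstream source): together with
    // `rem_euclid`, this recomposes the dividend, matching the identity quoted in
    // the docs above, e.g.
    //
    //     let (a, b) = (-7.0_f16, 4.0_f16);
    //     let (q, r) = (a.div_euclid(b), a.rem_euclid(b));
    //     assert_eq!((q, r), (-2.0, 1.0));
    //     assert_eq!(q * b + r, a); // -2.0 * 4.0 + 1.0 == -7.0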
1684
1685 /// Calculates the least nonnegative remainder of `self (mod rhs)`.
1686 ///
1687 /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
1688 /// most cases. However, due to floating-point round-off error it can
1689 /// result in `r == rhs.abs()`, violating the mathematical definition, if
1690 /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
1691 /// This result is not an element of the function's codomain, but it is the
1692 /// closest floating point number in the real numbers and thus fulfills the
1693 /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
1694 /// approximately.
1695 ///
1696 /// # Precision
1697 ///
1698 /// The result of this operation is guaranteed to be the rounded
1699 /// infinite-precision result.
1700 ///
1701 /// # Examples
1702 ///
1703 /// ```
1704 /// #![feature(f16)]
1705 /// # #[cfg(not(miri))]
1706 /// # #[cfg(target_has_reliable_f16_math)] {
1707 ///
1708 /// let a: f16 = 7.0;
1709 /// let b = 4.0;
1710 /// assert_eq!(a.rem_euclid(b), 3.0);
1711 /// assert_eq!((-a).rem_euclid(b), 1.0);
1712 /// assert_eq!(a.rem_euclid(-b), 3.0);
1713 /// assert_eq!((-a).rem_euclid(-b), 1.0);
1714 /// // limitation due to round-off error
1715 /// assert!((-f16::EPSILON).rem_euclid(3.0) != 0.0);
1716 /// # }
1717 /// ```
1718 #[inline]
1719 #[rustc_allow_incoherent_impl]
1720 #[doc(alias = "modulo", alias = "mod")]
1721 #[unstable(feature = "f16", issue = "116909")]
1722 #[must_use = "method returns a new number and does not mutate the original value"]
1723 pub fn rem_euclid(self, rhs: f16) -> f16 {
1724 let r = self % rhs;
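        // `%` keeps the sign of `self`; a negative remainder is shifted by
        // `rhs.abs()` into the `[0, rhs.abs())` range.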
1725 if r < 0.0 { r + rhs.abs() } else { r }
1726 }
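    // Illustrative sketch (not part of the upstream source): unlike the `%`
    // operator, which keeps the sign of the dividend, `rem_euclid` normally
    // returns a non-negative value (see the round-off caveat above), e.g.
    //
    //     assert_eq!(-5.0_f16 % 3.0, -2.0);
    //     assert_eq!((-5.0_f16).rem_euclid(3.0), 1.0);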
1727
1728 /// Raises a number to an integer power.
1729 ///
1730 /// Using this function is generally faster than using `powf`.
1731 /// It might have a different sequence of rounding operations than `powf`,
1732 /// so the results are not guaranteed to agree.
1733 ///
1734 /// # Unspecified precision
1735 ///
1736 /// The precision of this function is non-deterministic. This means it varies by platform,
1737 /// Rust version, and can even differ within the same execution from one invocation to the next.
1738 ///
1739 /// # Examples
1740 ///
1741 /// ```
1742 /// #![feature(f16)]
1743 /// # #[cfg(not(miri))]
1744 /// # #[cfg(target_has_reliable_f16_math)] {
1745 ///
1746 /// let x = 2.0_f16;
1747 /// let abs_difference = (x.powi(2) - (x * x)).abs();
1748 /// assert!(abs_difference <= f16::EPSILON);
1749 ///
1750 /// assert_eq!(f16::powi(f16::NAN, 0), 1.0);
1751 /// # }
1752 /// ```
1753 #[inline]
1754 #[rustc_allow_incoherent_impl]
1755 #[unstable(feature = "f16", issue = "116909")]
1756 #[must_use = "method returns a new number and does not mutate the original value"]
1757 pub fn powi(self, n: i32) -> f16 {
1758 // SAFETY: intrinsic with no preconditions
1759 unsafe { intrinsics::powif16(self, n) }
1760 }
1761
1762 /// Returns the square root of a number.
1763 ///
1764 /// Returns NaN if `self` is a negative number other than `-0.0`.
1765 ///
1766 /// # Precision
1767 ///
1768 /// The result of this operation is guaranteed to be the rounded
1769 /// infinite-precision result. It is specified by IEEE 754 as `squareRoot`
1770 /// and guaranteed not to change.
1771 ///
1772 /// # Examples
1773 ///
1774 /// ```
1775 /// #![feature(f16)]
1776 /// # #[cfg(not(miri))]
1777 /// # #[cfg(target_has_reliable_f16_math)] {
1778 ///
1779 /// let positive = 4.0_f16;
1780 /// let negative = -4.0_f16;
1781 /// let negative_zero = -0.0_f16;
1782 ///
1783 /// assert_eq!(positive.sqrt(), 2.0);
1784 /// assert!(negative.sqrt().is_nan());
1785 /// assert!(negative_zero.sqrt() == negative_zero);
1786 /// # }
1787 /// ```
1788 #[inline]
1789 #[doc(alias = "squareRoot")]
1790 #[rustc_allow_incoherent_impl]
1791 #[unstable(feature = "f16", issue = "116909")]
1792 #[must_use = "method returns a new number and does not mutate the original value"]
1793 pub fn sqrt(self) -> f16 {
1794 // SAFETY: intrinsic with no preconditions
1795 unsafe { intrinsics::sqrtf16(self) }
1796 }
1797
1798 /// Returns the cube root of a number.
1799 ///
1800 /// # Unspecified precision
1801 ///
1802 /// The precision of this function is non-deterministic. This means it varies by platform,
1803 /// Rust version, and can even differ within the same execution from one invocation to the next.
1804 ///
1805 /// This function currently corresponds to the `cbrtf` function from libc on Unix
1806 /// and Windows. Note that this might change in the future.
1807 ///
1808 /// # Examples
1809 ///
1810 /// ```
1811 /// #![feature(f16)]
1812 /// # #[cfg(not(miri))]
1813 /// # #[cfg(target_has_reliable_f16_math)] {
1814 ///
1815 /// let x = 8.0f16;
1816 ///
1817 /// // x^(1/3) - 2 == 0
1818 /// let abs_difference = (x.cbrt() - 2.0).abs();
1819 ///
1820 /// assert!(abs_difference <= f16::EPSILON);
1821 /// # }
1822 /// ```
1823 #[inline]
1824 #[rustc_allow_incoherent_impl]
1825 #[unstable(feature = "f16", issue = "116909")]
1826 #[must_use = "method returns a new number and does not mutate the original value"]
1827 pub fn cbrt(self) -> f16 {
1828 libm::cbrtf(self as f32) as f16
1829 }
1830}