// core/num/f16.rs
1//! Constants for the `f16` half-precision floating point type.
2//!
3//! *[See also the `f16` primitive type][f16].*
4//!
5//! Mathematically significant numbers are provided in the `consts` sub-module.
6//!
7//! For the constants defined directly in this module
8//! (as distinct from those defined in the `consts` sub-module),
9//! new code should instead use the associated constants
10//! defined directly on the `f16` type.
11
12#![unstable(feature = "f16", issue = "116909")]
13
14use crate::convert::FloatToInt;
15use crate::num::FpCategory;
16#[cfg(not(test))]
17use crate::num::imp::libm;
18use crate::panic::const_assert;
19use crate::{intrinsics, mem};
20
/// Basic mathematical constants.
#[unstable(feature = "f16", issue = "116909")]
#[rustc_diagnostic_item = "f16_consts_mod"]
pub mod consts {
    // FIXME: replace with mathematical constants from cmath.
    //
    // Each literal below carries far more digits than `f16` can hold; the
    // compiler rounds every one to the nearest representable `f16` value.

    /// Archimedes' constant (π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const PI: f16 = 3.14159265358979323846264338327950288_f16;

    /// The full circle constant (τ)
    ///
    /// Equal to 2π.
    #[unstable(feature = "f16", issue = "116909")]
    pub const TAU: f16 = 6.28318530717958647692528676655900577_f16;

    /// The golden ratio (φ)
    ///
    /// Equal to (1 + sqrt(5))/2.
    #[unstable(feature = "f16", issue = "116909")]
    pub const GOLDEN_RATIO: f16 = 1.618033988749894848204586834365638118_f16;

    /// The Euler-Mascheroni constant (γ)
    ///
    /// The limit of (1 + 1/2 + … + 1/n) − ln(n) as n → ∞.
    #[unstable(feature = "f16", issue = "116909")]
    pub const EULER_GAMMA: f16 = 0.577215664901532860606512090082402431_f16;

    /// π/2
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_2: f16 = 1.57079632679489661923132169163975144_f16;

    /// π/3
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_3: f16 = 1.04719755119659774615421446109316763_f16;

    /// π/4
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_4: f16 = 0.785398163397448309615660845819875721_f16;

    /// π/6
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_6: f16 = 0.52359877559829887307710723054658381_f16;

    /// π/8
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_PI_8: f16 = 0.39269908169872415480783042290993786_f16;

    /// 1/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_PI: f16 = 0.318309886183790671537767526745028724_f16;

    /// 1/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_PI: f16 = 0.564189583547756286948079451560772586_f16;

    /// 1/sqrt(2π)
    #[doc(alias = "FRAC_1_SQRT_TAU")]
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_2PI: f16 = 0.398942280401432677939946059934381868_f16;

    /// 2/π
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_PI: f16 = 0.636619772367581343075535053490057448_f16;

    /// 2/sqrt(π)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_2_SQRT_PI: f16 = 1.12837916709551257389615890312154517_f16;

    /// sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const SQRT_2: f16 = 1.41421356237309504880168872420969808_f16;

    /// 1/sqrt(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_SQRT_2: f16 = 0.707106781186547524400844362104849039_f16;

    /// sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const SQRT_3: f16 = 1.732050807568877293527446341505872367_f16;

    /// 1/sqrt(3)
    #[unstable(feature = "f16", issue = "116909")]
    // Also, #[unstable(feature = "more_float_constants", issue = "146939")]
    pub const FRAC_1_SQRT_3: f16 = 0.577350269189625764509148780501957456_f16;

    /// sqrt(5)
    #[unstable(feature = "more_float_constants", issue = "146939")]
    // Also, #[unstable(feature = "f16", issue = "116909")]
    pub const SQRT_5: f16 = 2.23606797749978969640917366873127623_f16;

    /// 1/sqrt(5)
    #[unstable(feature = "more_float_constants", issue = "146939")]
    // Also, #[unstable(feature = "f16", issue = "116909")]
    pub const FRAC_1_SQRT_5: f16 = 0.44721359549995793928183473374625524_f16;

    /// Euler's number (e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const E: f16 = 2.71828182845904523536028747135266250_f16;

    /// log<sub>2</sub>(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_10: f16 = 3.32192809488736234787031942948939018_f16;

    /// log<sub>2</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG2_E: f16 = 1.44269504088896340735992468100189214_f16;

    /// log<sub>10</sub>(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_2: f16 = 0.301029995663981195213738894724493027_f16;

    /// log<sub>10</sub>(e)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LOG10_E: f16 = 0.434294481903251827651128918916605082_f16;

    /// ln(2)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_2: f16 = 0.693147180559945309417232121458176568_f16;

    /// ln(10)
    #[unstable(feature = "f16", issue = "116909")]
    pub const LN_10: f16 = 2.30258509299404568401799145468436421_f16;
}
144
145#[doc(test(attr(
146 feature(cfg_target_has_reliable_f16_f128),
147 allow(internal_features, unused_features)
148)))]
149impl f16 {
    /// The radix or base of the internal representation of `f16`.
    ///
    /// `f16` is a binary (base-2) IEEE 754 format.
    #[unstable(feature = "f16", issue = "116909")]
    pub const RADIX: u32 = 2;

    /// The size of this float type in bits.
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_bits_const", issue = "151073")]
    pub const BITS: u32 = 16;

    /// Number of significant digits in base 2.
    ///
    /// Note that the size of the mantissa in the bitwise representation is one
    /// smaller than this since the leading 1 is not stored explicitly.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MANTISSA_DIGITS: u32 = 11;

    /// Approximate number of significant digits in base 10.
    ///
    /// This is the maximum <i>x</i> such that any decimal number with <i>x</i>
    /// significant digits can be converted to `f16` and back without loss.
    ///
    /// Equal to floor(log<sub>10</sub> 2<sup>[`MANTISSA_DIGITS`] − 1</sup>).
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable(feature = "f16", issue = "116909")]
    pub const DIGITS: u32 = 3;

    /// [Machine epsilon] value for `f16`.
    ///
    /// This is the difference between `1.0` and the next larger representable number.
    ///
    /// Equal to 2<sup>1 − [`MANTISSA_DIGITS`]</sup>, i.e. 2<sup>−10</sup>.
    ///
    /// [Machine epsilon]: https://en.wikipedia.org/wiki/Machine_epsilon
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_diagnostic_item = "f16_epsilon"]
    pub const EPSILON: f16 = 9.7656e-4_f16;

    /// Smallest finite `f16` value.
    ///
    /// Equal to −[`MAX`] (−65504).
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN: f16 = -6.5504e+4_f16;
    /// Smallest positive normal `f16` value.
    ///
    /// Equal to 2<sup>[`MIN_EXP`] − 1</sup>.
    ///
    /// [`MIN_EXP`]: f16::MIN_EXP
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_POSITIVE: f16 = 6.1035e-5_f16;
    /// Largest finite `f16` value (65504).
    ///
    /// Equal to
    /// (1 − 2<sup>−[`MANTISSA_DIGITS`]</sup>) 2<sup>[`MAX_EXP`]</sup>.
    ///
    /// [`MANTISSA_DIGITS`]: f16::MANTISSA_DIGITS
    /// [`MAX_EXP`]: f16::MAX_EXP
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX: f16 = 6.5504e+4_f16;

    /// One greater than the minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact minimum possible *normal* power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all normal numbers representable by this type are
    /// greater than or equal to 0.5 × 2<sup><i>MIN_EXP</i></sup>.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_EXP: i32 = -13;
    /// One greater than the maximum possible power of 2 exponent
    /// for a significand bounded by 1 ≤ x < 2 (i.e. the IEEE definition).
    ///
    /// This corresponds to the exact maximum possible power of 2 exponent
    /// for a significand bounded by 0.5 ≤ x < 1 (i.e. the C definition).
    /// In other words, all numbers representable by this type are
    /// strictly less than 2<sup><i>MAX_EXP</i></sup>.
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_EXP: i32 = 16;

    /// Minimum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to ceil(log<sub>10</sub> [`MIN_POSITIVE`]).
    ///
    /// [`MIN_POSITIVE`]: f16::MIN_POSITIVE
    #[unstable(feature = "f16", issue = "116909")]
    pub const MIN_10_EXP: i32 = -4;
    /// Maximum <i>x</i> for which 10<sup><i>x</i></sup> is normal.
    ///
    /// Equal to floor(log<sub>10</sub> [`MAX`]).
    ///
    /// [`MAX`]: f16::MAX
    #[unstable(feature = "f16", issue = "116909")]
    pub const MAX_10_EXP: i32 = 4;
246
    /// Not a Number (NaN).
    ///
    /// Note that IEEE 754 doesn't define just a single NaN value; a plethora of bit patterns are
    /// considered to be NaN. Furthermore, the standard makes a difference between a "signaling" and
    /// a "quiet" NaN, and allows inspecting its "payload" (the unspecified bits in the bit pattern)
    /// and its sign. See the [specification of NaN bit patterns](f32#nan-bit-patterns) for more
    /// info.
    ///
    /// This constant is guaranteed to be a quiet NaN (on targets that follow the Rust assumptions
    /// that the quiet/signaling bit being set to 1 indicates a quiet NaN). Beyond that, nothing is
    /// guaranteed about the specific bit pattern chosen here: both payload and sign are arbitrary.
    /// The concrete bit pattern may change across Rust versions and target platforms.
    // `eq_op` is allowed because the identical `0.0 / 0.0` operands are
    // intentional: that quotient evaluates to a quiet NaN at compile time.
    #[allow(clippy::eq_op)]
    #[rustc_diagnostic_item = "f16_nan"]
    #[unstable(feature = "f16", issue = "116909")]
    pub const NAN: f16 = 0.0_f16 / 0.0_f16;

    /// Infinity (∞).
    // A finite positive value divided by zero yields +∞ in IEEE 754 arithmetic.
    #[unstable(feature = "f16", issue = "116909")]
    pub const INFINITY: f16 = 1.0_f16 / 0.0_f16;

    /// Negative infinity (−∞).
    #[unstable(feature = "f16", issue = "116909")]
    pub const NEG_INFINITY: f16 = -1.0_f16 / 0.0_f16;
271
    /// Maximum integer that can be represented exactly in an [`f16`] value,
    /// with no other integer converting to the same floating point value.
    ///
    /// For an integer `x` which satisfies `MIN_EXACT_INTEGER <= x <= MAX_EXACT_INTEGER`,
    /// there is a "one-to-one" mapping between [`i16`] and [`f16`] values.
    /// `MAX_EXACT_INTEGER + 1` also converts losslessly to [`f16`] and back to
    /// [`i16`], but `MAX_EXACT_INTEGER + 2` converts to the same [`f16`] value
    /// (and back to `MAX_EXACT_INTEGER + 1` as an integer) so there is not a
    /// "one-to-one" mapping.
    ///
    /// [`MAX_EXACT_INTEGER`]: f16::MAX_EXACT_INTEGER
    /// [`MIN_EXACT_INTEGER`]: f16::MIN_EXACT_INTEGER
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_exact_integer_constants)]
    /// # // FIXME(#152635): Float rounding on `i586` does not adhere to IEEE 754
    /// # #[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))] {
    /// # #[cfg(target_has_reliable_f16)] {
    /// let max_exact_int = f16::MAX_EXACT_INTEGER;
    /// assert_eq!(max_exact_int, max_exact_int as f16 as i16);
    /// assert_eq!(max_exact_int + 1, (max_exact_int + 1) as f16 as i16);
    /// assert_ne!(max_exact_int + 2, (max_exact_int + 2) as f16 as i16);
    ///
    /// // Beyond `f16::MAX_EXACT_INTEGER`, multiple integers can map to one float value
    /// assert_eq!((max_exact_int + 1) as f16, (max_exact_int + 2) as f16);
    /// # }}
    /// ```
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_exact_integer_constants", issue = "152466")]
    // 2^MANTISSA_DIGITS − 1 = 2047: the largest integer whose magnitude still
    // fits entirely in the 11-bit significand, so no rounding occurs.
    pub const MAX_EXACT_INTEGER: i16 = (1 << Self::MANTISSA_DIGITS) - 1;

    /// Minimum integer that can be represented exactly in an [`f16`] value,
    /// with no other integer converting to the same floating point value.
    ///
    /// For an integer `x` which satisfies `MIN_EXACT_INTEGER <= x <= MAX_EXACT_INTEGER`,
    /// there is a "one-to-one" mapping between [`i16`] and [`f16`] values.
    /// `MAX_EXACT_INTEGER + 1` also converts losslessly to [`f16`] and back to
    /// [`i16`], but `MAX_EXACT_INTEGER + 2` converts to the same [`f16`] value
    /// (and back to `MAX_EXACT_INTEGER + 1` as an integer) so there is not a
    /// "one-to-one" mapping.
    ///
    /// This constant is equivalent to `-MAX_EXACT_INTEGER`.
    ///
    /// [`MAX_EXACT_INTEGER`]: f16::MAX_EXACT_INTEGER
    /// [`MIN_EXACT_INTEGER`]: f16::MIN_EXACT_INTEGER
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_exact_integer_constants)]
    /// # // FIXME(#152635): Float rounding on `i586` does not adhere to IEEE 754
    /// # #[cfg(not(all(target_arch = "x86", not(target_feature = "sse"))))] {
    /// # #[cfg(target_has_reliable_f16)] {
    /// let min_exact_int = f16::MIN_EXACT_INTEGER;
    /// assert_eq!(min_exact_int, min_exact_int as f16 as i16);
    /// assert_eq!(min_exact_int - 1, (min_exact_int - 1) as f16 as i16);
    /// assert_ne!(min_exact_int - 2, (min_exact_int - 2) as f16 as i16);
    ///
    /// // Below `f16::MIN_EXACT_INTEGER`, multiple integers can map to one float value
    /// assert_eq!((min_exact_int - 1) as f16, (min_exact_int - 2) as f16);
    /// # }}
    /// ```
    // #[unstable(feature = "f16", issue = "116909")]
    #[unstable(feature = "float_exact_integer_constants", issue = "152466")]
    // The exact-integer range is symmetric around zero (sign-magnitude format).
    pub const MIN_EXACT_INTEGER: i16 = -Self::MAX_EXACT_INTEGER;
335
    // Bit layout of `f16`, most to least significant: 1 sign bit, 5 exponent
    // bits, 10 mantissa bits — exactly as the three masks below partition it.

    /// Sign bit
    pub(crate) const SIGN_MASK: u16 = 0x8000;

    /// Exponent mask
    pub(crate) const EXP_MASK: u16 = 0x7c00;

    /// Mantissa mask
    pub(crate) const MAN_MASK: u16 = 0x03ff;

    /// Minimum representable positive value (min subnormal)
    // This is a bit pattern (for `from_bits`), not an `f16` value.
    const TINY_BITS: u16 = 0x1;

    /// Minimum representable negative value (min negative subnormal)
    // Bit pattern of the smallest-magnitude subnormal with the sign bit set.
    const NEG_TINY_BITS: u16 = Self::TINY_BITS | Self::SIGN_MASK;
350
    /// Returns `true` if this value is NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let nan = f16::NAN;
    /// let f = 7.0_f16;
    ///
    /// assert!(nan.is_nan());
    /// assert!(!f.is_nan());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[allow(clippy::eq_op)] // > if you intended to check if the operand is NaN, use `.is_nan()` instead :)
    pub const fn is_nan(self) -> bool {
        // IEEE 754: NaN is the only value that compares unequal to itself, so
        // this self-comparison is precisely the NaN test.
        self != self
    }
371
372 /// Returns `true` if this value is positive infinity or negative infinity, and
373 /// `false` otherwise.
374 ///
375 /// ```
376 /// #![feature(f16)]
377 /// # #[cfg(target_has_reliable_f16)] {
378 ///
379 /// let f = 7.0f16;
380 /// let inf = f16::INFINITY;
381 /// let neg_inf = f16::NEG_INFINITY;
382 /// let nan = f16::NAN;
383 ///
384 /// assert!(!f.is_infinite());
385 /// assert!(!nan.is_infinite());
386 ///
387 /// assert!(inf.is_infinite());
388 /// assert!(neg_inf.is_infinite());
389 /// # }
390 /// ```
391 #[inline]
392 #[must_use]
393 #[unstable(feature = "f16", issue = "116909")]
394 pub const fn is_infinite(self) -> bool {
395 (self == f16::INFINITY) | (self == f16::NEG_INFINITY)
396 }
397
    /// Returns `true` if this number is neither infinite nor NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 7.0f16;
    /// let inf: f16 = f16::INFINITY;
    /// let neg_inf: f16 = f16::NEG_INFINITY;
    /// let nan: f16 = f16::NAN;
    ///
    /// assert!(f.is_finite());
    ///
    /// assert!(!nan.is_finite());
    /// assert!(!inf.is_finite());
    /// assert!(!neg_inf.is_finite());
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    pub const fn is_finite(self) -> bool {
        // `abs` clears the sign bit, so one comparison rejects both +∞ and −∞.
        // There's no need to handle NaN separately: if self is NaN,
        // the comparison is not true, exactly as desired.
        self.abs() < Self::INFINITY
    }
425
    /// Returns `true` if the number is [subnormal].
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
    /// let max = f16::MAX;
    /// let lower_than_min = 1.0e-7_f16;
    /// let zero = 0.0_f16;
    ///
    /// assert!(!min.is_subnormal());
    /// assert!(!max.is_subnormal());
    ///
    /// assert!(!zero.is_subnormal());
    /// assert!(!f16::NAN.is_subnormal());
    /// assert!(!f16::INFINITY.is_subnormal());
    /// // Values between `0` and `min` are Subnormal.
    /// assert!(lower_than_min.is_subnormal());
    /// # }
    /// ```
    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn is_subnormal(self) -> bool {
        // Delegate the bit-level inspection to `classify` and test one category.
        matches!(self.classify(), FpCategory::Subnormal)
    }
454
    /// Returns `true` if the number is neither zero, infinite, [subnormal], or NaN.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let min = f16::MIN_POSITIVE; // 6.1035e-5
    /// let max = f16::MAX;
    /// let lower_than_min = 1.0e-7_f16;
    /// let zero = 0.0_f16;
    ///
    /// assert!(min.is_normal());
    /// assert!(max.is_normal());
    ///
    /// assert!(!zero.is_normal());
    /// assert!(!f16::NAN.is_normal());
    /// assert!(!f16::INFINITY.is_normal());
    /// // Values between `0` and `min` are Subnormal.
    /// assert!(!lower_than_min.is_normal());
    /// # }
    /// ```
    /// [subnormal]: https://en.wikipedia.org/wiki/Denormal_number
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    pub const fn is_normal(self) -> bool {
        // Delegate the bit-level inspection to `classify` and test one category.
        matches!(self.classify(), FpCategory::Normal)
    }
483
484 /// Returns the floating point category of the number. If only one property
485 /// is going to be tested, it is generally faster to use the specific
486 /// predicate instead.
487 ///
488 /// ```
489 /// #![feature(f16)]
490 /// # #[cfg(target_has_reliable_f16)] {
491 ///
492 /// use std::num::FpCategory;
493 ///
494 /// let num = 12.4_f16;
495 /// let inf = f16::INFINITY;
496 ///
497 /// assert_eq!(num.classify(), FpCategory::Normal);
498 /// assert_eq!(inf.classify(), FpCategory::Infinite);
499 /// # }
500 /// ```
501 #[inline]
502 #[unstable(feature = "f16", issue = "116909")]
503 #[ferrocene::prevalidated]
504 pub const fn classify(self) -> FpCategory {
505 let b = self.to_bits();
506 match (b & Self::MAN_MASK, b & Self::EXP_MASK) {
507 (0, Self::EXP_MASK) => FpCategory::Infinite,
508 (_, Self::EXP_MASK) => FpCategory::Nan,
509 (0, 0) => FpCategory::Zero,
510 (_, 0) => FpCategory::Subnormal,
511 _ => FpCategory::Normal,
512 }
513 }
514
515 /// Returns `true` if `self` has a positive sign, including `+0.0`, NaNs with
516 /// positive sign bit and positive infinity.
517 ///
518 /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
519 /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are
520 /// conserved over arithmetic operations, the result of `is_sign_positive` on
521 /// a NaN might produce an unexpected or non-portable result. See the [specification
522 /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == 1.0`
523 /// if you need fully portable behavior (will return `false` for all NaNs).
524 ///
525 /// ```
526 /// #![feature(f16)]
527 /// # #[cfg(target_has_reliable_f16)] {
528 ///
529 /// let f = 7.0_f16;
530 /// let g = -7.0_f16;
531 ///
532 /// assert!(f.is_sign_positive());
533 /// assert!(!g.is_sign_positive());
534 /// # }
535 /// ```
536 #[inline]
537 #[must_use]
538 #[unstable(feature = "f16", issue = "116909")]
539 pub const fn is_sign_positive(self) -> bool {
540 !self.is_sign_negative()
541 }
542
543 /// Returns `true` if `self` has a negative sign, including `-0.0`, NaNs with
544 /// negative sign bit and negative infinity.
545 ///
546 /// Note that IEEE 754 doesn't assign any meaning to the sign bit in case of
547 /// a NaN, and as Rust doesn't guarantee that the bit pattern of NaNs are
548 /// conserved over arithmetic operations, the result of `is_sign_negative` on
549 /// a NaN might produce an unexpected or non-portable result. See the [specification
550 /// of NaN bit patterns](f32#nan-bit-patterns) for more info. Use `self.signum() == -1.0`
551 /// if you need fully portable behavior (will return `false` for all NaNs).
552 ///
553 /// ```
554 /// #![feature(f16)]
555 /// # #[cfg(target_has_reliable_f16)] {
556 ///
557 /// let f = 7.0_f16;
558 /// let g = -7.0_f16;
559 ///
560 /// assert!(!f.is_sign_negative());
561 /// assert!(g.is_sign_negative());
562 /// # }
563 /// ```
564 #[inline]
565 #[must_use]
566 #[unstable(feature = "f16", issue = "116909")]
567 pub const fn is_sign_negative(self) -> bool {
568 // IEEE754 says: isSignMinus(x) is true if and only if x has negative sign. isSignMinus
569 // applies to zeros and NaNs as well.
570 // SAFETY: This is just transmuting to get the sign bit, it's fine.
571 (self.to_bits() & (1 << 15)) != 0
572 }
573
574 /// Returns the least number greater than `self`.
575 ///
576 /// Let `TINY` be the smallest representable positive `f16`. Then,
577 /// - if `self.is_nan()`, this returns `self`;
578 /// - if `self` is [`NEG_INFINITY`], this returns [`MIN`];
579 /// - if `self` is `-TINY`, this returns -0.0;
580 /// - if `self` is -0.0 or +0.0, this returns `TINY`;
581 /// - if `self` is [`MAX`] or [`INFINITY`], this returns [`INFINITY`];
582 /// - otherwise the unique least value greater than `self` is returned.
583 ///
584 /// The identity `x.next_up() == -(-x).next_down()` holds for all non-NaN `x`. When `x`
585 /// is finite `x == x.next_up().next_down()` also holds.
586 ///
587 /// ```rust
588 /// #![feature(f16)]
589 /// # #[cfg(target_has_reliable_f16)] {
590 ///
591 /// // f16::EPSILON is the difference between 1.0 and the next number up.
592 /// assert_eq!(1.0f16.next_up(), 1.0 + f16::EPSILON);
593 /// // But not for most numbers.
594 /// assert!(0.1f16.next_up() < 0.1 + f16::EPSILON);
595 /// assert_eq!(4356f16.next_up(), 4360.0);
596 /// # }
597 /// ```
598 ///
599 /// This operation corresponds to IEEE-754 `nextUp`.
600 ///
601 /// [`NEG_INFINITY`]: Self::NEG_INFINITY
602 /// [`INFINITY`]: Self::INFINITY
603 /// [`MIN`]: Self::MIN
604 /// [`MAX`]: Self::MAX
605 #[inline]
606 #[doc(alias = "nextUp")]
607 #[unstable(feature = "f16", issue = "116909")]
608 pub const fn next_up(self) -> Self {
609 // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
610 // denormals to zero. This is in general unsound and unsupported, but here
611 // we do our best to still produce the correct result on such targets.
612 let bits = self.to_bits();
613 if self.is_nan() || bits == Self::INFINITY.to_bits() {
614 return self;
615 }
616
617 let abs = bits & !Self::SIGN_MASK;
618 let next_bits = if abs == 0 {
619 Self::TINY_BITS
620 } else if bits == abs {
621 bits + 1
622 } else {
623 bits - 1
624 };
625 Self::from_bits(next_bits)
626 }
627
628 /// Returns the greatest number less than `self`.
629 ///
630 /// Let `TINY` be the smallest representable positive `f16`. Then,
631 /// - if `self.is_nan()`, this returns `self`;
632 /// - if `self` is [`INFINITY`], this returns [`MAX`];
633 /// - if `self` is `TINY`, this returns 0.0;
634 /// - if `self` is -0.0 or +0.0, this returns `-TINY`;
635 /// - if `self` is [`MIN`] or [`NEG_INFINITY`], this returns [`NEG_INFINITY`];
636 /// - otherwise the unique greatest value less than `self` is returned.
637 ///
638 /// The identity `x.next_down() == -(-x).next_up()` holds for all non-NaN `x`. When `x`
639 /// is finite `x == x.next_down().next_up()` also holds.
640 ///
641 /// ```rust
642 /// #![feature(f16)]
643 /// # #[cfg(target_has_reliable_f16)] {
644 ///
645 /// let x = 1.0f16;
646 /// // Clamp value into range [0, 1).
647 /// let clamped = x.clamp(0.0, 1.0f16.next_down());
648 /// assert!(clamped < 1.0);
649 /// assert_eq!(clamped.next_up(), 1.0);
650 /// # }
651 /// ```
652 ///
653 /// This operation corresponds to IEEE-754 `nextDown`.
654 ///
655 /// [`NEG_INFINITY`]: Self::NEG_INFINITY
656 /// [`INFINITY`]: Self::INFINITY
657 /// [`MIN`]: Self::MIN
658 /// [`MAX`]: Self::MAX
659 #[inline]
660 #[doc(alias = "nextDown")]
661 #[unstable(feature = "f16", issue = "116909")]
662 pub const fn next_down(self) -> Self {
663 // Some targets violate Rust's assumption of IEEE semantics, e.g. by flushing
664 // denormals to zero. This is in general unsound and unsupported, but here
665 // we do our best to still produce the correct result on such targets.
666 let bits = self.to_bits();
667 if self.is_nan() || bits == Self::NEG_INFINITY.to_bits() {
668 return self;
669 }
670
671 let abs = bits & !Self::SIGN_MASK;
672 let next_bits = if abs == 0 {
673 Self::NEG_TINY_BITS
674 } else if bits == abs {
675 bits - 1
676 } else {
677 bits + 1
678 };
679 Self::from_bits(next_bits)
680 }
681
    /// Takes the reciprocal (inverse) of a number, `1/x`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 2.0_f16;
    /// let abs_difference = (x.recip() - (1.0 / x)).abs();
    ///
    /// assert!(abs_difference <= f16::EPSILON);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn recip(self) -> Self {
        // A plain IEEE 754 division; special inputs (0.0, ∞, NaN) follow the
        // usual division semantics.
        1.0 / self
    }
700
    /// Converts radians to degrees.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let angle = std::f16::consts::PI;
    ///
    /// let abs_difference = (angle.to_degrees() - 180.0).abs();
    /// assert!(abs_difference <= 0.5);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_degrees(self) -> Self {
        // 180/π. Use a literal to avoid double rounding, consts::PI is already
        // rounded, and dividing would round again.
        const PIS_IN_180: f16 = 57.2957795130823208767981548141051703_f16;
        self * PIS_IN_180
    }
729
    /// Converts degrees to radians.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let angle = 180.0f16;
    ///
    /// let abs_difference = (angle.to_radians() - std::f16::consts::PI).abs();
    ///
    /// assert!(abs_difference <= 0.01);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub const fn to_radians(self) -> f16 {
        // π/180. Use a literal to avoid double rounding, consts::PI is already
        // rounded, and dividing would round again.
        const RADS_PER_DEG: f16 = 0.017453292519943295769236907684886_f16;
        self * RADS_PER_DEG
    }
759
    /// Returns the maximum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `maximumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `maxNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.max(y), y);
    /// assert_eq!(x.max(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn max(self, other: f16) -> f16 {
        // Per its name, the intrinsic implements `maximumNumber` with "no signed
        // zeros" (nsz) ordering, matching the contract documented above.
        intrinsics::maximum_number_nsz_f16(self, other)
    }
790
    /// Returns the minimum of the two numbers, ignoring NaN.
    ///
    /// If exactly one of the arguments is NaN (quiet or signaling), then the other argument is
    /// returned. If both arguments are NaN, the return value is NaN, with the bit pattern picked
    /// using the usual [rules for arithmetic operations](f32#nan-bit-patterns). If the inputs
    /// compare equal (such as for the case of `+0.0` and `-0.0`), either input may be returned
    /// non-deterministically.
    ///
    /// The handling of NaNs follows the IEEE 754-2019 semantics for `minimumNumber`, treating all
    /// NaNs the same way to ensure the operation is associative. The handling of signed zeros
    /// follows the IEEE 754-2008 semantics for `minNum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.min(y), x);
    /// assert_eq!(x.min(f16::NAN), x);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn min(self, other: f16) -> f16 {
        // Per its name, the intrinsic implements `minimumNumber` with "no signed
        // zeros" (nsz) ordering, matching the contract documented above.
        intrinsics::minimum_number_nsz_f16(self, other)
    }
821
    /// Returns the maximum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::max`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `maximum`.
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.maximum(y), y);
    /// assert!(x.maximum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn maximum(self, other: f16) -> f16 {
        // NaN-propagating IEEE 754-2019 `maximum`, unlike the NaN-ignoring `max`.
        intrinsics::maximumf16(self, other)
    }
853
    /// Returns the minimum of the two numbers, propagating NaN.
    ///
    /// If at least one of the arguments is NaN, the return value is NaN, with the bit pattern
    /// picked using the usual [rules for arithmetic operations](f32#nan-bit-patterns). Furthermore,
    /// `-0.0` is considered to be less than `+0.0`, making this function fully deterministic for
    /// non-NaN inputs.
    ///
    /// This is in contrast to [`f16::min`] which only returns NaN when *both* arguments are NaN,
    /// and which does not reliably order `-0.0` and `+0.0`.
    ///
    /// This follows the IEEE 754-2019 semantics for `minimum`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// #![feature(float_minimum_maximum)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 1.0f16;
    /// let y = 2.0f16;
    ///
    /// assert_eq!(x.minimum(y), x);
    /// assert!(x.minimum(f16::NAN).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    // #[unstable(feature = "float_minimum_maximum", issue = "91079")]
    #[must_use = "this returns the result of the comparison, without modifying either input"]
    pub const fn minimum(self, other: f16) -> f16 {
        // Single intrinsic implementing IEEE 754-2019 `minimum` as documented
        // above: NaN-propagating, with `-0.0` ordered below `+0.0`.
        intrinsics::minimumf16(self, other)
    }
885
886 /// Calculates the midpoint (average) between `self` and `rhs`.
887 ///
888 /// This returns NaN when *either* argument is NaN or if a combination of
889 /// +inf and -inf is provided as arguments.
890 ///
891 /// # Examples
892 ///
893 /// ```
894 /// #![feature(f16)]
895 /// # #[cfg(target_has_reliable_f16)] {
896 ///
897 /// assert_eq!(1f16.midpoint(4.0), 2.5);
898 /// assert_eq!((-5.5f16).midpoint(8.0), 1.25);
899 /// # }
900 /// ```
901 #[inline]
902 #[doc(alias = "average")]
903 #[unstable(feature = "f16", issue = "116909")]
904 #[rustc_const_unstable(feature = "f16", issue = "116909")]
905 pub const fn midpoint(self, other: f16) -> f16 {
906 const HI: f16 = f16::MAX / 2.;
907
908 let (a, b) = (self, other);
909 let abs_a = a.abs();
910 let abs_b = b.abs();
911
912 if abs_a <= HI && abs_b <= HI {
913 // Overflow is impossible
914 (a + b) / 2.
915 } else {
916 (a / 2.) + (b / 2.)
917 }
918 }
919
    /// Rounds toward zero and converts to any primitive integer type,
    /// assuming that the value is finite and fits in that type.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let value = 4.6_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<u16>() };
    /// assert_eq!(rounded, 4);
    ///
    /// let value = -128.9_f16;
    /// let rounded = unsafe { value.to_int_unchecked::<i8>() };
    /// assert_eq!(rounded, i8::MIN);
    /// # }
    /// ```
    ///
    /// # Safety
    ///
    /// The value must:
    ///
    /// * Not be `NaN`
    /// * Not be infinite
    /// * Be representable in the return type `Int`, after truncating off its fractional part
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    pub unsafe fn to_int_unchecked<Int>(self) -> Int
    where
        Self: FloatToInt<Int>,
    {
        // SAFETY: the caller must uphold the safety contract for
        // `FloatToInt::to_int_unchecked`.
        unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
    }
955
    /// Raw transmutation to `u16`.
    ///
    /// This is currently identical to `transmute::<f16, u16>(self)` on all platforms.
    ///
    /// See [`from_bits`](#method.from_bits) for some discussion of the
    /// portability of this operation (there are almost no issues).
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// assert_ne!((1f16).to_bits(), 1f16 as u16); // to_bits() is not casting!
    /// assert_eq!((12.5f16).to_bits(), 0x4a40);
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "this returns the result of the operation, without modifying the original"]
    #[allow(unnecessary_transmutes)]
    #[ferrocene::prevalidated]
    pub const fn to_bits(self) -> u16 {
        // SAFETY: `u16` is a plain old datatype so we can always transmute to it.
        unsafe { mem::transmute(self) }
    }
983
    /// Raw transmutation from `u16`.
    ///
    /// This is currently identical to `transmute::<u16, f16>(v)` on all platforms.
    /// It turns out this is incredibly portable, for two reasons:
    ///
    /// * Floats and Ints have the same endianness on all supported platforms.
    /// * IEEE 754 very precisely specifies the bit layout of floats.
    ///
    /// However there is one caveat: prior to the 2008 version of IEEE 754, how
    /// to interpret the NaN signaling bit wasn't actually specified. Most platforms
    /// (notably x86 and ARM) picked the interpretation that was ultimately
    /// standardized in 2008, but some didn't (notably MIPS). As a result, all
    /// signaling NaNs on MIPS are quiet NaNs on x86, and vice-versa.
    ///
    /// Rather than trying to preserve signaling-ness cross-platform, this
    /// implementation favors preserving the exact bits. This means that
    /// any payloads encoded in NaNs will be preserved even if the result of
    /// this method is sent over the network from an x86 machine to a MIPS one.
    ///
    /// If the results of this method are only manipulated by the same
    /// architecture that produced them, then there is no portability concern.
    ///
    /// If the input isn't NaN, then there is no portability concern.
    ///
    /// If you don't care about signalingness (very likely), then there is no
    /// portability concern.
    ///
    /// Note that this function is distinct from `as` casting, which attempts to
    /// preserve the *numeric* value, and not the bitwise value.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let v = f16::from_bits(0x4a40);
    /// assert_eq!(v, 12.5);
    /// # }
    /// ```
    #[inline]
    #[must_use]
    #[unstable(feature = "f16", issue = "116909")]
    #[allow(unnecessary_transmutes)]
    #[ferrocene::prevalidated]
    pub const fn from_bits(v: u16) -> Self {
        // It turns out the safety issues with sNaN were overblown! Hooray!
        // SAFETY: `u16` is a plain old datatype so we can always transmute from it.
        unsafe { mem::transmute(v) }
    }
1032
1033 /// Returns the memory representation of this floating point number as a byte array in
1034 /// big-endian (network) byte order.
1035 ///
1036 /// See [`from_bits`](Self::from_bits) for some discussion of the
1037 /// portability of this operation (there are almost no issues).
1038 ///
1039 /// # Examples
1040 ///
1041 /// ```
1042 /// #![feature(f16)]
1043 /// # #[cfg(target_has_reliable_f16)] {
1044 ///
1045 /// let bytes = 12.5f16.to_be_bytes();
1046 /// assert_eq!(bytes, [0x4a, 0x40]);
1047 /// # }
1048 /// ```
1049 #[inline]
1050 #[unstable(feature = "f16", issue = "116909")]
1051 #[must_use = "this returns the result of the operation, without modifying the original"]
1052 pub const fn to_be_bytes(self) -> [u8; 2] {
1053 self.to_bits().to_be_bytes()
1054 }
1055
1056 /// Returns the memory representation of this floating point number as a byte array in
1057 /// little-endian byte order.
1058 ///
1059 /// See [`from_bits`](Self::from_bits) for some discussion of the
1060 /// portability of this operation (there are almost no issues).
1061 ///
1062 /// # Examples
1063 ///
1064 /// ```
1065 /// #![feature(f16)]
1066 /// # #[cfg(target_has_reliable_f16)] {
1067 ///
1068 /// let bytes = 12.5f16.to_le_bytes();
1069 /// assert_eq!(bytes, [0x40, 0x4a]);
1070 /// # }
1071 /// ```
1072 #[inline]
1073 #[unstable(feature = "f16", issue = "116909")]
1074 #[must_use = "this returns the result of the operation, without modifying the original"]
1075 pub const fn to_le_bytes(self) -> [u8; 2] {
1076 self.to_bits().to_le_bytes()
1077 }
1078
1079 /// Returns the memory representation of this floating point number as a byte array in
1080 /// native byte order.
1081 ///
1082 /// As the target platform's native endianness is used, portable code
1083 /// should use [`to_be_bytes`] or [`to_le_bytes`], as appropriate, instead.
1084 ///
1085 /// [`to_be_bytes`]: f16::to_be_bytes
1086 /// [`to_le_bytes`]: f16::to_le_bytes
1087 ///
1088 /// See [`from_bits`](Self::from_bits) for some discussion of the
1089 /// portability of this operation (there are almost no issues).
1090 ///
1091 /// # Examples
1092 ///
1093 /// ```
1094 /// #![feature(f16)]
1095 /// # #[cfg(target_has_reliable_f16)] {
1096 ///
1097 /// let bytes = 12.5f16.to_ne_bytes();
1098 /// assert_eq!(
1099 /// bytes,
1100 /// if cfg!(target_endian = "big") {
1101 /// [0x4a, 0x40]
1102 /// } else {
1103 /// [0x40, 0x4a]
1104 /// }
1105 /// );
1106 /// # }
1107 /// ```
1108 #[inline]
1109 #[unstable(feature = "f16", issue = "116909")]
1110 #[must_use = "this returns the result of the operation, without modifying the original"]
1111 pub const fn to_ne_bytes(self) -> [u8; 2] {
1112 self.to_bits().to_ne_bytes()
1113 }
1114
1115 /// Creates a floating point value from its representation as a byte array in big endian.
1116 ///
1117 /// See [`from_bits`](Self::from_bits) for some discussion of the
1118 /// portability of this operation (there are almost no issues).
1119 ///
1120 /// # Examples
1121 ///
1122 /// ```
1123 /// #![feature(f16)]
1124 /// # #[cfg(target_has_reliable_f16)] {
1125 ///
1126 /// let value = f16::from_be_bytes([0x4a, 0x40]);
1127 /// assert_eq!(value, 12.5);
1128 /// # }
1129 /// ```
1130 #[inline]
1131 #[must_use]
1132 #[unstable(feature = "f16", issue = "116909")]
1133 pub const fn from_be_bytes(bytes: [u8; 2]) -> Self {
1134 Self::from_bits(u16::from_be_bytes(bytes))
1135 }
1136
1137 /// Creates a floating point value from its representation as a byte array in little endian.
1138 ///
1139 /// See [`from_bits`](Self::from_bits) for some discussion of the
1140 /// portability of this operation (there are almost no issues).
1141 ///
1142 /// # Examples
1143 ///
1144 /// ```
1145 /// #![feature(f16)]
1146 /// # #[cfg(target_has_reliable_f16)] {
1147 ///
1148 /// let value = f16::from_le_bytes([0x40, 0x4a]);
1149 /// assert_eq!(value, 12.5);
1150 /// # }
1151 /// ```
1152 #[inline]
1153 #[must_use]
1154 #[unstable(feature = "f16", issue = "116909")]
1155 pub const fn from_le_bytes(bytes: [u8; 2]) -> Self {
1156 Self::from_bits(u16::from_le_bytes(bytes))
1157 }
1158
1159 /// Creates a floating point value from its representation as a byte array in native endian.
1160 ///
1161 /// As the target platform's native endianness is used, portable code
1162 /// likely wants to use [`from_be_bytes`] or [`from_le_bytes`], as
1163 /// appropriate instead.
1164 ///
1165 /// [`from_be_bytes`]: f16::from_be_bytes
1166 /// [`from_le_bytes`]: f16::from_le_bytes
1167 ///
1168 /// See [`from_bits`](Self::from_bits) for some discussion of the
1169 /// portability of this operation (there are almost no issues).
1170 ///
1171 /// # Examples
1172 ///
1173 /// ```
1174 /// #![feature(f16)]
1175 /// # #[cfg(target_has_reliable_f16)] {
1176 ///
1177 /// let value = f16::from_ne_bytes(if cfg!(target_endian = "big") {
1178 /// [0x4a, 0x40]
1179 /// } else {
1180 /// [0x40, 0x4a]
1181 /// });
1182 /// assert_eq!(value, 12.5);
1183 /// # }
1184 /// ```
1185 #[inline]
1186 #[must_use]
1187 #[unstable(feature = "f16", issue = "116909")]
1188 pub const fn from_ne_bytes(bytes: [u8; 2]) -> Self {
1189 Self::from_bits(u16::from_ne_bytes(bytes))
1190 }
1191
1192 /// Returns the ordering between `self` and `other`.
1193 ///
1194 /// Unlike the standard partial comparison between floating point numbers,
1195 /// this comparison always produces an ordering in accordance to
1196 /// the `totalOrder` predicate as defined in the IEEE 754 (2008 revision)
1197 /// floating point standard. The values are ordered in the following sequence:
1198 ///
1199 /// - negative quiet NaN
1200 /// - negative signaling NaN
1201 /// - negative infinity
1202 /// - negative numbers
1203 /// - negative subnormal numbers
1204 /// - negative zero
1205 /// - positive zero
1206 /// - positive subnormal numbers
1207 /// - positive numbers
1208 /// - positive infinity
1209 /// - positive signaling NaN
1210 /// - positive quiet NaN.
1211 ///
1212 /// The ordering established by this function does not always agree with the
1213 /// [`PartialOrd`] and [`PartialEq`] implementations of `f16`. For example,
1214 /// they consider negative and positive zero equal, while `total_cmp`
1215 /// doesn't.
1216 ///
1217 /// The interpretation of the signaling NaN bit follows the definition in
1218 /// the IEEE 754 standard, which may not match the interpretation by some of
1219 /// the older, non-conformant (e.g. MIPS) hardware implementations.
1220 ///
1221 /// # Example
1222 ///
1223 /// ```
1224 /// #![feature(f16)]
1225 /// # #[cfg(target_has_reliable_f16)] {
1226 ///
1227 /// struct GoodBoy {
1228 /// name: &'static str,
1229 /// weight: f16,
1230 /// }
1231 ///
1232 /// let mut bois = vec![
1233 /// GoodBoy { name: "Pucci", weight: 0.1 },
1234 /// GoodBoy { name: "Woofer", weight: 99.0 },
1235 /// GoodBoy { name: "Yapper", weight: 10.0 },
1236 /// GoodBoy { name: "Chonk", weight: f16::INFINITY },
1237 /// GoodBoy { name: "Abs. Unit", weight: f16::NAN },
1238 /// GoodBoy { name: "Floaty", weight: -5.0 },
1239 /// ];
1240 ///
1241 /// bois.sort_by(|a, b| a.weight.total_cmp(&b.weight));
1242 ///
1243 /// // `f16::NAN` could be positive or negative, which will affect the sort order.
1244 /// if f16::NAN.is_sign_negative() {
1245 /// bois.into_iter().map(|b| b.weight)
1246 /// .zip([f16::NAN, -5.0, 0.1, 10.0, 99.0, f16::INFINITY].iter())
1247 /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1248 /// } else {
1249 /// bois.into_iter().map(|b| b.weight)
1250 /// .zip([-5.0, 0.1, 10.0, 99.0, f16::INFINITY, f16::NAN].iter())
1251 /// .for_each(|(a, b)| assert_eq!(a.to_bits(), b.to_bits()))
1252 /// }
1253 /// # }
1254 /// ```
1255 #[inline]
1256 #[must_use]
1257 #[unstable(feature = "f16", issue = "116909")]
1258 #[rustc_const_unstable(feature = "const_cmp", issue = "143800")]
1259 pub const fn total_cmp(&self, other: &Self) -> crate::cmp::Ordering {
1260 let mut left = self.to_bits() as i16;
1261 let mut right = other.to_bits() as i16;
1262
1263 // In case of negatives, flip all the bits except the sign
1264 // to achieve a similar layout as two's complement integers
1265 //
1266 // Why does this work? IEEE 754 floats consist of three fields:
1267 // Sign bit, exponent and mantissa. The set of exponent and mantissa
1268 // fields as a whole have the property that their bitwise order is
1269 // equal to the numeric magnitude where the magnitude is defined.
1270 // The magnitude is not normally defined on NaN values, but
1271 // IEEE 754 totalOrder defines the NaN values also to follow the
1272 // bitwise order. This leads to order explained in the doc comment.
1273 // However, the representation of magnitude is the same for negative
1274 // and positive numbers – only the sign bit is different.
1275 // To easily compare the floats as signed integers, we need to
1276 // flip the exponent and mantissa bits in case of negative numbers.
1277 // We effectively convert the numbers to "two's complement" form.
1278 //
1279 // To do the flipping, we construct a mask and XOR against it.
1280 // We branchlessly calculate an "all-ones except for the sign bit"
1281 // mask from negative-signed values: right shifting sign-extends
1282 // the integer, so we "fill" the mask with sign bits, and then
1283 // convert to unsigned to push one more zero bit.
1284 // On positive values, the mask is all zeros, so it's a no-op.
1285 left ^= (((left >> 15) as u16) >> 1) as i16;
1286 right ^= (((right >> 15) as u16) >> 1) as i16;
1287
1288 left.cmp(&right)
1289 }
1290
1291 /// Restrict a value to a certain interval unless it is NaN.
1292 ///
1293 /// Returns `max` if `self` is greater than `max`, and `min` if `self` is
1294 /// less than `min`. Otherwise this returns `self`.
1295 ///
1296 /// Note that this function returns NaN if the initial value was NaN as
1297 /// well. If the result is zero and among the three inputs `self`, `min`, and `max` there are
1298 /// zeros with different sign, either `0.0` or `-0.0` is returned non-deterministically.
1299 ///
1300 /// # Panics
1301 ///
1302 /// Panics if `min > max`, `min` is NaN, or `max` is NaN.
1303 ///
1304 /// # Examples
1305 ///
1306 /// ```
1307 /// #![feature(f16)]
1308 /// # #[cfg(target_has_reliable_f16)] {
1309 ///
1310 /// assert!((-3.0f16).clamp(-2.0, 1.0) == -2.0);
1311 /// assert!((0.0f16).clamp(-2.0, 1.0) == 0.0);
1312 /// assert!((2.0f16).clamp(-2.0, 1.0) == 1.0);
1313 /// assert!((f16::NAN).clamp(-2.0, 1.0).is_nan());
1314 ///
1315 /// // These always returns zero, but the sign (which is ignored by `==`) is non-deterministic.
1316 /// assert!((0.0f16).clamp(-0.0, -0.0) == 0.0);
1317 /// assert!((1.0f16).clamp(-0.0, 0.0) == 0.0);
1318 /// // This is definitely a negative zero.
1319 /// assert!((-1.0f16).clamp(-0.0, 1.0).is_sign_negative());
1320 /// # }
1321 /// ```
1322 #[inline]
1323 #[unstable(feature = "f16", issue = "116909")]
1324 #[must_use = "method returns a new number and does not mutate the original value"]
1325 pub const fn clamp(mut self, min: f16, max: f16) -> f16 {
1326 const_assert!(
1327 min <= max,
1328 "min > max, or either was NaN",
1329 "min > max, or either was NaN. min = {min:?}, max = {max:?}",
1330 min: f16,
1331 max: f16,
1332 );
1333
1334 if self < min {
1335 self = min;
1336 }
1337 if self > max {
1338 self = max;
1339 }
1340 self
1341 }
1342
1343 /// Clamps this number to a symmetric range centered around zero.
1344 ///
1345 /// The method clamps the number's magnitude (absolute value) to be at most `limit`.
1346 ///
1347 /// This is functionally equivalent to `self.clamp(-limit, limit)`, but is more
1348 /// explicit about the intent.
1349 ///
1350 /// # Panics
1351 ///
1352 /// Panics if `limit` is negative or NaN, as this indicates a logic error.
1353 ///
1354 /// # Examples
1355 ///
1356 /// ```
1357 /// #![feature(f16)]
1358 /// #![feature(clamp_magnitude)]
1359 /// # #[cfg(target_has_reliable_f16)] {
1360 /// assert_eq!(5.0f16.clamp_magnitude(3.0), 3.0);
1361 /// assert_eq!((-5.0f16).clamp_magnitude(3.0), -3.0);
1362 /// assert_eq!(2.0f16.clamp_magnitude(3.0), 2.0);
1363 /// assert_eq!((-2.0f16).clamp_magnitude(3.0), -2.0);
1364 /// # }
1365 /// ```
1366 #[inline]
1367 #[unstable(feature = "clamp_magnitude", issue = "148519")]
1368 #[must_use = "this returns the clamped value and does not modify the original"]
1369 pub fn clamp_magnitude(self, limit: f16) -> f16 {
1370 assert!(limit >= 0.0, "limit must be non-negative");
1371 let limit = limit.abs(); // Canonicalises -0.0 to 0.0
1372 self.clamp(-limit, limit)
1373 }
1374
    /// Computes the absolute value of `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16_math)] {
    ///
    /// let x = 3.5_f16;
    /// let y = -3.5_f16;
    ///
    /// assert_eq!(x.abs(), x);
    /// assert_eq!(y.abs(), -y);
    ///
    /// assert!(f16::NAN.abs().is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[ferrocene::prevalidated]
    pub const fn abs(self) -> Self {
        // Single intrinsic; exact for all inputs ("always returns the
        // precise result" above), including NaN (see the example).
        intrinsics::fabsf16(self)
    }
1402
1403 /// Returns a number that represents the sign of `self`.
1404 ///
1405 /// - `1.0` if the number is positive, `+0.0` or `INFINITY`
1406 /// - `-1.0` if the number is negative, `-0.0` or `NEG_INFINITY`
1407 /// - NaN if the number is NaN
1408 ///
1409 /// # Examples
1410 ///
1411 /// ```
1412 /// #![feature(f16)]
1413 /// # #[cfg(target_has_reliable_f16)] {
1414 ///
1415 /// let f = 3.5_f16;
1416 ///
1417 /// assert_eq!(f.signum(), 1.0);
1418 /// assert_eq!(f16::NEG_INFINITY.signum(), -1.0);
1419 ///
1420 /// assert!(f16::NAN.signum().is_nan());
1421 /// # }
1422 /// ```
1423 #[inline]
1424 #[unstable(feature = "f16", issue = "116909")]
1425 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1426 #[must_use = "method returns a new number and does not mutate the original value"]
1427 pub const fn signum(self) -> f16 {
1428 if self.is_nan() { Self::NAN } else { 1.0_f16.copysign(self) }
1429 }
1430
    /// Returns a number composed of the magnitude of `self` and the sign of
    /// `sign`.
    ///
    /// Equal to `self` if the sign of `self` and `sign` are the same, otherwise equal to `-self`.
    /// If `self` is a NaN, then a NaN with the same payload as `self` and the sign bit of `sign` is
    /// returned.
    ///
    /// If `sign` is a NaN, then this operation will still carry over its sign into the result. Note
    /// that IEEE 754 doesn't assign any meaning to the sign bit in case of a NaN, and as Rust
    /// doesn't guarantee that the bit pattern of NaNs are conserved over arithmetic operations, the
    /// result of `copysign` with `sign` being a NaN might produce an unexpected or non-portable
    /// result. See the [specification of NaN bit patterns](primitive@f32#nan-bit-patterns) for more
    /// info.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(target_has_reliable_f16_math)] {
    ///
    /// let f = 3.5_f16;
    ///
    /// assert_eq!(f.copysign(0.42), 3.5_f16);
    /// assert_eq!(f.copysign(-0.42), -3.5_f16);
    /// assert_eq!((-f).copysign(0.42), 3.5_f16);
    /// assert_eq!((-f).copysign(-0.42), -3.5_f16);
    ///
    /// assert!(f16::NAN.copysign(1.0).is_nan());
    /// # }
    /// ```
    #[inline]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn copysign(self, sign: f16) -> f16 {
        // Single intrinsic: magnitude of `self` combined with the sign bit
        // of `sign` (see doc above, including the NaN caveats).
        intrinsics::copysignf16(self, sign)
    }
1468
    /// Float addition that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_add(self, rhs: f16) -> f16 {
        // Addition that the compiler may rewrite using algebraic rules; see
        // the linked docs for the exact semantics.
        intrinsics::fadd_algebraic(self, rhs)
    }
1479
    /// Float subtraction that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_sub(self, rhs: f16) -> f16 {
        // Subtraction that the compiler may rewrite using algebraic rules;
        // see the linked docs for the exact semantics.
        intrinsics::fsub_algebraic(self, rhs)
    }
1490
    /// Float multiplication that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_mul(self, rhs: f16) -> f16 {
        // Multiplication that the compiler may rewrite using algebraic
        // rules; see the linked docs for the exact semantics.
        intrinsics::fmul_algebraic(self, rhs)
    }
1501
    /// Float division that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_div(self, rhs: f16) -> f16 {
        // Division that the compiler may rewrite using algebraic rules; see
        // the linked docs for the exact semantics.
        intrinsics::fdiv_algebraic(self, rhs)
    }
1512
    /// Float remainder that allows optimizations based on algebraic rules.
    ///
    /// See [algebraic operators](primitive@f32#algebraic-operators) for more info.
    #[must_use = "method returns a new number and does not mutate the original value"]
    #[unstable(feature = "float_algebraic", issue = "136469")]
    #[rustc_const_unstable(feature = "float_algebraic", issue = "136469")]
    #[inline]
    pub const fn algebraic_rem(self, rhs: f16) -> f16 {
        // Remainder that the compiler may rewrite using algebraic rules; see
        // the linked docs for the exact semantics.
        intrinsics::frem_algebraic(self, rhs)
    }
1523}
1524
1525// Functions in this module fall into `core_float_math`
1526// #[unstable(feature = "core_float_math", issue = "137578")]
1527#[cfg(not(test))]
1528#[doc(test(attr(
1529 feature(cfg_target_has_reliable_f16_f128),
1530 expect(internal_features),
1531 allow(unused_features)
1532)))]
1533impl f16 {
    /// Returns the largest integer less than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.7_f16;
    /// let g = 3.0_f16;
    /// let h = -3.7_f16;
    ///
    /// assert_eq!(f.floor(), 3.0);
    /// assert_eq!(g.floor(), 3.0);
    /// assert_eq!(h.floor(), -4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn floor(self) -> f16 {
        // Rounds toward negative infinity (largest integer <= self, per the
        // doc above); the result is always precise.
        intrinsics::floorf16(self)
    }
1562
    /// Returns the smallest integer greater than or equal to `self`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.01_f16;
    /// let g = 4.0_f16;
    ///
    /// assert_eq!(f.ceil(), 4.0);
    /// assert_eq!(g.ceil(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "ceiling")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn ceil(self) -> f16 {
        // Rounds toward positive infinity (smallest integer >= self, per the
        // doc above); the result is always precise.
        intrinsics::ceilf16(self)
    }
1590
    /// Returns the nearest integer to `self`. If a value is half-way between two
    /// integers, round away from `0.0`.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.3_f16;
    /// let g = -3.3_f16;
    /// let h = -3.7_f16;
    /// let i = 3.5_f16;
    /// let j = 4.5_f16;
    ///
    /// assert_eq!(f.round(), 3.0);
    /// assert_eq!(g.round(), -3.0);
    /// assert_eq!(h.round(), -4.0);
    /// assert_eq!(i.round(), 4.0);
    /// assert_eq!(j.round(), 5.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn round(self) -> f16 {
        // Rounds to the nearest integer, with half-way cases rounded away
        // from zero (per the doc above); the result is always precise.
        intrinsics::roundf16(self)
    }
1624
    /// Returns the nearest integer to a number. Rounds half-way cases to the number
    /// with an even least significant digit.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.3_f16;
    /// let g = -3.3_f16;
    /// let h = 3.5_f16;
    /// let i = 4.5_f16;
    ///
    /// assert_eq!(f.round_ties_even(), 3.0);
    /// assert_eq!(g.round_ties_even(), -3.0);
    /// assert_eq!(h.round_ties_even(), 4.0);
    /// assert_eq!(i.round_ties_even(), 4.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn round_ties_even(self) -> f16 {
        // Rounds to the nearest integer, with half-way cases rounded to the
        // even neighbor (per the doc above); the result is always precise.
        intrinsics::round_ties_even_f16(self)
    }
1656
    /// Returns the integer part of `self`.
    /// This means that non-integer numbers are always truncated towards zero.
    ///
    /// This function always returns the precise result.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let f = 3.7_f16;
    /// let g = 3.0_f16;
    /// let h = -3.7_f16;
    ///
    /// assert_eq!(f.trunc(), 3.0);
    /// assert_eq!(g.trunc(), 3.0);
    /// assert_eq!(h.trunc(), -3.0);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "truncate")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[rustc_const_unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn trunc(self) -> f16 {
        // Rounds toward zero (drops the fractional part, per the doc above);
        // the result is always precise.
        intrinsics::truncf16(self)
    }
1687
1688 /// Returns the fractional part of `self`.
1689 ///
1690 /// This function always returns the precise result.
1691 ///
1692 /// # Examples
1693 ///
1694 /// ```
1695 /// #![feature(f16)]
1696 /// # #[cfg(not(miri))]
1697 /// # #[cfg(target_has_reliable_f16)] {
1698 ///
1699 /// let x = 3.6_f16;
1700 /// let y = -3.6_f16;
1701 /// let abs_difference_x = (x.fract() - 0.6).abs();
1702 /// let abs_difference_y = (y.fract() - (-0.6)).abs();
1703 ///
1704 /// assert!(abs_difference_x <= f16::EPSILON);
1705 /// assert!(abs_difference_y <= f16::EPSILON);
1706 /// # }
1707 /// ```
1708 #[inline]
1709 #[rustc_allow_incoherent_impl]
1710 #[unstable(feature = "f16", issue = "116909")]
1711 #[rustc_const_unstable(feature = "f16", issue = "116909")]
1712 #[must_use = "method returns a new number and does not mutate the original value"]
1713 pub const fn fract(self) -> f16 {
1714 self - self.trunc()
1715 }
1716
    /// Fused multiply-add. Computes `(self * a) + b` with only one rounding
    /// error, yielding a more accurate result than an unfused multiply-add.
    ///
    /// Using `mul_add` *may* be more performant than an unfused multiply-add if
    /// the target architecture has a dedicated `fma` CPU instruction. However,
    /// this is not always true, and will be heavily dependant on designing
    /// algorithms with specific target hardware in mind.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result. It is specified by IEEE 754 as
    /// `fusedMultiplyAdd` and guaranteed not to change.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let m = 10.0_f16;
    /// let x = 4.0_f16;
    /// let b = 60.0_f16;
    ///
    /// assert_eq!(m.mul_add(x, b), 100.0);
    /// assert_eq!(m * x + b, 100.0);
    ///
    /// let one_plus_eps = 1.0_f16 + f16::EPSILON;
    /// let one_minus_eps = 1.0_f16 - f16::EPSILON;
    /// let minus_one = -1.0_f16;
    ///
    /// // The exact result (1 + eps) * (1 - eps) = 1 - eps * eps.
    /// assert_eq!(one_plus_eps.mul_add(one_minus_eps, minus_one), -f16::EPSILON * f16::EPSILON);
    /// // Different rounding with the non-fused multiply and add.
    /// assert_eq!(one_plus_eps * one_minus_eps + minus_one, 0.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[doc(alias = "fmaf16", alias = "fusedMultiplyAdd")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub const fn mul_add(self, a: f16, b: f16) -> f16 {
        // IEEE 754 `fusedMultiplyAdd`: a single rounding of the
        // infinite-precision `(self * a) + b` (see # Precision above).
        intrinsics::fmaf16(self, a, b)
    }
1763
1764 /// Calculates Euclidean division, the matching method for `rem_euclid`.
1765 ///
1766 /// This computes the integer `n` such that
1767 /// `self = n * rhs + self.rem_euclid(rhs)`.
1768 /// In other words, the result is `self / rhs` rounded to the integer `n`
1769 /// such that `self >= n * rhs`.
1770 ///
1771 /// # Precision
1772 ///
1773 /// The result of this operation is guaranteed to be the rounded
1774 /// infinite-precision result.
1775 ///
1776 /// # Examples
1777 ///
1778 /// ```
1779 /// #![feature(f16)]
1780 /// # #[cfg(not(miri))]
1781 /// # #[cfg(target_has_reliable_f16)] {
1782 ///
1783 /// let a: f16 = 7.0;
1784 /// let b = 4.0;
1785 /// assert_eq!(a.div_euclid(b), 1.0); // 7.0 > 4.0 * 1.0
1786 /// assert_eq!((-a).div_euclid(b), -2.0); // -7.0 >= 4.0 * -2.0
1787 /// assert_eq!(a.div_euclid(-b), -1.0); // 7.0 >= -4.0 * -1.0
1788 /// assert_eq!((-a).div_euclid(-b), 2.0); // -7.0 >= -4.0 * 2.0
1789 /// # }
1790 /// ```
1791 #[inline]
1792 #[rustc_allow_incoherent_impl]
1793 #[unstable(feature = "f16", issue = "116909")]
1794 #[must_use = "method returns a new number and does not mutate the original value"]
1795 pub fn div_euclid(self, rhs: f16) -> f16 {
1796 let q = (self / rhs).trunc();
1797 if self % rhs < 0.0 {
1798 return if rhs > 0.0 { q - 1.0 } else { q + 1.0 };
1799 }
1800 q
1801 }
1802
1803 /// Calculates the least nonnegative remainder of `self` when
1804 /// divided by `rhs`.
1805 ///
1806 /// In particular, the return value `r` satisfies `0.0 <= r < rhs.abs()` in
1807 /// most cases. However, due to a floating point round-off error it can
1808 /// result in `r == rhs.abs()`, violating the mathematical definition, if
1809 /// `self` is much smaller than `rhs.abs()` in magnitude and `self < 0.0`.
1810 /// This result is not an element of the function's codomain, but it is the
1811 /// closest floating point number in the real numbers and thus fulfills the
1812 /// property `self == self.div_euclid(rhs) * rhs + self.rem_euclid(rhs)`
1813 /// approximately.
1814 ///
1815 /// # Precision
1816 ///
1817 /// The result of this operation is guaranteed to be the rounded
1818 /// infinite-precision result.
1819 ///
1820 /// # Examples
1821 ///
1822 /// ```
1823 /// #![feature(f16)]
1824 /// # #[cfg(not(miri))]
1825 /// # #[cfg(target_has_reliable_f16)] {
1826 ///
1827 /// let a: f16 = 7.0;
1828 /// let b = 4.0;
1829 /// assert_eq!(a.rem_euclid(b), 3.0);
1830 /// assert_eq!((-a).rem_euclid(b), 1.0);
1831 /// assert_eq!(a.rem_euclid(-b), 3.0);
1832 /// assert_eq!((-a).rem_euclid(-b), 1.0);
1833 /// // limitation due to round-off error
1834 /// assert!((-f16::EPSILON).rem_euclid(3.0) != 0.0);
1835 /// # }
1836 /// ```
1837 #[inline]
1838 #[rustc_allow_incoherent_impl]
1839 #[doc(alias = "modulo", alias = "mod")]
1840 #[unstable(feature = "f16", issue = "116909")]
1841 #[must_use = "method returns a new number and does not mutate the original value"]
1842 pub fn rem_euclid(self, rhs: f16) -> f16 {
1843 let r = self % rhs;
1844 if r < 0.0 { r + rhs.abs() } else { r }
1845 }
1846
    /// Raises a number to an integer power.
    ///
    /// Using this function is generally faster than using `powf`.
    /// It might have a different sequence of rounding operations than `powf`,
    /// so the results are not guaranteed to agree.
    ///
    /// Note that this function is special in that it can return non-NaN results for NaN inputs. For
    /// example, `f16::powi(f16::NAN, 0)` returns `1.0`. However, if an input is a *signaling*
    /// NaN, then the result is non-deterministically either a NaN or the result that the
    /// corresponding quiet NaN would produce.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 2.0_f16;
    /// let abs_difference = (x.powi(2) - (x * x)).abs();
    /// assert!(abs_difference <= f16::EPSILON);
    ///
    /// assert_eq!(f16::powi(f16::NAN, 0), 1.0);
    /// assert_eq!(f16::powi(0.0, 0), 1.0);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn powi(self, n: i32) -> f16 {
        // Delegates entirely to the compiler intrinsic; the resulting
        // lowering is target-dependent, hence the "Unspecified precision"
        // section above.
        intrinsics::powif16(self, n)
    }
1885
    /// Returns the square root of a number.
    ///
    /// Returns NaN if `self` is a negative number other than `-0.0`.
    ///
    /// # Precision
    ///
    /// The result of this operation is guaranteed to be the rounded
    /// infinite-precision result. It is specified by IEEE 754 as `squareRoot`
    /// and guaranteed not to change.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let positive = 4.0_f16;
    /// let negative = -4.0_f16;
    /// let negative_zero = -0.0_f16;
    ///
    /// assert_eq!(positive.sqrt(), 2.0);
    /// assert!(negative.sqrt().is_nan());
    /// assert!(negative_zero.sqrt() == negative_zero);
    /// # }
    /// ```
    #[inline]
    #[doc(alias = "squareRoot")]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn sqrt(self) -> f16 {
        // Delegates to the compiler intrinsic, which provides the correctly
        // rounded IEEE 754 `squareRoot` result documented above.
        intrinsics::sqrtf16(self)
    }
1920
    /// Returns the cube root of a number.
    ///
    /// # Unspecified precision
    ///
    /// The precision of this function is non-deterministic. This means it varies by platform,
    /// Rust version, and can even differ within the same execution from one invocation to the next.
    ///
    /// This function currently corresponds to the `cbrtf` from libc on Unix
    /// and Windows. Note that this might change in the future.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(f16)]
    /// # #[cfg(not(miri))]
    /// # #[cfg(target_has_reliable_f16)] {
    ///
    /// let x = 8.0f16;
    ///
    /// // x^(1/3) - 2 == 0
    /// let abs_difference = (x.cbrt() - 2.0).abs();
    ///
    /// assert!(abs_difference <= f16::EPSILON);
    /// # }
    /// ```
    #[inline]
    #[rustc_allow_incoherent_impl]
    #[unstable(feature = "f16", issue = "116909")]
    #[must_use = "method returns a new number and does not mutate the original value"]
    pub fn cbrt(self) -> f16 {
        // Computed by widening to `f32`, taking the `f32` cube root, and
        // rounding back down to `f16`; the extra rounding step is covered by
        // the "Unspecified precision" caveat above.
        libm::cbrtf(self as f32) as f16
    }
1953}