core/sync/atomic.rs
//! Atomic types
//!
//! Atomic types provide primitive shared-memory communication between
//! threads, and are the building blocks of other concurrent
//! types.
//!
//! This module defines atomic versions of a select number of primitive
//! types, including [`AtomicBool`], [`AtomicIsize`], [`AtomicUsize`],
//! [`AtomicI8`], [`AtomicU16`], etc.
//! Atomic types present operations that, when used correctly, synchronize
//! updates between threads.
//!
//! Atomic variables are safe to share between threads (they implement [`Sync`])
//! but they do not themselves provide the mechanism for sharing and follow the
//! [threading model](../../../std/thread/index.html#the-threading-model) of Rust.
//! The most common way to share an atomic variable is to put it into an [`Arc`][arc] (an
//! atomically-reference-counted shared pointer).
//!
//! [arc]: ../../../std/sync/struct.Arc.html
//!
//! Atomic types may be stored in static variables, initialized using
//! the constant initializers like [`AtomicBool::new`]. Atomic statics
//! are often used for lazy global initialization.
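//!
//! For example, a minimal sketch of one-time global initialization guarded by an
//! atomic static (the `INITIALIZED` static and `init_once` helper are illustrative,
//! not part of this module):
//!
//! ```
//! use std::sync::atomic::{AtomicBool, Ordering};
//!
//! static INITIALIZED: AtomicBool = AtomicBool::new(false);
//!
//! fn init_once() {
//!     // `swap` returns the previous value, so only the first caller observes
//!     // `false` and runs the one-time setup.
//!     if !INITIALIZED.swap(true, Ordering::AcqRel) {
//!         // ... one-time setup goes here ...
//!     }
//! }
//!
//! init_once();
//! assert!(INITIALIZED.load(Ordering::Acquire));
//! ```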
//!
//! ## Memory model for atomic accesses
//!
//! Rust atomics currently follow the same rules as [C++20 atomics][cpp], specifically the rules
//! from the [`intro.races`][cpp-intro.races] section, without the "consume" memory ordering. Since
//! C++ uses an object-based memory model whereas Rust is access-based, a bit of translation work
//! has to be done to apply the C++ rules to Rust: whenever C++ talks about "the value of an
//! object", we understand that to mean the resulting bytes obtained when doing a read. When the C++
//! standard talks about "the value of an atomic object", this refers to the result of doing an
//! atomic load (via the operations provided in this module). A "modification of an atomic object"
//! refers to an atomic store.
//!
//! The end result is *almost* equivalent to saying that creating a *shared reference* to one of the
//! Rust atomic types corresponds to creating an `atomic_ref` in C++, with the `atomic_ref` being
//! destroyed when the lifetime of the shared reference ends. The main difference is that Rust
//! permits concurrent atomic and non-atomic reads to the same memory; those cause no issue in the
//! C++ memory model, but they are forbidden in C++ because memory is partitioned into "atomic
//! objects" and "non-atomic objects" (with `atomic_ref` temporarily converting a non-atomic object
//! into an atomic object).
//!
//! The most important aspect of this model is that *data races* are undefined behavior. A data race
//! is defined as conflicting non-synchronized accesses where at least one of the accesses is
//! non-atomic. Here, accesses are *conflicting* if they affect overlapping regions of memory and at
//! least one of them is a write. (A `compare_exchange` or `compare_exchange_weak` that does not
//! succeed is not considered a write.) They are *non-synchronized* if neither of them
//! *happens-before* the other, according to the happens-before order of the memory model.
//!
//! The other possible cause of undefined behavior in the memory model is mixed-size accesses: Rust
//! inherits the C++ limitation that non-synchronized conflicting atomic accesses may not partially
//! overlap. In other words, every pair of non-synchronized atomic accesses must be either disjoint,
//! access the exact same memory (including using the same access size), or both be reads.
//!
//! Each atomic access takes an [`Ordering`] which defines how the operation interacts with the
//! happens-before order. These orderings behave the same as the corresponding [C++20 atomic
//! orderings][cpp_memory_order]. For more information, see the [nomicon].
//!
//! [cpp]: https://en.cppreference.com/w/cpp/atomic
//! [cpp-intro.races]: https://timsong-cpp.github.io/cppwp/n4868/intro.multithread#intro.races
//! [cpp_memory_order]: https://en.cppreference.com/w/cpp/atomic/memory_order
//! [nomicon]: ../../../nomicon/atomics.html
//!
//! ```rust,no_run undefined_behavior
//! use std::sync::atomic::{AtomicU16, AtomicU8, Ordering};
//! use std::mem::transmute;
//! use std::thread;
//!
//! let atomic = AtomicU16::new(0);
//!
//! thread::scope(|s| {
//!     // This is UB: conflicting non-synchronized accesses, at least one of which is non-atomic.
//!     s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store
//!     s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write
//! });
//!
//! thread::scope(|s| {
//!     // This is fine: the accesses do not conflict (as none of them performs any modification).
//!     // In C++ this would be disallowed since creating an `atomic_ref` precludes
//!     // further non-atomic accesses, but Rust does not have that limitation.
//!     s.spawn(|| atomic.load(Ordering::Relaxed)); // atomic load
//!     s.spawn(|| unsafe { atomic.as_ptr().read() }); // non-atomic read
//! });
//!
//! thread::scope(|s| {
//!     // This is fine: `join` synchronizes the code in a way such that the atomic
//!     // store happens-before the non-atomic write.
//!     let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed)); // atomic store
//!     handle.join().expect("thread won't panic"); // synchronize
//!     s.spawn(|| unsafe { atomic.as_ptr().write(2) }); // non-atomic write
//! });
//!
//! thread::scope(|s| {
//!     // This is UB: non-synchronized conflicting differently-sized atomic accesses.
//!     s.spawn(|| atomic.store(1, Ordering::Relaxed));
//!     s.spawn(|| unsafe {
//!         let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic);
//!         differently_sized.store(2, Ordering::Relaxed);
//!     });
//! });
//!
//! thread::scope(|s| {
//!     // This is fine: `join` synchronizes the code in a way such that
//!     // the 1-byte store happens-before the 2-byte store.
//!     let handle = s.spawn(|| atomic.store(1, Ordering::Relaxed));
//!     handle.join().expect("thread won't panic");
//!     s.spawn(|| unsafe {
//!         let differently_sized = transmute::<&AtomicU16, &AtomicU8>(&atomic);
//!         differently_sized.store(2, Ordering::Relaxed);
//!     });
//! });
//! ```
//!
//! # Portability
//!
//! All atomic types in this module are guaranteed to be [lock-free] if they're
//! available. This means they don't internally acquire a global mutex. Atomic
//! types and operations are not guaranteed to be wait-free. This means that
//! operations like `fetch_or` may be implemented with a compare-and-swap loop.
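//!
//! As an illustration (not how the standard library necessarily implements it on
//! any given target), a `fetch_or` expressed as a compare-and-swap loop might look
//! like this:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! fn fetch_or_with_cas_loop(a: &AtomicUsize, val: usize) -> usize {
//!     let mut old = a.load(Ordering::Relaxed);
//!     loop {
//!         // Retry until no other thread changed the value in between.
//!         match a.compare_exchange_weak(old, old | val, Ordering::SeqCst, Ordering::Relaxed) {
//!             Ok(prev) => return prev,
//!             Err(actual) => old = actual,
//!         }
//!     }
//! }
//!
//! let a = AtomicUsize::new(0b01);
//! assert_eq!(fetch_or_with_cas_loop(&a, 0b10), 0b01);
//! assert_eq!(a.load(Ordering::Relaxed), 0b11);
//! ```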
//!
//! Atomic operations may be implemented at the instruction layer with
//! larger-size atomics. For example, some platforms use 4-byte atomic
//! instructions to implement `AtomicI8`. Note that this emulation should not
//! have an impact on the correctness of code; it's just something to be aware of.
//!
//! The atomic types in this module might not be available on all platforms. The
//! atomic types here are all widely available, however, and can generally be
//! relied upon to exist. Some notable exceptions are:
//!
//! * PowerPC and MIPS platforms with 32-bit pointers do not have `AtomicU64` or
//!   `AtomicI64` types.
//! * ARM platforms like `armv5te` that aren't targeting Linux only provide `load`
//!   and `store` operations, and do not support Compare and Swap (CAS)
//!   operations, such as `swap`, `fetch_add`, etc. Additionally on Linux,
//!   these CAS operations are implemented via [operating system support], which
//!   may come with a performance penalty.
//! * ARM targets with `thumbv6m` only provide `load` and `store` operations,
//!   and do not support Compare and Swap (CAS) operations, such as `swap`,
//!   `fetch_add`, etc.
//!
//! [operating system support]: https://www.kernel.org/doc/Documentation/arm/kernel_user_helpers.txt
//!
//! Note that future platforms may be added that also do not have support for
//! some atomic operations. Maximally portable code will want to be careful
//! about which atomic types are used. `AtomicUsize` and `AtomicIsize` are
//! generally the most portable, but even then they're not available everywhere.
//! For reference, the `std` library requires `AtomicBool`s and pointer-sized atomics, although
//! `core` does not.
//!
//! The `#[cfg(target_has_atomic)]` attribute can be used to conditionally
//! compile based on the target's supported bit widths. It is a key-value
//! option set for each supported size, with values "8", "16", "32", "64",
//! "128", and "ptr" for pointer-sized atomics.
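//!
//! For instance, a sketch of conditionally compiling a 64-bit counter only where
//! 64-bit atomics are supported (the `counter` module and `bump` function are
//! illustrative names):
//!
//! ```
//! #[cfg(target_has_atomic = "64")]
//! mod counter {
//!     use std::sync::atomic::{AtomicU64, Ordering};
//!
//!     pub static COUNTER: AtomicU64 = AtomicU64::new(0);
//!
//!     /// Returns the previous count.
//!     pub fn bump() -> u64 {
//!         COUNTER.fetch_add(1, Ordering::Relaxed)
//!     }
//! }
//! ```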
//!
//! [lock-free]: https://en.wikipedia.org/wiki/Non-blocking_algorithm
//!
//! # Atomic accesses to read-only memory
//!
//! In general, *all* atomic accesses on read-only memory are undefined behavior. For instance, attempting
//! to do a `compare_exchange` that will definitely fail (making it conceptually a read-only
//! operation) can still cause a segmentation fault if the underlying memory page is mapped read-only. Since
//! atomic `load`s might be implemented using compare-exchange operations, even a `load` can fault
//! on read-only memory.
//!
//! For the purpose of this section, "read-only memory" is defined as memory that is read-only in
//! the underlying target, i.e., the pages are mapped with a read-only flag and any attempt to write
//! will cause a page fault. In particular, an `&u128` reference that points to memory that is
//! read-write mapped is *not* considered to point to "read-only memory". In Rust, almost all memory
//! is read-write; the only exceptions are memory created by `const` items or `static` items without
//! interior mutability, and memory that was specifically marked as read-only by the operating
//! system via platform-specific APIs.
//!
//! As an exception from the general rule stated above, "sufficiently small" atomic loads with
//! `Ordering::Relaxed` are implemented in a way that works on read-only memory, and are hence not
//! undefined behavior. The exact size limit for what makes a load "sufficiently small" varies
//! depending on the target:
//!
//! | `target_arch` | Size limit |
//! |---------------|------------|
//! | `x86`, `arm`, `loongarch32`, `mips`, `mips32r6`, `powerpc`, `riscv32`, `sparc`, `hexagon` | 4 bytes |
//! | `x86_64`, `aarch64`, `loongarch64`, `mips64`, `mips64r6`, `powerpc64`, `riscv64`, `sparc64`, `s390x` | 8 bytes |
//!
//! Atomic loads that are larger than this limit, as well as atomic loads with ordering other
//! than `Relaxed`, as well as *all* atomic loads on targets not listed in the table, might still
//! work on read-only memory under certain conditions, but that is not a stable guarantee and should
//! not be relied upon.
//!
//! If you need to do an acquire load on read-only memory, you can do a relaxed load followed by an
//! acquire fence instead.
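//!
//! A sketch of that pattern (the `acquire_load_from_readonly` helper is
//! illustrative, and the memory here is ordinary read-write memory just to keep
//! the example runnable):
//!
//! ```
//! use std::sync::atomic::{fence, AtomicU32, Ordering};
//!
//! fn acquire_load_from_readonly(shared: &AtomicU32) -> u32 {
//!     // A relaxed load works on read-only memory (for sufficiently small
//!     // types; see the table above) ...
//!     let value = shared.load(Ordering::Relaxed);
//!     // ... and the fence upgrades the synchronization to acquire semantics.
//!     fence(Ordering::Acquire);
//!     value
//! }
//!
//! let x = AtomicU32::new(7);
//! assert_eq!(acquire_load_from_readonly(&x), 7);
//! ```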
//!
//! # Examples
//!
//! A simple spinlock:
//!
//! ```ignore-wasm
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//! use std::{hint, thread};
//!
//! fn main() {
//!     let spinlock = Arc::new(AtomicUsize::new(1));
//!
//!     let spinlock_clone = Arc::clone(&spinlock);
//!
//!     let thread = thread::spawn(move || {
//!         spinlock_clone.store(0, Ordering::Release);
//!     });
//!
//!     // Wait for the other thread to release the lock
//!     while spinlock.load(Ordering::Acquire) != 0 {
//!         hint::spin_loop();
//!     }
//!
//!     if let Err(panic) = thread.join() {
//!         println!("Thread had an error: {panic:?}");
//!     }
//! }
//! ```
//!
//! Keep a global count of live threads:
//!
//! ```
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! static GLOBAL_THREAD_COUNT: AtomicUsize = AtomicUsize::new(0);
//!
//! // Note that Relaxed ordering doesn't synchronize anything
//! // except the global thread counter itself.
//! let old_thread_count = GLOBAL_THREAD_COUNT.fetch_add(1, Ordering::Relaxed);
//! // Note that this number may not be accurate at the moment of printing
//! // because some other thread may have changed the static value already.
//! println!("live threads: {}", old_thread_count + 1);
//! ```

#![stable(feature = "rust1", since = "1.0.0")]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(dead_code))]
#![cfg_attr(not(target_has_atomic_load_store = "8"), allow(unused_imports))]
#![rustc_diagnostic_item = "atomic_mod"]
// Clippy complains about the pattern of "safe function calling unsafe function taking pointers".
// This happens with AtomicPtr intrinsics but is fine, as the pointers clippy is concerned about
// are just normal values that get loaded/stored, but not dereferenced.
#![allow(clippy::not_unsafe_ptr_arg_deref)]

use self::Ordering::*;
use crate::cell::UnsafeCell;
#[cfg(not(feature = "ferrocene_certified"))]
use crate::hint::spin_loop;
use crate::intrinsics::AtomicOrdering as AO;
#[cfg(not(feature = "ferrocene_certified"))]
use crate::{fmt, intrinsics};

// Ferrocene addition: imports for certified subset
#[cfg(feature = "ferrocene_certified")]
#[rustfmt::skip]
use crate::intrinsics;

trait Sealed {}

/// A marker trait for primitive types which can be modified atomically.
///
/// This is an implementation detail for <code>[Atomic]\<T></code> which may disappear or be replaced at any time.
///
/// # Safety
///
/// Types implementing this trait must be primitives that can be modified atomically.
///
/// The associated `Self::AtomicInner` type must have the same size and bit validity as `Self`,
/// but may have a higher alignment requirement, so the following `transmute`s are sound:
///
/// - `&mut Self::AtomicInner` as `&mut Self`
/// - `Self` as `Self::AtomicInner` or the reverse
#[unstable(
    feature = "atomic_internals",
    reason = "implementation detail which may disappear or be replaced at any time",
    issue = "none"
)]
#[expect(private_bounds)]
pub unsafe trait AtomicPrimitive: Sized + Copy + Sealed {
    /// Temporary implementation detail.
    type AtomicInner: Sized;
}

macro impl_atomic_primitive(
    $Atom:ident $(<$T:ident>)? ($Primitive:ty),
    size($size:literal),
    align($align:literal) $(,)?
) {
    impl $(<$T>)? Sealed for $Primitive {}

    #[unstable(
        feature = "atomic_internals",
        reason = "implementation detail which may disappear or be replaced at any time",
        issue = "none"
    )]
    #[cfg(target_has_atomic_load_store = $size)]
    unsafe impl $(<$T>)? AtomicPrimitive for $Primitive {
        type AtomicInner = $Atom $(<$T>)?;
    }
}

#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicBool(bool), size("8"), align(1));
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicI8(i8), size("8"), align(1));
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicU8(u8), size("8"), align(1));
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicI16(i16), size("16"), align(2));
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicU16(u16), size("16"), align(2));
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicI32(i32), size("32"), align(4));
impl_atomic_primitive!(AtomicU32(u32), size("32"), align(4));
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicI64(i64), size("64"), align(8));
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicU64(u64), size("64"), align(8));
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicI128(i128), size("128"), align(16));
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicU128(u128), size("128"), align(16));

#[cfg(target_pointer_width = "16")]
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicIsize(isize), size("ptr"), align(8));

#[cfg(target_pointer_width = "16")]
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicUsize(usize), size("ptr"), align(8));

#[cfg(target_pointer_width = "16")]
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(2));
#[cfg(target_pointer_width = "32")]
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(4));
#[cfg(target_pointer_width = "64")]
#[cfg(not(feature = "ferrocene_certified"))]
impl_atomic_primitive!(AtomicPtr<T>(*mut T), size("ptr"), align(8));

/// A memory location which can be safely modified from multiple threads.
///
/// This has the same size and bit validity as the underlying type `T`. However,
/// the alignment of this type is always equal to its size, even on targets where
/// `T` has alignment less than its size.
///
/// For more about the differences between atomic types and non-atomic types as
/// well as information about the portability of this type, please see the
/// [module-level documentation].
///
/// **Note:** This type is only available on platforms that support atomic loads
/// and stores of `T`.
///
/// [module-level documentation]: crate::sync::atomic
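///
/// # Examples
///
/// A minimal sketch, assuming the unstable `generic_atomic` feature:
///
/// ```
/// #![feature(generic_atomic)]
/// use std::sync::atomic::{Atomic, Ordering};
///
/// // `Atomic<u32>` is the same type as `AtomicU32`.
/// static COUNTER: Atomic<u32> = Atomic::<u32>::new(0);
///
/// COUNTER.fetch_add(1, Ordering::Relaxed);
/// assert_eq!(COUNTER.load(Ordering::Relaxed), 1);
/// ```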
#[unstable(feature = "generic_atomic", issue = "130539")]
#[cfg(not(feature = "ferrocene_certified"))]
pub type Atomic<T> = <T as AtomicPrimitive>::AtomicInner;

// Some architectures don't have byte-sized atomics, which results in LLVM
// emulating them using a LL/SC loop. However for AtomicBool we can take
// advantage of the fact that it only ever contains 0 or 1 and use atomic OR/AND
// instead, which LLVM can emulate using a larger atomic OR/AND operation.
//
// This list should only contain architectures which have word-sized atomic-or/
// atomic-and instructions but don't natively support byte-sized atomics.
#[cfg(target_has_atomic = "8")]
#[cfg(not(feature = "ferrocene_certified"))]
const EMULATE_ATOMIC_BOOL: bool = cfg!(any(
    target_arch = "riscv32",
    target_arch = "riscv64",
    target_arch = "loongarch32",
    target_arch = "loongarch64"
));

/// A boolean type which can be safely shared between threads.
///
/// This type has the same size, alignment, and bit validity as a [`bool`].
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of `u8`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicBool"]
#[repr(C, align(1))]
#[cfg(not(feature = "ferrocene_certified"))]
pub struct AtomicBool {
    v: UnsafeCell<u8>,
}

#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl Default for AtomicBool {
    /// Creates an `AtomicBool` initialized to `false`.
    #[inline]
    fn default() -> Self {
        Self::new(false)
    }
}

// Send is implicitly implemented for AtomicBool.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl Sync for AtomicBool {}

/// A raw pointer type which can be safely shared between threads.
///
/// This type has the same size and bit validity as a `*mut T`.
///
/// **Note**: This type is only available on platforms that support atomic
/// loads and stores of pointers. Its size depends on the target pointer's size.
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "AtomicPtr"]
#[cfg_attr(target_pointer_width = "16", repr(C, align(2)))]
#[cfg_attr(target_pointer_width = "32", repr(C, align(4)))]
#[cfg_attr(target_pointer_width = "64", repr(C, align(8)))]
#[cfg(not(feature = "ferrocene_certified"))]
pub struct AtomicPtr<T> {
    p: UnsafeCell<*mut T>,
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<T> Default for AtomicPtr<T> {
    /// Creates a null `AtomicPtr<T>`.
    fn default() -> AtomicPtr<T> {
        AtomicPtr::new(crate::ptr::null_mut())
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl<T> Send for AtomicPtr<T> {}
#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(feature = "ferrocene_certified"))]
unsafe impl<T> Sync for AtomicPtr<T> {}

/// Atomic memory orderings
///
/// Memory orderings specify the way atomic operations synchronize memory.
/// In its weakest [`Ordering::Relaxed`], only the memory directly touched by the
/// operation is synchronized. On the other hand, a store-load pair of [`Ordering::SeqCst`]
/// operations synchronize other memory while additionally preserving a total order of such
/// operations across all threads.
///
/// Rust's memory orderings are [the same as those of
/// C++20](https://en.cppreference.com/w/cpp/atomic/memory_order).
///
/// For more information see the [nomicon].
///
/// [nomicon]: ../../../nomicon/atomics.html
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(feature = "ferrocene_certified"), derive(Copy, Clone, Debug, Eq, PartialEq, Hash))]
#[cfg_attr(feature = "ferrocene_certified", derive(Copy, Clone))]
#[non_exhaustive]
#[rustc_diagnostic_item = "Ordering"]
pub enum Ordering {
    /// No ordering constraints, only atomic operations.
    ///
    /// Corresponds to [`memory_order_relaxed`] in C++20.
    ///
    /// [`memory_order_relaxed`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Relaxed_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Relaxed,
    /// When coupled with a store, all previous operations become ordered
    /// before any load of this value with [`Acquire`] (or stronger) ordering.
    /// In particular, all previous writes become visible to all threads
    /// that perform an [`Acquire`] (or stronger) load of this value.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] load operation!
    ///
    /// This ordering is only applicable for operations that can perform a store.
    ///
    /// Corresponds to [`memory_order_release`] in C++20.
    ///
    /// [`memory_order_release`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Release,
    /// When coupled with a load, if the loaded value was written by a store operation with
    /// [`Release`] (or stronger) ordering, then all subsequent operations
    /// become ordered after that store. In particular, all subsequent loads will see data
    /// written before the store.
    ///
    /// Notice that using this ordering for an operation that combines loads
    /// and stores leads to a [`Relaxed`] store operation!
    ///
    /// This ordering is only applicable for operations that can perform a load.
    ///
    /// Corresponds to [`memory_order_acquire`] in C++20.
    ///
    /// [`memory_order_acquire`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    Acquire,
    /// Has the effects of both [`Acquire`] and [`Release`] together:
    /// For loads it uses [`Acquire`] ordering. For stores it uses the [`Release`] ordering.
    ///
    /// Notice that in the case of `compare_and_swap`, it is possible that the operation ends up
    /// not performing any store and hence it has just [`Acquire`] ordering. However,
    /// `AcqRel` will never perform [`Relaxed`] accesses.
    ///
    /// This ordering is only applicable for operations that combine both loads and stores.
    ///
    /// Corresponds to [`memory_order_acq_rel`] in C++20.
    ///
    /// [`memory_order_acq_rel`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Release-Acquire_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    AcqRel,
    /// Like [`Acquire`]/[`Release`]/[`AcqRel`] (for load, store, and load-with-store
    /// operations, respectively) with the additional guarantee that all threads see all
    /// sequentially consistent operations in the same order.
    ///
    /// Corresponds to [`memory_order_seq_cst`] in C++20.
    ///
    /// [`memory_order_seq_cst`]: https://en.cppreference.com/w/cpp/atomic/memory_order#Sequentially-consistent_ordering
    #[stable(feature = "rust1", since = "1.0.0")]
    SeqCst,
}

/// An [`AtomicBool`] initialized to `false`.
#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "rust1", since = "1.0.0")]
#[deprecated(
    since = "1.34.0",
    note = "the `new` function is now preferred",
    suggestion = "AtomicBool::new(false)"
)]
#[cfg(not(feature = "ferrocene_certified"))]
pub const ATOMIC_BOOL_INIT: AtomicBool = AtomicBool::new(false);

#[cfg(target_has_atomic_load_store = "8")]
#[cfg(not(feature = "ferrocene_certified"))]
impl AtomicBool {
    /// Creates a new `AtomicBool`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let atomic_true = AtomicBool::new(true);
    /// let atomic_false = AtomicBool::new(false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
    #[must_use]
    pub const fn new(v: bool) -> AtomicBool {
        AtomicBool { v: UnsafeCell::new(v as u8) }
    }

    /// Creates a new `AtomicBool` from a pointer.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{self, AtomicBool};
    ///
    /// // Get a pointer to an allocated value
    /// let ptr: *mut bool = Box::into_raw(Box::new(false));
    ///
    /// assert!(ptr.cast::<AtomicBool>().is_aligned());
    ///
    /// {
    ///     // Create an atomic view of the allocated value
    ///     let atomic = unsafe { AtomicBool::from_ptr(ptr) };
    ///
    ///     // Use `atomic` for atomic operations, possibly share it with other threads
    ///     atomic.store(true, atomic::Ordering::Relaxed);
    /// }
    ///
    /// // It's ok to non-atomically access the value behind `ptr`,
    /// // since the reference to the atomic ended its lifetime in the block above
    /// assert_eq!(unsafe { *ptr }, true);
    ///
    /// // Deallocate the value
    /// unsafe { drop(Box::from_raw(ptr)) }
    /// ```
    ///
    /// # Safety
    ///
    /// * `ptr` must be aligned to `align_of::<AtomicBool>()` (note that this is always true, since
    ///   `align_of::<AtomicBool>() == 1`).
    /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
    /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
    ///   allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
    ///   sizes, without synchronization.
    ///
    /// [valid]: crate::ptr#safety
    /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
    #[inline]
    #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
    #[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
    pub const unsafe fn from_ptr<'a>(ptr: *mut bool) -> &'a AtomicBool {
        // SAFETY: guaranteed by the caller
        unsafe { &*ptr.cast() }
    }

    /// Returns a mutable reference to the underlying [`bool`].
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = AtomicBool::new(true);
    /// assert_eq!(*some_bool.get_mut(), true);
    /// *some_bool.get_mut() = false;
    /// assert_eq!(some_bool.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    pub fn get_mut(&mut self) -> &mut bool {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(self.v.get() as *mut bool) }
    }

    /// Gets atomic access to a `&mut bool`.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bool = true;
    /// let a = AtomicBool::from_mut(&mut some_bool);
    /// a.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool, false);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "8")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut(v: &mut bool) -> &mut Self {
        // SAFETY: the mutable reference guarantees unique ownership, and
        // alignment of both `bool` and `Self` is 1.
        unsafe { &mut *(v as *mut bool as *mut Self) }
    }

    /// Gets non-atomic access to a `&mut [AtomicBool]` slice.
    ///
    /// This is safe because the mutable reference guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```ignore-wasm
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bools = [const { AtomicBool::new(false) }; 10];
    ///
    /// let view: &mut [bool] = AtomicBool::get_mut_slice(&mut some_bools);
    /// assert_eq!(view, [false; 10]);
    /// view[..5].copy_from_slice(&[true; 5]);
    ///
    /// std::thread::scope(|s| {
    ///     for t in &some_bools[..5] {
    ///         s.spawn(move || assert_eq!(t.load(Ordering::Relaxed), true));
    ///     }
    ///
    ///     for f in &some_bools[5..] {
    ///         s.spawn(move || assert_eq!(f.load(Ordering::Relaxed), false));
    ///     }
    /// });
    /// ```
    #[inline]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn get_mut_slice(this: &mut [Self]) -> &mut [bool] {
        // SAFETY: the mutable reference guarantees unique ownership.
        unsafe { &mut *(this as *mut [Self] as *mut [bool]) }
    }

    /// Gets atomic access to a `&mut [bool]` slice.
    ///
    /// # Examples
    ///
    /// ```rust,ignore-wasm
    /// #![feature(atomic_from_mut)]
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let mut some_bools = [false; 10];
    /// let a = &*AtomicBool::from_mut_slice(&mut some_bools);
    /// std::thread::scope(|s| {
    ///     for i in 0..a.len() {
    ///         s.spawn(move || a[i].store(true, Ordering::Relaxed));
    ///     }
    /// });
    /// assert_eq!(some_bools, [true; 10]);
    /// ```
    #[inline]
    #[cfg(target_has_atomic_equal_alignment = "8")]
    #[unstable(feature = "atomic_from_mut", issue = "76314")]
    pub fn from_mut_slice(v: &mut [bool]) -> &mut [Self] {
        // SAFETY: the mutable reference guarantees unique ownership, and
        // alignment of both `bool` and `Self` is 1.
        unsafe { &mut *(v as *mut [bool] as *mut [Self]) }
    }

    /// Consumes the atomic and returns the contained value.
    ///
    /// This is safe because passing `self` by value guarantees that no other threads are
    /// concurrently accessing the atomic data.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::AtomicBool;
    ///
    /// let some_bool = AtomicBool::new(true);
    /// assert_eq!(some_bool.into_inner(), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_access", since = "1.15.0")]
    #[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
    pub const fn into_inner(self) -> bool {
        self.v.into_inner() != 0
    }

    /// Loads a value from the bool.
    ///
    /// `load` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Release`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.load(Ordering::Relaxed), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn load(&self, order: Ordering) -> bool {
        // SAFETY: any data races are prevented by atomic intrinsics and the raw
        // pointer passed in is valid because we got it from a reference.
        unsafe { atomic_load(self.v.get(), order) != 0 }
    }

    /// Stores a value into the bool.
    ///
    /// `store` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
    ///
    /// # Panics
    ///
    /// Panics if `order` is [`Acquire`] or [`AcqRel`].
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// some_bool.store(false, Ordering::Relaxed);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn store(&self, val: bool, order: Ordering) {
        // SAFETY: any data races are prevented by atomic intrinsics and the raw
        // pointer passed in is valid because we got it from a reference.
        unsafe {
            atomic_store(self.v.get(), val as u8, order);
        }
    }

    /// Stores a value into the bool, returning the previous value.
    ///
    /// `swap` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.swap(false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn swap(&self, val: bool, order: Ordering) -> bool {
        if EMULATE_ATOMIC_BOOL {
            if val { self.fetch_or(true, order) } else { self.fetch_and(false, order) }
        } else {
            // SAFETY: data races are prevented by atomic intrinsics.
            unsafe { atomic_swap(self.v.get(), val as u8, order) != 0 }
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is always the previous value. If it is equal to `current`, then the value
    /// was updated.
    ///
    /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
    /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
    /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
    /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
    /// happens, and using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Migrating to `compare_exchange` and `compare_exchange_weak`
    ///
    /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
    /// memory orderings:
    ///
    /// Original | Success | Failure
    /// -------- | ------- | -------
    /// Relaxed  | Relaxed | Relaxed
    /// Acquire  | Acquire | Acquire
    /// Release  | Release | Relaxed
    /// AcqRel   | AcqRel  | Acquire
    /// SeqCst   | SeqCst  | SeqCst
    ///
    /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use
    /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`,
    /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err`
    /// rather than to infer success vs failure based on the value that was read.
    ///
    /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead.
    /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
    /// which allows the compiler to generate better assembly code when the compare and swap
    /// is used in a loop.
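    ///
    /// As an illustrative sketch, a migrated call site might look like this, with the
    /// orderings chosen per the table above:
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let a = AtomicBool::new(true);
    /// // Before: let prev = a.compare_and_swap(true, false, Ordering::AcqRel);
    /// let prev = a
    ///     .compare_exchange(true, false, Ordering::AcqRel, Ordering::Acquire)
    ///     .unwrap_or_else(|x| x);
    /// assert_eq!(prev, true);
    /// ```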
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, false, Ordering::Relaxed), true);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_and_swap(true, true, Ordering::Relaxed), false);
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    #[cfg(not(feature = "ferrocene_certified"))]
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[deprecated(
        since = "1.50.0",
        note = "Use `compare_exchange` or `compare_exchange_weak` instead"
    )]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_and_swap(&self, current: bool, new: bool, order: Ordering) -> bool {
        match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
            Ok(x) => x,
            Err(x) => x,
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// The return value is a result indicating whether the new value was written and containing
    /// the previous value. On success this value is guaranteed to be equal to `current`.
    ///
    /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let some_bool = AtomicBool::new(true);
    ///
    /// assert_eq!(some_bool.compare_exchange(true,
    ///                                       false,
    ///                                       Ordering::Acquire,
    ///                                       Ordering::Relaxed),
    ///            Ok(true));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    ///
    /// assert_eq!(some_bool.compare_exchange(true, true,
    ///                                       Ordering::SeqCst,
    ///                                       Ordering::Acquire),
    ///            Err(false));
    /// assert_eq!(some_bool.load(Ordering::Relaxed), false);
    /// ```
    ///
    /// # Considerations
    ///
    /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
    /// of CAS operations. In particular, a load of the value followed by a successful
    /// `compare_exchange` with the previous load *does not ensure* that other threads have not
    /// changed the value in the interim. This is usually important when the *equality* check in
    /// the `compare_exchange` is being used to check the *identity* of a value, but equality
    /// does not necessarily imply identity. In this case, `compare_exchange` can lead to the
    /// [ABA problem].
    ///
    /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
    /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[doc(alias = "compare_and_swap")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_exchange(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        if EMULATE_ATOMIC_BOOL {
            // Pick the strongest ordering from success and failure.
            let order = match (success, failure) {
                (SeqCst, _) => SeqCst,
                (_, SeqCst) => SeqCst,
                (AcqRel, _) => AcqRel,
                (_, AcqRel) => {
                    panic!("there is no such thing as an acquire-release failure ordering")
                }
                (Release, Acquire) => AcqRel,
                (Acquire, _) => Acquire,
                (_, Acquire) => Acquire,
                (Release, Relaxed) => Release,
                (_, Release) => panic!("there is no such thing as a release failure ordering"),
                (Relaxed, Relaxed) => Relaxed,
            };
            let old = if current == new {
                // This is a no-op, but we still need to perform the operation
                // for memory ordering reasons.
                self.fetch_or(false, order)
            } else {
                // This sets the value to the new one and returns the old one.
                self.swap(new, order)
            };
            if old == current { Ok(old) } else { Err(old) }
        } else {
            // SAFETY: data races are prevented by atomic intrinsics.
            match unsafe {
                atomic_compare_exchange(self.v.get(), current as u8, new as u8, success, failure)
            } {
                Ok(x) => Ok(x != 0),
                Err(x) => Err(x != 0),
            }
        }
    }

    /// Stores a value into the [`bool`] if the current value is the same as the `current` value.
    ///
    /// Unlike [`AtomicBool::compare_exchange`], this function is allowed to spuriously fail even when the
    /// comparison succeeds, which can result in more efficient code on some platforms. The
    /// return value is a result indicating whether the new value was written and containing the
    /// previous value.
    ///
    /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. `success` describes the required ordering for the
    /// read-modify-write operation that takes place if the comparison with `current` succeeds.
    /// `failure` describes the required ordering for the load operation that takes place when
    /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
    /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
    /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let val = AtomicBool::new(false);
    ///
    /// let new = true;
    /// let mut old = val.load(Ordering::Relaxed);
    /// loop {
    ///     match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
    ///         Ok(_) => break,
    ///         Err(x) => old = x,
    ///     }
    /// }
    /// ```
    ///
    /// # Considerations
    ///
    /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
    /// of CAS operations. In particular, a load of the value followed by a successful
    /// `compare_exchange` with the previous load *does not ensure* that other threads have not
    /// changed the value in the interim. This is usually important when the *equality* check in
    /// the `compare_exchange` is being used to check the *identity* of a value, but equality
    /// does not necessarily imply identity. In this case, `compare_exchange` can lead to the
    /// [ABA problem].
    ///
    /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
    /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
    #[inline]
    #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
    #[doc(alias = "compare_and_swap")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn compare_exchange_weak(
        &self,
        current: bool,
        new: bool,
        success: Ordering,
        failure: Ordering,
    ) -> Result<bool, bool> {
        if EMULATE_ATOMIC_BOOL {
            return self.compare_exchange(current, new, success, failure);
        }

        // SAFETY: data races are prevented by atomic intrinsics.
        match unsafe {
            atomic_compare_exchange_weak(self.v.get(), current as u8, new as u8, success, failure)
        } {
            Ok(x) => Ok(x != 0),
            Err(x) => Err(x != 0),
        }
    }

    /// Logical "and" with a boolean value.
    ///
    /// Performs a logical "and" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_and(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_and(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_and(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_and(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "nand" with a boolean value.
    ///
    /// Performs a logical "nand" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_nand(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst) as usize, 0);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_nand(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_nand(&self, val: bool, order: Ordering) -> bool {
        // We can't use atomic_nand here because it can result in a bool with
        // an invalid value. This happens because the atomic operation is done
        // with an 8-bit integer internally, which would set the upper 7 bits.
        // So we just use fetch_xor or swap instead.
        if val {
            // !(x & true) == !x
            // We must invert the bool.
            self.fetch_xor(true, order)
        } else {
            // !(x & false) == true
            // We must set the bool to true.
            self.swap(true, order)
        }
    }

    /// Logical "or" with a boolean value.
    ///
    /// Performs a logical "or" operation on the current value and the argument `val`, and sets the
    /// new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_or(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_or(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_or(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_or(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "xor" with a boolean value.
    ///
    /// Performs a logical "xor" operation on the current value and the argument `val`, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_xor(true, Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_xor(false, Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_xor(&self, val: bool, order: Ordering) -> bool {
        // SAFETY: data races are prevented by atomic intrinsics.
        unsafe { atomic_xor(self.v.get(), val as u8, order) != 0 }
    }

    /// Logical "not" with a boolean value.
    ///
    /// Performs a logical "not" operation on the current value, and sets
    /// the new value to the result.
    ///
    /// Returns the previous value.
    ///
    /// `fetch_not` takes an [`Ordering`] argument which describes the memory ordering
    /// of this operation. All ordering modes are possible. Note that using
    /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
    /// using [`Release`] makes the load part [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let foo = AtomicBool::new(true);
    /// assert_eq!(foo.fetch_not(Ordering::SeqCst), true);
    /// assert_eq!(foo.load(Ordering::SeqCst), false);
    ///
    /// let foo = AtomicBool::new(false);
    /// assert_eq!(foo.fetch_not(Ordering::SeqCst), false);
    /// assert_eq!(foo.load(Ordering::SeqCst), true);
    /// ```
    #[inline]
    #[stable(feature = "atomic_bool_fetch_not", since = "1.81.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_not(&self, order: Ordering) -> bool {
        self.fetch_xor(true, order)
    }

    /// Returns a mutable pointer to the underlying [`bool`].
    ///
    /// Doing non-atomic reads and writes on the resulting boolean can be a data race.
    /// This method is mostly useful for FFI, where the function signature may use
    /// `*mut bool` instead of `&AtomicBool`.
    ///
    /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
    /// atomic types work with interior mutability. All modifications of an atomic change the value
    /// through a shared reference, and can do so safely as long as they use atomic operations. Any
    /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
    /// requirements of the [memory model].
    ///
    /// # Examples
    ///
    /// ```ignore (extern-declaration)
    /// # fn main() {
    /// use std::sync::atomic::AtomicBool;
    ///
    /// extern "C" {
    ///     fn my_atomic_op(arg: *mut bool);
    /// }
    ///
    /// let mut atomic = AtomicBool::new(true);
    /// unsafe {
    ///     my_atomic_op(atomic.as_ptr());
    /// }
    /// # }
    /// ```
    ///
    /// [memory model]: self#memory-model-for-atomic-accesses
    #[inline]
    #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
    #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
    #[rustc_never_returns_null_ptr]
    pub const fn as_ptr(&self) -> *mut bool {
        self.v.get().cast()
    }

    /// Fetches the value, and applies a function to it that returns an optional
    /// new value. Returns a `Result` of `Ok(previous_value)` if the function
    /// returned `Some(_)`, else `Err(previous_value)`.
    ///
    /// Note: This may call the function multiple times if the value has been
    /// changed from other threads in the meantime, as long as the function
    /// returns `Some(_)`, but the function will have been applied only once to
    /// the stored value.
    ///
    /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
    /// ordering of this operation. The first describes the required ordering for
    /// when the operation finally succeeds while the second describes the
    /// required ordering for loads. These correspond to the success and failure
    /// orderings of [`AtomicBool::compare_exchange`] respectively.
    ///
    /// Using [`Acquire`] as success ordering makes the store part of this
    /// operation [`Relaxed`], and using [`Release`] makes the final successful
    /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
    /// [`Acquire`] or [`Relaxed`].
    ///
    /// **Note:** This method is only available on platforms that support atomic
    /// operations on `u8`.
    ///
    /// # Considerations
    ///
    /// This method is not magic; it is not provided by the hardware, and does not act like a
    /// critical section or mutex.
    ///
    /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
    /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem].
    ///
    /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
    /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
    ///
    /// # Examples
    ///
    /// ```rust
    /// use std::sync::atomic::{AtomicBool, Ordering};
    ///
    /// let x = AtomicBool::new(false);
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
    /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
    /// assert_eq!(x.load(Ordering::SeqCst), false);
    /// ```
    #[inline]
    #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
    #[cfg(target_has_atomic = "8")]
    #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
    pub fn fetch_update<F>(
        &self,
        set_order: Ordering,
        fetch_order: Ordering,
        mut f: F,
    ) -> Result<bool, bool>
    where
        F: FnMut(bool) -> Option<bool>,
    {
        let mut prev = self.load(fetch_order);
        while let Some(next) = f(prev) {
            match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
                x @ Ok(_) => return x,
                Err(next_prev) => prev = next_prev,
            }
        }
        Err(prev)
    }
1383
1384 /// Fetches the value, and applies a function to it that returns an optional
1385 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
1386 /// returned `Some(_)`, else `Err(previous_value)`.
1387 ///
1388 /// See also: [`update`](`AtomicBool::update`).
1389 ///
1390 /// Note: This may call the function multiple times if the value has been
1391 /// changed from other threads in the meantime, as long as the function
1392 /// returns `Some(_)`, but the function will have been applied only once to
1393 /// the stored value.
1394 ///
1395 /// `try_update` takes two [`Ordering`] arguments to describe the memory
1396 /// ordering of this operation. The first describes the required ordering for
1397 /// when the operation finally succeeds while the second describes the
1398 /// required ordering for loads. These correspond to the success and failure
1399 /// orderings of [`AtomicBool::compare_exchange`] respectively.
1400 ///
1401 /// Using [`Acquire`] as success ordering makes the store part of this
1402 /// operation [`Relaxed`], and using [`Release`] makes the final successful
1403 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
1404 /// [`Acquire`] or [`Relaxed`].
1405 ///
1406 /// **Note:** This method is only available on platforms that support atomic
1407 /// operations on `u8`.
1408 ///
1409 /// # Considerations
1410 ///
1411 /// This method is not magic; it is not provided by the hardware, and does not act like a
1412 /// critical section or mutex.
1413 ///
1414 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
1415 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem].
1416 ///
1417 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1418 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1419 ///
1420 /// # Examples
1421 ///
1422 /// ```rust
1423 /// #![feature(atomic_try_update)]
1424 /// use std::sync::atomic::{AtomicBool, Ordering};
1425 ///
1426 /// let x = AtomicBool::new(false);
1427 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(false));
1428 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(false));
1429 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(!x)), Ok(true));
1430 /// assert_eq!(x.load(Ordering::SeqCst), false);
1431 /// ```
1432 #[inline]
1433 #[unstable(feature = "atomic_try_update", issue = "135894")]
1434 #[cfg(target_has_atomic = "8")]
1435 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1436 pub fn try_update(
1437 &self,
1438 set_order: Ordering,
1439 fetch_order: Ordering,
1440 f: impl FnMut(bool) -> Option<bool>,
1441 ) -> Result<bool, bool> {
1442 // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
1443 // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
1444 self.fetch_update(set_order, fetch_order, f)
1445 }
1446
/// Fetches the value, and applies a function to it that returns a new value.
1448 /// The new value is stored and the old value is returned.
1449 ///
1450 /// See also: [`try_update`](`AtomicBool::try_update`).
1451 ///
1452 /// Note: This may call the function multiple times if the value has been changed from other threads in
1453 /// the meantime, but the function will have been applied only once to the stored value.
1454 ///
1455 /// `update` takes two [`Ordering`] arguments to describe the memory
1456 /// ordering of this operation. The first describes the required ordering for
1457 /// when the operation finally succeeds while the second describes the
1458 /// required ordering for loads. These correspond to the success and failure
1459 /// orderings of [`AtomicBool::compare_exchange`] respectively.
1460 ///
1461 /// Using [`Acquire`] as success ordering makes the store part
1462 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
1463 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
1464 ///
1465 /// **Note:** This method is only available on platforms that support atomic operations on `u8`.
1466 ///
1467 /// # Considerations
1468 ///
1469 /// This method is not magic; it is not provided by the hardware, and does not act like a
1470 /// critical section or mutex.
1471 ///
1472 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
1473 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem].
1474 ///
1475 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1476 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1477 ///
1478 /// # Examples
1479 ///
1480 /// ```rust
1481 /// #![feature(atomic_try_update)]
1482 ///
1483 /// use std::sync::atomic::{AtomicBool, Ordering};
1484 ///
1485 /// let x = AtomicBool::new(false);
1486 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| !x), false);
1487 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| !x), true);
1488 /// assert_eq!(x.load(Ordering::SeqCst), false);
1489 /// ```
1490 #[inline]
1491 #[unstable(feature = "atomic_try_update", issue = "135894")]
1492 #[cfg(target_has_atomic = "8")]
1493 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1494 pub fn update(
1495 &self,
1496 set_order: Ordering,
1497 fetch_order: Ordering,
1498 mut f: impl FnMut(bool) -> bool,
1499 ) -> bool {
1500 let mut prev = self.load(fetch_order);
1501 loop {
1502 match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
1503 Ok(x) => break x,
1504 Err(next_prev) => prev = next_prev,
1505 }
1506 }
1507 }
1508}
1509
1510#[cfg(target_has_atomic_load_store = "ptr")]
1511#[cfg(not(feature = "ferrocene_certified"))]
1512impl<T> AtomicPtr<T> {
1513 /// Creates a new `AtomicPtr`.
1514 ///
1515 /// # Examples
1516 ///
1517 /// ```
1518 /// use std::sync::atomic::AtomicPtr;
1519 ///
1520 /// let ptr = &mut 5;
1521 /// let atomic_ptr = AtomicPtr::new(ptr);
1522 /// ```
1523 #[inline]
1524 #[stable(feature = "rust1", since = "1.0.0")]
1525 #[rustc_const_stable(feature = "const_atomic_new", since = "1.24.0")]
1526 pub const fn new(p: *mut T) -> AtomicPtr<T> {
1527 AtomicPtr { p: UnsafeCell::new(p) }
1528 }
1529
1530 /// Creates a new `AtomicPtr` from a pointer.
1531 ///
1532 /// # Examples
1533 ///
1534 /// ```
1535 /// use std::sync::atomic::{self, AtomicPtr};
1536 ///
1537 /// // Get a pointer to an allocated value
1538 /// let ptr: *mut *mut u8 = Box::into_raw(Box::new(std::ptr::null_mut()));
1539 ///
1540 /// assert!(ptr.cast::<AtomicPtr<u8>>().is_aligned());
1541 ///
1542 /// {
1543 /// // Create an atomic view of the allocated value
1544 /// let atomic = unsafe { AtomicPtr::from_ptr(ptr) };
1545 ///
1546 /// // Use `atomic` for atomic operations, possibly share it with other threads
1547 /// atomic.store(std::ptr::NonNull::dangling().as_ptr(), atomic::Ordering::Relaxed);
1548 /// }
1549 ///
1550 /// // It's ok to non-atomically access the value behind `ptr`,
1551 /// // since the reference to the atomic ended its lifetime in the block above
1552 /// assert!(!unsafe { *ptr }.is_null());
1553 ///
1554 /// // Deallocate the value
1555 /// unsafe { drop(Box::from_raw(ptr)) }
1556 /// ```
1557 ///
1558 /// # Safety
1559 ///
1560 /// * `ptr` must be aligned to `align_of::<AtomicPtr<T>>()` (note that on some platforms this
1561 /// can be bigger than `align_of::<*mut T>()`).
1562 /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
1563 /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
1564 /// allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
1565 /// sizes, without synchronization.
1566 ///
1567 /// [valid]: crate::ptr#safety
1568 /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
1569 #[inline]
1570 #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
1571 #[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
1572 pub const unsafe fn from_ptr<'a>(ptr: *mut *mut T) -> &'a AtomicPtr<T> {
1573 // SAFETY: guaranteed by the caller
1574 unsafe { &*ptr.cast() }
1575 }
1576
1577 /// Returns a mutable reference to the underlying pointer.
1578 ///
1579 /// This is safe because the mutable reference guarantees that no other threads are
1580 /// concurrently accessing the atomic data.
1581 ///
1582 /// # Examples
1583 ///
1584 /// ```
1585 /// use std::sync::atomic::{AtomicPtr, Ordering};
1586 ///
1587 /// let mut data = 10;
1588 /// let mut atomic_ptr = AtomicPtr::new(&mut data);
1589 /// let mut other_data = 5;
1590 /// *atomic_ptr.get_mut() = &mut other_data;
1591 /// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
1592 /// ```
1593 #[inline]
1594 #[stable(feature = "atomic_access", since = "1.15.0")]
1595 pub fn get_mut(&mut self) -> &mut *mut T {
1596 self.p.get_mut()
1597 }
1598
1599 /// Gets atomic access to a pointer.
1600 ///
1601 /// # Examples
1602 ///
1603 /// ```
1604 /// #![feature(atomic_from_mut)]
1605 /// use std::sync::atomic::{AtomicPtr, Ordering};
1606 ///
1607 /// let mut data = 123;
1608 /// let mut some_ptr = &mut data as *mut i32;
1609 /// let a = AtomicPtr::from_mut(&mut some_ptr);
1610 /// let mut other_data = 456;
1611 /// a.store(&mut other_data, Ordering::Relaxed);
1612 /// assert_eq!(unsafe { *some_ptr }, 456);
1613 /// ```
1614 #[inline]
1615 #[cfg(target_has_atomic_equal_alignment = "ptr")]
1616 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1617 pub fn from_mut(v: &mut *mut T) -> &mut Self {
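// Compile-time alignment check: the pattern `[]` only matches a zero-length
// array, so this fails to compile unless the two alignments are equal.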
1618 let [] = [(); align_of::<AtomicPtr<()>>() - align_of::<*mut ()>()];
1619 // SAFETY:
1620 // - the mutable reference guarantees unique ownership.
1621 // - the alignment of `*mut T` and `Self` is the same on all platforms
1622 // supported by rust, as verified above.
1623 unsafe { &mut *(v as *mut *mut T as *mut Self) }
1624 }
1625
1626 /// Gets non-atomic access to a `&mut [AtomicPtr]` slice.
1627 ///
1628 /// This is safe because the mutable reference guarantees that no other threads are
1629 /// concurrently accessing the atomic data.
1630 ///
1631 /// # Examples
1632 ///
1633 /// ```ignore-wasm
1634 /// #![feature(atomic_from_mut)]
1635 /// use std::ptr::null_mut;
1636 /// use std::sync::atomic::{AtomicPtr, Ordering};
1637 ///
1638 /// let mut some_ptrs = [const { AtomicPtr::new(null_mut::<String>()) }; 10];
1639 ///
1640 /// let view: &mut [*mut String] = AtomicPtr::get_mut_slice(&mut some_ptrs);
1641 /// assert_eq!(view, [null_mut::<String>(); 10]);
1642 /// view
1643 /// .iter_mut()
1644 /// .enumerate()
1645 /// .for_each(|(i, ptr)| *ptr = Box::into_raw(Box::new(format!("iteration#{i}"))));
1646 ///
1647 /// std::thread::scope(|s| {
1648 /// for ptr in &some_ptrs {
1649 /// s.spawn(move || {
1650 /// let ptr = ptr.load(Ordering::Relaxed);
1651 /// assert!(!ptr.is_null());
1652 ///
1653 /// let name = unsafe { Box::from_raw(ptr) };
1654 /// println!("Hello, {name}!");
1655 /// });
1656 /// }
1657 /// });
1658 /// ```
1659 #[inline]
1660 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1661 pub fn get_mut_slice(this: &mut [Self]) -> &mut [*mut T] {
1662 // SAFETY: the mutable reference guarantees unique ownership.
1663 unsafe { &mut *(this as *mut [Self] as *mut [*mut T]) }
1664 }
1665
1666 /// Gets atomic access to a slice of pointers.
1667 ///
1668 /// # Examples
1669 ///
1670 /// ```ignore-wasm
1671 /// #![feature(atomic_from_mut)]
1672 /// use std::ptr::null_mut;
1673 /// use std::sync::atomic::{AtomicPtr, Ordering};
1674 ///
1675 /// let mut some_ptrs = [null_mut::<String>(); 10];
1676 /// let a = &*AtomicPtr::from_mut_slice(&mut some_ptrs);
1677 /// std::thread::scope(|s| {
1678 /// for i in 0..a.len() {
1679 /// s.spawn(move || {
1680 /// let name = Box::new(format!("thread{i}"));
1681 /// a[i].store(Box::into_raw(name), Ordering::Relaxed);
1682 /// });
1683 /// }
1684 /// });
1685 /// for p in some_ptrs {
1686 /// assert!(!p.is_null());
1687 /// let name = unsafe { Box::from_raw(p) };
1688 /// println!("Hello, {name}!");
1689 /// }
1690 /// ```
1691 #[inline]
1692 #[cfg(target_has_atomic_equal_alignment = "ptr")]
1693 #[unstable(feature = "atomic_from_mut", issue = "76314")]
1694 pub fn from_mut_slice(v: &mut [*mut T]) -> &mut [Self] {
1695 // SAFETY:
1696 // - the mutable reference guarantees unique ownership.
// - the alignment of `*mut T` and `Self` is the same on all platforms
//   supported by rust, as verified by the compile-time check in `from_mut` above.
1699 unsafe { &mut *(v as *mut [*mut T] as *mut [Self]) }
1700 }
1701
1702 /// Consumes the atomic and returns the contained value.
1703 ///
1704 /// This is safe because passing `self` by value guarantees that no other threads are
1705 /// concurrently accessing the atomic data.
1706 ///
1707 /// # Examples
1708 ///
1709 /// ```
1710 /// use std::sync::atomic::AtomicPtr;
1711 ///
1712 /// let mut data = 5;
1713 /// let atomic_ptr = AtomicPtr::new(&mut data);
1714 /// assert_eq!(unsafe { *atomic_ptr.into_inner() }, 5);
1715 /// ```
1716 #[inline]
1717 #[stable(feature = "atomic_access", since = "1.15.0")]
1718 #[rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0")]
1719 pub const fn into_inner(self) -> *mut T {
1720 self.p.into_inner()
1721 }
1722
1723 /// Loads a value from the pointer.
1724 ///
1725 /// `load` takes an [`Ordering`] argument which describes the memory ordering
1726 /// of this operation. Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
1727 ///
1728 /// # Panics
1729 ///
1730 /// Panics if `order` is [`Release`] or [`AcqRel`].
1731 ///
1732 /// # Examples
1733 ///
1734 /// ```
1735 /// use std::sync::atomic::{AtomicPtr, Ordering};
1736 ///
1737 /// let ptr = &mut 5;
1738 /// let some_ptr = AtomicPtr::new(ptr);
1739 ///
1740 /// let value = some_ptr.load(Ordering::Relaxed);
1741 /// ```
1742 #[inline]
1743 #[stable(feature = "rust1", since = "1.0.0")]
1744 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1745 pub fn load(&self, order: Ordering) -> *mut T {
1746 // SAFETY: data races are prevented by atomic intrinsics.
1747 unsafe { atomic_load(self.p.get(), order) }
1748 }
1749
1750 /// Stores a value into the pointer.
1751 ///
1752 /// `store` takes an [`Ordering`] argument which describes the memory ordering
1753 /// of this operation. Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
1754 ///
1755 /// # Panics
1756 ///
1757 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
1758 ///
1759 /// # Examples
1760 ///
1761 /// ```
1762 /// use std::sync::atomic::{AtomicPtr, Ordering};
1763 ///
1764 /// let ptr = &mut 5;
1765 /// let some_ptr = AtomicPtr::new(ptr);
1766 ///
1767 /// let other_ptr = &mut 10;
1768 ///
1769 /// some_ptr.store(other_ptr, Ordering::Relaxed);
1770 /// ```
1771 #[inline]
1772 #[stable(feature = "rust1", since = "1.0.0")]
1773 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1774 pub fn store(&self, ptr: *mut T, order: Ordering) {
1775 // SAFETY: data races are prevented by atomic intrinsics.
1776 unsafe {
1777 atomic_store(self.p.get(), ptr, order);
1778 }
1779 }
1780
1781 /// Stores a value into the pointer, returning the previous value.
1782 ///
1783 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
1784 /// of this operation. All ordering modes are possible. Note that using
1785 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
1786 /// using [`Release`] makes the load part [`Relaxed`].
1787 ///
1788 /// **Note:** This method is only available on platforms that support atomic
1789 /// operations on pointers.
1790 ///
1791 /// # Examples
1792 ///
1793 /// ```
1794 /// use std::sync::atomic::{AtomicPtr, Ordering};
1795 ///
1796 /// let ptr = &mut 5;
1797 /// let some_ptr = AtomicPtr::new(ptr);
1798 ///
1799 /// let other_ptr = &mut 10;
1800 ///
1801 /// let value = some_ptr.swap(other_ptr, Ordering::Relaxed);
1802 /// ```
1803 #[inline]
1804 #[stable(feature = "rust1", since = "1.0.0")]
1805 #[cfg(target_has_atomic = "ptr")]
1806 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1807 pub fn swap(&self, ptr: *mut T, order: Ordering) -> *mut T {
1808 // SAFETY: data races are prevented by atomic intrinsics.
1809 unsafe { atomic_swap(self.p.get(), ptr, order) }
1810 }
1811
1812 /// Stores a value into the pointer if the current value is the same as the `current` value.
1813 ///
1814 /// The return value is always the previous value. If it is equal to `current`, then the value
1815 /// was updated.
1816 ///
1817 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
1818 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
1819 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
1820 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
1821 /// happens, and using [`Release`] makes the load part [`Relaxed`].
1822 ///
1823 /// **Note:** This method is only available on platforms that support atomic
1824 /// operations on pointers.
1825 ///
1826 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
1827 ///
1828 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
1829 /// memory orderings:
1830 ///
1831 /// Original | Success | Failure
1832 /// -------- | ------- | -------
1833 /// Relaxed | Relaxed | Relaxed
1834 /// Acquire | Acquire | Acquire
1835 /// Release | Release | Relaxed
1836 /// AcqRel | AcqRel | Acquire
1837 /// SeqCst | SeqCst | SeqCst
1838 ///
1839 /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use
1840 /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`,
1841 /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err`
1842 /// rather than to infer success vs failure based on the value that was read.
1843 ///
1844 /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead.
1845 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
1846 /// which allows the compiler to generate better assembly code when the compare and swap
1847 /// is used in a loop.
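///
/// As a hedged illustration of the table above (reusing the names from the
/// example below), an `AcqRel` call would migrate as:
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let ptr = &mut 5;
/// let some_ptr = AtomicPtr::new(ptr);
/// let other_ptr = &mut 10;
///
/// // Before: `some_ptr.compare_and_swap(ptr, other_ptr, Ordering::AcqRel)`.
/// // After: `AcqRel` maps to success = `AcqRel`, failure = `Acquire`.
/// let value = some_ptr
///     .compare_exchange(ptr, other_ptr, Ordering::AcqRel, Ordering::Acquire)
///     .unwrap_or_else(|x| x);
/// ```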
1848 ///
1849 /// # Examples
1850 ///
1851 /// ```
1852 /// use std::sync::atomic::{AtomicPtr, Ordering};
1853 ///
1854 /// let ptr = &mut 5;
1855 /// let some_ptr = AtomicPtr::new(ptr);
1856 ///
1857 /// let other_ptr = &mut 10;
1858 ///
1859 /// let value = some_ptr.compare_and_swap(ptr, other_ptr, Ordering::Relaxed);
1860 /// ```
1861 #[inline]
1862 #[stable(feature = "rust1", since = "1.0.0")]
1863 #[deprecated(
1864 since = "1.50.0",
1865 note = "Use `compare_exchange` or `compare_exchange_weak` instead"
1866 )]
1867 #[cfg(target_has_atomic = "ptr")]
1868 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1869 pub fn compare_and_swap(&self, current: *mut T, new: *mut T, order: Ordering) -> *mut T {
1870 match self.compare_exchange(current, new, order, strongest_failure_ordering(order)) {
1871 Ok(x) => x,
1872 Err(x) => x,
1873 }
1874 }
1875
1876 /// Stores a value into the pointer if the current value is the same as the `current` value.
1877 ///
1878 /// The return value is a result indicating whether the new value was written and containing
1879 /// the previous value. On success this value is guaranteed to be equal to `current`.
1880 ///
1881 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
1882 /// ordering of this operation. `success` describes the required ordering for the
1883 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1884 /// `failure` describes the required ordering for the load operation that takes place when
1885 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1886 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1887 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
1888 ///
1889 /// **Note:** This method is only available on platforms that support atomic
1890 /// operations on pointers.
1891 ///
1892 /// # Examples
1893 ///
1894 /// ```
1895 /// use std::sync::atomic::{AtomicPtr, Ordering};
1896 ///
1897 /// let ptr = &mut 5;
1898 /// let some_ptr = AtomicPtr::new(ptr);
1899 ///
1900 /// let other_ptr = &mut 10;
1901 ///
1902 /// let value = some_ptr.compare_exchange(ptr, other_ptr,
1903 /// Ordering::SeqCst, Ordering::Relaxed);
1904 /// ```
1905 ///
1906 /// # Considerations
1907 ///
1908 /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
1909 /// of CAS operations. In particular, a load of the value followed by a successful
1910 /// `compare_exchange` with the previous load *does not ensure* that other threads have not
1911 /// changed the value in the interim. This is usually important when the *equality* check in
1912 /// the `compare_exchange` is being used to check the *identity* of a value, but equality
1913 /// does not necessarily imply identity. This is a particularly common case for pointers, as
1914 /// a pointer holding the same address does not imply that the same object exists at that
1915 /// address! In this case, `compare_exchange` can lead to the [ABA problem].
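///
/// A hedged, single-threaded sketch of that pitfall: the exchange below succeeds
/// because only the *address* is compared, so a freed-and-reused allocation
/// (described in the comments, not actually performed here) would go unnoticed.
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let a = Box::into_raw(Box::new(1));
/// let atom = AtomicPtr::new(a);
///
/// let seen = atom.load(Ordering::Relaxed);
/// // Imagine another thread freeing `a` here and a later allocation reusing
/// // the same address: `seen` would still compare equal, and the exchange
/// // would succeed even though it now refers to a different object.
/// let b = Box::into_raw(Box::new(2));
/// assert_eq!(
///     atom.compare_exchange(seen, b, Ordering::AcqRel, Ordering::Acquire),
///     Ok(a)
/// );
///
/// // Clean up both allocations.
/// unsafe {
///     drop(Box::from_raw(atom.load(Ordering::Relaxed)));
///     drop(Box::from_raw(a));
/// }
/// ```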
1916 ///
1917 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1918 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1919 #[inline]
1920 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1921 #[cfg(target_has_atomic = "ptr")]
1922 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1923 pub fn compare_exchange(
1924 &self,
1925 current: *mut T,
1926 new: *mut T,
1927 success: Ordering,
1928 failure: Ordering,
1929 ) -> Result<*mut T, *mut T> {
1930 // SAFETY: data races are prevented by atomic intrinsics.
1931 unsafe { atomic_compare_exchange(self.p.get(), current, new, success, failure) }
1932 }
1933
1934 /// Stores a value into the pointer if the current value is the same as the `current` value.
1935 ///
1936 /// Unlike [`AtomicPtr::compare_exchange`], this function is allowed to spuriously fail even when the
1937 /// comparison succeeds, which can result in more efficient code on some platforms. The
1938 /// return value is a result indicating whether the new value was written and containing the
1939 /// previous value.
1940 ///
1941 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
1942 /// ordering of this operation. `success` describes the required ordering for the
1943 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
1944 /// `failure` describes the required ordering for the load operation that takes place when
1945 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
1946 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
1947 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
1948 ///
1949 /// **Note:** This method is only available on platforms that support atomic
1950 /// operations on pointers.
1951 ///
1952 /// # Examples
1953 ///
1954 /// ```
1955 /// use std::sync::atomic::{AtomicPtr, Ordering};
1956 ///
1957 /// let some_ptr = AtomicPtr::new(&mut 5);
1958 ///
1959 /// let new = &mut 10;
1960 /// let mut old = some_ptr.load(Ordering::Relaxed);
1961 /// loop {
1962 /// match some_ptr.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
1963 /// Ok(_) => break,
1964 /// Err(x) => old = x,
1965 /// }
1966 /// }
1967 /// ```
1968 ///
1969 /// # Considerations
1970 ///
/// `compare_exchange_weak` is a [compare-and-swap operation] and thus exhibits the usual downsides
1972 /// of CAS operations. In particular, a load of the value followed by a successful
1973 /// `compare_exchange` with the previous load *does not ensure* that other threads have not
1974 /// changed the value in the interim. This is usually important when the *equality* check in
1975 /// the `compare_exchange` is being used to check the *identity* of a value, but equality
1976 /// does not necessarily imply identity. This is a particularly common case for pointers, as
1977 /// a pointer holding the same address does not imply that the same object exists at that
1978 /// address! In this case, `compare_exchange` can lead to the [ABA problem].
1979 ///
1980 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
1981 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
1982 #[inline]
1983 #[stable(feature = "extended_compare_and_swap", since = "1.10.0")]
1984 #[cfg(target_has_atomic = "ptr")]
1985 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
1986 pub fn compare_exchange_weak(
1987 &self,
1988 current: *mut T,
1989 new: *mut T,
1990 success: Ordering,
1991 failure: Ordering,
1992 ) -> Result<*mut T, *mut T> {
1993 // SAFETY: This intrinsic is unsafe because it operates on a raw pointer
1994 // but we know for sure that the pointer is valid (we just got it from
1995 // an `UnsafeCell` that we have by reference) and the atomic operation
1996 // itself allows us to safely mutate the `UnsafeCell` contents.
1997 unsafe { atomic_compare_exchange_weak(self.p.get(), current, new, success, failure) }
1998 }
1999
2000 /// Fetches the value, and applies a function to it that returns an optional
2001 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
2002 /// returned `Some(_)`, else `Err(previous_value)`.
2003 ///
2004 /// Note: This may call the function multiple times if the value has been
2005 /// changed from other threads in the meantime, as long as the function
2006 /// returns `Some(_)`, but the function will have been applied only once to
2007 /// the stored value.
2008 ///
2009 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory
2010 /// ordering of this operation. The first describes the required ordering for
2011 /// when the operation finally succeeds while the second describes the
2012 /// required ordering for loads. These correspond to the success and failure
2013 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
2014 ///
2015 /// Using [`Acquire`] as success ordering makes the store part of this
2016 /// operation [`Relaxed`], and using [`Release`] makes the final successful
2017 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
2018 /// [`Acquire`] or [`Relaxed`].
2019 ///
2020 /// **Note:** This method is only available on platforms that support atomic
2021 /// operations on pointers.
2022 ///
2023 /// # Considerations
2024 ///
2025 /// This method is not magic; it is not provided by the hardware, and does not act like a
2026 /// critical section or mutex.
2027 ///
2028 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
2029 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem],
2030 /// which is a particularly common pitfall for pointers!
2031 ///
2032 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
2033 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
2034 ///
2035 /// # Examples
2036 ///
2037 /// ```rust
2038 /// use std::sync::atomic::{AtomicPtr, Ordering};
2039 ///
2040 /// let ptr: *mut _ = &mut 5;
2041 /// let some_ptr = AtomicPtr::new(ptr);
2042 ///
2043 /// let new: *mut _ = &mut 10;
2044 /// assert_eq!(some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
2045 /// let result = some_ptr.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
2046 /// if x == ptr {
2047 /// Some(new)
2048 /// } else {
2049 /// None
2050 /// }
2051 /// });
2052 /// assert_eq!(result, Ok(ptr));
2053 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
2054 /// ```
2055 #[inline]
2056 #[stable(feature = "atomic_fetch_update", since = "1.53.0")]
2057 #[cfg(target_has_atomic = "ptr")]
2058 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2059 pub fn fetch_update<F>(
2060 &self,
2061 set_order: Ordering,
2062 fetch_order: Ordering,
2063 mut f: F,
2064 ) -> Result<*mut T, *mut T>
2065 where
2066 F: FnMut(*mut T) -> Option<*mut T>,
2067 {
2068 let mut prev = self.load(fetch_order);
2069 while let Some(next) = f(prev) {
2070 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
2071 x @ Ok(_) => return x,
2072 Err(next_prev) => prev = next_prev,
2073 }
2074 }
2075 Err(prev)
}

2077 /// Fetches the value, and applies a function to it that returns an optional
2078 /// new value. Returns a `Result` of `Ok(previous_value)` if the function
2079 /// returned `Some(_)`, else `Err(previous_value)`.
2080 ///
2081 /// See also: [`update`](`AtomicPtr::update`).
2082 ///
2083 /// Note: This may call the function multiple times if the value has been
2084 /// changed from other threads in the meantime, as long as the function
2085 /// returns `Some(_)`, but the function will have been applied only once to
2086 /// the stored value.
2087 ///
2088 /// `try_update` takes two [`Ordering`] arguments to describe the memory
2089 /// ordering of this operation. The first describes the required ordering for
2090 /// when the operation finally succeeds while the second describes the
2091 /// required ordering for loads. These correspond to the success and failure
2092 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
2093 ///
2094 /// Using [`Acquire`] as success ordering makes the store part of this
2095 /// operation [`Relaxed`], and using [`Release`] makes the final successful
2096 /// load [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`],
2097 /// [`Acquire`] or [`Relaxed`].
2098 ///
2099 /// **Note:** This method is only available on platforms that support atomic
2100 /// operations on pointers.
2101 ///
2102 /// # Considerations
2103 ///
2104 /// This method is not magic; it is not provided by the hardware, and does not act like a
2105 /// critical section or mutex.
2106 ///
2107 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
2108 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem],
2109 /// which is a particularly common pitfall for pointers!
2110 ///
2111 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
2112 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
2113 ///
2114 /// # Examples
2115 ///
2116 /// ```rust
2117 /// #![feature(atomic_try_update)]
2118 /// use std::sync::atomic::{AtomicPtr, Ordering};
2119 ///
2120 /// let ptr: *mut _ = &mut 5;
2121 /// let some_ptr = AtomicPtr::new(ptr);
2122 ///
2123 /// let new: *mut _ = &mut 10;
2124 /// assert_eq!(some_ptr.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(ptr));
2125 /// let result = some_ptr.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| {
2126 /// if x == ptr {
2127 /// Some(new)
2128 /// } else {
2129 /// None
2130 /// }
2131 /// });
2132 /// assert_eq!(result, Ok(ptr));
2133 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
2134 /// ```
2135 #[inline]
2136 #[unstable(feature = "atomic_try_update", issue = "135894")]
2137 #[cfg(target_has_atomic = "ptr")]
2138 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2139 pub fn try_update(
2140 &self,
2141 set_order: Ordering,
2142 fetch_order: Ordering,
2143 f: impl FnMut(*mut T) -> Option<*mut T>,
2144 ) -> Result<*mut T, *mut T> {
2145 // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
2146 // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
2147 self.fetch_update(set_order, fetch_order, f)
2148 }
2149
/// Fetches the value, and applies a function to it that returns a new value.
2151 /// The new value is stored and the old value is returned.
2152 ///
2153 /// See also: [`try_update`](`AtomicPtr::try_update`).
2154 ///
2155 /// Note: This may call the function multiple times if the value has been changed from other threads in
2156 /// the meantime, but the function will have been applied only once to the stored value.
2157 ///
2158 /// `update` takes two [`Ordering`] arguments to describe the memory
2159 /// ordering of this operation. The first describes the required ordering for
2160 /// when the operation finally succeeds while the second describes the
2161 /// required ordering for loads. These correspond to the success and failure
2162 /// orderings of [`AtomicPtr::compare_exchange`] respectively.
2163 ///
2164 /// Using [`Acquire`] as success ordering makes the store part
2165 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
2166 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
2167 ///
2168 /// **Note:** This method is only available on platforms that support atomic
2169 /// operations on pointers.
2170 ///
2171 /// # Considerations
2172 ///
2173 /// This method is not magic; it is not provided by the hardware, and does not act like a
2174 /// critical section or mutex.
2175 ///
2176 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
2177 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem],
2178 /// which is a particularly common pitfall for pointers!
2179 ///
2180 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
2181 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
2182 ///
2183 /// # Examples
2184 ///
2185 /// ```rust
2186 /// #![feature(atomic_try_update)]
2187 ///
2188 /// use std::sync::atomic::{AtomicPtr, Ordering};
2189 ///
2190 /// let ptr: *mut _ = &mut 5;
2191 /// let some_ptr = AtomicPtr::new(ptr);
2192 ///
2193 /// let new: *mut _ = &mut 10;
2194 /// let result = some_ptr.update(Ordering::SeqCst, Ordering::SeqCst, |_| new);
2195 /// assert_eq!(result, ptr);
2196 /// assert_eq!(some_ptr.load(Ordering::SeqCst), new);
2197 /// ```
2198 #[inline]
2199 #[unstable(feature = "atomic_try_update", issue = "135894")]
#[cfg(target_has_atomic = "ptr")]
2201 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2202 pub fn update(
2203 &self,
2204 set_order: Ordering,
2205 fetch_order: Ordering,
2206 mut f: impl FnMut(*mut T) -> *mut T,
2207 ) -> *mut T {
2208 let mut prev = self.load(fetch_order);
2209 loop {
2210 match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
2211 Ok(x) => break x,
2212 Err(next_prev) => prev = next_prev,
2213 }
2214 }
2215 }
2216
2217 /// Offsets the pointer's address by adding `val` (in units of `T`),
2218 /// returning the previous pointer.
2219 ///
2220 /// This is equivalent to using [`wrapping_add`] to atomically perform the
2221 /// equivalent of `ptr = ptr.wrapping_add(val);`.
2222 ///
2223 /// This method operates in units of `T`, which means that it cannot be used
2224 /// to offset the pointer by an amount which is not a multiple of
2225 /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
2226 /// work with a deliberately misaligned pointer. In such cases, you may use
2227 /// the [`fetch_byte_add`](Self::fetch_byte_add) method instead.
2228 ///
2229 /// `fetch_ptr_add` takes an [`Ordering`] argument which describes the
2230 /// memory ordering of this operation. All ordering modes are possible. Note
2231 /// that using [`Acquire`] makes the store part of this operation
2232 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
2233 ///
2234 /// **Note**: This method is only available on platforms that support atomic
2235 /// operations on [`AtomicPtr`].
2236 ///
2237 /// [`wrapping_add`]: pointer::wrapping_add
2238 ///
2239 /// # Examples
2240 ///
2241 /// ```
2242 /// use core::sync::atomic::{AtomicPtr, Ordering};
2243 ///
2244 /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
2245 /// assert_eq!(atom.fetch_ptr_add(1, Ordering::Relaxed).addr(), 0);
2246 /// // Note: units of `size_of::<i64>()`.
2247 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 8);
2248 /// ```
2249 #[inline]
2250 #[cfg(target_has_atomic = "ptr")]
2251 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2252 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2253 pub fn fetch_ptr_add(&self, val: usize, order: Ordering) -> *mut T {
2254 self.fetch_byte_add(val.wrapping_mul(size_of::<T>()), order)
2255 }
2256
2257 /// Offsets the pointer's address by subtracting `val` (in units of `T`),
2258 /// returning the previous pointer.
2259 ///
2260 /// This is equivalent to using [`wrapping_sub`] to atomically perform the
2261 /// equivalent of `ptr = ptr.wrapping_sub(val);`.
2262 ///
2263 /// This method operates in units of `T`, which means that it cannot be used
2264 /// to offset the pointer by an amount which is not a multiple of
2265 /// `size_of::<T>()`. This can sometimes be inconvenient, as you may want to
2266 /// work with a deliberately misaligned pointer. In such cases, you may use
2267 /// the [`fetch_byte_sub`](Self::fetch_byte_sub) method instead.
2268 ///
2269 /// `fetch_ptr_sub` takes an [`Ordering`] argument which describes the memory
2270 /// ordering of this operation. All ordering modes are possible. Note that
2271 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2272 /// and using [`Release`] makes the load part [`Relaxed`].
2273 ///
2274 /// **Note**: This method is only available on platforms that support atomic
2275 /// operations on [`AtomicPtr`].
2276 ///
2277 /// [`wrapping_sub`]: pointer::wrapping_sub
2278 ///
2279 /// # Examples
2280 ///
2281 /// ```
2282 /// use core::sync::atomic::{AtomicPtr, Ordering};
2283 ///
2284 /// let array = [1i32, 2i32];
2285 /// let atom = AtomicPtr::new(array.as_ptr().wrapping_add(1) as *mut _);
2286 ///
2287 /// assert!(core::ptr::eq(
2288 /// atom.fetch_ptr_sub(1, Ordering::Relaxed),
2289 /// &array[1],
2290 /// ));
2291 /// assert!(core::ptr::eq(atom.load(Ordering::Relaxed), &array[0]));
2292 /// ```
2293 #[inline]
2294 #[cfg(target_has_atomic = "ptr")]
2295 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2296 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2297 pub fn fetch_ptr_sub(&self, val: usize, order: Ordering) -> *mut T {
2298 self.fetch_byte_sub(val.wrapping_mul(size_of::<T>()), order)
2299 }
2300
2301 /// Offsets the pointer's address by adding `val` *bytes*, returning the
2302 /// previous pointer.
2303 ///
2304 /// This is equivalent to using [`wrapping_byte_add`] to atomically
2305 /// perform `ptr = ptr.wrapping_byte_add(val)`.
2306 ///
2307 /// `fetch_byte_add` takes an [`Ordering`] argument which describes the
2308 /// memory ordering of this operation. All ordering modes are possible. Note
2309 /// that using [`Acquire`] makes the store part of this operation
2310 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
2311 ///
2312 /// **Note**: This method is only available on platforms that support atomic
2313 /// operations on [`AtomicPtr`].
2314 ///
2315 /// [`wrapping_byte_add`]: pointer::wrapping_byte_add
2316 ///
2317 /// # Examples
2318 ///
2319 /// ```
2320 /// use core::sync::atomic::{AtomicPtr, Ordering};
2321 ///
2322 /// let atom = AtomicPtr::<i64>::new(core::ptr::null_mut());
2323 /// assert_eq!(atom.fetch_byte_add(1, Ordering::Relaxed).addr(), 0);
2324 /// // Note: in units of bytes, not `size_of::<i64>()`.
2325 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), 1);
2326 /// ```
2327 #[inline]
2328 #[cfg(target_has_atomic = "ptr")]
2329 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2330 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2331 pub fn fetch_byte_add(&self, val: usize, order: Ordering) -> *mut T {
2332 // SAFETY: data races are prevented by atomic intrinsics.
2333 unsafe { atomic_add(self.p.get(), val, order).cast() }
2334 }
2335
2336 /// Offsets the pointer's address by subtracting `val` *bytes*, returning the
2337 /// previous pointer.
2338 ///
2339 /// This is equivalent to using [`wrapping_byte_sub`] to atomically
2340 /// perform `ptr = ptr.wrapping_byte_sub(val)`.
2341 ///
2342 /// `fetch_byte_sub` takes an [`Ordering`] argument which describes the
2343 /// memory ordering of this operation. All ordering modes are possible. Note
2344 /// that using [`Acquire`] makes the store part of this operation
2345 /// [`Relaxed`], and using [`Release`] makes the load part [`Relaxed`].
2346 ///
2347 /// **Note**: This method is only available on platforms that support atomic
2348 /// operations on [`AtomicPtr`].
2349 ///
2350 /// [`wrapping_byte_sub`]: pointer::wrapping_byte_sub
2351 ///
2352 /// # Examples
2353 ///
2354 /// ```
2355 /// use core::sync::atomic::{AtomicPtr, Ordering};
2356 ///
2357 /// let mut arr = [0i64, 1];
2358 /// let atom = AtomicPtr::<i64>::new(&raw mut arr[1]);
2359 /// assert_eq!(atom.fetch_byte_sub(8, Ordering::Relaxed).addr(), (&raw const arr[1]).addr());
2360 /// assert_eq!(atom.load(Ordering::Relaxed).addr(), (&raw const arr[0]).addr());
2361 /// ```
2362 #[inline]
2363 #[cfg(target_has_atomic = "ptr")]
2364 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2365 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2366 pub fn fetch_byte_sub(&self, val: usize, order: Ordering) -> *mut T {
2367 // SAFETY: data races are prevented by atomic intrinsics.
2368 unsafe { atomic_sub(self.p.get(), val, order).cast() }
2369 }
2370
2371 /// Performs a bitwise "or" operation on the address of the current pointer,
2372 /// and the argument `val`, and stores a pointer with provenance of the
2373 /// current pointer and the resulting address.
2374 ///
2375 /// This is equivalent to using [`map_addr`] to atomically perform
2376 /// `ptr = ptr.map_addr(|a| a | val)`. This can be used in tagged
2377 /// pointer schemes to atomically set tag bits.
2378 ///
2379 /// **Caveat**: This operation returns the previous value. To compute the
2380 /// stored value without losing provenance, you may use [`map_addr`]. For
2381 /// example: `a.fetch_or(val).map_addr(|a| a | val)`.
2382 ///
2383 /// `fetch_or` takes an [`Ordering`] argument which describes the memory
2384 /// ordering of this operation. All ordering modes are possible. Note that
2385 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2386 /// and using [`Release`] makes the load part [`Relaxed`].
2387 ///
2388 /// **Note**: This method is only available on platforms that support atomic
2389 /// operations on [`AtomicPtr`].
2390 ///
2391 /// This API and its claimed semantics are part of the Strict Provenance
2392 /// experiment, see the [module documentation for `ptr`][crate::ptr] for
2393 /// details.
2394 ///
2395 /// [`map_addr`]: pointer::map_addr
2396 ///
2397 /// # Examples
2398 ///
2399 /// ```
2400 /// use core::sync::atomic::{AtomicPtr, Ordering};
2401 ///
2402 /// let pointer = &mut 3i64 as *mut i64;
2403 ///
2404 /// let atom = AtomicPtr::<i64>::new(pointer);
2405 /// // Tag the bottom bit of the pointer.
2406 /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 0);
2407 /// // Extract and untag.
2408 /// let tagged = atom.load(Ordering::Relaxed);
2409 /// assert_eq!(tagged.addr() & 1, 1);
2410 /// assert_eq!(tagged.map_addr(|p| p & !1), pointer);
2411 /// ```
2412 #[inline]
2413 #[cfg(target_has_atomic = "ptr")]
2414 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2415 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2416 pub fn fetch_or(&self, val: usize, order: Ordering) -> *mut T {
2417 // SAFETY: data races are prevented by atomic intrinsics.
2418 unsafe { atomic_or(self.p.get(), val, order).cast() }
2419 }
2420
2421 /// Performs a bitwise "and" operation on the address of the current
2422 /// pointer, and the argument `val`, and stores a pointer with provenance of
2423 /// the current pointer and the resulting address.
2424 ///
2425 /// This is equivalent to using [`map_addr`] to atomically perform
2426 /// `ptr = ptr.map_addr(|a| a & val)`. This can be used in tagged
2427 /// pointer schemes to atomically unset tag bits.
2428 ///
2429 /// **Caveat**: This operation returns the previous value. To compute the
2430 /// stored value without losing provenance, you may use [`map_addr`]. For
2431 /// example: `a.fetch_and(val).map_addr(|a| a & val)`.
2432 ///
2433 /// `fetch_and` takes an [`Ordering`] argument which describes the memory
2434 /// ordering of this operation. All ordering modes are possible. Note that
2435 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2436 /// and using [`Release`] makes the load part [`Relaxed`].
2437 ///
2438 /// **Note**: This method is only available on platforms that support atomic
2439 /// operations on [`AtomicPtr`].
2440 ///
2441 /// This API and its claimed semantics are part of the Strict Provenance
2442 /// experiment, see the [module documentation for `ptr`][crate::ptr] for
2443 /// details.
2444 ///
2445 /// [`map_addr`]: pointer::map_addr
2446 ///
2447 /// # Examples
2448 ///
2449 /// ```
2450 /// use core::sync::atomic::{AtomicPtr, Ordering};
2451 ///
2452 /// let pointer = &mut 3i64 as *mut i64;
2453 /// // A tagged pointer
2454 /// let atom = AtomicPtr::<i64>::new(pointer.map_addr(|a| a | 1));
2455 /// assert_eq!(atom.fetch_or(1, Ordering::Relaxed).addr() & 1, 1);
2456 /// // Untag, and extract the previously tagged pointer.
2457 /// let untagged = atom.fetch_and(!1, Ordering::Relaxed)
2458 /// .map_addr(|a| a & !1);
2459 /// assert_eq!(untagged, pointer);
2460 /// ```
2461 #[inline]
2462 #[cfg(target_has_atomic = "ptr")]
2463 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2464 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2465 pub fn fetch_and(&self, val: usize, order: Ordering) -> *mut T {
2466 // SAFETY: data races are prevented by atomic intrinsics.
2467 unsafe { atomic_and(self.p.get(), val, order).cast() }
2468 }
2469
2470 /// Performs a bitwise "xor" operation on the address of the current
2471 /// pointer, and the argument `val`, and stores a pointer with provenance of
2472 /// the current pointer and the resulting address.
2473 ///
2474 /// This is equivalent to using [`map_addr`] to atomically perform
2475 /// `ptr = ptr.map_addr(|a| a ^ val)`. This can be used in tagged
2476 /// pointer schemes to atomically toggle tag bits.
2477 ///
2478 /// **Caveat**: This operation returns the previous value. To compute the
2479 /// stored value without losing provenance, you may use [`map_addr`]. For
2480 /// example: `a.fetch_xor(val).map_addr(|a| a ^ val)`.
2481 ///
2482 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory
2483 /// ordering of this operation. All ordering modes are possible. Note that
2484 /// using [`Acquire`] makes the store part of this operation [`Relaxed`],
2485 /// and using [`Release`] makes the load part [`Relaxed`].
2486 ///
2487 /// **Note**: This method is only available on platforms that support atomic
2488 /// operations on [`AtomicPtr`].
2489 ///
2490 /// This API and its claimed semantics are part of the Strict Provenance
2491 /// experiment, see the [module documentation for `ptr`][crate::ptr] for
2492 /// details.
2493 ///
2494 /// [`map_addr`]: pointer::map_addr
2495 ///
2496 /// # Examples
2497 ///
2498 /// ```
2499 /// use core::sync::atomic::{AtomicPtr, Ordering};
2500 ///
2501 /// let pointer = &mut 3i64 as *mut i64;
2502 /// let atom = AtomicPtr::<i64>::new(pointer);
2503 ///
2504 /// // Toggle a tag bit on the pointer.
2505 /// atom.fetch_xor(1, Ordering::Relaxed);
2506 /// assert_eq!(atom.load(Ordering::Relaxed).addr() & 1, 1);
2507 /// ```
2508 #[inline]
2509 #[cfg(target_has_atomic = "ptr")]
2510 #[stable(feature = "strict_provenance_atomic_ptr", since = "1.91.0")]
2511 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2512 pub fn fetch_xor(&self, val: usize, order: Ordering) -> *mut T {
2513 // SAFETY: data races are prevented by atomic intrinsics.
2514 unsafe { atomic_xor(self.p.get(), val, order).cast() }
2515 }
2516
2517 /// Returns a mutable pointer to the underlying pointer.
2518 ///
2519 /// Doing non-atomic reads and writes on the resulting pointer can be a data race.
2520 /// This method is mostly useful for FFI, where the function signature may use
2521 /// `*mut *mut T` instead of `&AtomicPtr<T>`.
2522 ///
2523 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
2524 /// atomic types work with interior mutability. All modifications of an atomic change the value
2525 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
2526 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
2527 /// requirements of the [memory model].
2528 ///
2529 /// # Examples
2530 ///
2531 /// ```ignore (extern-declaration)
2532 /// use std::sync::atomic::AtomicPtr;
2533 ///
2534 /// extern "C" {
2535 /// fn my_atomic_op(arg: *mut *mut u32);
2536 /// }
2537 ///
2538 /// let mut value = 17;
2539 /// let atomic = AtomicPtr::new(&mut value);
2540 ///
2541 /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
2542 /// unsafe {
2543 /// my_atomic_op(atomic.as_ptr());
2544 /// }
2545 /// ```
2546 ///
2547 /// [memory model]: self#memory-model-for-atomic-accesses
2548 #[inline]
2549 #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
2550 #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
2551 #[rustc_never_returns_null_ptr]
2552 pub const fn as_ptr(&self) -> *mut *mut T {
2553 self.p.get()
2554 }
2555}
2556
2557#[cfg(target_has_atomic_load_store = "8")]
2558#[stable(feature = "atomic_bool_from", since = "1.24.0")]
2559#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
2560#[cfg(not(feature = "ferrocene_certified"))]
2561impl const From<bool> for AtomicBool {
2562 /// Converts a `bool` into an `AtomicBool`.
2563 ///
2564 /// # Examples
2565 ///
2566 /// ```
2567 /// use std::sync::atomic::AtomicBool;
2568 /// let atomic_bool = AtomicBool::from(true);
2569 /// assert_eq!(format!("{atomic_bool:?}"), "true")
2570 /// ```
2571 #[inline]
2572 fn from(b: bool) -> Self {
2573 Self::new(b)
2574 }
2575}
2576
2577#[cfg(target_has_atomic_load_store = "ptr")]
2578#[stable(feature = "atomic_from", since = "1.23.0")]
2579#[rustc_const_unstable(feature = "const_convert", issue = "143773")]
2580#[cfg(not(feature = "ferrocene_certified"))]
2581impl<T> const From<*mut T> for AtomicPtr<T> {
2582 /// Converts a `*mut T` into an `AtomicPtr<T>`.
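///
/// # Examples
///
/// A minimal conversion, mirroring the [`AtomicBool`] conversion above:
///
/// ```
/// use std::sync::atomic::{AtomicPtr, Ordering};
///
/// let mut value = 5;
/// let atomic_ptr = AtomicPtr::from(&raw mut value);
/// assert_eq!(unsafe { *atomic_ptr.load(Ordering::SeqCst) }, 5);
/// ```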
2583 #[inline]
2584 fn from(p: *mut T) -> Self {
2585 Self::new(p)
2586 }
2587}
2588
2589#[allow(unused_macros)] // This macro ends up being unused on some architectures.
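// Expands to the `yes = [...]` tokens for the 8-bit types (`u8`/`i8`) and to
// the `no = [...]` tokens for every other integer type.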
2590macro_rules! if_8_bit {
2591 (u8, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($yes)*)?) };
2592 (i8, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($yes)*)?) };
2593 ($_:ident, $( yes = [$($yes:tt)*], )? $( no = [$($no:tt)*], )? ) => { concat!("", $($($no)*)?) };
2594}
2595
2596#[cfg(target_has_atomic_load_store)]
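// Generates one atomic integer type (`$atomic_type`) and its impls from the
// given parameters; the `$stable*`/`$const_stable*` metas carry the per-type
// stability attributes, `$align` the forced alignment, and `$s_int_type` the
// doc name of the underlying integer.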
2597macro_rules! atomic_int {
2598 ($cfg_cas:meta,
2599 $cfg_align:meta,
2600 $stable:meta,
2601 $stable_cxchg:meta,
2602 $stable_debug:meta,
2603 $stable_access:meta,
2604 $stable_from:meta,
2605 $stable_nand:meta,
2606 $const_stable_new:meta,
2607 $const_stable_into_inner:meta,
2608 $diagnostic_item:meta,
2609 $s_int_type:literal,
2610 $extra_feature:expr,
2611 $min_fn:ident, $max_fn:ident,
2612 $align:expr,
2613 $int_type:ident $atomic_type:ident) => {
2614 /// An integer type which can be safely shared between threads.
2615 ///
2616 /// This type has the same
2617 #[doc = if_8_bit!(
2618 $int_type,
2619 yes = ["size, alignment, and bit validity"],
2620 no = ["size and bit validity"],
2621 )]
2622 /// as the underlying integer type, [`
2623 #[doc = $s_int_type]
2624 /// `].
2625 #[doc = if_8_bit! {
2626 $int_type,
2627 no = [
2628 "However, the alignment of this type is always equal to its ",
2629 "size, even on targets where [`", $s_int_type, "`] has a ",
2630 "lesser alignment."
2631 ],
2632 }]
2633 ///
2634 /// For more about the differences between atomic types and
2635 /// non-atomic types as well as information about the portability of
2636 /// this type, please see the [module-level documentation].
2637 ///
2638 /// **Note:** This type is only available on platforms that support
2639 /// atomic loads and stores of [`
2640 #[doc = $s_int_type]
2641 /// `].
2642 ///
2643 /// [module-level documentation]: crate::sync::atomic
2644 #[$stable]
2645 #[$diagnostic_item]
2646 #[repr(C, align($align))]
2647 pub struct $atomic_type {
2648 v: UnsafeCell<$int_type>,
2649 }
2650
2651 #[$stable]
2652 impl Default for $atomic_type {
2653 #[inline]
2654 fn default() -> Self {
2655 Self::new(Default::default())
2656 }
2657 }
2658
2659 #[$stable_from]
2660 #[rustc_const_unstable(feature = "const_convert", issue = "143773")]
2661 impl const From<$int_type> for $atomic_type {
2662 #[doc = concat!("Converts an `", stringify!($int_type), "` into an `", stringify!($atomic_type), "`.")]
2663 #[inline]
2664 fn from(v: $int_type) -> Self { Self::new(v) }
2665 }
2666
2667 #[$stable_debug]
2668 #[cfg(not(feature = "ferrocene_certified"))]
2669 impl fmt::Debug for $atomic_type {
2670 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
2671 fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
2672 }
2673 }
2674
2675 // Send is implicitly implemented.
2676 #[$stable]
2677 unsafe impl Sync for $atomic_type {}
2678
2679 impl $atomic_type {
2680 /// Creates a new atomic integer.
2681 ///
2682 /// # Examples
2683 ///
2684 /// ```
2685 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
2686 ///
2687 #[doc = concat!("let atomic_forty_two = ", stringify!($atomic_type), "::new(42);")]
2688 /// ```
2689 #[inline]
2690 #[$stable]
2691 #[$const_stable_new]
2692 #[must_use]
2693 pub const fn new(v: $int_type) -> Self {
2694 Self { v: UnsafeCell::new(v) }
2695 }
2696
2697 /// Creates a new reference to an atomic integer from a pointer.
2698 ///
2699 /// # Examples
2700 ///
2701 /// ```
2702 #[doc = concat!($extra_feature, "use std::sync::atomic::{self, ", stringify!($atomic_type), "};")]
2703 ///
2704 /// // Get a pointer to an allocated value
2705 #[doc = concat!("let ptr: *mut ", stringify!($int_type), " = Box::into_raw(Box::new(0));")]
2706 ///
2707 #[doc = concat!("assert!(ptr.cast::<", stringify!($atomic_type), ">().is_aligned());")]
2708 ///
2709 /// {
2710 /// // Create an atomic view of the allocated value
2711 // SAFETY: this is a doc comment, tidy, it can't hurt you (also guaranteed by the construction of `ptr` and the assert above)
2712 #[doc = concat!(" let atomic = unsafe {", stringify!($atomic_type), "::from_ptr(ptr) };")]
2713 ///
2714 /// // Use `atomic` for atomic operations, possibly share it with other threads
2715 /// atomic.store(1, atomic::Ordering::Relaxed);
2716 /// }
2717 ///
2718 /// // It's ok to non-atomically access the value behind `ptr`,
2719 /// // since the reference to the atomic ended its lifetime in the block above
2720 /// assert_eq!(unsafe { *ptr }, 1);
2721 ///
2722 /// // Deallocate the value
2723 /// unsafe { drop(Box::from_raw(ptr)) }
2724 /// ```
2725 ///
2726 /// # Safety
2727 ///
2728 /// * `ptr` must be aligned to
2729 #[doc = concat!(" `align_of::<", stringify!($atomic_type), ">()`")]
2730 #[doc = if_8_bit!{
2731 $int_type,
2732 yes = [
2733 " (note that this is always true, since `align_of::<",
2734 stringify!($atomic_type), ">() == 1`)."
2735 ],
2736 no = [
2737 " (note that on some platforms this can be bigger than `align_of::<",
2738 stringify!($int_type), ">()`)."
2739 ],
2740 }]
2741 /// * `ptr` must be [valid] for both reads and writes for the whole lifetime `'a`.
2742 /// * You must adhere to the [Memory model for atomic accesses]. In particular, it is not
2743 /// allowed to mix conflicting atomic and non-atomic accesses, or atomic accesses of different
2744 /// sizes, without synchronization.
2745 ///
2746 /// [valid]: crate::ptr#safety
2747 /// [Memory model for atomic accesses]: self#memory-model-for-atomic-accesses
2748 #[inline]
2749 #[stable(feature = "atomic_from_ptr", since = "1.75.0")]
2750 #[rustc_const_stable(feature = "const_atomic_from_ptr", since = "1.84.0")]
2751 pub const unsafe fn from_ptr<'a>(ptr: *mut $int_type) -> &'a $atomic_type {
2752 // SAFETY: guaranteed by the caller
2753 unsafe { &*ptr.cast() }
2754 }
2755
2757 /// Returns a mutable reference to the underlying integer.
2758 ///
2759 /// This is safe because the mutable reference guarantees that no other threads are
2760 /// concurrently accessing the atomic data.
2761 ///
2762 /// # Examples
2763 ///
2764 /// ```
2765 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2766 ///
2767 #[doc = concat!("let mut some_var = ", stringify!($atomic_type), "::new(10);")]
2768 /// assert_eq!(*some_var.get_mut(), 10);
2769 /// *some_var.get_mut() = 5;
2770 /// assert_eq!(some_var.load(Ordering::SeqCst), 5);
2771 /// ```
2772 #[inline]
2773 #[$stable_access]
2774 pub fn get_mut(&mut self) -> &mut $int_type {
2775 self.v.get_mut()
2776 }
2777
2778 #[doc = concat!("Gets atomic access to a `&mut ", stringify!($int_type), "`.")]
2779 ///
2780 #[doc = if_8_bit! {
2781 $int_type,
2782 no = [
2783 "**Note:** This function is only available on targets where `",
2784 stringify!($atomic_type), "` has the same alignment as `", stringify!($int_type), "`."
2785 ],
2786 }]
2787 ///
2788 /// # Examples
2789 ///
2790 /// ```
2791 /// #![feature(atomic_from_mut)]
2792 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2793 ///
2794 /// let mut some_int = 123;
2795 #[doc = concat!("let a = ", stringify!($atomic_type), "::from_mut(&mut some_int);")]
2796 /// a.store(100, Ordering::Relaxed);
2797 /// assert_eq!(some_int, 100);
2798 /// ```
2799 ///
2800 #[inline]
2801 #[$cfg_align]
2802 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2803 pub fn from_mut(v: &mut $int_type) -> &mut Self {
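 // Compile-time assert that the alignments match: the array length
 // `align_of::<Self>() - align_of::<$int_type>()` only lets the empty
 // pattern `let [] = ...` type-check (and the subtraction not overflow)
 // when the two alignments are equal.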
2804 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
2805 // SAFETY:
2806 // - the mutable reference guarantees unique ownership.
2807 // - the alignment of `$int_type` and `Self` is the
2808 // same, as promised by $cfg_align and verified above.
2809 unsafe { &mut *(v as *mut $int_type as *mut Self) }
2810 }
2811
2812 #[doc = concat!("Gets non-atomic access to a `&mut [", stringify!($atomic_type), "]` slice.")]
2813 ///
2814 /// This is safe because the mutable reference guarantees that no other threads are
2815 /// concurrently accessing the atomic data.
2816 ///
2817 /// # Examples
2818 ///
2819 /// ```ignore-wasm
2820 /// #![feature(atomic_from_mut)]
2821 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2822 ///
2823 #[doc = concat!("let mut some_ints = [const { ", stringify!($atomic_type), "::new(0) }; 10];")]
2824 ///
2825 #[doc = concat!("let view: &mut [", stringify!($int_type), "] = ", stringify!($atomic_type), "::get_mut_slice(&mut some_ints);")]
2826 /// assert_eq!(view, [0; 10]);
2827 /// view
2828 /// .iter_mut()
2829 /// .enumerate()
2830 /// .for_each(|(idx, int)| *int = idx as _);
2831 ///
2832 /// std::thread::scope(|s| {
2833 /// some_ints
2834 /// .iter()
2835 /// .enumerate()
2836 /// .for_each(|(idx, int)| {
2837 /// s.spawn(move || assert_eq!(int.load(Ordering::Relaxed), idx as _));
2838 /// })
2839 /// });
2840 /// ```
2841 #[inline]
2842 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2843 pub fn get_mut_slice(this: &mut [Self]) -> &mut [$int_type] {
2844 // SAFETY: the mutable reference guarantees unique ownership.
2845 unsafe { &mut *(this as *mut [Self] as *mut [$int_type]) }
2846 }
2847
2848 #[doc = concat!("Gets atomic access to a `&mut [", stringify!($int_type), "]` slice.")]
2849 ///
2850 /// # Examples
2851 ///
2852 /// ```ignore-wasm
2853 /// #![feature(atomic_from_mut)]
2854 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2855 ///
2856 /// let mut some_ints = [0; 10];
2857 #[doc = concat!("let a = &*", stringify!($atomic_type), "::from_mut_slice(&mut some_ints);")]
2858 /// std::thread::scope(|s| {
2859 /// for i in 0..a.len() {
2860 /// s.spawn(move || a[i].store(i as _, Ordering::Relaxed));
2861 /// }
2862 /// });
2863 /// for (i, n) in some_ints.into_iter().enumerate() {
2864 /// assert_eq!(i, n as usize);
2865 /// }
2866 /// ```
2867 #[inline]
2868 #[$cfg_align]
2869 #[unstable(feature = "atomic_from_mut", issue = "76314")]
2870 pub fn from_mut_slice(v: &mut [$int_type]) -> &mut [Self] {
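 // Compile-time alignment assert; see `from_mut` above for how it works.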
2871 let [] = [(); align_of::<Self>() - align_of::<$int_type>()];
2872 // SAFETY:
2873 // - the mutable reference guarantees unique ownership.
2874 // - the alignment of `$int_type` and `Self` is the
2875 // same, as promised by $cfg_align and verified above.
2876 unsafe { &mut *(v as *mut [$int_type] as *mut [Self]) }
2877 }
2878
2879 /// Consumes the atomic and returns the contained value.
2880 ///
2881 /// This is safe because passing `self` by value guarantees that no other threads are
2882 /// concurrently accessing the atomic data.
2883 ///
2884 /// # Examples
2885 ///
2886 /// ```
2887 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
2888 ///
2889 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2890 /// assert_eq!(some_var.into_inner(), 5);
2891 /// ```
2892 #[inline]
2893 #[$stable_access]
2894 #[$const_stable_into_inner]
2895 pub const fn into_inner(self) -> $int_type {
2896 self.v.into_inner()
2897 }
2898
2899 /// Loads a value from the atomic integer.
2900 ///
2901 /// `load` takes an [`Ordering`] argument which describes the memory ordering of this operation.
2902 /// Possible values are [`SeqCst`], [`Acquire`] and [`Relaxed`].
2903 ///
2904 /// # Panics
2905 ///
2906 /// Panics if `order` is [`Release`] or [`AcqRel`].
2907 ///
2908 /// # Examples
2909 ///
2910 /// ```
2911 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2912 ///
2913 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2914 ///
2915 /// assert_eq!(some_var.load(Ordering::Relaxed), 5);
2916 /// ```
2917 #[inline]
2918 #[$stable]
2919 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2920 pub fn load(&self, order: Ordering) -> $int_type {
2921 // SAFETY: data races are prevented by atomic intrinsics.
2922 unsafe { atomic_load(self.v.get(), order) }
2923 }
2924
2925 /// Stores a value into the atomic integer.
2926 ///
2927 /// `store` takes an [`Ordering`] argument which describes the memory ordering of this operation.
2928 /// Possible values are [`SeqCst`], [`Release`] and [`Relaxed`].
2929 ///
2930 /// # Panics
2931 ///
2932 /// Panics if `order` is [`Acquire`] or [`AcqRel`].
2933 ///
2934 /// # Examples
2935 ///
2936 /// ```
2937 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2938 ///
2939 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2940 ///
2941 /// some_var.store(10, Ordering::Relaxed);
2942 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
2943 /// ```
2944 #[inline]
2945 #[$stable]
2946 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2947 pub fn store(&self, val: $int_type, order: Ordering) {
2948 // SAFETY: data races are prevented by atomic intrinsics.
2949 unsafe { atomic_store(self.v.get(), val, order); }
2950 }
2951
2952 /// Stores a value into the atomic integer, returning the previous value.
2953 ///
2954 /// `swap` takes an [`Ordering`] argument which describes the memory ordering
2955 /// of this operation. All ordering modes are possible. Note that using
2956 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
2957 /// using [`Release`] makes the load part [`Relaxed`].
2958 ///
2959 /// **Note**: This method is only available on platforms that support atomic operations on
2960 #[doc = concat!("[`", $s_int_type, "`].")]
2961 ///
2962 /// # Examples
2963 ///
2964 /// ```
2965 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
2966 ///
2967 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
2968 ///
2969 /// assert_eq!(some_var.swap(10, Ordering::Relaxed), 5);
2970 /// ```
2971 #[inline]
2972 #[$stable]
2973 #[$cfg_cas]
2974 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
2975 pub fn swap(&self, val: $int_type, order: Ordering) -> $int_type {
2976 // SAFETY: data races are prevented by atomic intrinsics.
2977 unsafe { atomic_swap(self.v.get(), val, order) }
2978 }
2979
2980 /// Stores a value into the atomic integer if the current value is the same as
2981 /// the `current` value.
2982 ///
2983 /// The return value is always the previous value. If it is equal to `current`, then the
2984 /// value was updated.
2985 ///
2986 /// `compare_and_swap` also takes an [`Ordering`] argument which describes the memory
2987 /// ordering of this operation. Notice that even when using [`AcqRel`], the operation
2988 /// might fail and hence just perform an `Acquire` load, but not have `Release` semantics.
2989 /// Using [`Acquire`] makes the store part of this operation [`Relaxed`] if it
2990 /// happens, and using [`Release`] makes the load part [`Relaxed`].
2991 ///
2992 /// **Note**: This method is only available on platforms that support atomic operations on
2993 #[doc = concat!("[`", $s_int_type, "`].")]
2994 ///
2995 /// # Migrating to `compare_exchange` and `compare_exchange_weak`
2996 ///
2997 /// `compare_and_swap` is equivalent to `compare_exchange` with the following mapping for
2998 /// memory orderings:
2999 ///
3000 /// Original | Success | Failure
3001 /// -------- | ------- | -------
3002 /// Relaxed | Relaxed | Relaxed
3003 /// Acquire | Acquire | Acquire
3004 /// Release | Release | Relaxed
3005 /// AcqRel | AcqRel | Acquire
3006 /// SeqCst | SeqCst | SeqCst
3007 ///
3008 /// `compare_and_swap` and `compare_exchange` also differ in their return type. You can use
3009 /// `compare_exchange(...).unwrap_or_else(|x| x)` to recover the behavior of `compare_and_swap`,
3010 /// but in most cases it is more idiomatic to check whether the return value is `Ok` or `Err`
3011 /// rather than to infer success vs failure based on the value that was read.
3012 ///
3013 /// During migration, consider whether it makes sense to use `compare_exchange_weak` instead.
3014 /// `compare_exchange_weak` is allowed to fail spuriously even when the comparison succeeds,
3015 /// which allows the compiler to generate better assembly code when the compare and swap
3016 /// is used in a loop.
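 ///
 /// As a sketch (not the only valid mapping), a direct migration using the
 /// strong variant and `SeqCst` orderings might look like this:
 ///
 /// ```
 /// use std::sync::atomic::{AtomicUsize, Ordering};
 ///
 /// let a = AtomicUsize::new(1);
 /// // Before: if a.compare_and_swap(1, 2, Ordering::SeqCst) == 1 { /* updated */ }
 /// // After:
 /// if a.compare_exchange(1, 2, Ordering::SeqCst, Ordering::SeqCst).is_ok() {
 ///     // The value was updated from 1 to 2.
 /// }
 /// assert_eq!(a.load(Ordering::SeqCst), 2);
 /// ```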
3017 ///
3018 /// # Examples
3019 ///
3020 /// ```
3021 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3022 ///
3023 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
3024 ///
3025 /// assert_eq!(some_var.compare_and_swap(5, 10, Ordering::Relaxed), 5);
3026 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3027 ///
3028 /// assert_eq!(some_var.compare_and_swap(6, 12, Ordering::Relaxed), 10);
3029 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3030 /// ```
3031 #[cfg(not(feature = "ferrocene_certified"))]
3032 #[inline]
3033 #[$stable]
3034 #[deprecated(
3035 since = "1.50.0",
3036 note = "Use `compare_exchange` or `compare_exchange_weak` instead")
3037 ]
3038 #[$cfg_cas]
3039 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3040 pub fn compare_and_swap(&self,
3041 current: $int_type,
3042 new: $int_type,
3043 order: Ordering) -> $int_type {
3044 match self.compare_exchange(current,
3045 new,
3046 order,
3047 strongest_failure_ordering(order)) {
3048 Ok(x) => x,
3049 Err(x) => x,
3050 }
3051 }
3052
3053 /// Stores a value into the atomic integer if the current value is the same as
3054 /// the `current` value.
3055 ///
3056 /// The return value is a result indicating whether the new value was written and
3057 /// containing the previous value. On success this value is guaranteed to be equal to
3058 /// `current`.
3059 ///
3060 /// `compare_exchange` takes two [`Ordering`] arguments to describe the memory
3061 /// ordering of this operation. `success` describes the required ordering for the
3062 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
3063 /// `failure` describes the required ordering for the load operation that takes place when
3064 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
3065 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
3066 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3067 ///
3068 /// **Note**: This method is only available on platforms that support atomic operations on
3069 #[doc = concat!("[`", $s_int_type, "`].")]
3070 ///
3071 /// # Examples
3072 ///
3073 /// ```
3074 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3075 ///
3076 #[doc = concat!("let some_var = ", stringify!($atomic_type), "::new(5);")]
3077 ///
3078 /// assert_eq!(some_var.compare_exchange(5, 10,
3079 /// Ordering::Acquire,
3080 /// Ordering::Relaxed),
3081 /// Ok(5));
3082 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3083 ///
3084 /// assert_eq!(some_var.compare_exchange(6, 12,
3085 /// Ordering::SeqCst,
3086 /// Ordering::Acquire),
3087 /// Err(10));
3088 /// assert_eq!(some_var.load(Ordering::Relaxed), 10);
3089 /// ```
3090 ///
3091 /// # Considerations
3092 ///
3093 /// `compare_exchange` is a [compare-and-swap operation] and thus exhibits the usual downsides
3094 /// of CAS operations. In particular, a load of the value followed by a successful
3095 /// `compare_exchange` with the previous load *does not ensure* that other threads have not
3096 /// changed the value in the interim! This is usually important when the *equality* check in
3097 /// the `compare_exchange` is being used to check the *identity* of a value, but equality
3098 /// does not necessarily imply identity. This is a particularly common case for pointers, as
3099 /// a pointer holding the same address does not imply that the same object exists at that
3100 /// address! In this case, `compare_exchange` can lead to the [ABA problem].
3101 ///
3102 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3103 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3104 #[inline]
3105 #[$stable_cxchg]
3106 #[$cfg_cas]
3107 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3108 pub fn compare_exchange(&self,
3109 current: $int_type,
3110 new: $int_type,
3111 success: Ordering,
3112 failure: Ordering) -> Result<$int_type, $int_type> {
3113 // SAFETY: data races are prevented by atomic intrinsics.
3114 unsafe { atomic_compare_exchange(self.v.get(), current, new, success, failure) }
3115 }
3116
3117 /// Stores a value into the atomic integer if the current value is the same as
3118 /// the `current` value.
3119 ///
3120 #[doc = concat!("Unlike [`", stringify!($atomic_type), "::compare_exchange`],")]
3121 /// this function is allowed to spuriously fail even
3122 /// when the comparison succeeds, which can result in more efficient code on some
3123 /// platforms. The return value is a result indicating whether the new value was
3124 /// written and containing the previous value.
3125 ///
3126 /// `compare_exchange_weak` takes two [`Ordering`] arguments to describe the memory
3127 /// ordering of this operation. `success` describes the required ordering for the
3128 /// read-modify-write operation that takes place if the comparison with `current` succeeds.
3129 /// `failure` describes the required ordering for the load operation that takes place when
3130 /// the comparison fails. Using [`Acquire`] as success ordering makes the store part
3131 /// of this operation [`Relaxed`], and using [`Release`] makes the successful load
3132 /// [`Relaxed`]. The failure ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3133 ///
3134 /// **Note**: This method is only available on platforms that support atomic operations on
3135 #[doc = concat!("[`", $s_int_type, "`].")]
3136 ///
3137 /// # Examples
3138 ///
3139 /// ```
3140 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3141 ///
3142 #[doc = concat!("let val = ", stringify!($atomic_type), "::new(4);")]
3143 ///
3144 /// let mut old = val.load(Ordering::Relaxed);
3145 /// loop {
3146 /// let new = old * 2;
3147 /// match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
3148 /// Ok(_) => break,
3149 /// Err(x) => old = x,
3150 /// }
3151 /// }
3152 /// ```
3153 ///
3154 /// # Considerations
3155 ///
3156 /// `compare_exchange_weak` is a [compare-and-swap operation] and thus exhibits the usual downsides
3157 /// of CAS operations. In particular, a load of the value followed by a successful
3158 /// `compare_exchange_weak` with the previous load *does not ensure* that other threads have not
3159 /// changed the value in the interim. This is usually important when the *equality* check in
3160 /// the `compare_exchange_weak` is being used to check the *identity* of a value, but equality
3161 /// does not necessarily imply identity. This is a particularly common case for pointers, as
3162 /// a pointer holding the same address does not imply that the same object exists at that
3163 /// address! In this case, `compare_exchange_weak` can lead to the [ABA problem].
3164 ///
3165 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3166 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3167 #[inline]
3168 #[$stable_cxchg]
3169 #[$cfg_cas]
3170 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3171 pub fn compare_exchange_weak(&self,
3172 current: $int_type,
3173 new: $int_type,
3174 success: Ordering,
3175 failure: Ordering) -> Result<$int_type, $int_type> {
3176 // SAFETY: data races are prevented by atomic intrinsics.
3177 unsafe {
3178 atomic_compare_exchange_weak(self.v.get(), current, new, success, failure)
3179 }
3180 }
3181
3182 /// Adds to the current value, returning the previous value.
3183 ///
3184 /// This operation wraps around on overflow.
3185 ///
3186 /// `fetch_add` takes an [`Ordering`] argument which describes the memory ordering
3187 /// of this operation. All ordering modes are possible. Note that using
3188 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3189 /// using [`Release`] makes the load part [`Relaxed`].
3190 ///
3191 /// **Note**: This method is only available on platforms that support atomic operations on
3192 #[doc = concat!("[`", $s_int_type, "`].")]
3193 ///
3194 /// # Examples
3195 ///
3196 /// ```
3197 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3198 ///
3199 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0);")]
3200 /// assert_eq!(foo.fetch_add(10, Ordering::SeqCst), 0);
3201 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
3202 /// ```
3203 #[inline]
3204 #[$stable]
3205 #[$cfg_cas]
3206 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3207 pub fn fetch_add(&self, val: $int_type, order: Ordering) -> $int_type {
3208 // SAFETY: data races are prevented by atomic intrinsics.
3209 unsafe { atomic_add(self.v.get(), val, order) }
3210 }
3211
3212 /// Subtracts from the current value, returning the previous value.
3213 ///
3214 /// This operation wraps around on overflow.
3215 ///
3216 /// `fetch_sub` takes an [`Ordering`] argument which describes the memory ordering
3217 /// of this operation. All ordering modes are possible. Note that using
3218 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3219 /// using [`Release`] makes the load part [`Relaxed`].
3220 ///
3221 /// **Note**: This method is only available on platforms that support atomic operations on
3222 #[doc = concat!("[`", $s_int_type, "`].")]
3223 ///
3224 /// # Examples
3225 ///
3226 /// ```
3227 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3228 ///
3229 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(20);")]
3230 /// assert_eq!(foo.fetch_sub(10, Ordering::SeqCst), 20);
3231 /// assert_eq!(foo.load(Ordering::SeqCst), 10);
3232 /// ```
3233 #[inline]
3234 #[$stable]
3235 #[$cfg_cas]
3236 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3237 pub fn fetch_sub(&self, val: $int_type, order: Ordering) -> $int_type {
3238 // SAFETY: data races are prevented by atomic intrinsics.
3239 unsafe { atomic_sub(self.v.get(), val, order) }
3240 }
3241
3242 /// Bitwise "and" with the current value.
3243 ///
3244 /// Performs a bitwise "and" operation on the current value and the argument `val`, and
3245 /// sets the new value to the result.
3246 ///
3247 /// Returns the previous value.
3248 ///
3249 /// `fetch_and` takes an [`Ordering`] argument which describes the memory ordering
3250 /// of this operation. All ordering modes are possible. Note that using
3251 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3252 /// using [`Release`] makes the load part [`Relaxed`].
3253 ///
3254 /// **Note**: This method is only available on platforms that support atomic operations on
3255 #[doc = concat!("[`", $s_int_type, "`].")]
3256 ///
3257 /// # Examples
3258 ///
3259 /// ```
3260 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3261 ///
3262 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
3263 /// assert_eq!(foo.fetch_and(0b110011, Ordering::SeqCst), 0b101101);
3264 /// assert_eq!(foo.load(Ordering::SeqCst), 0b100001);
3265 /// ```
3266 #[inline]
3267 #[$stable]
3268 #[$cfg_cas]
3269 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3270 pub fn fetch_and(&self, val: $int_type, order: Ordering) -> $int_type {
3271 // SAFETY: data races are prevented by atomic intrinsics.
3272 unsafe { atomic_and(self.v.get(), val, order) }
3273 }
3274
3275 /// Bitwise "nand" with the current value.
3276 ///
3277 /// Performs a bitwise "nand" operation on the current value and the argument `val`, and
3278 /// sets the new value to the result.
3279 ///
3280 /// Returns the previous value.
3281 ///
3282 /// `fetch_nand` takes an [`Ordering`] argument which describes the memory ordering
3283 /// of this operation. All ordering modes are possible. Note that using
3284 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3285 /// using [`Release`] makes the load part [`Relaxed`].
3286 ///
3287 /// **Note**: This method is only available on platforms that support atomic operations on
3288 #[doc = concat!("[`", $s_int_type, "`].")]
3289 ///
3290 /// # Examples
3291 ///
3292 /// ```
3293 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3294 ///
3295 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0x13);")]
3296 /// assert_eq!(foo.fetch_nand(0x31, Ordering::SeqCst), 0x13);
3297 /// assert_eq!(foo.load(Ordering::SeqCst), !(0x13 & 0x31));
3298 /// ```
3299 #[inline]
3300 #[$stable_nand]
3301 #[$cfg_cas]
3302 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3303 pub fn fetch_nand(&self, val: $int_type, order: Ordering) -> $int_type {
3304 // SAFETY: data races are prevented by atomic intrinsics.
3305 unsafe { atomic_nand(self.v.get(), val, order) }
3306 }
3307
3308 /// Bitwise "or" with the current value.
3309 ///
3310 /// Performs a bitwise "or" operation on the current value and the argument `val`, and
3311 /// sets the new value to the result.
3312 ///
3313 /// Returns the previous value.
3314 ///
3315 /// `fetch_or` takes an [`Ordering`] argument which describes the memory ordering
3316 /// of this operation. All ordering modes are possible. Note that using
3317 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3318 /// using [`Release`] makes the load part [`Relaxed`].
3319 ///
3320 /// **Note**: This method is only available on platforms that support atomic operations on
3321 #[doc = concat!("[`", $s_int_type, "`].")]
3322 ///
3323 /// # Examples
3324 ///
3325 /// ```
3326 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3327 ///
3328 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
3329 /// assert_eq!(foo.fetch_or(0b110011, Ordering::SeqCst), 0b101101);
3330 /// assert_eq!(foo.load(Ordering::SeqCst), 0b111111);
3331 /// ```
3332 #[inline]
3333 #[$stable]
3334 #[$cfg_cas]
3335 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3336 pub fn fetch_or(&self, val: $int_type, order: Ordering) -> $int_type {
3337 // SAFETY: data races are prevented by atomic intrinsics.
3338 unsafe { atomic_or(self.v.get(), val, order) }
3339 }
3340
3341 /// Bitwise "xor" with the current value.
3342 ///
3343 /// Performs a bitwise "xor" operation on the current value and the argument `val`, and
3344 /// sets the new value to the result.
3345 ///
3346 /// Returns the previous value.
3347 ///
3348 /// `fetch_xor` takes an [`Ordering`] argument which describes the memory ordering
3349 /// of this operation. All ordering modes are possible. Note that using
3350 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3351 /// using [`Release`] makes the load part [`Relaxed`].
3352 ///
3353 /// **Note**: This method is only available on platforms that support atomic operations on
3354 #[doc = concat!("[`", $s_int_type, "`].")]
3355 ///
3356 /// # Examples
3357 ///
3358 /// ```
3359 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3360 ///
3361 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(0b101101);")]
3362 /// assert_eq!(foo.fetch_xor(0b110011, Ordering::SeqCst), 0b101101);
3363 /// assert_eq!(foo.load(Ordering::SeqCst), 0b011110);
3364 /// ```
3365 #[inline]
3366 #[$stable]
3367 #[$cfg_cas]
3368 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3369 pub fn fetch_xor(&self, val: $int_type, order: Ordering) -> $int_type {
3370 // SAFETY: data races are prevented by atomic intrinsics.
3371 unsafe { atomic_xor(self.v.get(), val, order) }
3372 }
3373
3374 /// Fetches the value, and applies a function to it that returns an optional
3375 /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
3376 /// `Err(previous_value)`.
3377 ///
3378 /// Note: This may call the function multiple times if the value has been changed from other threads in
3379 /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
3380 /// only once to the stored value.
3381 ///
3382 /// `fetch_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
3383 /// The first describes the required ordering for when the operation finally succeeds while the second
3384 /// describes the required ordering for loads. These correspond to the success and failure orderings of
3385 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
3386 /// respectively.
3387 ///
3388 /// Using [`Acquire`] as success ordering makes the store part
3389 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
3390 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3391 ///
3392 /// **Note**: This method is only available on platforms that support atomic operations on
3393 #[doc = concat!("[`", $s_int_type, "`].")]
3394 ///
3395 /// # Considerations
3396 ///
3397 /// This method is not magic; it is not provided by the hardware, and does not act like a
3398 /// critical section or mutex.
3399 ///
3400 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
3401 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem]
3402 /// if this atomic integer is an index or more generally if knowledge of only the *bitwise value*
3403 /// of the atomic is not in and of itself sufficient to ensure any required preconditions.
3404 ///
3405 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3406 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3407 ///
3408 /// # Examples
3409 ///
3410 /// ```rust
3411 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3412 ///
3413 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
3414 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
3415 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
3416 /// assert_eq!(x.fetch_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
3417 /// assert_eq!(x.load(Ordering::SeqCst), 9);
3418 /// ```
3419 #[inline]
3420 #[stable(feature = "no_more_cas", since = "1.45.0")]
3421 #[$cfg_cas]
3422 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3423 pub fn fetch_update<F>(&self,
3424 set_order: Ordering,
3425 fetch_order: Ordering,
3426 mut f: F) -> Result<$int_type, $int_type>
3427 where F: FnMut($int_type) -> Option<$int_type> {
3428 let mut prev = self.load(fetch_order);
3429 while let Some(next) = f(prev) {
3430 match self.compare_exchange_weak(prev, next, set_order, fetch_order) {
3431 x @ Ok(_) => return x,
3432 Err(next_prev) => prev = next_prev
3433 }
3434 }
3435 Err(prev)
3436 }
3437
3438 /// Fetches the value, and applies a function to it that returns an optional
3439 /// new value. Returns a `Result` of `Ok(previous_value)` if the function returned `Some(_)`, else
3440 /// `Err(previous_value)`.
3441 ///
3442 #[doc = concat!("See also: [`update`](`", stringify!($atomic_type), "::update`).")]
3443 ///
3444 /// Note: This may call the function multiple times if the value has been changed from other threads in
3445 /// the meantime, as long as the function returns `Some(_)`, but the function will have been applied
3446 /// only once to the stored value.
3447 ///
3448 /// `try_update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
3449 /// The first describes the required ordering for when the operation finally succeeds while the second
3450 /// describes the required ordering for loads. These correspond to the success and failure orderings of
3451 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
3452 /// respectively.
3453 ///
3454 /// Using [`Acquire`] as success ordering makes the store part
3455 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
3456 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3457 ///
3458 /// **Note**: This method is only available on platforms that support atomic operations on
3459 #[doc = concat!("[`", $s_int_type, "`].")]
3460 ///
3461 /// # Considerations
3462 ///
3463 /// This method is not magic; it is not provided by the hardware, and does not act like a
3464 /// critical section or mutex.
3465 ///
3466 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
3467 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem]
3468 /// if this atomic integer is an index or more generally if knowledge of only the *bitwise value*
3469 /// of the atomic is not in and of itself sufficient to ensure any required preconditions.
3470 ///
3471 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3472 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3473 ///
3474 /// # Examples
3475 ///
3476 /// ```rust
3477 /// #![feature(atomic_try_update)]
3478 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3479 ///
3480 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
3481 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |_| None), Err(7));
3482 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(7));
3483 /// assert_eq!(x.try_update(Ordering::SeqCst, Ordering::SeqCst, |x| Some(x + 1)), Ok(8));
3484 /// assert_eq!(x.load(Ordering::SeqCst), 9);
3485 /// ```
3486 #[inline]
3487 #[unstable(feature = "atomic_try_update", issue = "135894")]
3488 #[$cfg_cas]
3489 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3490 pub fn try_update(
3491 &self,
3492 set_order: Ordering,
3493 fetch_order: Ordering,
3494 f: impl FnMut($int_type) -> Option<$int_type>,
3495 ) -> Result<$int_type, $int_type> {
3496 // FIXME(atomic_try_update): this is currently an unstable alias to `fetch_update`;
3497 // when stabilizing, turn `fetch_update` into a deprecated alias to `try_update`.
3498 self.fetch_update(set_order, fetch_order, f)
3499 }
3500
3501 /// Fetches the value, and applies a function to it that returns a new value.
3502 /// The new value is stored and the old value is returned.
3503 ///
3504 #[doc = concat!("See also: [`try_update`](`", stringify!($atomic_type), "::try_update`).")]
3505 ///
3506 /// Note: This may call the function multiple times if the value has been changed from other threads in
3507 /// the meantime, but the function will have been applied only once to the stored value.
3508 ///
3509 /// `update` takes two [`Ordering`] arguments to describe the memory ordering of this operation.
3510 /// The first describes the required ordering for when the operation finally succeeds while the second
3511 /// describes the required ordering for loads. These correspond to the success and failure orderings of
3512 #[doc = concat!("[`", stringify!($atomic_type), "::compare_exchange`]")]
3513 /// respectively.
3514 ///
3515 /// Using [`Acquire`] as success ordering makes the store part
3516 /// of this operation [`Relaxed`], and using [`Release`] makes the final successful load
3517 /// [`Relaxed`]. The (failed) load ordering can only be [`SeqCst`], [`Acquire`] or [`Relaxed`].
3518 ///
3519 /// **Note**: This method is only available on platforms that support atomic operations on
3520 #[doc = concat!("[`", $s_int_type, "`].")]
3521 ///
3522 /// # Considerations
3523 ///
3525 /// This method is not magic; it is not provided by the hardware, and does not act like a
3526 /// critical section or mutex.
3527 ///
3528 /// It is implemented on top of an atomic [compare-and-swap operation], and thus is subject to
3529 /// the usual drawbacks of CAS operations. In particular, be careful of the [ABA problem]
3530 /// if this atomic integer is an index or more generally if knowledge of only the *bitwise value*
3531 /// of the atomic is not in and of itself sufficient to ensure any required preconditions.
3532 ///
3533 /// [ABA Problem]: https://en.wikipedia.org/wiki/ABA_problem
3534 /// [compare-and-swap operation]: https://en.wikipedia.org/wiki/Compare-and-swap
3535 ///
3536 /// # Examples
3537 ///
3538 /// ```rust
3539 /// #![feature(atomic_try_update)]
3540 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3541 ///
3542 #[doc = concat!("let x = ", stringify!($atomic_type), "::new(7);")]
3543 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| x + 1), 7);
3544 /// assert_eq!(x.update(Ordering::SeqCst, Ordering::SeqCst, |x| x + 1), 8);
3545 /// assert_eq!(x.load(Ordering::SeqCst), 9);
3546 /// ```
3547 #[inline]
3548 #[unstable(feature = "atomic_try_update", issue = "135894")]
3549 #[$cfg_cas]
3550 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3551 pub fn update(
3552 &self,
3553 set_order: Ordering,
3554 fetch_order: Ordering,
3555 mut f: impl FnMut($int_type) -> $int_type,
3556 ) -> $int_type {
3557 let mut prev = self.load(fetch_order);
3558 loop {
3559 match self.compare_exchange_weak(prev, f(prev), set_order, fetch_order) {
3560 Ok(x) => break x,
3561 Err(next_prev) => prev = next_prev,
3562 }
3563 }
3564 }
3565
3566 /// Maximum with the current value.
3567 ///
3568 /// Finds the maximum of the current value and the argument `val`, and
3569 /// sets the new value to the result.
3570 ///
3571 /// Returns the previous value.
3572 ///
3573 /// `fetch_max` takes an [`Ordering`] argument which describes the memory ordering
3574 /// of this operation. All ordering modes are possible. Note that using
3575 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3576 /// using [`Release`] makes the load part [`Relaxed`].
3577 ///
3578 /// **Note**: This method is only available on platforms that support atomic operations on
3579 #[doc = concat!("[`", $s_int_type, "`].")]
3580 ///
3581 /// # Examples
3582 ///
3583 /// ```
3584 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3585 ///
3586 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3587 /// assert_eq!(foo.fetch_max(42, Ordering::SeqCst), 23);
3588 /// assert_eq!(foo.load(Ordering::SeqCst), 42);
3589 /// ```
3590 ///
3591 /// If you want to obtain the maximum value in one step, you can use the following:
3592 ///
3593 /// ```
3594 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3595 ///
3596 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3597 /// let bar = 42;
3598 /// let max_foo = foo.fetch_max(bar, Ordering::SeqCst).max(bar);
3599 /// assert_eq!(max_foo, 42);
3600 /// ```
3601 #[inline]
3602 #[stable(feature = "atomic_min_max", since = "1.45.0")]
3603 #[$cfg_cas]
3604 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3605 pub fn fetch_max(&self, val: $int_type, order: Ordering) -> $int_type {
3606 // SAFETY: data races are prevented by atomic intrinsics.
3607 unsafe { $max_fn(self.v.get(), val, order) }
3608 }
3609
3610 /// Minimum with the current value.
3611 ///
3612 /// Finds the minimum of the current value and the argument `val`, and
3613 /// sets the new value to the result.
3614 ///
3615 /// Returns the previous value.
3616 ///
3617 /// `fetch_min` takes an [`Ordering`] argument which describes the memory ordering
3618 /// of this operation. All ordering modes are possible. Note that using
3619 /// [`Acquire`] makes the store part of this operation [`Relaxed`], and
3620 /// using [`Release`] makes the load part [`Relaxed`].
3621 ///
3622 /// **Note**: This method is only available on platforms that support atomic operations on
3623 #[doc = concat!("[`", $s_int_type, "`].")]
3624 ///
3625 /// # Examples
3626 ///
3627 /// ```
3628 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3629 ///
3630 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3631 /// assert_eq!(foo.fetch_min(42, Ordering::Relaxed), 23);
3632 /// assert_eq!(foo.load(Ordering::Relaxed), 23);
3633 /// assert_eq!(foo.fetch_min(22, Ordering::Relaxed), 23);
3634 /// assert_eq!(foo.load(Ordering::Relaxed), 22);
3635 /// ```
3636 ///
3637 /// If you want to obtain the minimum value in one step, you can use the following:
3638 ///
3639 /// ```
3640 #[doc = concat!($extra_feature, "use std::sync::atomic::{", stringify!($atomic_type), ", Ordering};")]
3641 ///
3642 #[doc = concat!("let foo = ", stringify!($atomic_type), "::new(23);")]
3643 /// let bar = 12;
3644 /// let min_foo = foo.fetch_min(bar, Ordering::SeqCst).min(bar);
3645 /// assert_eq!(min_foo, 12);
3646 /// ```
3647 #[inline]
3648 #[stable(feature = "atomic_min_max", since = "1.45.0")]
3649 #[$cfg_cas]
3650 #[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
3651 pub fn fetch_min(&self, val: $int_type, order: Ordering) -> $int_type {
3652 // SAFETY: data races are prevented by atomic intrinsics.
3653 unsafe { $min_fn(self.v.get(), val, order) }
3654 }
3655
3656 /// Returns a mutable pointer to the underlying integer.
3657 ///
3658 /// Doing non-atomic reads and writes on the resulting integer can be a data race.
3659 /// This method is mostly useful for FFI, where the function signature may use
3660 #[doc = concat!("`*mut ", stringify!($int_type), "` instead of `&", stringify!($atomic_type), "`.")]
3661 ///
3662 /// Returning an `*mut` pointer from a shared reference to this atomic is safe because the
3663 /// atomic types work with interior mutability. All modifications of an atomic change the value
3664 /// through a shared reference, and can do so safely as long as they use atomic operations. Any
3665 /// use of the returned raw pointer requires an `unsafe` block and still has to uphold the
3666 /// requirements of the [memory model].
3667 ///
3668 /// # Examples
3669 ///
3670 /// ```ignore (extern-declaration)
3671 /// # fn main() {
3672 #[doc = concat!($extra_feature, "use std::sync::atomic::", stringify!($atomic_type), ";")]
3673 ///
3674 /// extern "C" {
3675 #[doc = concat!(" fn my_atomic_op(arg: *mut ", stringify!($int_type), ");")]
3676 /// }
3677 ///
3678 #[doc = concat!("let atomic = ", stringify!($atomic_type), "::new(1);")]
3679 ///
3680 /// // SAFETY: Safe as long as `my_atomic_op` is atomic.
3681 /// unsafe {
3682 /// my_atomic_op(atomic.as_ptr());
3683 /// }
3684 /// # }
3685 /// ```
3686 ///
3687 /// [memory model]: self#memory-model-for-atomic-accesses
3688 #[inline]
3689 #[stable(feature = "atomic_as_ptr", since = "1.70.0")]
3690 #[rustc_const_stable(feature = "atomic_as_ptr", since = "1.70.0")]
3691 #[rustc_never_returns_null_ptr]
3692 pub const fn as_ptr(&self) -> *mut $int_type {
3693 self.v.get()
3694 }
3695 }
3696 }
3697}
3698
3699#[cfg(target_has_atomic_load_store = "8")]
3700#[cfg(not(feature = "ferrocene_certified"))]
3701atomic_int! {
3702 cfg(target_has_atomic = "8"),
3703 cfg(target_has_atomic_equal_alignment = "8"),
3704 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3705 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3706 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3707 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3708 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3709 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3710 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3711 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3712 rustc_diagnostic_item = "AtomicI8",
3713 "i8",
3714 "",
3715 atomic_min, atomic_max,
3716 1,
3717 i8 AtomicI8
3718}
3719#[cfg(target_has_atomic_load_store = "8")]
3720#[cfg(not(feature = "ferrocene_certified"))]
3721atomic_int! {
3722 cfg(target_has_atomic = "8"),
3723 cfg(target_has_atomic_equal_alignment = "8"),
3724 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3725 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3726 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3727 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3728 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3729 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3730 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3731 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3732 rustc_diagnostic_item = "AtomicU8",
3733 "u8",
3734 "",
3735 atomic_umin, atomic_umax,
3736 1,
3737 u8 AtomicU8
3738}
3739#[cfg(target_has_atomic_load_store = "16")]
3740#[cfg(not(feature = "ferrocene_certified"))]
3741atomic_int! {
3742 cfg(target_has_atomic = "16"),
3743 cfg(target_has_atomic_equal_alignment = "16"),
3744 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3745 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3746 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3747 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3748 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3749 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3750 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3751 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3752 rustc_diagnostic_item = "AtomicI16",
3753 "i16",
3754 "",
3755 atomic_min, atomic_max,
3756 2,
3757 i16 AtomicI16
3758}
3759#[cfg(target_has_atomic_load_store = "16")]
3760#[cfg(not(feature = "ferrocene_certified"))]
3761atomic_int! {
3762 cfg(target_has_atomic = "16"),
3763 cfg(target_has_atomic_equal_alignment = "16"),
3764 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3765 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3766 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3767 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3768 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3769 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3770 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3771 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3772 rustc_diagnostic_item = "AtomicU16",
3773 "u16",
3774 "",
3775 atomic_umin, atomic_umax,
3776 2,
3777 u16 AtomicU16
3778}
3779#[cfg(target_has_atomic_load_store = "32")]
3780#[cfg(not(feature = "ferrocene_certified"))]
3781atomic_int! {
3782 cfg(target_has_atomic = "32"),
3783 cfg(target_has_atomic_equal_alignment = "32"),
3784 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3785 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3786 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3787 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3788 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3789 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3790 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3791 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3792 rustc_diagnostic_item = "AtomicI32",
3793 "i32",
3794 "",
3795 atomic_min, atomic_max,
3796 4,
3797 i32 AtomicI32
3798}
3799#[cfg(target_has_atomic_load_store = "32")]
3800atomic_int! {
3801 cfg(target_has_atomic = "32"),
3802 cfg(target_has_atomic_equal_alignment = "32"),
3803 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3804 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3805 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3806 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3807 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3808 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3809 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3810 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3811 rustc_diagnostic_item = "AtomicU32",
3812 "u32",
3813 "",
3814 atomic_umin, atomic_umax,
3815 4,
3816 u32 AtomicU32
3817}
3818#[cfg(target_has_atomic_load_store = "64")]
3819#[cfg(not(feature = "ferrocene_certified"))]
3820atomic_int! {
3821 cfg(target_has_atomic = "64"),
3822 cfg(target_has_atomic_equal_alignment = "64"),
3823 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3824 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3825 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3826 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3827 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3828 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3829 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3830 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3831 rustc_diagnostic_item = "AtomicI64",
3832 "i64",
3833 "",
3834 atomic_min, atomic_max,
3835 8,
3836 i64 AtomicI64
3837}
3838#[cfg(target_has_atomic_load_store = "64")]
3839#[cfg(not(feature = "ferrocene_certified"))]
3840atomic_int! {
3841 cfg(target_has_atomic = "64"),
3842 cfg(target_has_atomic_equal_alignment = "64"),
3843 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3844 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3845 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3846 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3847 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3848 stable(feature = "integer_atomics_stable", since = "1.34.0"),
3849 rustc_const_stable(feature = "const_integer_atomics", since = "1.34.0"),
3850 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3851 rustc_diagnostic_item = "AtomicU64",
3852 "u64",
3853 "",
3854 atomic_umin, atomic_umax,
3855 8,
3856 u64 AtomicU64
3857}
3858#[cfg(target_has_atomic_load_store = "128")]
3859#[cfg(not(feature = "ferrocene_certified"))]
3860atomic_int! {
3861 cfg(target_has_atomic = "128"),
3862 cfg(target_has_atomic_equal_alignment = "128"),
3863 unstable(feature = "integer_atomics", issue = "99069"),
3864 unstable(feature = "integer_atomics", issue = "99069"),
3865 unstable(feature = "integer_atomics", issue = "99069"),
3866 unstable(feature = "integer_atomics", issue = "99069"),
3867 unstable(feature = "integer_atomics", issue = "99069"),
3868 unstable(feature = "integer_atomics", issue = "99069"),
3869 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3870 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3871 rustc_diagnostic_item = "AtomicI128",
3872 "i128",
3873 "#![feature(integer_atomics)]\n\n",
3874 atomic_min, atomic_max,
3875 16,
3876 i128 AtomicI128
3877}
3878#[cfg(target_has_atomic_load_store = "128")]
3879#[cfg(not(feature = "ferrocene_certified"))]
3880atomic_int! {
3881 cfg(target_has_atomic = "128"),
3882 cfg(target_has_atomic_equal_alignment = "128"),
3883 unstable(feature = "integer_atomics", issue = "99069"),
3884 unstable(feature = "integer_atomics", issue = "99069"),
3885 unstable(feature = "integer_atomics", issue = "99069"),
3886 unstable(feature = "integer_atomics", issue = "99069"),
3887 unstable(feature = "integer_atomics", issue = "99069"),
3888 unstable(feature = "integer_atomics", issue = "99069"),
3889 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3890 rustc_const_unstable(feature = "integer_atomics", issue = "99069"),
3891 rustc_diagnostic_item = "AtomicU128",
3892 "u128",
3893 "#![feature(integer_atomics)]\n\n",
3894 atomic_umin, atomic_umax,
3895 16,
3896 u128 AtomicU128
3897}
3898
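// Defines the pointer-sized atomics (`AtomicIsize`/`AtomicUsize`, plus the
// deprecated `ATOMIC_ISIZE_INIT`/`ATOMIC_USIZE_INIT` constants), with the
// alignment matching the target pointer width.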
3899#[cfg(target_has_atomic_load_store = "ptr")]
3900#[cfg(not(feature = "ferrocene_certified"))]
3901macro_rules! atomic_int_ptr_sized {
3902 ( $($target_pointer_width:literal $align:literal)* ) => { $(
3903 #[cfg(target_pointer_width = $target_pointer_width)]
3904 atomic_int! {
3905 cfg(target_has_atomic = "ptr"),
3906 cfg(target_has_atomic_equal_alignment = "ptr"),
3907 stable(feature = "rust1", since = "1.0.0"),
3908 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
3909 stable(feature = "atomic_debug", since = "1.3.0"),
3910 stable(feature = "atomic_access", since = "1.15.0"),
3911 stable(feature = "atomic_from", since = "1.23.0"),
3912 stable(feature = "atomic_nand", since = "1.27.0"),
3913 rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
3914 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3915 rustc_diagnostic_item = "AtomicIsize",
3916 "isize",
3917 "",
3918 atomic_min, atomic_max,
3919 $align,
3920 isize AtomicIsize
3921 }
3922 #[cfg(target_pointer_width = $target_pointer_width)]
3923 atomic_int! {
3924 cfg(target_has_atomic = "ptr"),
3925 cfg(target_has_atomic_equal_alignment = "ptr"),
3926 stable(feature = "rust1", since = "1.0.0"),
3927 stable(feature = "extended_compare_and_swap", since = "1.10.0"),
3928 stable(feature = "atomic_debug", since = "1.3.0"),
3929 stable(feature = "atomic_access", since = "1.15.0"),
3930 stable(feature = "atomic_from", since = "1.23.0"),
3931 stable(feature = "atomic_nand", since = "1.27.0"),
3932 rustc_const_stable(feature = "const_ptr_sized_atomics", since = "1.24.0"),
3933 rustc_const_stable(feature = "const_atomic_into_inner", since = "1.79.0"),
3934 rustc_diagnostic_item = "AtomicUsize",
3935 "usize",
3936 "",
3937 atomic_umin, atomic_umax,
3938 $align,
3939 usize AtomicUsize
3940 }
3941
3942 /// An [`AtomicIsize`] initialized to `0`.
3943 #[cfg(target_pointer_width = $target_pointer_width)]
3944 #[stable(feature = "rust1", since = "1.0.0")]
3945 #[deprecated(
3946 since = "1.34.0",
3947 note = "the `new` function is now preferred",
3948 suggestion = "AtomicIsize::new(0)",
3949 )]
3950 pub const ATOMIC_ISIZE_INIT: AtomicIsize = AtomicIsize::new(0);
3951
3952 /// An [`AtomicUsize`] initialized to `0`.
3953 #[cfg(target_pointer_width = $target_pointer_width)]
3954 #[stable(feature = "rust1", since = "1.0.0")]
3955 #[deprecated(
3956 since = "1.34.0",
3957 note = "the `new` function is now preferred",
3958 suggestion = "AtomicUsize::new(0)",
3959 )]
3960 pub const ATOMIC_USIZE_INIT: AtomicUsize = AtomicUsize::new(0);
3961 )* };
3962}
3963
3964#[cfg(target_has_atomic_load_store = "ptr")]
3965#[cfg(not(feature = "ferrocene_certified"))]
3966atomic_int_ptr_sized! {
3967 "16" 2
3968 "32" 4
3969 "64" 8
3970}
3971
3972#[cfg(not(feature = "ferrocene_certified"))]
3973#[inline]
3974#[cfg(target_has_atomic)]
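/// Maps the single ordering of a `compare_and_swap` to the strongest failure
/// ordering that `compare_exchange` accepts for it (a failure ordering may not
/// contain `Release`, so `Release` maps to `Relaxed` and `AcqRel` to `Acquire`).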
3975fn strongest_failure_ordering(order: Ordering) -> Ordering {
3976 match order {
3977 Release => Relaxed,
3978 Relaxed => Relaxed,
3979 SeqCst => SeqCst,
3980 Acquire => Acquire,
3981 AcqRel => Acquire,
3982 }
3983}
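
// This helper derives the strongest failure ordering that is legal for a given
// success ordering (failure orderings may not be `Release` or `AcqRel`). For
// example, the deprecated single-ordering `compare_and_swap` API was expressed
// in terms of `compare_exchange` using this mapping; a sketch (illustrative
// only):
//
//     // compare_and_swap(current, new, order) behaves like
//     // compare_exchange(current, new, order, strongest_failure_ordering(order)),
//     // e.g. a success ordering of `AcqRel` pairs with a failure ordering
//     // of `Acquire`.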

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
    // SAFETY: the caller must uphold the safety contract for `atomic_store`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_store::<T, { AO::Relaxed }>(dst, val),
            Release => intrinsics::atomic_store::<T, { AO::Release }>(dst, val),
            SeqCst => intrinsics::atomic_store::<T, { AO::SeqCst }>(dst, val),
            Acquire => panic!("there is no such thing as an acquire store"),
            AcqRel => panic!("there is no such thing as an acquire-release store"),
        }
    }
}

#[inline]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_load::<T, { AO::Relaxed }>(dst),
            Acquire => intrinsics::atomic_load::<T, { AO::Acquire }>(dst),
            SeqCst => intrinsics::atomic_load::<T, { AO::SeqCst }>(dst),
            Release => panic!("there is no such thing as a release load"),
            AcqRel => panic!("there is no such thing as an acquire-release load"),
        }
    }
}
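
// These two helpers back the public `store` and `load` methods. A minimal
// release/acquire pairing sketch at the public API level (illustrative only;
// `FLAG`, `publish`, and `observe` are hypothetical names):
//
//     use core::sync::atomic::{AtomicBool, Ordering};
//
//     static FLAG: AtomicBool = AtomicBool::new(false);
//
//     fn publish() {
//         // Writer: make earlier writes visible, then set the flag.
//         FLAG.store(true, Ordering::Release);
//     }
//
//     fn observe() -> bool {
//         // Reader: an Acquire load pairs with the Release store. The inverse
//         // combinations (Acquire store, Release load) do not exist, which is
//         // why the helpers above panic on them.
//         FLAG.load(Ordering::Acquire)
//     }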

#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xchg::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xchg::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xchg::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xchg::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xchg::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Returns the previous value (like __sync_fetch_and_add).
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_add<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_add`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xadd::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xadd::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xadd::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xadd::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xadd::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Returns the previous value (like __sync_fetch_and_sub).
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_sub<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_xsub::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_xsub::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xsub::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xsub::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_xsub::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}
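
// Both arithmetic helpers return the value stored *before* the update, which
// is exactly what the public `fetch_add`/`fetch_sub` methods expose. A small
// sketch (illustrative only):
//
//     use core::sync::atomic::{AtomicUsize, Ordering};
//
//     let n = AtomicUsize::new(5);
//     assert_eq!(n.fetch_add(2, Ordering::Relaxed), 5); // old value returned
//     assert_eq!(n.fetch_sub(3, Ordering::Relaxed), 7); // 5 + 2 = 7 before the sub
//     assert_eq!(n.load(Ordering::Relaxed), 4);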

/// Publicly exposed for stdarch; nobody else should use this.
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[unstable(feature = "core_intrinsics", issue = "none")]
#[doc(hidden)]
pub unsafe fn atomic_compare_exchange<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
            }
            (Relaxed, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
            }
            (Relaxed, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
            }
            (Acquire, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
            }
            (Acquire, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
            }
            (Acquire, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
            }
            (Release, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
            }
            (Release, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
            }
            (Release, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
            }
            (AcqRel, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
            }
            (AcqRel, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
            }
            (AcqRel, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
            }
            (SeqCst, Relaxed) => {
                intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
            }
            (SeqCst, Acquire) => {
                intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
            }
            (SeqCst, SeqCst) => {
                intrinsics::atomic_cxchg::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
            }
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}
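
// The corresponding public method is `compare_exchange`. A minimal sketch of
// the success/failure contract (illustrative only):
//
//     use core::sync::atomic::{AtomicUsize, Ordering};
//
//     let v = AtomicUsize::new(5);
//     // Succeeds: the current value matches, `new` is stored, and the
//     // previous value comes back in `Ok`.
//     assert_eq!(v.compare_exchange(5, 10, Ordering::AcqRel, Ordering::Acquire), Ok(5));
//     // Fails: the current value (now 10) does not match, nothing is stored,
//     // and the actual value comes back in `Err`.
//     assert_eq!(v.compare_exchange(6, 12, Ordering::SeqCst, Ordering::SeqCst), Err(10));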

#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_compare_exchange_weak<T: Copy>(
    dst: *mut T,
    old: T,
    new: T,
    success: Ordering,
    failure: Ordering,
) -> Result<T, T> {
    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
    let (val, ok) = unsafe {
        match (success, failure) {
            (Relaxed, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Relaxed }>(dst, old, new)
            }
            (Relaxed, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::Acquire }>(dst, old, new)
            }
            (Relaxed, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Relaxed }, { AO::SeqCst }>(dst, old, new)
            }
            (Acquire, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Relaxed }>(dst, old, new)
            }
            (Acquire, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::Acquire }>(dst, old, new)
            }
            (Acquire, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Acquire }, { AO::SeqCst }>(dst, old, new)
            }
            (Release, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Relaxed }>(dst, old, new)
            }
            (Release, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::Acquire }>(dst, old, new)
            }
            (Release, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::Release }, { AO::SeqCst }>(dst, old, new)
            }
            (AcqRel, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Relaxed }>(dst, old, new)
            }
            (AcqRel, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::Acquire }>(dst, old, new)
            }
            (AcqRel, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::AcqRel }, { AO::SeqCst }>(dst, old, new)
            }
            (SeqCst, Relaxed) => {
                intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Relaxed }>(dst, old, new)
            }
            (SeqCst, Acquire) => {
                intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::Acquire }>(dst, old, new)
            }
            (SeqCst, SeqCst) => {
                intrinsics::atomic_cxchgweak::<T, { AO::SeqCst }, { AO::SeqCst }>(dst, old, new)
            }
            (_, AcqRel) => panic!("there is no such thing as an acquire-release failure ordering"),
            (_, Release) => panic!("there is no such thing as a release failure ordering"),
        }
    };
    if ok { Ok(val) } else { Err(val) }
}
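
// The weak variant may fail spuriously even when the comparison succeeds, in
// exchange for cheaper code on LL/SC architectures, so it is normally called
// in a retry loop. A sketch at the public API level (illustrative only):
//
//     use core::sync::atomic::{AtomicUsize, Ordering};
//
//     let val = AtomicUsize::new(4);
//     let mut old = val.load(Ordering::Relaxed);
//     loop {
//         let new = old * 2;
//         match val.compare_exchange_weak(old, new, Ordering::SeqCst, Ordering::Relaxed) {
//             Ok(_) => break,
//             Err(x) => old = x, // raced with another thread or failed spuriously
//         }
//     }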

#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_and<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_and`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_and::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_and::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_and::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_and::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_and::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_nand<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_nand`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_nand::<T, U, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_nand::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_nand::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_nand::<T, U, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_nand::<T, U, { AO::SeqCst }>(dst, val),
        }
    }
}

#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_or<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_or`
    unsafe {
        match order {
            SeqCst => intrinsics::atomic_or::<T, U, { AO::SeqCst }>(dst, val),
            Acquire => intrinsics::atomic_or::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_or::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_or::<T, U, { AO::AcqRel }>(dst, val),
            Relaxed => intrinsics::atomic_or::<T, U, { AO::Relaxed }>(dst, val),
        }
    }
}

#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_xor<T: Copy, U: Copy>(dst: *mut T, val: U, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_xor`
    unsafe {
        match order {
            SeqCst => intrinsics::atomic_xor::<T, U, { AO::SeqCst }>(dst, val),
            Acquire => intrinsics::atomic_xor::<T, U, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_xor::<T, U, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_xor::<T, U, { AO::AcqRel }>(dst, val),
            Relaxed => intrinsics::atomic_xor::<T, U, { AO::Relaxed }>(dst, val),
        }
    }
}
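
// The bitwise helpers back `fetch_and`, `fetch_nand`, `fetch_or`, and
// `fetch_xor`; like the arithmetic helpers they return the previous value. A
// small flag-mask sketch (illustrative only):
//
//     use core::sync::atomic::{AtomicU8, Ordering};
//
//     let flags = AtomicU8::new(0b0101);
//     assert_eq!(flags.fetch_or(0b0010, Ordering::Relaxed), 0b0101);   // set bit 1
//     assert_eq!(flags.fetch_and(!0b0001, Ordering::Relaxed), 0b0111); // clear bit 0
//     assert_eq!(flags.load(Ordering::Relaxed), 0b0110);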

/// Updates `*dst` to the max value of `val` and the old value (signed comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[cfg(not(feature = "ferrocene_certified"))]
unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_max`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_max::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_max::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_max::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_max::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_max::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the min value of `val` and the old value (signed comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[cfg(not(feature = "ferrocene_certified"))]
unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_min`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_min::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_min::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_min::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_min::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_min::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the max value of `val` and the old value (unsigned comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umax`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umax::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_umax::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_umax::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_umax::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_umax::<T, { AO::SeqCst }>(dst, val),
        }
    }
}

/// Updates `*dst` to the min value of `val` and the old value (unsigned comparison)
#[inline]
#[cfg(target_has_atomic)]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
    // SAFETY: the caller must uphold the safety contract for `atomic_umin`
    unsafe {
        match order {
            Relaxed => intrinsics::atomic_umin::<T, { AO::Relaxed }>(dst, val),
            Acquire => intrinsics::atomic_umin::<T, { AO::Acquire }>(dst, val),
            Release => intrinsics::atomic_umin::<T, { AO::Release }>(dst, val),
            AcqRel => intrinsics::atomic_umin::<T, { AO::AcqRel }>(dst, val),
            SeqCst => intrinsics::atomic_umin::<T, { AO::SeqCst }>(dst, val),
        }
    }
}
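
// Signed (`atomic_min`/`atomic_max`) and unsigned (`atomic_umin`/`atomic_umax`)
// variants exist because the comparison differs per type: the `atomic_int!`
// invocations above wire signed integers to the former and unsigned integers
// to the latter. A sketch of the resulting `fetch_max` behavior (illustrative
// only):
//
//     use core::sync::atomic::{AtomicI8, AtomicU8, Ordering};
//
//     let s = AtomicI8::new(-1);
//     assert_eq!(s.fetch_max(0, Ordering::Relaxed), -1); // signed: -1 < 0
//     assert_eq!(s.load(Ordering::Relaxed), 0);
//
//     let u = AtomicU8::new(0xFF); // same bit pattern as -1i8
//     assert_eq!(u.fetch_max(0, Ordering::Relaxed), 0xFF); // unsigned: 0xFF > 0
//     assert_eq!(u.load(Ordering::Relaxed), 0xFF);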

/// An atomic fence.
///
/// Fences create synchronization between themselves and atomic operations or fences in other
/// threads. To achieve this, a fence prevents the compiler and CPU from reordering certain types of
/// memory operations around it.
///
/// A fence 'A' with (at least) [`Release`] ordering semantics synchronizes
/// with a fence 'B' with (at least) [`Acquire`] semantics if and only if there
/// exist operations X and Y, both operating on some atomic object 'm', such
/// that A is sequenced before X, Y is sequenced before B, and Y observes
/// the change to m. This provides a happens-before dependence between A and B.
///
/// ```text
/// Thread 1                                          Thread 2
///
/// fence(Release);      A --------------
/// m.store(3, Relaxed); X ---------    |
///                                 |   |
///                                 |   |
///                                 -------------> Y  if m.load(Relaxed) == 3 {
///                                     |-------> B       fence(Acquire);
///                                                       ...
///                                                   }
/// ```
///
/// Note that in the example above, it is crucial that the accesses to `m` are atomic. Fences cannot
/// be used to establish synchronization among non-atomic accesses in different threads. However,
/// thanks to the happens-before relationship between A and B, any non-atomic accesses that
/// happen-before A are now also properly synchronized with any non-atomic accesses that
/// happen-after B.
///
/// Atomic operations with [`Release`] or [`Acquire`] semantics can also synchronize
/// with a fence.
///
/// A fence which has [`SeqCst`] ordering, in addition to having both [`Acquire`]
/// and [`Release`] semantics, participates in the global program order of the
/// other [`SeqCst`] operations and/or fences.
///
/// Accepts [`Acquire`], [`Release`], [`AcqRel`] and [`SeqCst`] orderings.
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::fence;
/// use std::sync::atomic::Ordering;
///
/// // A mutual exclusion primitive based on a spinlock.
/// pub struct Mutex {
///     flag: AtomicBool,
/// }
///
/// impl Mutex {
///     pub fn new() -> Mutex {
///         Mutex {
///             flag: AtomicBool::new(false),
///         }
///     }
///
///     pub fn lock(&self) {
///         // Wait until the old value is `false`.
///         while self
///             .flag
///             .compare_exchange_weak(false, true, Ordering::Relaxed, Ordering::Relaxed)
///             .is_err()
///         {}
///         // This fence synchronizes-with the store in `unlock`.
///         fence(Ordering::Acquire);
///     }
///
///     pub fn unlock(&self) {
///         self.flag.store(false, Ordering::Release);
///     }
/// }
/// ```
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "fence"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[cfg(not(feature = "ferrocene_certified"))]
pub fn fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_fence::<{ AO::Acquire }>(),
            Release => intrinsics::atomic_fence::<{ AO::Release }>(),
            AcqRel => intrinsics::atomic_fence::<{ AO::AcqRel }>(),
            SeqCst => intrinsics::atomic_fence::<{ AO::SeqCst }>(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}

/// A "compiler-only" atomic fence.
///
/// Like [`fence`], this function establishes synchronization with other atomic operations and
/// fences. However, unlike [`fence`], `compiler_fence` only establishes synchronization with
/// operations *in the same thread*. This may at first sound rather useless, since code within a
/// thread is typically already totally ordered and does not need any further synchronization.
/// However, there are cases where code can run on the same thread without being ordered:
/// - The most common case is that of a *signal handler*: a signal handler runs in the same thread
///   as the code it interrupted, but it is not ordered with respect to that code. `compiler_fence`
///   can be used to establish synchronization between a thread and its signal handler, the same way
///   that `fence` can be used to establish synchronization across threads.
/// - Similar situations can arise in embedded programming with interrupt handlers, or in custom
///   implementations of preemptive green threads. In general, `compiler_fence` can establish
///   synchronization with code that is guaranteed to run on the same hardware CPU.
///
/// See [`fence`] for how a fence can be used to achieve synchronization. Note that just like
/// [`fence`], synchronization still requires atomic operations to be used in both threads -- it is
/// not possible to perform synchronization entirely with fences and non-atomic operations.
///
/// `compiler_fence` does not emit any machine code, but restricts the kinds of memory re-ordering
/// the compiler is allowed to do. `compiler_fence` corresponds to [`atomic_signal_fence`] in C and
/// C++.
///
/// [`atomic_signal_fence`]: https://en.cppreference.com/w/cpp/atomic/atomic_signal_fence
///
/// # Panics
///
/// Panics if `order` is [`Relaxed`].
///
/// # Examples
///
/// Without the two `compiler_fence` calls, the read of `IMPORTANT_VARIABLE` in `signal_handler`
/// is *undefined behavior* due to a data race, despite everything happening in a single thread.
/// This is because the signal handler is considered to run concurrently with its associated
/// thread, and explicit synchronization is required to pass data between a thread and its
/// signal handler. The code below uses two `compiler_fence` calls to establish the usual
/// release-acquire synchronization pattern (see [`fence`] for an image).
///
/// ```
/// use std::sync::atomic::AtomicBool;
/// use std::sync::atomic::Ordering;
/// use std::sync::atomic::compiler_fence;
///
/// static mut IMPORTANT_VARIABLE: usize = 0;
/// static IS_READY: AtomicBool = AtomicBool::new(false);
///
/// fn main() {
///     unsafe { IMPORTANT_VARIABLE = 42 };
///     // Marks earlier writes as being released with future relaxed stores.
///     compiler_fence(Ordering::Release);
///     IS_READY.store(true, Ordering::Relaxed);
/// }
///
/// fn signal_handler() {
///     if IS_READY.load(Ordering::Relaxed) {
///         // Acquires writes that were released with relaxed stores that we read from.
///         compiler_fence(Ordering::Acquire);
///         assert_eq!(unsafe { IMPORTANT_VARIABLE }, 42);
///     }
/// }
/// ```
#[inline]
#[stable(feature = "compiler_fences", since = "1.21.0")]
#[rustc_diagnostic_item = "compiler_fence"]
#[cfg_attr(miri, track_caller)] // even without panics, this helps for Miri backtraces
#[cfg(not(feature = "ferrocene_certified"))]
pub fn compiler_fence(order: Ordering) {
    // SAFETY: using an atomic fence is safe.
    unsafe {
        match order {
            Acquire => intrinsics::atomic_singlethreadfence::<{ AO::Acquire }>(),
            Release => intrinsics::atomic_singlethreadfence::<{ AO::Release }>(),
            AcqRel => intrinsics::atomic_singlethreadfence::<{ AO::AcqRel }>(),
            SeqCst => intrinsics::atomic_singlethreadfence::<{ AO::SeqCst }>(),
            Relaxed => panic!("there is no such thing as a relaxed fence"),
        }
    }
}

#[cfg(target_has_atomic_load_store = "8")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl fmt::Debug for AtomicBool {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_debug", since = "1.3.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<T> fmt::Debug for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Debug::fmt(&self.load(Ordering::Relaxed), f)
    }
}

#[cfg(target_has_atomic_load_store = "ptr")]
#[stable(feature = "atomic_pointer", since = "1.24.0")]
#[cfg(not(feature = "ferrocene_certified"))]
impl<T> fmt::Pointer for AtomicPtr<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Pointer::fmt(&self.load(Ordering::Relaxed), f)
    }
}
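
// All three impls read with `Relaxed`, so an atomic can be formatted even
// while other threads update it concurrently. A tiny sketch (illustrative
// only; `format!` requires `alloc` or `std`):
//
//     use std::sync::atomic::AtomicBool;
//
//     let b = AtomicBool::new(true);
//     assert_eq!(format!("{:?}", b), "true");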

/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
///
/// This function is deprecated in favor of [`hint::spin_loop`].
///
/// [`hint::spin_loop`]: crate::hint::spin_loop
#[inline]
#[stable(feature = "spin_loop_hint", since = "1.24.0")]
#[deprecated(since = "1.51.0", note = "use hint::spin_loop instead")]
#[cfg(not(feature = "ferrocene_certified"))]
pub fn spin_loop_hint() {
    spin_loop()
}
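
// A sketch of the replacement API in a busy-wait loop (illustrative only;
// `spin_wait` is a hypothetical helper):
//
//     use core::hint;
//     use core::sync::atomic::{AtomicBool, Ordering};
//
//     fn spin_wait(ready: &AtomicBool) {
//         while !ready.load(Ordering::Acquire) {
//             hint::spin_loop(); // emits a CPU pause/yield hint where supported
//         }
//     }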